xref: /wlan-dirver/platform/cnss2/pci.c (revision 2fd60f792b64ddb3dbe6cb492429e28ff9f5242f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/completion.h>
8 #include <linux/io.h>
9 #include <linux/irq.h>
10 #include <linux/memblock.h>
11 #include <linux/module.h>
12 #include <linux/msi.h>
13 #include <linux/of.h>
14 #include <linux/of_gpio.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/suspend.h>
17 #include <linux/version.h>
18 #include <linux/sched.h>
19 
20 #include "main.h"
21 #include "bus.h"
22 #include "debug.h"
23 #include "pci.h"
24 #include "pci_platform.h"
25 #include "reg.h"
26 
27 #define PCI_LINK_UP			1
28 #define PCI_LINK_DOWN			0
29 
30 #define SAVE_PCI_CONFIG_SPACE		1
31 #define RESTORE_PCI_CONFIG_SPACE	0
32 
33 #define PCI_BAR_NUM			0
34 #define PCI_INVALID_READ(val)		((val) == U32_MAX)
35 
36 #define PCI_DMA_MASK_32_BIT		DMA_BIT_MASK(32)
37 #define PCI_DMA_MASK_36_BIT		DMA_BIT_MASK(36)
38 #define PCI_DMA_MASK_64_BIT		DMA_BIT_MASK(64)
39 
40 #define MHI_NODE_NAME			"qcom,mhi"
41 #define MHI_MSI_NAME			"MHI"
42 
43 #define QCA6390_PATH_PREFIX		"qca6390/"
44 #define QCA6490_PATH_PREFIX		"qca6490/"
45 #define KIWI_PATH_PREFIX		"kiwi/"
46 #define MANGO_PATH_PREFIX		"mango/"
47 #define DEFAULT_PHY_M3_FILE_NAME	"m3.bin"
48 #define DEFAULT_PHY_UCODE_FILE_NAME	"phy_ucode.elf"
49 #define PHY_UCODE_V2_FILE_NAME		"phy_ucode20.elf"
50 #define DEFAULT_FW_FILE_NAME		"amss.bin"
51 #define FW_V2_FILE_NAME			"amss20.bin"
52 #define FW_V2_FTM_FILE_NAME		"amss20_ftm.bin"
53 #define DEVICE_MAJOR_VERSION_MASK	0xF
54 
55 #define WAKE_MSI_NAME			"WAKE"
56 
57 #define DEV_RDDM_TIMEOUT		5000
58 #define WAKE_EVENT_TIMEOUT		5000
59 
60 #ifdef CONFIG_CNSS_EMULATION
61 #define EMULATION_HW			1
62 #else
63 #define EMULATION_HW			0
64 #endif
65 
66 #define RAMDUMP_SIZE_DEFAULT		0x420000
67 #define CNSS_256KB_SIZE			0x40000
68 #define DEVICE_RDDM_COOKIE		0xCAFECACE
69 
70 static DEFINE_SPINLOCK(pci_link_down_lock);
71 static DEFINE_SPINLOCK(pci_reg_window_lock);
72 static DEFINE_SPINLOCK(time_sync_lock);
73 
74 #define MHI_TIMEOUT_OVERWRITE_MS	(plat_priv->ctrl_params.mhi_timeout)
75 #define MHI_M2_TIMEOUT_MS		(plat_priv->ctrl_params.mhi_m2_timeout)
76 
77 #define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US	1000
78 #define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US	2000
79 
80 #define FORCE_WAKE_DELAY_MIN_US			4000
81 #define FORCE_WAKE_DELAY_MAX_US			6000
82 #define FORCE_WAKE_DELAY_TIMEOUT_US		60000
83 
84 #define MHI_SUSPEND_RETRY_MAX_TIMES		3
85 #define MHI_SUSPEND_RETRY_DELAY_US		5000
86 
87 #define BOOT_DEBUG_TIMEOUT_MS			7000
88 
89 #define HANG_DATA_LENGTH		384
90 #define HST_HANG_DATA_OFFSET		((3 * 1024 * 1024) - HANG_DATA_LENGTH)
91 #define HSP_HANG_DATA_OFFSET		((2 * 1024 * 1024) - HANG_DATA_LENGTH)
92 
93 #define AFC_SLOT_SIZE                   0x1000
94 #define AFC_MAX_SLOT                    2
95 #define AFC_MEM_SIZE                    (AFC_SLOT_SIZE * AFC_MAX_SLOT)
96 #define AFC_AUTH_STATUS_OFFSET          1
97 #define AFC_AUTH_SUCCESS                1
98 #define AFC_AUTH_ERROR                  0
99 
/* Static MHI channel table used by the MHI controller registration.
 * Channels are paired per service: the even-numbered entry carries
 * host->device traffic (DMA_TO_DEVICE) and the odd-numbered entry the
 * device->host direction (DMA_FROM_DEVICE).
 * ee_mask 0x4 restricts each channel to a single execution environment
 * (bit 2 of the MHI EE enum — presumably AMSS; confirm against
 * enum mhi_ee_type).
 */
static const struct mhi_channel_config cnss_mhi_channels[] = {
	/* LOOPBACK channel pair (0/1) */
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	/* DIAG channel pair (4/5) */
	{
		.num = 4,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 5,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	/* IPCR (IPC router) channel pair (20/21); inbound side auto-queues
	 * receive buffers.
	 */
	{
		.num = 20,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 21,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
/* All MHI satellite channel configs must stay at the end of this array:
 * cnss_mhi_config_no_satellite relies on trimming them off the tail.
 */
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	{
		.num = 50,
		.name = "ADSP_0",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 51,
		.name = "ADSP_1",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 70,
		.name = "ADSP_2",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 71,
		.name = "ADSP_3",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
#endif
};
245 
/* MHI event ring configuration. On kernels >= 5.12 the table is
 * non-const — presumably because the MHI core on those kernels writes
 * to the event config; confirm against the in-tree mhi_controller_config
 * definition for the target kernel.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
static struct mhi_event_config cnss_mhi_events[] = {
#else
static const struct mhi_event_config cnss_mhi_events[] = {
#endif
	/* Event ring 0: control events on IRQ vector 1 */
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,
		.priority = 0,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
	/* Event ring 1: default (data) events on IRQ vector 2 */
	{
		.num_elements = 256,
		.irq_moderation_ms = 0,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.priority = 1,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	/* Event ring 2: PCIe bandwidth-scale events (MHI misc extension) */
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_BW_SCALE,
		.priority = 2,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
#endif
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	/* Event ring 3: client-managed ring for the offloaded ADSP
	 * (satellite) channels; must stay last so the no-satellite config
	 * can trim it from the tail.
	 */
	{
		.num_elements = 256,
		.irq_moderation_ms = 0,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_DATA,
		.priority = 1,
		.hardware_event = false,
		.client_managed = true,
		.offload_channel = true,
	},
#endif
};
299 
300 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
301 #define CNSS_MHI_SATELLITE_CH_CFG_COUNT 4
302 #define CNSS_MHI_SATELLITE_EVT_COUNT 1
303 #else
304 #define CNSS_MHI_SATELLITE_CH_CFG_COUNT 0
305 #define CNSS_MHI_SATELLITE_EVT_COUNT 0
306 #endif
307 
/* Default MHI controller configuration: uses the full channel and event
 * tables above (including satellite entries when compiled in).
 * max_channels must cover the highest channel number in use (71 with
 * satellite channels, 21 without).
 */
static const struct mhi_controller_config cnss_mhi_config_default = {
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	.max_channels = 72,
#else
	.max_channels = 32,
#endif
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels),
	.ch_cfg = cnss_mhi_channels,
	.num_events = ARRAY_SIZE(cnss_mhi_events),
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};
323 
/* MHI controller configuration without satellite support: reuses the
 * same channel/event tables but subtracts the satellite entry counts.
 * This only works because the satellite entries are kept at the tail of
 * cnss_mhi_channels and cnss_mhi_events.
 */
static const struct mhi_controller_config cnss_mhi_config_no_satellite = {
	.max_channels = 32,
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels) -
			CNSS_MHI_SATELLITE_CH_CFG_COUNT,
	.ch_cfg = cnss_mhi_channels,
	.num_events = ARRAY_SIZE(cnss_mhi_events) -
			CNSS_MHI_SATELLITE_EVT_COUNT,
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};
337 
/* Copy Engine source ring register name/offset table; terminated by a
 * NULL-name sentinel entry.
 */
static struct cnss_pci_reg ce_src[] = {
	{ "SRC_RING_BASE_LSB", CE_SRC_RING_BASE_LSB_OFFSET },
	{ "SRC_RING_BASE_MSB", CE_SRC_RING_BASE_MSB_OFFSET },
	{ "SRC_RING_ID", CE_SRC_RING_ID_OFFSET },
	{ "SRC_RING_MISC", CE_SRC_RING_MISC_OFFSET },
	{ "SRC_CTRL", CE_SRC_CTRL_OFFSET },
	{ "SRC_R0_CE_CH_SRC_IS", CE_SRC_R0_CE_CH_SRC_IS_OFFSET },
	{ "SRC_RING_HP", CE_SRC_RING_HP_OFFSET },
	{ "SRC_RING_TP", CE_SRC_RING_TP_OFFSET },
	{ NULL },
};
349 
/* Copy Engine destination/status ring register name/offset table;
 * NULL-name sentinel terminated.
 */
static struct cnss_pci_reg ce_dst[] = {
	{ "DEST_RING_BASE_LSB", CE_DEST_RING_BASE_LSB_OFFSET },
	{ "DEST_RING_BASE_MSB", CE_DEST_RING_BASE_MSB_OFFSET },
	{ "DEST_RING_ID", CE_DEST_RING_ID_OFFSET },
	{ "DEST_RING_MISC", CE_DEST_RING_MISC_OFFSET },
	{ "DEST_CTRL", CE_DEST_CTRL_OFFSET },
	{ "CE_CH_DST_IS", CE_CH_DST_IS_OFFSET },
	{ "CE_CH_DEST_CTRL2", CE_CH_DEST_CTRL2_OFFSET },
	{ "DEST_RING_HP", CE_DEST_RING_HP_OFFSET },
	{ "DEST_RING_TP", CE_DEST_RING_TP_OFFSET },
	{ "STATUS_RING_BASE_LSB", CE_STATUS_RING_BASE_LSB_OFFSET },
	{ "STATUS_RING_BASE_MSB", CE_STATUS_RING_BASE_MSB_OFFSET },
	{ "STATUS_RING_ID", CE_STATUS_RING_ID_OFFSET },
	{ "STATUS_RING_MISC", CE_STATUS_RING_MISC_OFFSET },
	{ "STATUS_RING_HP", CE_STATUS_RING_HP_OFFSET },
	{ "STATUS_RING_TP", CE_STATUS_RING_TP_OFFSET },
	{ NULL },
};
368 
/* Copy Engine common (per-block, not per-ring) register table;
 * NULL-name sentinel terminated.
 */
static struct cnss_pci_reg ce_cmn[] = {
	{ "GXI_ERR_INTS", CE_COMMON_GXI_ERR_INTS },
	{ "GXI_ERR_STATS", CE_COMMON_GXI_ERR_STATS },
	{ "GXI_WDOG_STATUS", CE_COMMON_GXI_WDOG_STATUS },
	{ "TARGET_IE_0", CE_COMMON_TARGET_IE_0 },
	{ "TARGET_IE_1", CE_COMMON_TARGET_IE_1 },
	{ NULL },
};
377 
/* QDSS (debug subsystem) CSR register table; NULL-name sentinel
 * terminated.
 */
static struct cnss_pci_reg qdss_csr[] = {
	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
	{ NULL },
};
385 
/* SoC PCIe scratch register table; NULL-name sentinel terminated. */
static struct cnss_pci_reg pci_scratch[] = {
	{ "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG },
	{ "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG },
	{ "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG },
	{ NULL },
};
392 
/* Register access sequence for WCSS debug dumps (QCA6390).
 * First field of each entry is the device bit mask — use
 * enum cnss_pci_reg_mask as reference for the value. The second field
 * appears to select the access type (1 = write the last field's value,
 * 0 = read; entries with a write carry a non-zero value) — confirm
 * against struct cnss_misc_reg. The third field is the register offset.
 */
static struct cnss_misc_reg wcss_reg_access_seq[] = {
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
	{1, 1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
	{1, 0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
};
462 
/* Register access sequence for PCIe debug dumps (QCA6390); same entry
 * layout as wcss_reg_access_seq (mask, access type, offset, value).
 */
static struct cnss_misc_reg pcie_reg_access_seq[] = {
	{1, 0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
	{1, 0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
	{1, 0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
	{1, 0, QCA6390_PCIE_PCIE_BHI_EXECENV_REG, 0},
};
510 
/* Register access sequence for WLAN always-on (WLAON) debug dumps; same
 * entry layout as wcss_reg_access_seq. Device masks 2/3 select which
 * chip families each entry applies to (see enum cnss_pci_reg_mask).
 */
static struct cnss_misc_reg wlaon_reg_access_seq[] = {
	{3, 0, WLAON_SOC_POWER_CTRL, 0},
	{3, 0, WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
	{3, 0, WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
	{3, 0, WLAON_SW_COLD_RESET, 0},
	{3, 0, WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
	{3, 0, WLAON_GDSC_DELAY_SETTING, 0},
	{3, 0, WLAON_GDSC_DELAY_SETTING2, 0},
	{3, 0, WLAON_WL_PWR_STATUS_REG, 0},
	{3, 0, WLAON_WL_AON_DBG_CFG_REG, 0},
	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP0_REG, 0},
	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP1_REG, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL0, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL2, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL3, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL4, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL7, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL10, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL11, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL12, 0},
	{2, 0, WLAON_WL_AON_APM_OVERRIDE_REG, 0},
	{2, 0, WLAON_WL_AON_CXPC_REG, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS0, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS1, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS2, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS3, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS4, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS5, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS6, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL1, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL6, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL7, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL3, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL4, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL5, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL8, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL2, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL9, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL1, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL2, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL3, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL4, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL5, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL6, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL7, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL8, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL9, 0},
	{3, 0, WLAON_WCSSAON_CONFIG_REG, 0},
	{3, 0, WLAON_WLAN_OEM_DEBUG_REG, 0},
	{3, 0, WLAON_WLAN_RAM_DUMP_REG, 0},
	{3, 0, WLAON_QDSS_WCSS_REG, 0},
	{3, 0, WLAON_QDSS_WCSS_ACK, 0},
	{3, 0, WLAON_WL_CLK_CNTL_KDF_REG, 0},
	{3, 0, WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
	{3, 0, WLAON_QFPROM_PWR_CTRL_REG, 0},
	{3, 0, WLAON_DLY_CONFIG, 0},
	{3, 0, WLAON_WLAON_Q6_IRQ_REG, 0},
	{3, 0, WLAON_PCIE_INTF_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
	{3, 0, WLAON_Q6_COOKIE_BIT, 0},
	{3, 0, WLAON_WARM_SW_ENTRY, 0},
	{3, 0, WLAON_RESET_DBG_SW_ENTRY, 0},
	{3, 0, WLAON_WL_PMUNOC_CFG_REG, 0},
	{3, 0, WLAON_RESET_CAUSE_CFG_REG, 0},
	{3, 0, WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
	{3, 0, WLAON_DEBUG, 0},
	{3, 0, WLAON_SOC_PARAMETERS, 0},
	{3, 0, WLAON_WLPM_SIGNAL, 0},
	{3, 0, WLAON_SOC_RESET_CAUSE_REG, 0},
	{3, 0, WLAON_WAKEUP_PCIE_SOC_REG, 0},
	{3, 0, WLAON_PBL_STACK_CANARY, 0},
	{3, 0, WLAON_MEM_TOT_NUM_GRP_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
	{3, 0, WLAON_MEM_CNT_SEL_REG, 0},
	{3, 0, WLAON_MEM_NO_EXTBHS_REG, 0},
	{3, 0, WLAON_MEM_DEBUG_REG, 0},
	{3, 0, WLAON_MEM_DEBUG_BUS_REG, 0},
	{3, 0, WLAON_MEM_REDUN_CFG_REG, 0},
	{3, 0, WLAON_WL_AON_SPARE2, 0},
	{3, 0, WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
	{3, 0, WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
	{3, 0, WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
	{3, 0, WLAON_WLPM_CHICKEN_BITS, 0},
	{3, 0, WLAON_PCIE_PHY_PWR_REG, 0},
	{3, 0, WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
	{3, 0, WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
	{3, 0, WLAON_POWERCTRL_PMU_REG, 0},
	{3, 0, WLAON_POWERCTRL_MEM_REG, 0},
	{3, 0, WLAON_PCIE_PWR_CTRL_REG, 0},
	{3, 0, WLAON_SOC_PWR_PROFILE_REG, 0},
	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
	{3, 0, WLAON_MEM_SVS_CFG_REG, 0},
	{3, 0, WLAON_CMN_AON_MISC_REG, 0},
	{3, 0, WLAON_INTR_STATUS, 0},
	{2, 0, WLAON_INTR_ENABLE, 0},
	{2, 0, WLAON_NOC_DBG_BUS_SEL_REG, 0},
	{2, 0, WLAON_NOC_DBG_BUS_REG, 0},
	{2, 0, WLAON_WL_CTRL_MISC_REG, 0},
	{2, 0, WLAON_DBG_STATUS0, 0},
	{2, 0, WLAON_DBG_STATUS1, 0},
	{2, 0, WLAON_TIMERSYNC_OFFSET_L, 0},
	{2, 0, WLAON_TIMERSYNC_OFFSET_H, 0},
	{2, 0, WLAON_PMU_LDO_SETTLE_REG, 0},
};
644 
/* Register access sequence for system PM debug dumps (QCA6390); the
 * SR status register is intentionally sampled repeatedly so changes
 * over time are captured in the dump.
 */
static struct cnss_misc_reg syspm_reg_access_seq[] = {
	{1, 0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
	{1, 0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
};
660 
661 static struct cnss_print_optimize print_optimize;
662 
663 #define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
664 #define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
665 #define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
666 #define SYSPM_REG_SIZE ARRAY_SIZE(syspm_reg_access_seq)
667 
668 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv);
669 
670 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
/* Thin pass-throughs to the MHI misc extension API
 * (CONFIG_MHI_BUS_MISC); the #else branch below supplies no-op stubs.
 */

/* Dump MHI controller registers to the log for debugging. */
static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
{
	mhi_debug_reg_dump(pci_priv->mhi_ctrl);
}

/* Dump the device's subsystem failure reason (SFR) via the MHI core. */
static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
{
	mhi_dump_sfr(pci_priv->mhi_ctrl);
}

/* Returns true if @cookie is found by the MHI core's RDDM cookie scan
 * (callers pass DEVICE_RDDM_COOKIE to detect a pending RDDM).
 */
static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
				      u32 cookie)
{
	return mhi_scan_rddm_cookie(pci_priv->mhi_ctrl, cookie);
}

/* Fast-path MHI suspend; @notify_clients controls whether MHI clients
 * receive suspend callbacks. Returns 0 or a negative errno.
 */
static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
				    bool notify_clients)
{
	return mhi_pm_fast_suspend(pci_priv->mhi_ctrl, notify_clients);
}

/* Fast-path MHI resume, mirroring cnss_mhi_pm_fast_suspend(). */
static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
				   bool notify_clients)
{
	return mhi_pm_fast_resume(pci_priv->mhi_ctrl, notify_clients);
}
698 
699 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
700 				       u32 timeout)
701 {
702 	return mhi_set_m2_timeout_ms(pci_priv->mhi_ctrl, timeout);
703 }
704 
/* Atomically vote the MHI device into M0, waiting up to @timeout_us;
 * @in_panic selects the panic-safe path in the MHI core. Returns 0 or
 * a negative errno from the MHI misc API.
 */
static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
					   int timeout_us, bool in_panic)
{
	return mhi_device_get_sync_atomic(pci_priv->mhi_ctrl->mhi_dev,
					  timeout_us, in_panic);
}

/* Register @cb with the MHI core to receive PCIe bandwidth-scale
 * requests from the device.
 */
static void
cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
				    int (*cb)(struct mhi_controller *mhi_ctrl,
					      struct mhi_link_info *link_info))
{
	mhi_controller_set_bw_scale_cb(pci_priv->mhi_ctrl, cb);
}

/* Force an MHI reset of the device via the MHI misc API. */
static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
{
	return mhi_force_reset(pci_priv->mhi_ctrl);
}
724 
725 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
726 				  phys_addr_t base)
727 {
728 	return mhi_controller_set_base(pci_priv->mhi_ctrl, base);
729 }
730 #else
/* Stubs used when CONFIG_MHI_BUS_MISC is disabled: void helpers become
 * no-ops, the cookie scan reports false, and operations that need the
 * MHI misc API fail with -EOPNOTSUPP.
 */
static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
{
}

static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
{
}

static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
				      u32 cookie)
{
	return false;
}

static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
				    bool notify_clients)
{
	return -EOPNOTSUPP;
}

static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
				   bool notify_clients)
{
	return -EOPNOTSUPP;
}

static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
				       u32 timeout)
{
}

static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
					   int timeout_us, bool in_panic)
{
	return -EOPNOTSUPP;
}

static void
cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
				    int (*cb)(struct mhi_controller *mhi_ctrl,
					      struct mhi_link_info *link_info))
{
}

static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}

void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
				  phys_addr_t base)
{
}
784 #endif /* CONFIG_MHI_BUS_MISC */
785 
/* Check whether the PCIe link is usable for register access.
 *
 * Returns 0 when the link is up and a config-space read returns the
 * recorded device ID; -EACCES when the driver has put the link in a
 * suspended/down state; -EIO when a link-down was indicated or the
 * device ID read back does not match (an all-1s read typically means
 * the link is dead).
 */
int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
{
	u16 device_id;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_dbg("%ps: PCIe link is in suspend state\n",
			    (void *)_RET_IP_);
		return -EACCES;
	}

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_err("%ps: PCIe link is down\n", (void *)_RET_IP_);
		return -EIO;
	}

	/* Reading the device ID from config space doubles as a liveness
	 * probe for the link.
	 */
	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
	if (device_id != pci_priv->device_id)  {
		cnss_fatal_err("%ps: PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
			       (void *)_RET_IP_, device_id,
			       pci_priv->device_id);
		return -EIO;
	}

	return 0;
}
811 
/* Select the BAR remap window that maps @offset into the windowed
 * aperture of BAR0 (see WINDOW_START in cnss_pci_reg_read/write).
 *
 * The window register write is always issued (even when the cached
 * window already matches) and then read back to confirm it took
 * effect; a mismatch with the link still up asserts, unless a panic is
 * already in progress. Callers serialize via pci_reg_window_lock
 * except in panic context.
 */
static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
	u32 window_enable = WINDOW_ENABLE_BIT | window;
	u32 val;

	writel_relaxed(window_enable, pci_priv->bar +
		       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);

	/* Only log on an actual window change to limit log noise */
	if (window != pci_priv->remap_window) {
		pci_priv->remap_window = window;
		cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
			    window_enable);
	}

	/* Read it back to make sure the write has taken effect */
	val = readl_relaxed(pci_priv->bar + QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
	if (val != window_enable) {
		cnss_pr_err("Failed to config window register to 0x%x, current value: 0x%x\n",
			    window_enable, val);
		if (!cnss_pci_check_link_status(pci_priv) &&
		    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
			CNSS_ASSERT(0);
	}
}
839 
/* Read a 32-bit device register at @offset through BAR0 into *@val.
 *
 * QCA6174, and offsets below MAX_UNWINDOWED_ADDRESS on other chips,
 * are mapped directly; larger offsets go through the remap window
 * (cnss_pci_select_window()) and the aperture at WINDOW_START.
 *
 * The link-status check is skipped in interrupt context or when IRQs
 * are disabled. Returns 0 on success or the link-status error.
 */
static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
			     u32 offset, u32 *val)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (!in_interrupt() && !irqs_disabled()) {
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret)
			return ret;
	}

	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		*val = readl_relaxed(pci_priv->bar + offset);
		return 0;
	}

	/* If in panic, assumption is kernel panic handler will hold all threads
	 * and interrupts. Further pci_reg_window_lock could be held before
	 * panic. So only lock during normal operation.
	 */
	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
		cnss_pci_select_window(pci_priv, offset);
		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
				     (offset & WINDOW_RANGE_MASK));
	} else {
		spin_lock_bh(&pci_reg_window_lock);
		cnss_pci_select_window(pci_priv, offset);
		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
				     (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&pci_reg_window_lock);
	}

	return 0;
}
876 
/* Write 32-bit @val to the device register at @offset through BAR0.
 *
 * Mirrors cnss_pci_reg_read(): direct mapping for QCA6174 or
 * unwindowed offsets, otherwise windowed access serialized by
 * pci_reg_window_lock (skipped in panic context). Returns 0 on
 * success or the link-status error.
 */
static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
			      u32 val)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (!in_interrupt() && !irqs_disabled()) {
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret)
			return ret;
	}

	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		writel_relaxed(val, pci_priv->bar + offset);
		return 0;
	}

	/* Same constraint as PCI register read in panic */
	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
		cnss_pci_select_window(pci_priv, offset);
		writel_relaxed(val, pci_priv->bar + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK));
	} else {
		spin_lock_bh(&pci_reg_window_lock);
		cnss_pci_select_window(pci_priv, offset);
		writel_relaxed(val, pci_priv->bar + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&pci_reg_window_lock);
	}

	return 0;
}
910 
/**
 * cnss_pci_force_wake_get() - Request device force wake before register access
 * @pci_priv: driver PCI bus context pointer
 *
 * Synchronously requests force wake with FORCE_WAKE_DELAY_TIMEOUT_US and
 * warns if the device does not report itself awake afterwards.  -EAGAIN
 * from the request is passed through without an error log.
 *
 * Return: 0 on success, negative errno from the force wake request.
 */
static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv)
{
	struct device *dev = &pci_priv->pci_dev->dev;
	int ret;

	ret = cnss_pci_force_wake_request_sync(dev,
					       FORCE_WAKE_DELAY_TIMEOUT_US);
	if (ret) {
		if (ret != -EAGAIN)
			cnss_pr_err("Failed to request force wake\n");
		return ret;
	}

	/* If device's M1 state-change event races here, it can be ignored,
	 * as the device is expected to immediately move from M2 to M0
	 * without entering low power state.
	 */
	if (cnss_pci_is_device_awake(dev) != true)
		cnss_pr_warn("MHI not in M0, while reg still accessible\n");

	return 0;
}
933 
934 static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
935 {
936 	struct device *dev = &pci_priv->pci_dev->dev;
937 	int ret;
938 
939 	ret = cnss_pci_force_wake_release(dev);
940 	if (ret && ret != -EAGAIN)
941 		cnss_pr_err("Failed to release force wake\n");
942 
943 	return ret;
944 }
945 
#if IS_ENABLED(CONFIG_INTERCONNECT)
/**
 * cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
 * @plat_priv: Platform private data struct
 * @bw: bandwidth
 * @save: toggle flag to save bandwidth to current_bw_vote
 *
 * Setup bandwidth votes for configured interconnect paths
 *
 * Return: 0 for success
 */
static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
				    u32 bw, bool save)
{
	int ret = 0;
	struct cnss_bus_bw_info *bus_bw_info;

	/* No interconnect paths configured for this platform */
	if (!plat_priv->icc.path_count)
		return -EOPNOTSUPP;

	/* @bw indexes the bandwidth config table; reject out-of-range levels */
	if (bw >= plat_priv->icc.bus_bw_cfg_count) {
		cnss_pr_err("Invalid bus bandwidth Type: %d", bw);
		return -EINVAL;
	}

	cnss_pr_buf("Bandwidth vote to %d, save %d\n", bw, save);

	/* Apply the level's avg/peak vote to every configured ICC path;
	 * stop at the first failure and return that error.
	 */
	list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
		ret = icc_set_bw(bus_bw_info->icc_path,
				 bus_bw_info->cfg_table[bw].avg_bw,
				 bus_bw_info->cfg_table[bw].peak_bw);
		if (ret) {
			cnss_pr_err("Could not set BW Cfg: %d, err = %d ICC Path: %s Val: %d %d\n",
				    bw, ret, bus_bw_info->icc_name,
				    bus_bw_info->cfg_table[bw].avg_bw,
				    bus_bw_info->cfg_table[bw].peak_bw);
			break;
		}
	}
	/* Remember the voted level only when all paths succeeded */
	if (ret == 0 && save)
		plat_priv->icc.current_bw_vote = bw;
	return ret;
}

/**
 * cnss_request_bus_bandwidth() - Public API to vote a bus bandwidth level
 * @dev: bus device whose platform data holds the interconnect config
 * @bandwidth: bandwidth level index, must be non-negative
 *
 * Return: 0 on success, -ENODEV/-EINVAL on bad input, or icc_set_bw() error
 */
int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);

	if (!plat_priv)
		return -ENODEV;

	if (bandwidth < 0)
		return -EINVAL;

	return cnss_setup_bus_bandwidth(plat_priv, (u32)bandwidth, true);
}
#else
/* Stubs when the interconnect framework is not enabled: voting is a no-op */
static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
				    u32 bw, bool save)
{
	return 0;
}

int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
{
	return 0;
}
#endif
EXPORT_SYMBOL(cnss_request_bus_bandwidth);
1015 
1016 int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
1017 			    u32 *val, bool raw_access)
1018 {
1019 	int ret = 0;
1020 	bool do_force_wake_put = true;
1021 
1022 	if (raw_access) {
1023 		ret = cnss_pci_reg_read(pci_priv, offset, val);
1024 		goto out;
1025 	}
1026 
1027 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1028 	if (ret)
1029 		goto out;
1030 
1031 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1032 	if (ret < 0)
1033 		goto runtime_pm_put;
1034 
1035 	ret = cnss_pci_force_wake_get(pci_priv);
1036 	if (ret)
1037 		do_force_wake_put = false;
1038 
1039 	ret = cnss_pci_reg_read(pci_priv, offset, val);
1040 	if (ret) {
1041 		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
1042 			    offset, ret);
1043 		goto force_wake_put;
1044 	}
1045 
1046 force_wake_put:
1047 	if (do_force_wake_put)
1048 		cnss_pci_force_wake_put(pci_priv);
1049 runtime_pm_put:
1050 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1051 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1052 out:
1053 	return ret;
1054 }
1055 
1056 int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
1057 			     u32 val, bool raw_access)
1058 {
1059 	int ret = 0;
1060 	bool do_force_wake_put = true;
1061 
1062 	if (raw_access) {
1063 		ret = cnss_pci_reg_write(pci_priv, offset, val);
1064 		goto out;
1065 	}
1066 
1067 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1068 	if (ret)
1069 		goto out;
1070 
1071 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1072 	if (ret < 0)
1073 		goto runtime_pm_put;
1074 
1075 	ret = cnss_pci_force_wake_get(pci_priv);
1076 	if (ret)
1077 		do_force_wake_put = false;
1078 
1079 	ret = cnss_pci_reg_write(pci_priv, offset, val);
1080 	if (ret) {
1081 		cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
1082 			    val, offset, ret);
1083 		goto force_wake_put;
1084 	}
1085 
1086 force_wake_put:
1087 	if (do_force_wake_put)
1088 		cnss_pci_force_wake_put(pci_priv);
1089 runtime_pm_put:
1090 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1091 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1092 out:
1093 	return ret;
1094 }
1095 
1096 static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
1097 {
1098 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1099 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1100 	bool link_down_or_recovery;
1101 
1102 	if (!plat_priv)
1103 		return -ENODEV;
1104 
1105 	link_down_or_recovery = pci_priv->pci_link_down_ind ||
1106 		(test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
1107 
1108 	if (save) {
1109 		if (link_down_or_recovery) {
1110 			pci_priv->saved_state = NULL;
1111 		} else {
1112 			pci_save_state(pci_dev);
1113 			pci_priv->saved_state = pci_store_saved_state(pci_dev);
1114 		}
1115 	} else {
1116 		if (link_down_or_recovery) {
1117 			pci_load_saved_state(pci_dev, pci_priv->default_state);
1118 			pci_restore_state(pci_dev);
1119 		} else if (pci_priv->saved_state) {
1120 			pci_load_and_free_saved_state(pci_dev,
1121 						      &pci_priv->saved_state);
1122 			pci_restore_state(pci_dev);
1123 		}
1124 	}
1125 
1126 	return 0;
1127 }
1128 
1129 static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
1130 {
1131 	u16 link_status;
1132 	int ret;
1133 
1134 	ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
1135 					&link_status);
1136 	if (ret)
1137 		return ret;
1138 
1139 	cnss_pr_dbg("Get PCI link status register: %u\n", link_status);
1140 
1141 	pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
1142 	pci_priv->def_link_width =
1143 		(link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
1144 	pci_priv->cur_link_speed = pci_priv->def_link_speed;
1145 
1146 	cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
1147 		    pci_priv->def_link_speed, pci_priv->def_link_width);
1148 
1149 	return 0;
1150 }
1151 
1152 static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv)
1153 {
1154 	u32 reg_offset, val;
1155 	int i;
1156 
1157 	switch (pci_priv->device_id) {
1158 	case QCA6390_DEVICE_ID:
1159 	case QCA6490_DEVICE_ID:
1160 	case KIWI_DEVICE_ID:
1161 	case MANGO_DEVICE_ID:
1162 		break;
1163 	default:
1164 		return;
1165 	}
1166 
1167 	if (in_interrupt() || irqs_disabled())
1168 		return;
1169 
1170 	if (cnss_pci_check_link_status(pci_priv))
1171 		return;
1172 
1173 	cnss_pr_dbg("Start to dump SOC Scratch registers\n");
1174 
1175 	for (i = 0; pci_scratch[i].name; i++) {
1176 		reg_offset = pci_scratch[i].offset;
1177 		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
1178 			return;
1179 		cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n",
1180 			    pci_scratch[i].name, val);
1181 	}
1182 }
1183 
1184 int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
1185 {
1186 	int ret = 0;
1187 
1188 	if (!pci_priv)
1189 		return -ENODEV;
1190 
1191 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1192 		cnss_pr_info("PCI link is already suspended\n");
1193 		goto out;
1194 	}
1195 
1196 	pci_clear_master(pci_priv->pci_dev);
1197 
1198 	ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
1199 	if (ret)
1200 		goto out;
1201 
1202 	pci_disable_device(pci_priv->pci_dev);
1203 
1204 	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
1205 		if (pci_set_power_state(pci_priv->pci_dev, PCI_D3hot))
1206 			cnss_pr_err("Failed to set D3Hot, err =  %d\n", ret);
1207 	}
1208 
1209 	/* Always do PCIe L2 suspend during power off/PCIe link recovery */
1210 	pci_priv->drv_connected_last = 0;
1211 
1212 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
1213 	if (ret)
1214 		goto out;
1215 
1216 	pci_priv->pci_link_state = PCI_LINK_DOWN;
1217 
1218 	return 0;
1219 out:
1220 	return ret;
1221 }
1222 
1223 int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
1224 {
1225 	int ret = 0;
1226 
1227 	if (!pci_priv)
1228 		return -ENODEV;
1229 
1230 	if (pci_priv->pci_link_state == PCI_LINK_UP) {
1231 		cnss_pr_info("PCI link is already resumed\n");
1232 		goto out;
1233 	}
1234 
1235 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
1236 	if (ret) {
1237 		ret = -EAGAIN;
1238 		goto out;
1239 	}
1240 
1241 	pci_priv->pci_link_state = PCI_LINK_UP;
1242 
1243 	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
1244 		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D0);
1245 		if (ret) {
1246 			cnss_pr_err("Failed to set D0, err = %d\n", ret);
1247 			goto out;
1248 		}
1249 	}
1250 
1251 	ret = pci_enable_device(pci_priv->pci_dev);
1252 	if (ret) {
1253 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
1254 		goto out;
1255 	}
1256 
1257 	ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
1258 	if (ret)
1259 		goto out;
1260 
1261 	pci_set_master(pci_priv->pci_dev);
1262 
1263 	if (pci_priv->pci_link_down_ind)
1264 		pci_priv->pci_link_down_ind = false;
1265 
1266 	return 0;
1267 out:
1268 	return ret;
1269 }
1270 
/**
 * cnss_pci_recover_link_down() - Bounce the PCIe link to recover from
 *				  link down
 * @pci_priv: driver PCI bus context pointer
 *
 * Waits for a possible WAKE assert (RDDM indication), suspends and
 * resumes the link, then arms the RDDM timer and dumps MHI/SOC scratch
 * debug state.  Only supported on QCA6390/QCA6490/KIWI/MANGO.
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported chips, or the link
 *	   resume error.
 */
int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
{
	int ret;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Always wait here to avoid missing WAKE assert for RDDM
	 * before link recovery
	 */
	msleep(WAKE_EVENT_TIMEOUT);

	/* Suspend failure is logged but recovery still attempts a resume */
	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		del_timer(&pci_priv->dev_rddm_timer);
		return ret;
	}

	/* Give the device DEV_RDDM_TIMEOUT ms to produce the RDDM dump */
	mod_timer(&pci_priv->dev_rddm_timer,
		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));

	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);

	return 0;
}
1309 
1310 static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
1311 				       enum cnss_bus_event_type type,
1312 				       void *data)
1313 {
1314 	struct cnss_bus_event bus_event;
1315 
1316 	bus_event.etype = type;
1317 	bus_event.event_data = data;
1318 	cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
1319 }
1320 
/**
 * cnss_pci_handle_linkdown() - Common PCI link down handling path
 * @pci_priv: driver PCI bus context pointer
 *
 * Marks the link-down indication under pci_link_down_lock (ignoring
 * duplicates), reports the error to MHI, notifies the registered driver
 * via a bus uevent and schedules recovery.  Panics instead when the
 * ENABLE_PCI_LINK_DOWN_PANIC quirk is set.
 */
void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	unsigned long flags;

	if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
		     &plat_priv->ctrl_params.quirks))
		panic("cnss: PCI link is down\n");

	/* Set pci_link_down_ind exactly once; concurrent callers bail out */
	spin_lock_irqsave(&pci_link_down_lock, flags);
	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
		spin_unlock_irqrestore(&pci_link_down_lock, flags);
		return;
	}
	pci_priv->pci_link_down_ind = true;
	spin_unlock_irqrestore(&pci_link_down_lock, flags);
	/* Notify MHI about link down*/
	mhi_report_error(pci_priv->mhi_ctrl);

	/* QCA6174 uses a legacy interrupt which must be masked explicitly */
	if (pci_dev->device == QCA6174_DEVICE_ID)
		disable_irq(pci_dev->irq);

	/* Notify bus related event. Now for all supported chips.
	 * Here PCIe LINK_DOWN notification taken care.
	 * uevent buffer can be extended later, to cover more bus info.
	 */
	cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL);

	cnss_fatal_err("PCI link down, schedule recovery\n");
	cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
}
1354 
1355 int cnss_pci_link_down(struct device *dev)
1356 {
1357 	struct pci_dev *pci_dev = to_pci_dev(dev);
1358 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1359 	struct cnss_plat_data *plat_priv = NULL;
1360 	int ret;
1361 
1362 	if (!pci_priv) {
1363 		cnss_pr_err("pci_priv is NULL\n");
1364 		return -EINVAL;
1365 	}
1366 
1367 	plat_priv = pci_priv->plat_priv;
1368 	if (!plat_priv) {
1369 		cnss_pr_err("plat_priv is NULL\n");
1370 		return -ENODEV;
1371 	}
1372 
1373 	if (pci_priv->pci_link_down_ind) {
1374 		cnss_pr_dbg("PCI link down recovery is already in progress\n");
1375 		return -EBUSY;
1376 	}
1377 
1378 	if (pci_priv->drv_connected_last &&
1379 	    of_property_read_bool(plat_priv->plat_dev->dev.of_node,
1380 				  "cnss-enable-self-recovery"))
1381 		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);
1382 
1383 	cnss_pr_err("PCI link down is detected by drivers\n");
1384 
1385 	ret = cnss_pci_assert_perst(pci_priv);
1386 	if (ret)
1387 		cnss_pci_handle_linkdown(pci_priv);
1388 
1389 	return ret;
1390 }
1391 EXPORT_SYMBOL(cnss_pci_link_down);
1392 
1393 int cnss_pci_get_reg_dump(struct device *dev, uint8_t *buffer, uint32_t len)
1394 {
1395 	struct pci_dev *pci_dev = to_pci_dev(dev);
1396 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1397 
1398 	if (!pci_priv) {
1399 		cnss_pr_err("pci_priv is NULL\n");
1400 		return -ENODEV;
1401 	}
1402 
1403 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1404 		cnss_pr_dbg("No PCIe reg dump since PCIe is suspended(D3)\n");
1405 		return -EACCES;
1406 	}
1407 
1408 	cnss_pr_dbg("Start to get PCIe reg dump\n");
1409 
1410 	return _cnss_pci_get_reg_dump(pci_priv, buffer, len);
1411 }
1412 EXPORT_SYMBOL(cnss_pci_get_reg_dump);
1413 
1414 int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv)
1415 {
1416 	struct cnss_plat_data *plat_priv;
1417 
1418 	if (!pci_priv) {
1419 		cnss_pr_err("pci_priv is NULL\n");
1420 		return -ENODEV;
1421 	}
1422 
1423 	plat_priv = pci_priv->plat_priv;
1424 	if (!plat_priv) {
1425 		cnss_pr_err("plat_priv is NULL\n");
1426 		return -ENODEV;
1427 	}
1428 
1429 	return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) |
1430 		pci_priv->pci_link_down_ind;
1431 }
1432 
/* Device-pointer wrapper around cnss_pcie_is_device_down() */
int cnss_pci_is_device_down(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return cnss_pcie_is_device_down(cnss_get_pci_priv(pci_dev));
}
EXPORT_SYMBOL(cnss_pci_is_device_down);
1440 
/* Acquire the remap-window lock for callers doing batched windowed
 * register access.  @dev and @flags are unused; kept for API symmetry
 * with other bus types.
 */
void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
{
	spin_lock_bh(&pci_reg_window_lock);
}
EXPORT_SYMBOL(cnss_pci_lock_reg_window);
1446 
/* Release the remap-window lock taken by cnss_pci_lock_reg_window().
 * @dev and @flags are unused; kept for API symmetry.
 */
void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
{
	spin_unlock_bh(&pci_reg_window_lock);
}
EXPORT_SYMBOL(cnss_pci_unlock_reg_window);
1452 
1453 int cnss_get_pci_slot(struct device *dev)
1454 {
1455 	struct pci_dev *pci_dev = to_pci_dev(dev);
1456 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1457 	struct cnss_plat_data *plat_priv = NULL;
1458 
1459 	if (!pci_priv) {
1460 		cnss_pr_err("pci_priv is NULL\n");
1461 		return -EINVAL;
1462 	}
1463 
1464 	plat_priv = pci_priv->plat_priv;
1465 	if (!plat_priv) {
1466 		cnss_pr_err("plat_priv is NULL\n");
1467 		return -ENODEV;
1468 	}
1469 
1470 	return plat_priv->rc_num;
1471 }
1472 EXPORT_SYMBOL(cnss_get_pci_slot);
1473 
1474 /**
1475  * cnss_pci_dump_bl_sram_mem - Dump WLAN device bootloader debug log
1476  * @pci_priv: driver PCI bus context pointer
1477  *
1478  * Dump primary and secondary bootloader debug log data. For SBL check the
1479  * log struct address and size for validity.
1480  *
1481  * Return: None
1482  */
1483 static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
1484 {
1485 	u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
1486 	u32 pbl_log_sram_start;
1487 	u32 pbl_stage, sbl_log_start, sbl_log_size;
1488 	u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
1489 	u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
1490 	u32 sbl_log_def_start = SRAM_START;
1491 	u32 sbl_log_def_end = SRAM_END;
1492 	int i;
1493 
1494 	switch (pci_priv->device_id) {
1495 	case QCA6390_DEVICE_ID:
1496 		pbl_log_sram_start = QCA6390_DEBUG_PBL_LOG_SRAM_START;
1497 		pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1498 		sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1499 		break;
1500 	case QCA6490_DEVICE_ID:
1501 		pbl_log_sram_start = QCA6490_DEBUG_PBL_LOG_SRAM_START;
1502 		pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1503 		sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1504 		break;
1505 	case KIWI_DEVICE_ID:
1506 		pbl_bootstrap_status_reg = KIWI_PBL_BOOTSTRAP_STATUS;
1507 		pbl_log_sram_start = KIWI_DEBUG_PBL_LOG_SRAM_START;
1508 		pbl_log_max_size = KIWI_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1509 		sbl_log_max_size = KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1510 		break;
1511 	case MANGO_DEVICE_ID:
1512 		pbl_bootstrap_status_reg = MANGO_PBL_BOOTSTRAP_STATUS;
1513 		pbl_log_sram_start = MANGO_DEBUG_PBL_LOG_SRAM_START;
1514 		pbl_log_max_size = MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1515 		sbl_log_max_size = MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1516 		break;
1517 	default:
1518 		return;
1519 	}
1520 
1521 	if (cnss_pci_check_link_status(pci_priv))
1522 		return;
1523 
1524 	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
1525 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
1526 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
1527 	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
1528 	cnss_pci_reg_read(pci_priv, pbl_bootstrap_status_reg,
1529 			  &pbl_bootstrap_status);
1530 	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x\n",
1531 		    pbl_stage, sbl_log_start, sbl_log_size);
1532 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
1533 		    pbl_wlan_boot_cfg, pbl_bootstrap_status);
1534 
1535 	cnss_pr_dbg("Dumping PBL log data\n");
1536 	for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
1537 		mem_addr = pbl_log_sram_start + i;
1538 		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
1539 			break;
1540 		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
1541 	}
1542 
1543 	sbl_log_size = (sbl_log_size > sbl_log_max_size ?
1544 			sbl_log_max_size : sbl_log_size);
1545 	if (sbl_log_start < sbl_log_def_start ||
1546 	    sbl_log_start > sbl_log_def_end ||
1547 	    (sbl_log_start + sbl_log_size) > sbl_log_def_end) {
1548 		cnss_pr_err("Invalid SBL log data\n");
1549 		return;
1550 	}
1551 
1552 	cnss_pr_dbg("Dumping SBL log data\n");
1553 	for (i = 0; i < sbl_log_size; i += sizeof(val)) {
1554 		mem_addr = sbl_log_start + i;
1555 		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
1556 			break;
1557 		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
1558 	}
1559 }
1560 
/**
 * cnss_pci_dump_sram() - Copy device SRAM into the preallocated host buffer
 * @pci_priv: driver PCI bus context pointer
 *
 * QCA6490 + primary (non-perf) host builds only.  Reads SRAM_DUMP_SIZE
 * bytes word-by-word starting at SRAM_START into plat_priv->sram_dump,
 * yielding the CPU every 256KB.
 */
static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;
	u32 i, mem_addr;
	u32 *dump_ptr;

	plat_priv = pci_priv->plat_priv;

	if (plat_priv->device_id != QCA6490_DEVICE_ID ||
	    cnss_get_host_build_type() != QMI_HOST_BUILD_TYPE_PRIMARY_V01)
		return;

	if (!plat_priv->sram_dump) {
		cnss_pr_err("SRAM dump memory is not allocated\n");
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	/* NOTE(review): sram_dump is printed with %lx here but indexed as a
	 * byte buffer below — confirm its declared type matches %lx.
	 */
	cnss_pr_dbg("Dumping SRAM at 0x%lx\n", plat_priv->sram_dump);

	for (i = 0; i < SRAM_DUMP_SIZE; i += sizeof(u32)) {
		mem_addr = SRAM_START + i;
		dump_ptr = (u32 *)(plat_priv->sram_dump + i);
		if (cnss_pci_reg_read(pci_priv, mem_addr, dump_ptr)) {
			cnss_pr_err("SRAM Dump failed at 0x%x\n", mem_addr);
			break;
		}
		/* Relinquish CPU after dumping 256KB chunks*/
		if (!(i % CNSS_256KB_SIZE))
			cond_resched();
	}
}
1595 
/**
 * cnss_pci_handle_mhi_poweron_timeout() - Handle MHI power-on timeout
 * @pci_priv: driver PCI bus context pointer
 *
 * If the RDDM cookie is set or the device SOL GPIO is high, arm the RDDM
 * timer and wait for the device dump.  Otherwise dump MHI registers, SOC
 * scratch registers and bootloader/SRAM logs immediately.
 *
 * Return: 0 when waiting for RDDM, -ETIMEDOUT otherwise.
 */
static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	cnss_fatal_err("MHI power up returns timeout\n");

	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE) ||
	    cnss_get_dev_sol_value(plat_priv) > 0) {
		/* Wait for RDDM if RDDM cookie is set or device SOL GPIO is
		 * high. If RDDM times out, PBL/SBL error region may have been
		 * erased so no need to dump them either.
		 */
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
		    !pci_priv->pci_link_down_ind) {
			mod_timer(&pci_priv->dev_rddm_timer,
				  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
		}
	} else {
		cnss_pr_dbg("RDDM cookie is not set and device SOL is low\n");
		cnss_mhi_debug_reg_dump(pci_priv);
		cnss_pci_soc_scratch_reg_dump(pci_priv);
		/* Dump PBL/SBL error log if RDDM cookie is not set */
		cnss_pci_dump_bl_sram_mem(pci_priv);
		cnss_pci_dump_sram(pci_priv);
		return -ETIMEDOUT;
	}

	return 0;
}
1625 
1626 static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
1627 {
1628 	switch (mhi_state) {
1629 	case CNSS_MHI_INIT:
1630 		return "INIT";
1631 	case CNSS_MHI_DEINIT:
1632 		return "DEINIT";
1633 	case CNSS_MHI_POWER_ON:
1634 		return "POWER_ON";
1635 	case CNSS_MHI_POWERING_OFF:
1636 		return "POWERING_OFF";
1637 	case CNSS_MHI_POWER_OFF:
1638 		return "POWER_OFF";
1639 	case CNSS_MHI_FORCE_POWER_OFF:
1640 		return "FORCE_POWER_OFF";
1641 	case CNSS_MHI_SUSPEND:
1642 		return "SUSPEND";
1643 	case CNSS_MHI_RESUME:
1644 		return "RESUME";
1645 	case CNSS_MHI_TRIGGER_RDDM:
1646 		return "TRIGGER_RDDM";
1647 	case CNSS_MHI_RDDM_DONE:
1648 		return "RDDM_DONE";
1649 	default:
1650 		return "UNKNOWN";
1651 	}
1652 };
1653 
/**
 * cnss_pci_check_mhi_state_bit() - Validate a requested MHI state transition
 * @pci_priv: driver PCI bus context pointer
 * @mhi_state: requested target state
 *
 * Checks the driver's MHI state bitmap for the preconditions of each
 * transition (e.g. POWER_ON requires INIT set and POWER_ON clear).
 * An invalid transition is logged and asserted (except TRIGGER_RDDM).
 *
 * Return: 0 if the transition is allowed, -EINVAL otherwise.
 */
static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
					enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		/* INIT allowed only when not already initialized */
		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_DEINIT:
	case CNSS_MHI_POWER_ON:
		/* Both require INIT done and power not yet on */
		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_FORCE_POWER_OFF:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_POWER_OFF:
	case CNSS_MHI_SUSPEND:
		/* Require power on and not already suspended */
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RESUME:
		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		/* RDDM can only be triggered once per power cycle */
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RDDM_DONE:
		/* Always allowed */
		return 0;
	default:
		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
			    cnss_mhi_state_to_str(mhi_state), mhi_state);
	}

	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state,
		    pci_priv->mhi_state);
	/* A failed RDDM trigger is tolerated; everything else asserts */
	if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
		CNSS_ASSERT(0);

	return -EINVAL;
}
1702 
1703 static int cnss_rddm_trigger_debug(struct cnss_pci_data *pci_priv)
1704 {
1705 	int read_val, ret;
1706 
1707 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
1708 		return -EOPNOTSUPP;
1709 
1710 	cnss_pr_err("Write GCC Spare with ACE55 Pattern");
1711 	cnss_pci_reg_write(pci_priv, GCC_GCC_SPARE_REG_1, 0xACE55);
1712 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
1713 	cnss_pr_err("Read back GCC Spare: 0x%x, ret: %d", read_val, ret);
1714 	ret = cnss_pci_reg_read(pci_priv, GCC_PRE_ARES_DEBUG_TIMER_VAL,
1715 				&read_val);
1716 	cnss_pr_err("Warm reset allowed check: 0x%x, ret: %d", read_val, ret);
1717 	return ret;
1718 }
1719 
1720 static int cnss_rddm_trigger_check(struct cnss_pci_data *pci_priv)
1721 {
1722 	int read_val, ret;
1723 
1724 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
1725 		return -EOPNOTSUPP;
1726 
1727 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
1728 	cnss_pr_err("Read GCC spare to check reset status: 0x%x, ret: %d",
1729 		    read_val, ret);
1730 	return ret;
1731 }
1732 
/**
 * cnss_pci_set_mhi_state_bit() - Record a completed MHI state transition
 * @pci_priv: driver PCI bus context pointer
 * @mhi_state: state that was just entered
 *
 * Updates the driver's MHI state bitmap after the transition succeeded.
 * POWER_OFF/FORCE_POWER_OFF clear all power-cycle-scoped bits at once.
 */
static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
				       enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_DEINIT:
		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWER_ON:
		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWERING_OFF:
		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWER_OFF:
	case CNSS_MHI_FORCE_POWER_OFF:
		/* Power-off ends the cycle: clear all per-cycle state bits */
		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_SUSPEND:
		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_RESUME:
		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_RDDM_DONE:
		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
		break;
	default:
		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
	}
}
1772 
/**
 * cnss_pci_set_mhi_state() - Drive an MHI state transition
 * @pci_priv: driver PCI bus context pointer
 * @mhi_state: target state
 *
 * Validates the transition against the current state bitmap, performs
 * the corresponding MHI bus operation (prepare/power up, power down,
 * suspend with retry, resume with optional DRV fast path, RDDM trigger)
 * and records the new state on success.  No-op for QCA6174 (no MHI).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
				  enum cnss_mhi_state mhi_state)
{
	int ret = 0, retry = 0;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	if (mhi_state < 0) {
		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
		return -EINVAL;
	}

	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
	if (ret)
		goto out;

	cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
		     cnss_mhi_state_to_str(mhi_state), mhi_state);

	switch (mhi_state) {
	case CNSS_MHI_INIT:
		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
		break;
	case CNSS_MHI_DEINIT:
		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
		ret = 0;
		break;
	case CNSS_MHI_POWER_ON:
		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
		/* Only set img_pre_alloc when power up succeeds */
		if (!ret && !pci_priv->mhi_ctrl->img_pre_alloc) {
			cnss_pr_dbg("Notify MHI to use already allocated images\n");
			pci_priv->mhi_ctrl->img_pre_alloc = true;
		}
#endif
		break;
	case CNSS_MHI_POWER_OFF:
		/* graceful power down (true = notify device) */
		mhi_power_down(pci_priv->mhi_ctrl, true);
		ret = 0;
		break;
	case CNSS_MHI_FORCE_POWER_OFF:
		/* ungraceful power down (false = skip device handshake) */
		mhi_power_down(pci_priv->mhi_ctrl, false);
		ret = 0;
		break;
	case CNSS_MHI_SUSPEND:
retry_mhi_suspend:
		/* Take MHI's pm_mutex around suspend; -EBUSY means a client
		 * still holds a vote, so back off and retry a bounded number
		 * of times.
		 */
		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
		if (pci_priv->drv_connected_last)
			ret = cnss_mhi_pm_fast_suspend(pci_priv, true);
		else
			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
		if (ret == -EBUSY && retry++ < MHI_SUSPEND_RETRY_MAX_TIMES) {
			cnss_pr_dbg("Retry MHI suspend #%d\n", retry);
			usleep_range(MHI_SUSPEND_RETRY_DELAY_US,
				     MHI_SUSPEND_RETRY_DELAY_US + 1000);
			goto retry_mhi_suspend;
		}
		break;
	case CNSS_MHI_RESUME:
		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
		if (pci_priv->drv_connected_last) {
			/* DRV fast resume needs L1 blocked for the duration */
			ret = cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
			if (ret) {
				mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
				break;
			}
			ret = cnss_mhi_pm_fast_resume(pci_priv, true);
			cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
		} else {
			ret = mhi_pm_resume(pci_priv->mhi_ctrl);
		}
		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		cnss_rddm_trigger_debug(pci_priv);
		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
		if (ret) {
			cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);

			/* Fall back to a host-initiated device reset */
			cnss_pr_dbg("Sending host reset req\n");
			ret = cnss_mhi_force_reset(pci_priv);
			cnss_rddm_trigger_check(pci_priv);
		}
		break;
	case CNSS_MHI_RDDM_DONE:
		break;
	default:
		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
		ret = -EINVAL;
	}

	if (ret)
		goto out;

	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);

	return 0;

out:
	cnss_pr_err("Failed to set MHI state: %s(%d), err = %d\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state, ret);
	return ret;
}
1879 
1880 static int cnss_pci_config_msi_data(struct cnss_pci_data *pci_priv)
1881 {
1882 	struct msi_desc *msi_desc;
1883 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1884 
1885 	msi_desc = irq_get_msi_desc(pci_dev->irq);
1886 	if (!msi_desc) {
1887 		cnss_pr_err("msi_desc is NULL!\n");
1888 		return -EINVAL;
1889 	}
1890 
1891 	pci_priv->msi_ep_base_data = msi_desc->msg.data;
1892 	cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);
1893 
1894 	return 0;
1895 }
1896 
/**
 * cnss_pci_start_mhi() - Initialize and power up MHI
 * @pci_priv: driver PCI bus context pointer
 *
 * Prepares MHI, temporarily scales its timeout (x6 for primary builds,
 * x3 for perf builds), runs the boot debug timer during power up, and
 * handles the -ETIMEDOUT special case by arming RDDM handling.  Skipped
 * entirely when the FBC_BYPASS quirk is set.
 *
 * Return: 0 on success, negative errno on failure.
 */
int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;
	unsigned int timeout = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return 0;

	if (MHI_TIMEOUT_OVERWRITE_MS)
		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
	cnss_mhi_set_m2_timeout_ms(pci_priv, MHI_M2_TIMEOUT_MS);

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
	if (ret)
		return ret;

	/* Save the base timeout; it is restored after power up */
	timeout = pci_priv->mhi_ctrl->timeout_ms;
	/* For non-perf builds the timeout is 10 (default) * 6 seconds */
	if (cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
		pci_priv->mhi_ctrl->timeout_ms *= 6;
	else /* For perf builds the timeout is 10 (default) * 3 seconds */
		pci_priv->mhi_ctrl->timeout_ms *= 3;

	/* Start the timer to dump MHI/PBL/SBL debug data periodically */
	mod_timer(&pci_priv->boot_debug_timer,
		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
	del_timer_sync(&pci_priv->boot_debug_timer);
	if (ret == 0)
		cnss_wlan_adsp_pc_enable(pci_priv, false);

	pci_priv->mhi_ctrl->timeout_ms = timeout;

	if (ret == -ETIMEDOUT) {
		/* This is a special case needs to be handled that if MHI
		 * power on returns -ETIMEDOUT, controller needs to take care
		 * the cleanup by calling MHI power down. Force to set the bit
		 * for driver internal MHI state to make sure it can be handled
		 * properly later.
		 */
		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		ret = cnss_pci_handle_mhi_poweron_timeout(pci_priv);
	} else if (!ret) {
		/* kernel may allocate a dummy vector before request_irq and
		 * then allocate a real vector when request_irq is called.
		 * So get msi_data here again to avoid spurious interrupt
		 * as msi_data will configured to srngs.
		 */
		if (cnss_pci_is_one_msi(pci_priv))
			ret = cnss_pci_config_msi_data(pci_priv);
	}

	return ret;
}
1959 
1960 static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
1961 {
1962 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1963 
1964 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
1965 		return;
1966 
1967 	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) {
1968 		cnss_pr_dbg("MHI is already powered off\n");
1969 		return;
1970 	}
1971 	cnss_wlan_adsp_pc_enable(pci_priv, true);
1972 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
1973 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);
1974 
1975 	if (!pci_priv->pci_link_down_ind)
1976 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
1977 	else
1978 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
1979 }
1980 
1981 static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
1982 {
1983 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1984 
1985 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
1986 		return;
1987 
1988 	if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) {
1989 		cnss_pr_dbg("MHI is already deinited\n");
1990 		return;
1991 	}
1992 
1993 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
1994 }
1995 
/* cnss_pci_set_wlaon_pwr_ctrl() - Read-modify-write WLAON power control.
 * @pci_priv: driver's per-PCI-device data
 * @set_vddd4blow: set (true) or clear the VDD4BLOW software enable bit
 * @set_shutdown: set (true) or clear the shutdown enable bit
 * @do_force_wake: take a force-wake vote around the register access
 *
 * No-op unless the platform opted in via set_wlaon_pwr_ctrl, and
 * skipped when the PCI link is down. After enabling shutdown, waits
 * briefly for the setting to take effect.
 */
static void cnss_pci_set_wlaon_pwr_ctrl(struct cnss_pci_data *pci_priv,
					bool set_vddd4blow, bool set_shutdown,
					bool do_force_wake)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	int ret;
	u32 val;

	if (!plat_priv->set_wlaon_pwr_ctrl)
		return;

	/* Register access is pointless/unsafe with the link down */
	if (pci_priv->pci_link_state == PCI_LINK_DOWN ||
	    pci_priv->pci_link_down_ind)
		return;

	if (do_force_wake)
		if (cnss_pci_force_wake_get(pci_priv))
			return;

	ret = cnss_pci_reg_read(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, &val);
	if (ret) {
		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
			    WLAON_QFPROM_PWR_CTRL_REG, ret);
		goto force_wake_put;
	}

	cnss_pr_dbg("Read register offset 0x%x, val = 0x%x\n",
		    WLAON_QFPROM_PWR_CTRL_REG, val);

	if (set_vddd4blow)
		val |= QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
	else
		val &= ~QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;

	if (set_shutdown)
		val |= QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
	else
		val &= ~QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;

	ret = cnss_pci_reg_write(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, val);
	if (ret) {
		cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
			    WLAON_QFPROM_PWR_CTRL_REG, ret);
		goto force_wake_put;
	}

	cnss_pr_dbg("Write val 0x%x to register offset 0x%x\n", val,
		    WLAON_QFPROM_PWR_CTRL_REG);

	/* Give the shutdown setting time to take effect */
	if (set_shutdown)
		usleep_range(WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US,
			     WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US);

force_wake_put:
	if (do_force_wake)
		cnss_pci_force_wake_put(pci_priv);
}
2053 
2054 static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
2055 					 u64 *time_us)
2056 {
2057 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2058 	u32 low, high;
2059 	u64 device_ticks;
2060 
2061 	if (!plat_priv->device_freq_hz) {
2062 		cnss_pr_err("Device time clock frequency is not valid\n");
2063 		return -EINVAL;
2064 	}
2065 
2066 	switch (pci_priv->device_id) {
2067 	case KIWI_DEVICE_ID:
2068 	case MANGO_DEVICE_ID:
2069 		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_LOW, &low);
2070 		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_HIGH, &high);
2071 		break;
2072 	default:
2073 		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL3, &low);
2074 		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL4, &high);
2075 		break;
2076 	}
2077 
2078 	device_ticks = (u64)high << 32 | low;
2079 	do_div(device_ticks, plat_priv->device_freq_hz / 100000);
2080 	*time_us = device_ticks * 10;
2081 
2082 	return 0;
2083 }
2084 
2085 static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv)
2086 {
2087 	switch (pci_priv->device_id) {
2088 	case KIWI_DEVICE_ID:
2089 	case MANGO_DEVICE_ID:
2090 		return;
2091 	default:
2092 		break;
2093 	}
2094 
2095 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2096 			   TIME_SYNC_ENABLE);
2097 }
2098 
2099 static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv)
2100 {
2101 	switch (pci_priv->device_id) {
2102 	case KIWI_DEVICE_ID:
2103 	case MANGO_DEVICE_ID:
2104 		return;
2105 	default:
2106 		break;
2107 	}
2108 
2109 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2110 			   TIME_SYNC_CLEAR);
2111 }
2112 
2113 
2114 static void cnss_pci_time_sync_reg_update(struct cnss_pci_data *pci_priv,
2115 					  u32 low, u32 high)
2116 {
2117 	u32 time_reg_low;
2118 	u32 time_reg_high;
2119 
2120 	switch (pci_priv->device_id) {
2121 	case KIWI_DEVICE_ID:
2122 	case MANGO_DEVICE_ID:
2123 		/* Use the next two shadow registers after host's usage */
2124 		time_reg_low = PCIE_SHADOW_REG_VALUE_0 +
2125 				(pci_priv->plat_priv->num_shadow_regs_v3 *
2126 				 SHADOW_REG_LEN_BYTES);
2127 		time_reg_high = time_reg_low + SHADOW_REG_LEN_BYTES;
2128 		break;
2129 	default:
2130 		time_reg_low = PCIE_SHADOW_REG_VALUE_34;
2131 		time_reg_high = PCIE_SHADOW_REG_VALUE_35;
2132 		break;
2133 	}
2134 
2135 	cnss_pci_reg_write(pci_priv, time_reg_low, low);
2136 	cnss_pci_reg_write(pci_priv, time_reg_high, high);
2137 
2138 	cnss_pci_reg_read(pci_priv, time_reg_low, &low);
2139 	cnss_pci_reg_read(pci_priv, time_reg_high, &high);
2140 
2141 	cnss_pr_dbg("Updated time sync regs [0x%x] = 0x%x, [0x%x] = 0x%x\n",
2142 		    time_reg_low, low, time_reg_high, high);
2143 }
2144 
/* cnss_pci_update_timestamp() - Compute and publish host/device time offset.
 *
 * Blocks L1 and takes a force-wake vote so the device registers are
 * accessible, samples host and device time under time_sync_lock, and
 * writes the (host - device) offset in microseconds to the time sync
 * shadow registers. Unwinds force-wake/L1 on every path.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &pci_priv->pci_dev->dev;
	unsigned long flags = 0;
	u64 host_time_us, device_time_us, offset;
	u32 low, high;
	int ret;

	ret = cnss_pci_prevent_l1(dev);
	if (ret)
		goto out;

	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		goto allow_l1;

	/* Sample host and device clocks as close together as possible */
	spin_lock_irqsave(&time_sync_lock, flags);
	cnss_pci_clear_time_sync_counter(pci_priv);
	cnss_pci_enable_time_sync_counter(pci_priv);
	host_time_us = cnss_get_host_timestamp(plat_priv);
	ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
	cnss_pci_clear_time_sync_counter(pci_priv);
	spin_unlock_irqrestore(&time_sync_lock, flags);
	if (ret)
		goto force_wake_put;

	/* Offset is unsigned; a device clock ahead of the host is an error */
	if (host_time_us < device_time_us) {
		cnss_pr_err("Host time (%llu us) is smaller than device time (%llu us), stop\n",
			    host_time_us, device_time_us);
		ret = -EINVAL;
		goto force_wake_put;
	}

	offset = host_time_us - device_time_us;
	cnss_pr_dbg("Host time = %llu us, device time = %llu us, offset = %llu us\n",
		    host_time_us, device_time_us, offset);

	low = offset & 0xFFFFFFFF;
	high = offset >> 32;

	cnss_pci_time_sync_reg_update(pci_priv, low, high);

force_wake_put:
	cnss_pci_force_wake_put(pci_priv);
allow_l1:
	cnss_pci_allow_l1(dev);
out:
	return ret;
}
2195 
/* Periodic worker that refreshes the host/device time offset and
 * re-arms itself every time_sync_period ms. Bails out silently when
 * time sync is disabled, the period is 0, or the device is down.
 */
static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
{
	struct cnss_pci_data *pci_priv =
		container_of(work, struct cnss_pci_data, time_sync_work.work);
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	unsigned int time_sync_period_ms =
		plat_priv->ctrl_params.time_sync_period;

	if (test_bit(DISABLE_TIME_SYNC, &plat_priv->ctrl_params.quirks)) {
		cnss_pr_dbg("Time sync is disabled\n");
		return;
	}

	if (!time_sync_period_ms) {
		cnss_pr_dbg("Skip time sync as time period is 0\n");
		return;
	}

	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
		return;

	/* Even a failed *_get_sync() takes a usage-count reference, so
	 * fall through to the put path rather than returning directly.
	 */
	if (cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS) < 0)
		goto runtime_pm_put;

	mutex_lock(&pci_priv->bus_lock);
	cnss_pci_update_timestamp(pci_priv);
	mutex_unlock(&pci_priv->bus_lock);
	/* Re-arm for the next periodic update */
	schedule_delayed_work(&pci_priv->time_sync_work,
			      msecs_to_jiffies(time_sync_period_ms));

runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
}
2230 
2231 static int cnss_pci_start_time_sync_update(struct cnss_pci_data *pci_priv)
2232 {
2233 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2234 
2235 	switch (pci_priv->device_id) {
2236 	case QCA6390_DEVICE_ID:
2237 	case QCA6490_DEVICE_ID:
2238 	case KIWI_DEVICE_ID:
2239 	case MANGO_DEVICE_ID:
2240 		break;
2241 	default:
2242 		return -EOPNOTSUPP;
2243 	}
2244 
2245 	if (!plat_priv->device_freq_hz) {
2246 		cnss_pr_dbg("Device time clock frequency is not valid, skip time sync\n");
2247 		return -EINVAL;
2248 	}
2249 
2250 	cnss_pci_time_sync_work_hdlr(&pci_priv->time_sync_work.work);
2251 
2252 	return 0;
2253 }
2254 
2255 static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
2256 {
2257 	switch (pci_priv->device_id) {
2258 	case QCA6390_DEVICE_ID:
2259 	case QCA6490_DEVICE_ID:
2260 	case KIWI_DEVICE_ID:
2261 	case MANGO_DEVICE_ID:
2262 		break;
2263 	default:
2264 		return;
2265 	}
2266 
2267 	cancel_delayed_work_sync(&pci_priv->time_sync_work);
2268 }
2269 
2270 int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
2271 				     unsigned int time_sync_period)
2272 {
2273 	struct cnss_plat_data *plat_priv;
2274 
2275 	if (!pci_priv)
2276 		return -ENODEV;
2277 
2278 	plat_priv = pci_priv->plat_priv;
2279 
2280 	cnss_pci_stop_time_sync_update(pci_priv);
2281 	plat_priv->ctrl_params.time_sync_period = time_sync_period;
2282 	cnss_pci_start_time_sync_update(pci_priv);
2283 	cnss_pr_dbg("WLAN time sync period %u ms\n",
2284 		    plat_priv->ctrl_params.time_sync_period);
2285 
2286 	return 0;
2287 }
2288 
/* cnss_pci_call_driver_probe() - Invoke the WLAN host driver's entry
 * callback appropriate to the current driver_state.
 *
 * Dispatches to reinit() (recovery of an already-probed driver),
 * probe() (initial load), or idle_restart(), completing the matching
 * completion object so waiters in the power-up path are released.
 * Starts periodic time sync on success.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_err("Reboot is in progress, skip driver probe\n");
		return -EINVAL;
	}

	/* Debug mode: treat the probe as done without calling the driver */
	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		cnss_pr_dbg("Skip driver probe\n");
		goto out;
	}

	if (!pci_priv->driver_ops) {
		cnss_pr_err("driver_ops is NULL\n");
		ret = -EINVAL;
		goto out;
	}

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
		/* Recovery of an already-probed driver: reinit, not probe */
		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
						   pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to reinit host driver, err = %d\n",
				    ret);
			goto out;
		}
		complete(&plat_priv->recovery_complete);
	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
		/* First-time load of the host driver */
		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
						  pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to probe host driver, err = %d\n",
				    ret);
			goto out;
		}
		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
		complete_all(&plat_priv->power_up_complete);
	} else if (test_bit(CNSS_DRIVER_IDLE_RESTART,
			    &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->idle_restart(pci_priv->pci_dev,
			pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to idle restart host driver, err = %d\n",
				    ret);
			/* Record the error so power-up waiters see it */
			plat_priv->power_up_error = ret;
			complete_all(&plat_priv->power_up_complete);
			goto out;
		}
		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
		complete_all(&plat_priv->power_up_complete);
	} else {
		complete(&plat_priv->power_up_complete);
	}

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		/* Recovery done; release the wakeup source held for it */
		__pm_relax(plat_priv->recovery_ws);
	}

	cnss_pci_start_time_sync_update(pci_priv);

	return 0;

out:
	return ret;
}
2365 
/* cnss_pci_call_driver_remove() - Invoke the WLAN host driver's exit
 * callback appropriate to the current driver_state.
 *
 * Dispatches to shutdown() (recovery), remove() (unload), or
 * idle_shutdown(). An -EAGAIN from idle_shutdown() is propagated so
 * the caller can retry later. Stops periodic time sync first and
 * clears the get_info callback on success.
 *
 * Return: 0 on success or skip, negative errno otherwise.
 */
int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;
	int ret;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;

	/* Calibration, FW boot recovery and debug mode keep the driver */
	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
		cnss_pr_dbg("Skip driver remove\n");
		return 0;
	}

	if (!pci_priv->driver_ops) {
		cnss_pr_err("driver_ops is NULL\n");
		return -EINVAL;
	}

	cnss_pci_stop_time_sync_update(pci_priv);

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
		pci_priv->driver_ops->remove(pci_priv->pci_dev);
		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
	} else if (test_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
			    &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->idle_shutdown(pci_priv->pci_dev);
		if (ret == -EAGAIN) {
			/* Driver is busy; let the caller retry idle shutdown */
			clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
				  &plat_priv->driver_state);
			return ret;
		}
	}

	plat_priv->get_info_cb_ctx = NULL;
	plat_priv->get_info_cb = NULL;

	return 0;
}
2411 
2412 int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
2413 				      int modem_current_status)
2414 {
2415 	struct cnss_wlan_driver *driver_ops;
2416 
2417 	if (!pci_priv)
2418 		return -ENODEV;
2419 
2420 	driver_ops = pci_priv->driver_ops;
2421 	if (!driver_ops || !driver_ops->modem_status)
2422 		return -EINVAL;
2423 
2424 	driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);
2425 
2426 	return 0;
2427 }
2428 
2429 int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
2430 			   enum cnss_driver_status status)
2431 {
2432 	struct cnss_wlan_driver *driver_ops;
2433 
2434 	if (!pci_priv)
2435 		return -ENODEV;
2436 
2437 	driver_ops = pci_priv->driver_ops;
2438 	if (!driver_ops || !driver_ops->update_status)
2439 		return -EINVAL;
2440 
2441 	cnss_pr_dbg("Update driver status: %d\n", status);
2442 
2443 	driver_ops->update_status(pci_priv->pci_dev, status);
2444 
2445 	return 0;
2446 }
2447 
/* cnss_pci_misc_reg_dump() - Walk a misc register table, executing
 * its read and write entries.
 * @pci_priv: driver's per-PCI-device data
 * @misc_reg: table of register descriptors (read results stored back
 *            into each entry's ->val)
 * @misc_reg_size: number of entries in @misc_reg
 * @reg_name: label used in log messages
 *
 * Skips entries whose dev_mask doesn't include this device. Requires
 * process context with IRQs enabled and a live PCI link; takes a
 * force-wake vote, continuing without a put if the vote fails while
 * the device is already in RDDM.
 */
static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
				   struct cnss_misc_reg *misc_reg,
				   u32 misc_reg_size,
				   char *reg_name)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	bool do_force_wake_put = true;
	int i;

	if (!misc_reg)
		return;

	/* Register I/O here may sleep; bail out of atomic context */
	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	if (cnss_pci_force_wake_get(pci_priv)) {
		/* Continue to dump when device has entered RDDM already */
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
			return;
		do_force_wake_put = false;
	}

	cnss_pr_dbg("Start to dump %s registers\n", reg_name);

	for (i = 0; i < misc_reg_size; i++) {
		if (!test_bit(pci_priv->misc_reg_dev_mask,
			      &misc_reg[i].dev_mask))
			continue;

		if (misc_reg[i].wr) {
			/* Special case: the SAW2_CFG write value is derived
			 * from the previous entry's read result.
			 */
			if (misc_reg[i].offset ==
			    QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
			    i >= 1)
				misc_reg[i].val =
				QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
				misc_reg[i - 1].val;
			if (cnss_pci_reg_write(pci_priv,
					       misc_reg[i].offset,
					       misc_reg[i].val))
				goto force_wake_put;
			cnss_pr_vdbg("Write 0x%X to 0x%X\n",
				     misc_reg[i].val,
				     misc_reg[i].offset);

		} else {
			if (cnss_pci_reg_read(pci_priv,
					      misc_reg[i].offset,
					      &misc_reg[i].val))
				goto force_wake_put;
		}
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
}
2507 
2508 static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
2509 {
2510 	if (in_interrupt() || irqs_disabled())
2511 		return;
2512 
2513 	if (cnss_pci_check_link_status(pci_priv))
2514 		return;
2515 
2516 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
2517 			       WCSS_REG_SIZE, "wcss");
2518 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
2519 			       PCIE_REG_SIZE, "pcie");
2520 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
2521 			       WLAON_REG_SIZE, "wlaon");
2522 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->syspm_reg,
2523 			       SYSPM_REG_SIZE, "syspm");
2524 }
2525 
2526 static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
2527 {
2528 	int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
2529 	u32 reg_offset;
2530 	bool do_force_wake_put = true;
2531 
2532 	if (in_interrupt() || irqs_disabled())
2533 		return;
2534 
2535 	if (cnss_pci_check_link_status(pci_priv))
2536 		return;
2537 
2538 	if (!pci_priv->debug_reg) {
2539 		pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
2540 						   sizeof(*pci_priv->debug_reg)
2541 						   * array_size, GFP_KERNEL);
2542 		if (!pci_priv->debug_reg)
2543 			return;
2544 	}
2545 
2546 	if (cnss_pci_force_wake_get(pci_priv))
2547 		do_force_wake_put = false;
2548 
2549 	cnss_pr_dbg("Start to dump shadow registers\n");
2550 
2551 	for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
2552 		reg_offset = PCIE_SHADOW_REG_VALUE_0 + i * 4;
2553 		pci_priv->debug_reg[j].offset = reg_offset;
2554 		if (cnss_pci_reg_read(pci_priv, reg_offset,
2555 				      &pci_priv->debug_reg[j].val))
2556 			goto force_wake_put;
2557 	}
2558 
2559 	for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
2560 		reg_offset = PCIE_SHADOW_REG_INTER_0 + i * 4;
2561 		pci_priv->debug_reg[j].offset = reg_offset;
2562 		if (cnss_pci_reg_read(pci_priv, reg_offset,
2563 				      &pci_priv->debug_reg[j].val))
2564 			goto force_wake_put;
2565 	}
2566 
2567 force_wake_put:
2568 	if (do_force_wake_put)
2569 		cnss_pci_force_wake_put(pci_priv);
2570 }
2571 
2572 static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
2573 {
2574 	int ret = 0;
2575 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2576 
2577 	ret = cnss_power_on_device(plat_priv);
2578 	if (ret) {
2579 		cnss_pr_err("Failed to power on device, err = %d\n", ret);
2580 		goto out;
2581 	}
2582 
2583 	ret = cnss_resume_pci_link(pci_priv);
2584 	if (ret) {
2585 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
2586 		goto power_off;
2587 	}
2588 
2589 	ret = cnss_pci_call_driver_probe(pci_priv);
2590 	if (ret)
2591 		goto suspend_link;
2592 
2593 	return 0;
2594 suspend_link:
2595 	cnss_suspend_pci_link(pci_priv);
2596 power_off:
2597 	cnss_power_off_device(plat_priv);
2598 out:
2599 	return ret;
2600 }
2601 
/* cnss_qca6174_shutdown() - Tear down QCA6174: remove the host
 * driver, drop bandwidth votes, suspend the PCI link, and power the
 * device off. An -EAGAIN from driver remove aborts the shutdown so
 * it can be retried.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	/* Make sure the device is runtime-active before tearing down */
	cnss_pci_pm_runtime_resume(pci_priv);

	ret = cnss_pci_call_driver_remove(pci_priv);
	if (ret == -EAGAIN)
		goto out;

	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
				   CNSS_BUS_WIDTH_NONE);
	cnss_pci_set_monitor_wake_intr(pci_priv, false);
	cnss_pci_set_auto_suspended(pci_priv, 0);

	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	cnss_power_off_device(plat_priv);

	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);

out:
	return ret;
}
2630 
2631 static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
2632 {
2633 	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
2634 		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
2635 }
2636 
2637 static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
2638 {
2639 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2640 	struct cnss_ramdump_info *ramdump_info;
2641 
2642 	ramdump_info = &plat_priv->ramdump_info;
2643 	if (!ramdump_info->ramdump_size)
2644 		return -EINVAL;
2645 
2646 	return cnss_do_ramdump(plat_priv);
2647 }
2648 
2649 static void cnss_get_driver_mode_update_fw_name(struct cnss_plat_data *plat_priv)
2650 {
2651 	struct cnss_pci_data *pci_priv;
2652 	struct cnss_wlan_driver *driver_ops;
2653 
2654 	pci_priv = plat_priv->bus_priv;
2655 	driver_ops = pci_priv->driver_ops;
2656 
2657 	if (driver_ops && driver_ops->get_driver_mode) {
2658 		plat_priv->driver_mode = driver_ops->get_driver_mode();
2659 		cnss_pci_update_fw_name(pci_priv);
2660 		cnss_pr_dbg("New driver mode is %d", plat_priv->driver_mode);
2661 	}
2662 }
2663 
/* cnss_qca6290_powerup() - Full power-up sequence for QCA6290-family
 * devices (QCA6290/6390/6490, KIWI, MANGO).
 *
 * Clears any stale RAM dump state, powers the device, resumes the
 * PCI link (retrying with power cycles and a final BT_EN GPIO toggle
 * on -EAGAIN), then starts MHI. On MHI failure, recovery is scheduled
 * and 0 is returned so the caller doesn't double-handle it.
 *
 * Return: 0 on success or handled failure, negative errno otherwise.
 */
static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	unsigned int timeout;
	int retry = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
	int sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;

	/* A previous crash left dump data around: finish tearing down */
	if (plat_priv->ramdump_info_v2.dump_data_valid) {
		cnss_pci_clear_dump_info(pci_priv);
		cnss_pci_power_off_mhi(pci_priv);
		cnss_suspend_pci_link(pci_priv);
		cnss_pci_deinit_mhi(pci_priv);
		cnss_power_off_device(plat_priv);
	}

	/* Clear QMI send usage count during every power up */
	pci_priv->qmi_send_usage_count = 0;

	plat_priv->power_up_error = 0;

	cnss_get_driver_mode_update_fw_name(plat_priv);
retry:
	ret = cnss_power_on_device(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to power on device, err = %d\n", ret);
		goto out;
	}

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
			    cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
		if (test_bit(IGNORE_PCI_LINK_FAILURE,
			     &plat_priv->ctrl_params.quirks)) {
			cnss_pr_dbg("Ignore PCI link resume failure\n");
			ret = 0;
			goto out;
		}
		if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
			cnss_power_off_device(plat_priv);
			/* Force toggle BT_EN GPIO low */
			if (retry == POWER_ON_RETRY_MAX_TIMES) {
				cnss_pr_dbg("Retry #%d. Set BT_EN GPIO(%u) low\n",
					    retry, bt_en_gpio);
				if (bt_en_gpio >= 0)
					gpio_direction_output(bt_en_gpio, 0);
				cnss_pr_dbg("BT_EN GPIO val: %d\n",
					    gpio_get_value(bt_en_gpio));
			}
			cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
			cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
				    cnss_get_input_gpio_value(plat_priv,
							      sw_ctrl_gpio));
			/* Back off progressively longer on each retry */
			msleep(POWER_ON_RETRY_DELAY_MS * retry);
			goto retry;
		}
		/* Assert when it reaches maximum retries */
		CNSS_ASSERT(0);
		goto power_off;
	}

	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);

	ret = cnss_pci_start_mhi(pci_priv);
	if (ret) {
		cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
		    !pci_priv->pci_link_down_ind && timeout) {
			/* Start recovery directly for MHI start failures */
			cnss_schedule_recovery(&pci_priv->pci_dev->dev,
					       CNSS_REASON_DEFAULT);
		}
		/* Recovery (or a pending link-down) will handle it */
		return 0;
	}

	if (test_bit(USE_CORE_ONLY_FW, &plat_priv->ctrl_params.quirks)) {
		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		return 0;
	}

	cnss_set_pin_connect_status(plat_priv);

	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
		/* No QMI: call the host driver probe directly */
		ret = cnss_pci_call_driver_probe(pci_priv);
		if (ret)
			goto stop_mhi;
	} else if (timeout) {
		/* Arm the FW boot watchdog with a mode-dependent margin */
		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state))
			timeout += WLAN_COLD_BOOT_CAL_TIMEOUT;
		else
			timeout += WLAN_MISSION_MODE_TIMEOUT;
		mod_timer(&plat_priv->fw_boot_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}

	return 0;

stop_mhi:
	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, true);
	cnss_pci_power_off_mhi(pci_priv);
	cnss_suspend_pci_link(pci_priv);
	cnss_pci_deinit_mhi(pci_priv);
power_off:
	cnss_power_off_device(plat_priv);
out:
	return ret;
}
2775 
2776 static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
2777 {
2778 	int ret = 0;
2779 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2780 	int do_force_wake = true;
2781 
2782 	cnss_pci_pm_runtime_resume(pci_priv);
2783 
2784 	ret = cnss_pci_call_driver_remove(pci_priv);
2785 	if (ret == -EAGAIN)
2786 		goto out;
2787 
2788 	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
2789 				   CNSS_BUS_WIDTH_NONE);
2790 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
2791 	cnss_pci_set_auto_suspended(pci_priv, 0);
2792 
2793 	if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
2794 	     test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
2795 	     test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
2796 	     test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) ||
2797 	     test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) &&
2798 	    test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
2799 		del_timer(&pci_priv->dev_rddm_timer);
2800 		cnss_pci_collect_dump_info(pci_priv, false);
2801 		CNSS_ASSERT(0);
2802 	}
2803 
2804 	if (!cnss_is_device_powered_on(plat_priv)) {
2805 		cnss_pr_dbg("Device is already powered off, ignore\n");
2806 		goto skip_power_off;
2807 	}
2808 
2809 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
2810 		do_force_wake = false;
2811 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, do_force_wake);
2812 
2813 	/* FBC image will be freed after powering off MHI, so skip
2814 	 * if RAM dump data is still valid.
2815 	 */
2816 	if (plat_priv->ramdump_info_v2.dump_data_valid)
2817 		goto skip_power_off;
2818 
2819 	cnss_pci_power_off_mhi(pci_priv);
2820 	ret = cnss_suspend_pci_link(pci_priv);
2821 	if (ret)
2822 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
2823 	cnss_pci_deinit_mhi(pci_priv);
2824 	cnss_power_off_device(plat_priv);
2825 
2826 skip_power_off:
2827 	pci_priv->remap_window = 0;
2828 
2829 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
2830 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
2831 	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
2832 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
2833 		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
2834 		pci_priv->pci_link_down_ind = false;
2835 	}
2836 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
2837 	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
2838 	memset(&print_optimize, 0, sizeof(print_optimize));
2839 
2840 out:
2841 	return ret;
2842 }
2843 
/* Crash-time shutdown for QCA6290-family devices: collect dump info
 * in panic mode (CNSS_IN_PANIC set so helpers avoid sleeping paths
 * where checked).
 */
static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	set_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
		    plat_priv->driver_state);

	cnss_pci_collect_dump_info(pci_priv, true);
	clear_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
}
2855 
2856 static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
2857 {
2858 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2859 	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
2860 	struct cnss_dump_data *dump_data = &info_v2->dump_data;
2861 	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
2862 	int ret = 0;
2863 
2864 	if (!info_v2->dump_data_valid || !dump_seg ||
2865 	    dump_data->nentries == 0)
2866 		return 0;
2867 
2868 	ret = cnss_do_elf_ramdump(plat_priv);
2869 
2870 	cnss_pci_clear_dump_info(pci_priv);
2871 	cnss_pci_power_off_mhi(pci_priv);
2872 	cnss_suspend_pci_link(pci_priv);
2873 	cnss_pci_deinit_mhi(pci_priv);
2874 	cnss_power_off_device(plat_priv);
2875 
2876 	return ret;
2877 }
2878 
2879 int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
2880 {
2881 	int ret = 0;
2882 
2883 	if (!pci_priv) {
2884 		cnss_pr_err("pci_priv is NULL\n");
2885 		return -ENODEV;
2886 	}
2887 
2888 	switch (pci_priv->device_id) {
2889 	case QCA6174_DEVICE_ID:
2890 		ret = cnss_qca6174_powerup(pci_priv);
2891 		break;
2892 	case QCA6290_DEVICE_ID:
2893 	case QCA6390_DEVICE_ID:
2894 	case QCA6490_DEVICE_ID:
2895 	case KIWI_DEVICE_ID:
2896 	case MANGO_DEVICE_ID:
2897 		ret = cnss_qca6290_powerup(pci_priv);
2898 		break;
2899 	default:
2900 		cnss_pr_err("Unknown device_id found: 0x%x\n",
2901 			    pci_priv->device_id);
2902 		ret = -ENODEV;
2903 	}
2904 
2905 	return ret;
2906 }
2907 
2908 int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
2909 {
2910 	int ret = 0;
2911 
2912 	if (!pci_priv) {
2913 		cnss_pr_err("pci_priv is NULL\n");
2914 		return -ENODEV;
2915 	}
2916 
2917 	switch (pci_priv->device_id) {
2918 	case QCA6174_DEVICE_ID:
2919 		ret = cnss_qca6174_shutdown(pci_priv);
2920 		break;
2921 	case QCA6290_DEVICE_ID:
2922 	case QCA6390_DEVICE_ID:
2923 	case QCA6490_DEVICE_ID:
2924 	case KIWI_DEVICE_ID:
2925 	case MANGO_DEVICE_ID:
2926 		ret = cnss_qca6290_shutdown(pci_priv);
2927 		break;
2928 	default:
2929 		cnss_pr_err("Unknown device_id found: 0x%x\n",
2930 			    pci_priv->device_id);
2931 		ret = -ENODEV;
2932 	}
2933 
2934 	return ret;
2935 }
2936 
2937 int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
2938 {
2939 	int ret = 0;
2940 
2941 	if (!pci_priv) {
2942 		cnss_pr_err("pci_priv is NULL\n");
2943 		return -ENODEV;
2944 	}
2945 
2946 	switch (pci_priv->device_id) {
2947 	case QCA6174_DEVICE_ID:
2948 		cnss_qca6174_crash_shutdown(pci_priv);
2949 		break;
2950 	case QCA6290_DEVICE_ID:
2951 	case QCA6390_DEVICE_ID:
2952 	case QCA6490_DEVICE_ID:
2953 	case KIWI_DEVICE_ID:
2954 	case MANGO_DEVICE_ID:
2955 		cnss_qca6290_crash_shutdown(pci_priv);
2956 		break;
2957 	default:
2958 		cnss_pr_err("Unknown device_id found: 0x%x\n",
2959 			    pci_priv->device_id);
2960 		ret = -ENODEV;
2961 	}
2962 
2963 	return ret;
2964 }
2965 
2966 int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
2967 {
2968 	int ret = 0;
2969 
2970 	if (!pci_priv) {
2971 		cnss_pr_err("pci_priv is NULL\n");
2972 		return -ENODEV;
2973 	}
2974 
2975 	switch (pci_priv->device_id) {
2976 	case QCA6174_DEVICE_ID:
2977 		ret = cnss_qca6174_ramdump(pci_priv);
2978 		break;
2979 	case QCA6290_DEVICE_ID:
2980 	case QCA6390_DEVICE_ID:
2981 	case QCA6490_DEVICE_ID:
2982 	case KIWI_DEVICE_ID:
2983 	case MANGO_DEVICE_ID:
2984 		ret = cnss_qca6290_ramdump(pci_priv);
2985 		break;
2986 	default:
2987 		cnss_pr_err("Unknown device_id found: 0x%x\n",
2988 			    pci_priv->device_id);
2989 		ret = -ENODEV;
2990 	}
2991 
2992 	return ret;
2993 }
2994 
2995 int cnss_pci_is_drv_connected(struct device *dev)
2996 {
2997 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
2998 
2999 	if (!pci_priv)
3000 		return -ENODEV;
3001 
3002 	return pci_priv->drv_connected_last;
3003 }
3004 EXPORT_SYMBOL(cnss_pci_is_drv_connected);
3005 
/* Delayed work that defers host-driver registration until cold boot
 * calibration (CBC) has finished or timed out.
 *
 * Paths through this work item:
 *  - HW disabled: bail out silently.
 *  - CBC already done: jump straight to driver registration.
 *  - CBC not yet started (file system not ready): reschedule ourselves
 *    for another calibration timeout period.
 *  - CBC started but not done by the time we run: treat it as a
 *    calibration timeout, assert (outside reboot), and post a
 *    COLD_BOOT_CAL_DONE event with CNSS_CAL_TIMEOUT status so init can
 *    proceed.
 */
static void cnss_wlan_reg_driver_work(struct work_struct *work)
{
	struct cnss_plat_data *plat_priv =
	container_of(work, struct cnss_plat_data, wlan_reg_driver_work.work);
	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
	struct cnss_cal_info *cal_info;
	unsigned int timeout;

	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state))
		return;

	if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
		goto reg_driver;
	} else {
		/* In charger mode there is no point escalating a timeout */
		if (plat_priv->charger_mode) {
			cnss_pr_err("Ignore calibration timeout in charger mode\n");
			return;
		}
		if (!test_bit(CNSS_IN_COLD_BOOT_CAL,
			      &plat_priv->driver_state)) {
			timeout = cnss_get_timeout(plat_priv,
						   CNSS_TIMEOUT_CALIBRATION);
			cnss_pr_dbg("File system not ready to start calibration. Wait for %ds..\n",
				    timeout / 1000);
			/* Try again after another calibration interval */
			schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
					      msecs_to_jiffies(timeout));
			return;
		}

		del_timer(&plat_priv->fw_boot_timer);
		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) &&
		    !test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
			cnss_pr_err("Timeout waiting for calibration to complete\n");
			CNSS_ASSERT(0);
		}
		/* Ownership of cal_info passes to the event handler */
		cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
		if (!cal_info)
			return;
		cal_info->cal_status = CNSS_CAL_TIMEOUT;
		cnss_driver_event_post(plat_priv,
				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
				       0, cal_info);
	}
reg_driver:
	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
		return;
	}
	reinit_completion(&plat_priv->power_up_complete);
	cnss_driver_event_post(plat_priv,
			       CNSS_DRIVER_EVENT_REGISTER_DRIVER,
			       CNSS_EVENT_SYNC_UNKILLABLE,
			       pci_priv->driver_ops);
}
3060 
3061 int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
3062 {
3063 	int ret = 0;
3064 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
3065 	struct cnss_pci_data *pci_priv;
3066 	const struct pci_device_id *id_table = driver_ops->id_table;
3067 	unsigned int timeout;
3068 
3069 	if (!cnss_check_driver_loading_allowed()) {
3070 		cnss_pr_info("No cnss2 dtsi entry present");
3071 		return -ENODEV;
3072 	}
3073 
3074 	if (!plat_priv) {
3075 		cnss_pr_buf("plat_priv is not ready for register driver\n");
3076 		return -EAGAIN;
3077 	}
3078 
3079 	pci_priv = plat_priv->bus_priv;
3080 
3081 	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
3082 		while (id_table && id_table->device) {
3083 			if (plat_priv->device_id == id_table->device) {
3084 				if (plat_priv->device_id == KIWI_DEVICE_ID &&
3085 				    driver_ops->chip_version != 2) {
3086 					cnss_pr_err("WLAN HW disabled. kiwi_v2 only supported\n");
3087 					return -ENODEV;
3088 				}
3089 				cnss_pr_info("WLAN register driver deferred for device ID: 0x%x due to HW disable\n",
3090 					     id_table->device);
3091 				plat_priv->driver_ops = driver_ops;
3092 				return 0;
3093 			}
3094 			id_table++;
3095 		}
3096 		return -ENODEV;
3097 	}
3098 
3099 	if (!test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state)) {
3100 		cnss_pr_info("pci probe not yet done for register driver\n");
3101 		return -EAGAIN;
3102 	}
3103 
3104 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
3105 		cnss_pr_err("Driver has already registered\n");
3106 		return -EEXIST;
3107 	}
3108 
3109 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3110 		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
3111 		return -EINVAL;
3112 	}
3113 
3114 	if (!id_table || !pci_dev_present(id_table)) {
3115 		/* id_table pointer will move from pci_dev_present(),
3116 		 * so check again using local pointer.
3117 		 */
3118 		id_table = driver_ops->id_table;
3119 		while (id_table && id_table->vendor) {
3120 			cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
3121 				     id_table->device);
3122 			id_table++;
3123 		}
3124 		cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
3125 			    pci_priv->device_id);
3126 		return -ENODEV;
3127 	}
3128 
3129 	if (driver_ops->chip_version != CNSS_CHIP_VER_ANY &&
3130 	    driver_ops->chip_version != plat_priv->device_version.major_version) {
3131 		cnss_pr_err("Driver built for chip ver 0x%x, enumerated ver 0x%x, reject unsupported driver\n",
3132 			    driver_ops->chip_version,
3133 			    plat_priv->device_version.major_version);
3134 		return -ENODEV;
3135 	}
3136 
3137 	cnss_get_driver_mode_update_fw_name(plat_priv);
3138 	set_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state);
3139 
3140 	if (!plat_priv->cbc_enabled ||
3141 	    test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
3142 		goto register_driver;
3143 
3144 	pci_priv->driver_ops = driver_ops;
3145 	/* If Cold Boot Calibration is enabled, it is the 1st step in init
3146 	 * sequence.CBC is done on file system_ready trigger. Qcacld will be
3147 	 * loaded from vendor_modprobe.sh at early boot and must be deferred
3148 	 * until CBC is complete
3149 	 */
3150 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION);
3151 	INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work,
3152 			  cnss_wlan_reg_driver_work);
3153 	schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
3154 			      msecs_to_jiffies(timeout));
3155 	cnss_pr_info("WLAN register driver deferred for Calibration\n");
3156 	return 0;
3157 register_driver:
3158 	reinit_completion(&plat_priv->power_up_complete);
3159 	ret = cnss_driver_event_post(plat_priv,
3160 				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
3161 				     CNSS_EVENT_SYNC_UNKILLABLE,
3162 				     driver_ops);
3163 
3164 	return ret;
3165 }
3166 EXPORT_SYMBOL(cnss_wlan_register_driver);
3167 
/* Unregister the WLAN host driver.
 *
 * Before posting the UNREGISTER event, waits (under driver_ops_lock) for
 * any in-flight power-up to finish and, if recovery or a device error is
 * pending, for recovery to complete — so teardown does not race either.
 * QCA6174 skips the power-up wait. The driver_ops argument is unused;
 * the platform state identifies the registered driver.
 */
void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
	int ret = 0;
	unsigned int timeout;

	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return;
	}

	mutex_lock(&plat_priv->driver_ops_lock);

	if (plat_priv->device_id == QCA6174_DEVICE_ID)
		goto skip_wait_power_up;

	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG);
	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
					  msecs_to_jiffies(timeout));
	if (!ret) {
		cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n",
			    timeout);
		CNSS_ASSERT(0);
	}

skip_wait_power_up:
	if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		goto skip_wait_recovery;

	reinit_completion(&plat_priv->recovery_complete);
	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
	ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
					  msecs_to_jiffies(timeout));
	if (!ret) {
		cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
			    timeout);
		CNSS_ASSERT(0);
	}

skip_wait_recovery:
	/* Synchronous, unkillable: returns only after teardown finishes */
	cnss_driver_event_post(plat_priv,
			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
			       CNSS_EVENT_SYNC_UNKILLABLE, NULL);

	mutex_unlock(&plat_priv->driver_ops_lock);
}
EXPORT_SYMBOL(cnss_wlan_unregister_driver);
3216 
3217 int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
3218 				  void *data)
3219 {
3220 	int ret = 0;
3221 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3222 
3223 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3224 		cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n");
3225 		return -EINVAL;
3226 	}
3227 
3228 	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3229 	pci_priv->driver_ops = data;
3230 
3231 	ret = cnss_pci_dev_powerup(pci_priv);
3232 	if (ret) {
3233 		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3234 		pci_priv->driver_ops = NULL;
3235 	} else {
3236 		set_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3237 	}
3238 
3239 	return ret;
3240 }
3241 
3242 int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
3243 {
3244 	struct cnss_plat_data *plat_priv;
3245 
3246 	if (!pci_priv)
3247 		return -EINVAL;
3248 
3249 	plat_priv = pci_priv->plat_priv;
3250 	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3251 	cnss_pci_dev_shutdown(pci_priv);
3252 	pci_priv->driver_ops = NULL;
3253 	clear_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3254 
3255 	return 0;
3256 }
3257 
3258 static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
3259 {
3260 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3261 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3262 	int ret = 0;
3263 
3264 	pm_message_t state = { .event = PM_EVENT_SUSPEND };
3265 
3266 	if (driver_ops && driver_ops->suspend) {
3267 		ret = driver_ops->suspend(pci_dev, state);
3268 		if (ret) {
3269 			cnss_pr_err("Failed to suspend host driver, err = %d\n",
3270 				    ret);
3271 			ret = -EAGAIN;
3272 		}
3273 	}
3274 
3275 	return ret;
3276 }
3277 
3278 static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
3279 {
3280 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3281 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3282 	int ret = 0;
3283 
3284 	if (driver_ops && driver_ops->resume) {
3285 		ret = driver_ops->resume(pci_dev);
3286 		if (ret)
3287 			cnss_pr_err("Failed to resume host driver, err = %d\n",
3288 				    ret);
3289 	}
3290 
3291 	return ret;
3292 }
3293 
/* Suspend the PCI bus side of the device: MHI suspend, then (unless DRV
 * is connected, in which case config-space save/disable is the DRV
 * infrastructure's job) save config space, disable the device, enter
 * D3hot, and finally turn the PCI link off.
 *
 * On link-down failure the whole sequence is unwound so the device is
 * left usable. Returns 0 on success or -EAGAIN on failure.
 */
int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int ret = 0;

	/* Already suspended: nothing to do */
	if (pci_priv->pci_link_state == PCI_LINK_DOWN)
		goto out;

	if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
		ret = -EAGAIN;
		goto out;
	}

	if (pci_priv->drv_connected_last)
		goto skip_disable_pci;

	pci_clear_master(pci_dev);
	cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
	pci_disable_device(pci_dev);

	/* D3hot failure is logged but does not abort the suspend */
	ret = pci_set_power_state(pci_dev, PCI_D3hot);
	if (ret)
		cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);

skip_disable_pci:
	if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
		ret = -EAGAIN;
		goto resume_mhi;
	}
	pci_priv->pci_link_state = PCI_LINK_DOWN;

	return 0;

resume_mhi:
	/* Unwind in reverse order of the suspend steps above */
	if (!pci_is_enabled(pci_dev))
		if (pci_enable_device(pci_dev))
			cnss_pr_err("Failed to enable PCI device\n");
	if (pci_priv->saved_state)
		cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
	pci_set_master(pci_dev);
	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
out:
	return ret;
}
3338 
/* Resume the PCI bus side of the device: bring the link up, re-enable
 * the device and restore config space (skipped when DRV is connected,
 * since DRV owns those steps), then resume MHI.
 *
 * If the link cannot be brought back up, a link-down event is reported
 * and -EAGAIN returned. Returns 0 on success.
 */
int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int ret = 0;

	/* Already resumed: nothing to do */
	if (pci_priv->pci_link_state == PCI_LINK_UP)
		goto out;

	if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
		cnss_fatal_err("Failed to resume PCI link from suspend\n");
		/* Treat an unresumable link as a link-down condition */
		cnss_pci_link_down(&pci_dev->dev);
		ret = -EAGAIN;
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;

	if (pci_priv->drv_connected_last)
		goto skip_enable_pci;

	ret = pci_enable_device(pci_dev);
	if (ret) {
		cnss_pr_err("Failed to enable PCI device, err = %d\n",
			    ret);
		goto out;
	}

	if (pci_priv->saved_state)
		cnss_set_pci_config_space(pci_priv,
					  RESTORE_PCI_CONFIG_SPACE);
	pci_set_master(pci_dev);

skip_enable_pci:
	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
out:
	return ret;
}
3376 
/* System PM suspend callback.
 *
 * When DRV is supported (and not disabled by quirk), suspend is only
 * allowed if firmware reported a DRV connection; otherwise -EAGAIN is
 * returned to reject the suspend. Suspends the host driver first, then
 * the bus (unless power collapse is disabled), rolling back the driver
 * suspend if the bus step fails.
 */
static int cnss_pci_suspend(struct device *dev)
{
	int ret = 0;
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		goto out;

	/* Nothing to suspend if the device is already powered off */
	if (!cnss_is_device_powered_on(plat_priv))
		goto out;

	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
	    pci_priv->drv_supported) {
		pci_priv->drv_connected_last =
			cnss_pci_get_drv_connected(pci_priv);
		if (!pci_priv->drv_connected_last) {
			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
			ret = -EAGAIN;
			goto out;
		}
	}

	set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);

	ret = cnss_pci_suspend_driver(pci_priv);
	if (ret)
		goto clear_flag;

	if (!pci_priv->disable_pc) {
		mutex_lock(&pci_priv->bus_lock);
		ret = cnss_pci_suspend_bus(pci_priv);
		mutex_unlock(&pci_priv->bus_lock);
		if (ret)
			goto resume_driver;
	}

	/* Suspended: stop monitoring the wake interrupt */
	cnss_pci_set_monitor_wake_intr(pci_priv, false);

	return 0;

resume_driver:
	/* Bus suspend failed: undo the driver suspend */
	cnss_pci_resume_driver(pci_priv);
clear_flag:
	pci_priv->drv_connected_last = 0;
	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
out:
	return ret;
}
3430 
/* System PM resume callback: resume the bus (unless power collapse is
 * disabled), then the host driver. The DRV connection snapshot and the
 * IN_SUSPEND_RESUME flag are cleared unconditionally on this path, even
 * if the driver resume reported an error.
 */
static int cnss_pci_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		goto out;

	/* Skip resume while link-down recovery is pending */
	if (pci_priv->pci_link_down_ind)
		goto out;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		goto out;

	if (!pci_priv->disable_pc) {
		ret = cnss_pci_resume_bus(pci_priv);
		if (ret)
			goto out;
	}

	ret = cnss_pci_resume_driver(pci_priv);

	pci_priv->drv_connected_last = 0;
	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);

out:
	return ret;
}
3465 
3466 static int cnss_pci_suspend_noirq(struct device *dev)
3467 {
3468 	int ret = 0;
3469 	struct pci_dev *pci_dev = to_pci_dev(dev);
3470 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3471 	struct cnss_wlan_driver *driver_ops;
3472 
3473 	if (!pci_priv)
3474 		goto out;
3475 
3476 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
3477 		goto out;
3478 
3479 	driver_ops = pci_priv->driver_ops;
3480 	if (driver_ops && driver_ops->suspend_noirq)
3481 		ret = driver_ops->suspend_noirq(pci_dev);
3482 
3483 	if (pci_priv->disable_pc && !pci_dev->state_saved &&
3484 	    !pci_priv->plat_priv->use_pm_domain)
3485 		pci_save_state(pci_dev);
3486 
3487 out:
3488 	return ret;
3489 }
3490 
3491 static int cnss_pci_resume_noirq(struct device *dev)
3492 {
3493 	int ret = 0;
3494 	struct pci_dev *pci_dev = to_pci_dev(dev);
3495 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3496 	struct cnss_wlan_driver *driver_ops;
3497 
3498 	if (!pci_priv)
3499 		goto out;
3500 
3501 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
3502 		goto out;
3503 
3504 	driver_ops = pci_priv->driver_ops;
3505 	if (driver_ops && driver_ops->resume_noirq &&
3506 	    !pci_priv->pci_link_down_ind)
3507 		ret = driver_ops->resume_noirq(pci_dev);
3508 
3509 out:
3510 	return ret;
3511 }
3512 
/* Runtime PM suspend callback.
 *
 * Rejected (-EAGAIN) when the device is off, link-down recovery is in
 * progress, or DRV is supported but firmware reports no DRV connection.
 * Delegates to the host driver's runtime_suspend if provided, otherwise
 * falls back to cnss_auto_suspend(). On failure the DRV connection
 * snapshot is discarded.
 */
static int cnss_pci_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -EAGAIN;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -EAGAIN;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		return -EAGAIN;

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress!\n");
		return -EAGAIN;
	}

	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
	    pci_priv->drv_supported) {
		pci_priv->drv_connected_last =
			cnss_pci_get_drv_connected(pci_priv);
		if (!pci_priv->drv_connected_last) {
			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
			return -EAGAIN;
		}
	}

	cnss_pr_vdbg("Runtime suspend start\n");

	driver_ops = pci_priv->driver_ops;
	if (driver_ops && driver_ops->runtime_ops &&
	    driver_ops->runtime_ops->runtime_suspend)
		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
	else
		ret = cnss_auto_suspend(dev);

	/* Suspend failed: drop the DRV snapshot taken above */
	if (ret)
		pci_priv->drv_connected_last = 0;

	cnss_pr_vdbg("Runtime suspend status: %d\n", ret);

	return ret;
}
3562 
/* Runtime PM resume callback: delegate to the host driver's
 * runtime_resume if provided, otherwise cnss_auto_resume().
 *
 * Note the asymmetry with runtime_suspend: here drv_connected_last is
 * cleared on SUCCESS (the DRV snapshot is only meaningful across one
 * suspend/resume cycle), while suspend clears it on failure.
 */
static int cnss_pci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -EAGAIN;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		return -EAGAIN;

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress!\n");
		return -EAGAIN;
	}

	cnss_pr_vdbg("Runtime resume start\n");

	driver_ops = pci_priv->driver_ops;
	if (driver_ops && driver_ops->runtime_ops &&
	    driver_ops->runtime_ops->runtime_resume)
		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
	else
		ret = cnss_auto_resume(dev);

	if (!ret)
		pci_priv->drv_connected_last = 0;

	cnss_pr_vdbg("Runtime resume status: %d\n", ret);

	return ret;
}
3597 
3598 static int cnss_pci_runtime_idle(struct device *dev)
3599 {
3600 	cnss_pr_vdbg("Runtime idle\n");
3601 
3602 	pm_request_autosuspend(dev);
3603 
3604 	return -EBUSY;
3605 }
3606 
3607 int cnss_wlan_pm_control(struct device *dev, bool vote)
3608 {
3609 	struct pci_dev *pci_dev = to_pci_dev(dev);
3610 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3611 	int ret = 0;
3612 
3613 	if (!pci_priv)
3614 		return -ENODEV;
3615 
3616 	ret = cnss_pci_disable_pc(pci_priv, vote);
3617 	if (ret)
3618 		return ret;
3619 
3620 	pci_priv->disable_pc = vote;
3621 	cnss_pr_dbg("%s PCIe power collapse\n", vote ? "disable" : "enable");
3622 
3623 	return 0;
3624 }
3625 EXPORT_SYMBOL(cnss_wlan_pm_control);
3626 
3627 static void cnss_pci_pm_runtime_get_record(struct cnss_pci_data *pci_priv,
3628 					   enum cnss_rtpm_id id)
3629 {
3630 	if (id >= RTPM_ID_MAX)
3631 		return;
3632 
3633 	atomic_inc(&pci_priv->pm_stats.runtime_get);
3634 	atomic_inc(&pci_priv->pm_stats.runtime_get_id[id]);
3635 	pci_priv->pm_stats.runtime_get_timestamp_id[id] =
3636 		cnss_get_host_timestamp(pci_priv->plat_priv);
3637 }
3638 
3639 static void cnss_pci_pm_runtime_put_record(struct cnss_pci_data *pci_priv,
3640 					   enum cnss_rtpm_id id)
3641 {
3642 	if (id >= RTPM_ID_MAX)
3643 		return;
3644 
3645 	atomic_inc(&pci_priv->pm_stats.runtime_put);
3646 	atomic_inc(&pci_priv->pm_stats.runtime_put_id[id]);
3647 	pci_priv->pm_stats.runtime_put_timestamp_id[id] =
3648 		cnss_get_host_timestamp(pci_priv->plat_priv);
3649 }
3650 
3651 void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
3652 {
3653 	struct device *dev;
3654 
3655 	if (!pci_priv)
3656 		return;
3657 
3658 	dev = &pci_priv->pci_dev->dev;
3659 
3660 	cnss_pr_dbg("Runtime PM usage count: %d\n",
3661 		    atomic_read(&dev->power.usage_count));
3662 }
3663 
3664 int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
3665 {
3666 	struct device *dev;
3667 	enum rpm_status status;
3668 
3669 	if (!pci_priv)
3670 		return -ENODEV;
3671 
3672 	dev = &pci_priv->pci_dev->dev;
3673 
3674 	status = dev->power.runtime_status;
3675 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
3676 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
3677 			     (void *)_RET_IP_);
3678 
3679 	return pm_request_resume(dev);
3680 }
3681 
3682 int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
3683 {
3684 	struct device *dev;
3685 	enum rpm_status status;
3686 
3687 	if (!pci_priv)
3688 		return -ENODEV;
3689 
3690 	dev = &pci_priv->pci_dev->dev;
3691 
3692 	status = dev->power.runtime_status;
3693 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
3694 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
3695 			     (void *)_RET_IP_);
3696 
3697 	return pm_runtime_resume(dev);
3698 }
3699 
3700 int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv,
3701 			    enum cnss_rtpm_id id)
3702 {
3703 	struct device *dev;
3704 	enum rpm_status status;
3705 
3706 	if (!pci_priv)
3707 		return -ENODEV;
3708 
3709 	dev = &pci_priv->pci_dev->dev;
3710 
3711 	status = dev->power.runtime_status;
3712 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
3713 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
3714 			     (void *)_RET_IP_);
3715 
3716 	cnss_pci_pm_runtime_get_record(pci_priv, id);
3717 
3718 	return pm_runtime_get(dev);
3719 }
3720 
3721 int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv,
3722 				 enum cnss_rtpm_id id)
3723 {
3724 	struct device *dev;
3725 	enum rpm_status status;
3726 
3727 	if (!pci_priv)
3728 		return -ENODEV;
3729 
3730 	dev = &pci_priv->pci_dev->dev;
3731 
3732 	status = dev->power.runtime_status;
3733 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
3734 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
3735 			     (void *)_RET_IP_);
3736 
3737 	cnss_pci_pm_runtime_get_record(pci_priv, id);
3738 
3739 	return pm_runtime_get_sync(dev);
3740 }
3741 
3742 void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv,
3743 				      enum cnss_rtpm_id id)
3744 {
3745 	if (!pci_priv)
3746 		return;
3747 
3748 	cnss_pci_pm_runtime_get_record(pci_priv, id);
3749 	pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
3750 }
3751 
3752 int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv,
3753 					enum cnss_rtpm_id id)
3754 {
3755 	struct device *dev;
3756 
3757 	if (!pci_priv)
3758 		return -ENODEV;
3759 
3760 	dev = &pci_priv->pci_dev->dev;
3761 
3762 	if (atomic_read(&dev->power.usage_count) == 0) {
3763 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
3764 		return -EINVAL;
3765 	}
3766 
3767 	cnss_pci_pm_runtime_put_record(pci_priv, id);
3768 
3769 	return pm_runtime_put_autosuspend(&pci_priv->pci_dev->dev);
3770 }
3771 
3772 void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv,
3773 				    enum cnss_rtpm_id id)
3774 {
3775 	struct device *dev;
3776 
3777 	if (!pci_priv)
3778 		return;
3779 
3780 	dev = &pci_priv->pci_dev->dev;
3781 
3782 	if (atomic_read(&dev->power.usage_count) == 0) {
3783 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
3784 		return;
3785 	}
3786 
3787 	cnss_pci_pm_runtime_put_record(pci_priv, id);
3788 	pm_runtime_put_noidle(&pci_priv->pci_dev->dev);
3789 }
3790 
3791 void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv)
3792 {
3793 	if (!pci_priv)
3794 		return;
3795 
3796 	pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev);
3797 }
3798 
/* Auto-suspend path used by runtime PM (and host drivers without their
 * own runtime ops): suspend the bus unless QMI traffic is outstanding,
 * mark the device auto-suspended, re-arm wake interrupt monitoring and
 * drop the bus bandwidth vote.
 */
int cnss_auto_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	/* bus_lock serializes against QMI send get/put bus transitions */
	mutex_lock(&pci_priv->bus_lock);
	if (!pci_priv->qmi_send_usage_count) {
		ret = cnss_pci_suspend_bus(pci_priv);
		if (ret) {
			mutex_unlock(&pci_priv->bus_lock);
			return ret;
		}
	}

	cnss_pci_set_auto_suspended(pci_priv, 1);
	mutex_unlock(&pci_priv->bus_lock);

	cnss_pci_set_monitor_wake_intr(pci_priv, true);

	/* For suspend temporarily set bandwidth vote to NONE and dont save in
	 * current_bw_vote as in resume path we should vote for last used
	 * bandwidth vote. Also ignore error if bw voting is not setup.
	 */
	cnss_setup_bus_bandwidth(plat_priv, CNSS_BUS_WIDTH_NONE, false);
	return 0;
}
EXPORT_SYMBOL(cnss_auto_suspend);
3835 
3836 int cnss_auto_resume(struct device *dev)
3837 {
3838 	int ret = 0;
3839 	struct pci_dev *pci_dev = to_pci_dev(dev);
3840 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3841 	struct cnss_plat_data *plat_priv;
3842 
3843 	if (!pci_priv)
3844 		return -ENODEV;
3845 
3846 	plat_priv = pci_priv->plat_priv;
3847 	if (!plat_priv)
3848 		return -ENODEV;
3849 
3850 	mutex_lock(&pci_priv->bus_lock);
3851 	ret = cnss_pci_resume_bus(pci_priv);
3852 	if (ret) {
3853 		mutex_unlock(&pci_priv->bus_lock);
3854 		return ret;
3855 	}
3856 
3857 	cnss_pci_set_auto_suspended(pci_priv, 0);
3858 	mutex_unlock(&pci_priv->bus_lock);
3859 
3860 	cnss_request_bus_bandwidth(dev, plat_priv->icc.current_bw_vote);
3861 
3862 	return 0;
3863 }
3864 EXPORT_SYMBOL(cnss_auto_resume);
3865 
/* Synchronously force-wake the device via an MHI vote.
 *
 * Only meaningful on targets with an MHI force-wake interface; other
 * device IDs return 0 (no-op). With a non-zero timeout_us the wake is
 * busy-waited for at most that long; with 0 it sleeps up to the MHI
 * controller's own timeout. Check ordering (mhi_ctrl before plat_priv)
 * determines which error code wins when several are applicable.
 */
int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
		break;
	default:
		/* Force wake not applicable on this target */
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		return -EAGAIN;

	if (timeout_us) {
		/* Busy wait for timeout_us */
		return cnss_mhi_device_get_sync_atomic(pci_priv,
						       timeout_us, false);
	} else {
		/* Sleep wait for mhi_ctrl->timeout_ms */
		return mhi_device_get_sync(mhi_ctrl->mhi_dev);
	}
}
EXPORT_SYMBOL(cnss_pci_force_wake_request_sync);
3907 
3908 int cnss_pci_force_wake_request(struct device *dev)
3909 {
3910 	struct pci_dev *pci_dev = to_pci_dev(dev);
3911 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3912 	struct cnss_plat_data *plat_priv;
3913 	struct mhi_controller *mhi_ctrl;
3914 
3915 	if (!pci_priv)
3916 		return -ENODEV;
3917 
3918 	switch (pci_priv->device_id) {
3919 	case QCA6390_DEVICE_ID:
3920 	case QCA6490_DEVICE_ID:
3921 	case KIWI_DEVICE_ID:
3922 	case MANGO_DEVICE_ID:
3923 		break;
3924 	default:
3925 		return 0;
3926 	}
3927 
3928 	mhi_ctrl = pci_priv->mhi_ctrl;
3929 	if (!mhi_ctrl)
3930 		return -EINVAL;
3931 
3932 	plat_priv = pci_priv->plat_priv;
3933 	if (!plat_priv)
3934 		return -ENODEV;
3935 
3936 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3937 		return -EAGAIN;
3938 
3939 	mhi_device_get(mhi_ctrl->mhi_dev);
3940 
3941 	return 0;
3942 }
3943 EXPORT_SYMBOL(cnss_pci_force_wake_request);
3944 
3945 int cnss_pci_is_device_awake(struct device *dev)
3946 {
3947 	struct pci_dev *pci_dev = to_pci_dev(dev);
3948 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3949 	struct mhi_controller *mhi_ctrl;
3950 
3951 	if (!pci_priv)
3952 		return -ENODEV;
3953 
3954 	switch (pci_priv->device_id) {
3955 	case QCA6390_DEVICE_ID:
3956 	case QCA6490_DEVICE_ID:
3957 	case KIWI_DEVICE_ID:
3958 	case MANGO_DEVICE_ID:
3959 		break;
3960 	default:
3961 		return 0;
3962 	}
3963 
3964 	mhi_ctrl = pci_priv->mhi_ctrl;
3965 	if (!mhi_ctrl)
3966 		return -EINVAL;
3967 
3968 	return (mhi_ctrl->dev_state == MHI_STATE_M0);
3969 }
3970 EXPORT_SYMBOL(cnss_pci_is_device_awake);
3971 
3972 int cnss_pci_force_wake_release(struct device *dev)
3973 {
3974 	struct pci_dev *pci_dev = to_pci_dev(dev);
3975 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3976 	struct cnss_plat_data *plat_priv;
3977 	struct mhi_controller *mhi_ctrl;
3978 
3979 	if (!pci_priv)
3980 		return -ENODEV;
3981 
3982 	switch (pci_priv->device_id) {
3983 	case QCA6390_DEVICE_ID:
3984 	case QCA6490_DEVICE_ID:
3985 	case KIWI_DEVICE_ID:
3986 	case MANGO_DEVICE_ID:
3987 		break;
3988 	default:
3989 		return 0;
3990 	}
3991 
3992 	mhi_ctrl = pci_priv->mhi_ctrl;
3993 	if (!mhi_ctrl)
3994 		return -EINVAL;
3995 
3996 	plat_priv = pci_priv->plat_priv;
3997 	if (!plat_priv)
3998 		return -ENODEV;
3999 
4000 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4001 		return -EAGAIN;
4002 
4003 	mhi_device_put(mhi_ctrl->mhi_dev);
4004 
4005 	return 0;
4006 }
4007 EXPORT_SYMBOL(cnss_pci_force_wake_release);
4008 
/* Take a QMI-send reference on the bus. If the device is auto-suspended
 * and this is the first outstanding QMI send, resume the bus first. The
 * usage count and bus transition are both protected by bus_lock so they
 * stay consistent with cnss_auto_suspend()/cnss_pci_qmi_send_put().
 */
int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	if (cnss_pci_get_auto_suspended(pci_priv) &&
	    !pci_priv->qmi_send_usage_count)
		ret = cnss_pci_resume_bus(pci_priv);
	pci_priv->qmi_send_usage_count++;
	cnss_pr_buf("Increased QMI send usage count to %d\n",
		    pci_priv->qmi_send_usage_count);
	mutex_unlock(&pci_priv->bus_lock);

	return ret;
}
4027 
4028 int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
4029 {
4030 	int ret = 0;
4031 
4032 	if (!pci_priv)
4033 		return -ENODEV;
4034 
4035 	mutex_lock(&pci_priv->bus_lock);
4036 	if (pci_priv->qmi_send_usage_count)
4037 		pci_priv->qmi_send_usage_count--;
4038 	cnss_pr_buf("Decreased QMI send usage count to %d\n",
4039 		    pci_priv->qmi_send_usage_count);
4040 	if (cnss_pci_get_auto_suspended(pci_priv) &&
4041 	    !pci_priv->qmi_send_usage_count &&
4042 	    !cnss_pcie_is_device_down(pci_priv))
4043 		ret = cnss_pci_suspend_bus(pci_priv);
4044 	mutex_unlock(&pci_priv->bus_lock);
4045 
4046 	return ret;
4047 }
4048 
4049 int cnss_send_buffer_to_afcmem(struct device *dev, char *afcdb, uint32_t len,
4050 			       uint8_t slotid)
4051 {
4052 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4053 	struct cnss_fw_mem *fw_mem;
4054 	void *mem = NULL;
4055 	int i, ret;
4056 	u32 *status;
4057 
4058 	if (!plat_priv)
4059 		return -EINVAL;
4060 
4061 	fw_mem = plat_priv->fw_mem;
4062 	if (slotid >= AFC_MAX_SLOT) {
4063 		cnss_pr_err("Invalid slot id %d\n", slotid);
4064 		ret = -EINVAL;
4065 		goto err;
4066 	}
4067 	if (len > AFC_SLOT_SIZE) {
4068 		cnss_pr_err("len %d greater than slot size", len);
4069 		ret = -EINVAL;
4070 		goto err;
4071 	}
4072 
4073 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4074 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4075 			mem = fw_mem[i].va;
4076 			status = mem + (slotid * AFC_SLOT_SIZE);
4077 			break;
4078 		}
4079 	}
4080 
4081 	if (!mem) {
4082 		cnss_pr_err("AFC mem is not available\n");
4083 		ret = -ENOMEM;
4084 		goto err;
4085 	}
4086 
4087 	memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len);
4088 	if (len < AFC_SLOT_SIZE)
4089 		memset(mem + (slotid * AFC_SLOT_SIZE) + len,
4090 		       0, AFC_SLOT_SIZE - len);
4091 	status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);
4092 
4093 	return 0;
4094 err:
4095 	return ret;
4096 }
4097 EXPORT_SYMBOL(cnss_send_buffer_to_afcmem);
4098 
4099 int cnss_reset_afcmem(struct device *dev, uint8_t slotid)
4100 {
4101 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4102 	struct cnss_fw_mem *fw_mem;
4103 	void *mem = NULL;
4104 	int i, ret;
4105 
4106 	if (!plat_priv)
4107 		return -EINVAL;
4108 
4109 	fw_mem = plat_priv->fw_mem;
4110 	if (slotid >= AFC_MAX_SLOT) {
4111 		cnss_pr_err("Invalid slot id %d\n", slotid);
4112 		ret = -EINVAL;
4113 		goto err;
4114 	}
4115 
4116 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4117 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4118 			mem = fw_mem[i].va;
4119 			break;
4120 		}
4121 	}
4122 
4123 	if (!mem) {
4124 		cnss_pr_err("AFC mem is not available\n");
4125 		ret = -ENOMEM;
4126 		goto err;
4127 	}
4128 
4129 	memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);
4130 	return 0;
4131 
4132 err:
4133 	return ret;
4134 }
4135 EXPORT_SYMBOL(cnss_reset_afcmem);
4136 
4137 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
4138 {
4139 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4140 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
4141 	struct device *dev = &pci_priv->pci_dev->dev;
4142 	int i;
4143 
4144 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4145 		if (!fw_mem[i].va && fw_mem[i].size) {
4146 retry:
4147 			fw_mem[i].va =
4148 				dma_alloc_attrs(dev, fw_mem[i].size,
4149 						&fw_mem[i].pa, GFP_KERNEL,
4150 						fw_mem[i].attrs);
4151 
4152 			if (!fw_mem[i].va) {
4153 				if ((fw_mem[i].attrs &
4154 				    DMA_ATTR_FORCE_CONTIGUOUS)) {
4155 					fw_mem[i].attrs &=
4156 						~DMA_ATTR_FORCE_CONTIGUOUS;
4157 
4158 					cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n",
4159 						    fw_mem[i].type);
4160 					goto retry;
4161 				}
4162 				cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
4163 					    fw_mem[i].size, fw_mem[i].type);
4164 				CNSS_ASSERT(0);
4165 				return -ENOMEM;
4166 			}
4167 		}
4168 	}
4169 
4170 	return 0;
4171 }
4172 
4173 static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
4174 {
4175 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4176 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
4177 	struct device *dev = &pci_priv->pci_dev->dev;
4178 	int i;
4179 
4180 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4181 		if (fw_mem[i].va && fw_mem[i].size) {
4182 			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
4183 				    fw_mem[i].va, &fw_mem[i].pa,
4184 				    fw_mem[i].size, fw_mem[i].type);
4185 			dma_free_attrs(dev, fw_mem[i].size,
4186 				       fw_mem[i].va, fw_mem[i].pa,
4187 				       fw_mem[i].attrs);
4188 			fw_mem[i].va = NULL;
4189 			fw_mem[i].pa = 0;
4190 			fw_mem[i].size = 0;
4191 			fw_mem[i].type = 0;
4192 		}
4193 	}
4194 
4195 	plat_priv->fw_mem_seg_len = 0;
4196 }
4197 
4198 int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
4199 {
4200 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4201 	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
4202 	int i, j;
4203 
4204 	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
4205 		if (!qdss_mem[i].va && qdss_mem[i].size) {
4206 			qdss_mem[i].va =
4207 				dma_alloc_coherent(&pci_priv->pci_dev->dev,
4208 						   qdss_mem[i].size,
4209 						   &qdss_mem[i].pa,
4210 						   GFP_KERNEL);
4211 			if (!qdss_mem[i].va) {
4212 				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chuck-ID: %d\n",
4213 					    qdss_mem[i].size,
4214 					    qdss_mem[i].type, i);
4215 				break;
4216 			}
4217 		}
4218 	}
4219 
4220 	/* Best-effort allocation for QDSS trace */
4221 	if (i < plat_priv->qdss_mem_seg_len) {
4222 		for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
4223 			qdss_mem[j].type = 0;
4224 			qdss_mem[j].size = 0;
4225 		}
4226 		plat_priv->qdss_mem_seg_len = i;
4227 	}
4228 
4229 	return 0;
4230 }
4231 
4232 void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
4233 {
4234 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4235 	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
4236 	int i;
4237 
4238 	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
4239 		if (qdss_mem[i].va && qdss_mem[i].size) {
4240 			cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
4241 				    &qdss_mem[i].pa, qdss_mem[i].size,
4242 				    qdss_mem[i].type);
4243 			dma_free_coherent(&pci_priv->pci_dev->dev,
4244 					  qdss_mem[i].size, qdss_mem[i].va,
4245 					  qdss_mem[i].pa);
4246 			qdss_mem[i].va = NULL;
4247 			qdss_mem[i].pa = 0;
4248 			qdss_mem[i].size = 0;
4249 			qdss_mem[i].type = 0;
4250 		}
4251 	}
4252 	plat_priv->qdss_mem_seg_len = 0;
4253 }
4254 
4255 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
4256 {
4257 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4258 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
4259 	char filename[MAX_FIRMWARE_NAME_LEN];
4260 	char *phy_filename = DEFAULT_PHY_UCODE_FILE_NAME;
4261 	const struct firmware *fw_entry;
4262 	int ret = 0;
4263 
4264 	/* Use forward compatibility here since for any recent device
4265 	 * it should use DEFAULT_PHY_UCODE_FILE_NAME.
4266 	 */
4267 	switch (pci_priv->device_id) {
4268 	case QCA6174_DEVICE_ID:
4269 		cnss_pr_err("Invalid device ID (0x%x) to load phy image\n",
4270 			    pci_priv->device_id);
4271 		return -EINVAL;
4272 	case QCA6290_DEVICE_ID:
4273 	case QCA6390_DEVICE_ID:
4274 	case QCA6490_DEVICE_ID:
4275 		phy_filename = DEFAULT_PHY_M3_FILE_NAME;
4276 		break;
4277 	case KIWI_DEVICE_ID:
4278 	case MANGO_DEVICE_ID:
4279 		switch (plat_priv->device_version.major_version) {
4280 		case FW_V2_NUMBER:
4281 			phy_filename = PHY_UCODE_V2_FILE_NAME;
4282 			break;
4283 		default:
4284 			break;
4285 		}
4286 		break;
4287 	default:
4288 		break;
4289 	}
4290 
4291 	if (!m3_mem->va && !m3_mem->size) {
4292 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
4293 					    phy_filename);
4294 
4295 		ret = firmware_request_nowarn(&fw_entry, filename,
4296 					      &pci_priv->pci_dev->dev);
4297 		if (ret) {
4298 			cnss_pr_err("Failed to load M3 image: %s\n", filename);
4299 			return ret;
4300 		}
4301 
4302 		m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
4303 						fw_entry->size, &m3_mem->pa,
4304 						GFP_KERNEL);
4305 		if (!m3_mem->va) {
4306 			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
4307 				    fw_entry->size);
4308 			release_firmware(fw_entry);
4309 			return -ENOMEM;
4310 		}
4311 
4312 		memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
4313 		m3_mem->size = fw_entry->size;
4314 		release_firmware(fw_entry);
4315 	}
4316 
4317 	return 0;
4318 }
4319 
4320 static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
4321 {
4322 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4323 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
4324 
4325 	if (m3_mem->va && m3_mem->size) {
4326 		cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
4327 			    m3_mem->va, &m3_mem->pa, m3_mem->size);
4328 		dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
4329 				  m3_mem->va, m3_mem->pa);
4330 	}
4331 
4332 	m3_mem->va = NULL;
4333 	m3_mem->pa = 0;
4334 	m3_mem->size = 0;
4335 }
4336 
/* Handle expiry of the FW-boot timer: firmware never signalled FW ready.
 * Schedules recovery, except in cold-boot calibration mode where the
 * timeout is tolerated.
 */
void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return;

	/* Log the fatal condition even if plat_priv turns out missing. */
	cnss_fatal_err("Timeout waiting for FW ready indication\n");

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return;

	/* Calibration mode is allowed to miss FW ready; skip recovery. */
	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
		cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n");
		return;
	}

	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
			       CNSS_REASON_TIMEOUT);
}
4358 
/* Drop the cached IOMMU domain pointer. NOTE(review): the domain itself is
 * not released here; presumably the IOMMU core owns it — confirm.
 */
static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
{
	pci_priv->iommu_domain = NULL;
}
4363 
4364 int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
4365 {
4366 	if (!pci_priv)
4367 		return -ENODEV;
4368 
4369 	if (!pci_priv->smmu_iova_len)
4370 		return -EINVAL;
4371 
4372 	*addr = pci_priv->smmu_iova_start;
4373 	*size = pci_priv->smmu_iova_len;
4374 
4375 	return 0;
4376 }
4377 
4378 int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
4379 {
4380 	if (!pci_priv)
4381 		return -ENODEV;
4382 
4383 	if (!pci_priv->smmu_iova_ipa_len)
4384 		return -EINVAL;
4385 
4386 	*addr = pci_priv->smmu_iova_ipa_start;
4387 	*size = pci_priv->smmu_iova_ipa_len;
4388 
4389 	return 0;
4390 }
4391 
4392 bool cnss_pci_is_smmu_s1_enabled(struct cnss_pci_data *pci_priv)
4393 {
4394 	if (pci_priv)
4395 		return pci_priv->smmu_s1_enable;
4396 
4397 	return false;
4398 }
4399 struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
4400 {
4401 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
4402 
4403 	if (!pci_priv)
4404 		return NULL;
4405 
4406 	return pci_priv->iommu_domain;
4407 }
4408 EXPORT_SYMBOL(cnss_smmu_get_domain);
4409 
4410 int cnss_smmu_map(struct device *dev,
4411 		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
4412 {
4413 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
4414 	struct cnss_plat_data *plat_priv;
4415 	unsigned long iova;
4416 	size_t len;
4417 	int ret = 0;
4418 	int flag = IOMMU_READ | IOMMU_WRITE;
4419 	struct pci_dev *root_port;
4420 	struct device_node *root_of_node;
4421 	bool dma_coherent = false;
4422 
4423 	if (!pci_priv)
4424 		return -ENODEV;
4425 
4426 	if (!iova_addr) {
4427 		cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
4428 			    &paddr, size);
4429 		return -EINVAL;
4430 	}
4431 
4432 	plat_priv = pci_priv->plat_priv;
4433 
4434 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
4435 	iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE);
4436 
4437 	if (pci_priv->iommu_geometry &&
4438 	    iova >= pci_priv->smmu_iova_ipa_start +
4439 		    pci_priv->smmu_iova_ipa_len) {
4440 		cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
4441 			    iova,
4442 			    &pci_priv->smmu_iova_ipa_start,
4443 			    pci_priv->smmu_iova_ipa_len);
4444 		return -ENOMEM;
4445 	}
4446 
4447 	if (!test_bit(DISABLE_IO_COHERENCY,
4448 		      &plat_priv->ctrl_params.quirks)) {
4449 		root_port = pcie_find_root_port(pci_priv->pci_dev);
4450 		if (!root_port) {
4451 			cnss_pr_err("Root port is null, so dma_coherent is disabled\n");
4452 		} else {
4453 			root_of_node = root_port->dev.of_node;
4454 			if (root_of_node && root_of_node->parent) {
4455 				dma_coherent =
4456 				    of_property_read_bool(root_of_node->parent,
4457 							  "dma-coherent");
4458 			cnss_pr_dbg("dma-coherent is %s\n",
4459 				    dma_coherent ? "enabled" : "disabled");
4460 			if (dma_coherent)
4461 				flag |= IOMMU_CACHE;
4462 			}
4463 		}
4464 	}
4465 
4466 	cnss_pr_dbg("IOMMU map: iova %lx, len %zu\n", iova, len);
4467 
4468 	ret = iommu_map(pci_priv->iommu_domain, iova,
4469 			rounddown(paddr, PAGE_SIZE), len, flag);
4470 	if (ret) {
4471 		cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
4472 		return ret;
4473 	}
4474 
4475 	pci_priv->smmu_iova_ipa_current = iova + len;
4476 	*iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
4477 	cnss_pr_dbg("IOMMU map: iova_addr %lx\n", *iova_addr);
4478 
4479 	return 0;
4480 }
4481 EXPORT_SYMBOL(cnss_smmu_map);
4482 
4483 int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
4484 {
4485 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
4486 	unsigned long iova;
4487 	size_t unmapped;
4488 	size_t len;
4489 
4490 	if (!pci_priv)
4491 		return -ENODEV;
4492 
4493 	iova = rounddown(iova_addr, PAGE_SIZE);
4494 	len = roundup(size + iova_addr - iova, PAGE_SIZE);
4495 
4496 	if (iova >= pci_priv->smmu_iova_ipa_start +
4497 		    pci_priv->smmu_iova_ipa_len) {
4498 		cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
4499 			    iova,
4500 			    &pci_priv->smmu_iova_ipa_start,
4501 			    pci_priv->smmu_iova_ipa_len);
4502 		return -ENOMEM;
4503 	}
4504 
4505 	cnss_pr_dbg("IOMMU unmap: iova %lx, len %zu\n", iova, len);
4506 
4507 	unmapped = iommu_unmap(pci_priv->iommu_domain, iova, len);
4508 	if (unmapped != len) {
4509 		cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n",
4510 			    unmapped, len);
4511 		return -EINVAL;
4512 	}
4513 
4514 	pci_priv->smmu_iova_ipa_current = iova;
4515 	return 0;
4516 }
4517 EXPORT_SYMBOL(cnss_smmu_unmap);
4518 
/* Fill @info with SoC/chip/board/firmware identification cached by the
 * platform driver, plus the BAR mapping. Returns -ENODEV when driver data
 * is missing.
 */
int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	/* BAR virtual mapping and its physical base address. */
	info->va = pci_priv->bar;
	info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
	info->chip_id = plat_priv->chip_info.chip_id;
	info->chip_family = plat_priv->chip_info.chip_family;
	info->board_id = plat_priv->board_info.board_id;
	info->soc_id = plat_priv->soc_info.soc_id;
	info->fw_version = plat_priv->fw_version_info.fw_version;
	strlcpy(info->fw_build_timestamp,
		plat_priv->fw_version_info.fw_build_timestamp,
		sizeof(info->fw_build_timestamp));
	memcpy(&info->device_version, &plat_priv->device_version,
	       sizeof(info->device_version));
	memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info,
	       sizeof(info->dev_mem_info));
	memcpy(&info->fw_build_id, &plat_priv->fw_build_id,
	       sizeof(info->fw_build_id));

	return 0;
}
EXPORT_SYMBOL(cnss_get_soc_info);
4551 
4552 static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
4553 {
4554 	int ret = 0;
4555 	struct pci_dev *pci_dev = pci_priv->pci_dev;
4556 	int num_vectors;
4557 	struct cnss_msi_config *msi_config;
4558 
4559 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
4560 		return 0;
4561 
4562 	if (cnss_pci_is_force_one_msi(pci_priv)) {
4563 		ret = cnss_pci_get_one_msi_assignment(pci_priv);
4564 		cnss_pr_dbg("force one msi\n");
4565 	} else {
4566 		ret = cnss_pci_get_msi_assignment(pci_priv);
4567 	}
4568 	if (ret) {
4569 		cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
4570 		goto out;
4571 	}
4572 
4573 	msi_config = pci_priv->msi_config;
4574 	if (!msi_config) {
4575 		cnss_pr_err("msi_config is NULL!\n");
4576 		ret = -EINVAL;
4577 		goto out;
4578 	}
4579 
4580 	num_vectors = pci_alloc_irq_vectors(pci_dev,
4581 					    msi_config->total_vectors,
4582 					    msi_config->total_vectors,
4583 					    PCI_IRQ_MSI);
4584 	if ((num_vectors != msi_config->total_vectors) &&
4585 	    !cnss_pci_fallback_one_msi(pci_priv, &num_vectors)) {
4586 		cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d",
4587 			    msi_config->total_vectors, num_vectors);
4588 		if (num_vectors >= 0)
4589 			ret = -EINVAL;
4590 		goto reset_msi_config;
4591 	}
4592 
4593 	if (cnss_pci_config_msi_data(pci_priv)) {
4594 		ret = -EINVAL;
4595 		goto free_msi_vector;
4596 	}
4597 
4598 	return 0;
4599 
4600 free_msi_vector:
4601 	pci_free_irq_vectors(pci_priv->pci_dev);
4602 reset_msi_config:
4603 	pci_priv->msi_config = NULL;
4604 out:
4605 	return ret;
4606 }
4607 
/* Free the MSI vectors allocated by cnss_pci_enable_msi(). QCA6174 never
 * allocates vectors there, so it is skipped here too.
 */
static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return;

	pci_free_irq_vectors(pci_priv->pci_dev);
}
4615 
4616 int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
4617 				 int *num_vectors, u32 *user_base_data,
4618 				 u32 *base_vector)
4619 {
4620 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
4621 	struct cnss_msi_config *msi_config;
4622 	int idx;
4623 
4624 	if (!pci_priv)
4625 		return -ENODEV;
4626 
4627 	msi_config = pci_priv->msi_config;
4628 	if (!msi_config) {
4629 		cnss_pr_err("MSI is not supported.\n");
4630 		return -EINVAL;
4631 	}
4632 
4633 	for (idx = 0; idx < msi_config->total_users; idx++) {
4634 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
4635 			*num_vectors = msi_config->users[idx].num_vectors;
4636 			*user_base_data = msi_config->users[idx].base_vector
4637 				+ pci_priv->msi_ep_base_data;
4638 			*base_vector = msi_config->users[idx].base_vector;
4639 			/*Add only single print for each user*/
4640 			if (print_optimize.msi_log_chk[idx]++)
4641 				goto skip_print;
4642 
4643 			cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
4644 				    user_name, *num_vectors, *user_base_data,
4645 				    *base_vector);
4646 skip_print:
4647 			return 0;
4648 		}
4649 	}
4650 
4651 	cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
4652 
4653 	return -EINVAL;
4654 }
4655 EXPORT_SYMBOL(cnss_get_user_msi_assignment);
4656 
/* Translate an MSI vector index into its Linux IRQ number (negative errno
 * on failure, as returned by pci_irq_vector()).
 */
int cnss_get_msi_irq(struct device *dev, unsigned int vector)
{
	int irq_num = pci_irq_vector(to_pci_dev(dev), vector);

	cnss_pr_dbg("Get IRQ number %d for vector index %d\n", irq_num, vector);

	return irq_num;
}
EXPORT_SYMBOL(cnss_get_msi_irq);
4668 
4669 bool cnss_is_one_msi(struct device *dev)
4670 {
4671 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
4672 
4673 	if (!pci_priv)
4674 		return false;
4675 
4676 	return cnss_pci_is_one_msi(pci_priv);
4677 }
4678 EXPORT_SYMBOL(cnss_is_one_msi);
4679 
4680 void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
4681 			  u32 *msi_addr_high)
4682 {
4683 	struct pci_dev *pci_dev = to_pci_dev(dev);
4684 	u16 control;
4685 
4686 	pci_read_config_word(pci_dev, pci_dev->msi_cap + PCI_MSI_FLAGS,
4687 			     &control);
4688 	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
4689 			      msi_addr_low);
4690 	/* Return MSI high address only when device supports 64-bit MSI */
4691 	if (control & PCI_MSI_FLAGS_64BIT)
4692 		pci_read_config_dword(pci_dev,
4693 				      pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
4694 				      msi_addr_high);
4695 	else
4696 		*msi_addr_high = 0;
4697 	 /*Add only single print as the address is constant*/
4698 	 if (!print_optimize.msi_addr_chk++)
4699 		cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
4700 			    *msi_addr_low, *msi_addr_high);
4701 }
4702 EXPORT_SYMBOL(cnss_get_msi_address);
4703 
/* Return the MSI endpoint data value for the WAKE vector, or 0 when no
 * WAKE assignment exists.
 *
 * NOTE(review): on !pci_priv this returns -ENODEV through a u32 return
 * type, which a caller sees as a large non-zero value — confirm callers
 * only ever test the result against 0.
 */
u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
{
	int ret, num_vectors;
	u32 user_base_data, base_vector;

	if (!pci_priv)
		return -ENODEV;

	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
					   WAKE_MSI_NAME, &num_vectors,
					   &user_base_data, &base_vector);
	if (ret) {
		cnss_pr_err("WAKE MSI is not valid\n");
		return 0;
	}

	return user_base_data;
}
4722 
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
/* pci_set_dma_mask()/pci_set_consistent_dma_mask() were removed in v5.18;
 * use the generic dma_set_mask()/dma_set_coherent_mask() on the PCI
 * device's struct device instead.
 */
static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
{
	return dma_set_mask(&pci_dev->dev, mask);
}

static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
	u64 mask)
{
	return dma_set_coherent_mask(&pci_dev->dev, mask);
}
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
/* Older kernels still provide the PCI-specific DMA mask wrappers. */
static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
{
	return pci_set_dma_mask(pci_dev, mask);
}

static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
	u64 mask)
{
	return pci_set_consistent_dma_mask(pci_dev, mask);
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
4746 
4747 static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
4748 {
4749 	int ret = 0;
4750 	struct pci_dev *pci_dev = pci_priv->pci_dev;
4751 	u16 device_id;
4752 
4753 	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
4754 	if (device_id != pci_priv->pci_device_id->device)  {
4755 		cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
4756 			    device_id, pci_priv->pci_device_id->device);
4757 		ret = -EIO;
4758 		goto out;
4759 	}
4760 
4761 	ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
4762 	if (ret) {
4763 		pr_err("Failed to assign PCI resource, err = %d\n", ret);
4764 		goto out;
4765 	}
4766 
4767 	ret = pci_enable_device(pci_dev);
4768 	if (ret) {
4769 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
4770 		goto out;
4771 	}
4772 
4773 	ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
4774 	if (ret) {
4775 		cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
4776 		goto disable_device;
4777 	}
4778 
4779 	switch (device_id) {
4780 	case QCA6174_DEVICE_ID:
4781 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
4782 		break;
4783 	case QCA6390_DEVICE_ID:
4784 	case QCA6490_DEVICE_ID:
4785 	case KIWI_DEVICE_ID:
4786 	case MANGO_DEVICE_ID:
4787 		pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
4788 		break;
4789 	default:
4790 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
4791 		break;
4792 	}
4793 
4794 	cnss_pr_dbg("Set PCI DMA MASK (0x%llx)\n", pci_priv->dma_bit_mask);
4795 
4796 	ret = cnss_pci_set_dma_mask(pci_dev, pci_priv->dma_bit_mask);
4797 	if (ret) {
4798 		cnss_pr_err("Failed to set PCI DMA mask, err = %d\n", ret);
4799 		goto release_region;
4800 	}
4801 
4802 	ret = cnss_pci_set_coherent_dma_mask(pci_dev, pci_priv->dma_bit_mask);
4803 	if (ret) {
4804 		cnss_pr_err("Failed to set PCI coherent DMA mask, err = %d\n",
4805 			    ret);
4806 		goto release_region;
4807 	}
4808 
4809 	pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
4810 	if (!pci_priv->bar) {
4811 		cnss_pr_err("Failed to do PCI IO map!\n");
4812 		ret = -EIO;
4813 		goto release_region;
4814 	}
4815 
4816 	/* Save default config space without BME enabled */
4817 	pci_save_state(pci_dev);
4818 	pci_priv->default_state = pci_store_saved_state(pci_dev);
4819 
4820 	pci_set_master(pci_dev);
4821 
4822 	return 0;
4823 
4824 release_region:
4825 	pci_release_region(pci_dev, PCI_BAR_NUM);
4826 disable_device:
4827 	pci_disable_device(pci_dev);
4828 out:
4829 	return ret;
4830 }
4831 
/* Tear down what cnss_pci_enable_bus() set up: disable bus mastering,
 * free saved config-space copies, unmap the BAR, release the region, and
 * disable the device if it is still enabled.
 */
static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	pci_clear_master(pci_dev);
	/* Both saved-state copies are loaded-and-freed; the pointers are
	 * reset to NULL by pci_load_and_free_saved_state().
	 */
	pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
	pci_load_and_free_saved_state(pci_dev, &pci_priv->default_state);

	if (pci_priv->bar) {
		pci_iounmap(pci_dev, pci_priv->bar);
		pci_priv->bar = NULL;
	}

	pci_release_region(pci_dev, PCI_BAR_NUM);
	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}
4849 
4850 static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
4851 {
4852 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4853 	int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
4854 	gfp_t gfp = GFP_KERNEL;
4855 	u32 reg_offset;
4856 
4857 	if (in_interrupt() || irqs_disabled())
4858 		gfp = GFP_ATOMIC;
4859 
4860 	if (!plat_priv->qdss_reg) {
4861 		plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
4862 						   sizeof(*plat_priv->qdss_reg)
4863 						   * array_size, gfp);
4864 		if (!plat_priv->qdss_reg)
4865 			return;
4866 	}
4867 
4868 	cnss_pr_dbg("Start to dump qdss registers\n");
4869 
4870 	for (i = 0; qdss_csr[i].name; i++) {
4871 		reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
4872 		if (cnss_pci_reg_read(pci_priv, reg_offset,
4873 				      &plat_priv->qdss_reg[i]))
4874 			return;
4875 		cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
4876 			    plat_priv->qdss_reg[i]);
4877 	}
4878 }
4879 
/* Dump the copy-engine registers for @ce. Only QCA6390/QCA6490 have
 * known register bases here; other targets are silently skipped. The
 * ce_src/ce_dst/ce_cmn tables are NULL-name terminated; a failed read
 * aborts the dump early.
 */
static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
				 enum cnss_ce_index ce)
{
	int i;
	u32 ce_base = ce * CE_REG_INTERVAL;
	u32 reg_offset, src_ring_base, dst_ring_base, cmn_base, val;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		src_ring_base = QCA6390_CE_SRC_RING_REG_BASE;
		dst_ring_base = QCA6390_CE_DST_RING_REG_BASE;
		cmn_base = QCA6390_CE_COMMON_REG_BASE;
		break;
	case QCA6490_DEVICE_ID:
		src_ring_base = QCA6490_CE_SRC_RING_REG_BASE;
		dst_ring_base = QCA6490_CE_DST_RING_REG_BASE;
		cmn_base = QCA6490_CE_COMMON_REG_BASE;
		break;
	default:
		return;
	}

	switch (ce) {
	case CNSS_CE_09:
	case CNSS_CE_10:
		/* Per-CE dump: source ring registers, then destination. */
		for (i = 0; ce_src[i].name; i++) {
			reg_offset = src_ring_base + ce_base + ce_src[i].offset;
			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
				return;
			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
				    ce, ce_src[i].name, reg_offset, val);
		}

		for (i = 0; ce_dst[i].name; i++) {
			reg_offset = dst_ring_base + ce_base + ce_dst[i].offset;
			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
				return;
			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
				    ce, ce_dst[i].name, reg_offset, val);
		}
		break;
	case CNSS_CE_COMMON:
		/* Registers shared by all copy engines. */
		for (i = 0; ce_cmn[i].name; i++) {
			reg_offset = cmn_base  + ce_cmn[i].offset;
			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
				return;
			cnss_pr_dbg("CE_COMMON_%s[0x%x] = 0x%x\n",
				    ce_cmn[i].name, reg_offset, val);
		}
		break;
	default:
		cnss_pr_err("Unsupported CE[%d] registers dump\n", ce);
	}
}
4934 
/* Dump MHI, SOC scratch, and copy-engine registers for debugging. Skipped
 * entirely when the PCI link is down (registers would be unreadable).
 */
static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv)
{
	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Start to dump debug registers\n");

	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);
	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON);
	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09);
	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
}
4948 
/* Assert the host SOL GPIO to ask firmware to retry RDDM. Returns -EINVAL
 * when the GPIO is already asserted (nothing new to signal).
 */
static int cnss_pci_assert_host_sol(struct cnss_pci_data *pci_priv)
{
	if (cnss_get_host_sol_value(pci_priv->plat_priv))
		return -EINVAL;

	cnss_pr_dbg("Assert host SOL GPIO to retry RDDM, expecting link down\n");
	cnss_set_host_sol_value(pci_priv->plat_priv, 1);

	return 0;
}
4959 
/* Dump MHI debug registers (only while the link is up) plus SOC scratch,
 * misc, and shadow registers.
 */
static void cnss_pci_mhi_reg_dump(struct cnss_pci_data *pci_priv)
{
	if (!cnss_pci_check_link_status(pci_priv))
		cnss_mhi_debug_reg_dump(pci_priv);

	cnss_pci_soc_scratch_reg_dump(pci_priv);
	cnss_pci_dump_misc_reg(pci_priv);
	cnss_pci_dump_shadow_reg(pci_priv);
}
4969 
/* Force a firmware assert so the device produces an RDDM (RAM dump) for
 * post-mortem. Resumes the bus, dumps registers (deferred until after the
 * trigger on SMMU faults, to keep the fault context intact), triggers
 * RDDM via MHI, and arms the RDDM timeout timer. A link that is already
 * down is routed to link-down recovery instead.
 */
int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
{
	int ret;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	/* Nothing to assert if MHI is off or already powering down. */
	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
	    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
		return -EINVAL;

	cnss_auto_resume(&pci_priv->pci_dev->dev);

	/* On SMMU faults the register dump is postponed until after the
	 * RDDM trigger (see below).
	 */
	if (!pci_priv->is_smmu_fault)
		cnss_pci_mhi_reg_dump(pci_priv);

	/* If link is still down here, directly trigger link down recovery */
	ret = cnss_pci_check_link_status(pci_priv);
	if (ret) {
		cnss_pci_link_down(&pci_priv->pci_dev->dev);
		return 0;
	}

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
	if (ret) {
		if (pci_priv->is_smmu_fault) {
			cnss_pci_mhi_reg_dump(pci_priv);
			pci_priv->is_smmu_fault = false;
		}
		/* MHI may have been powered off concurrently; that makes
		 * the RDDM failure expected and ignorable.
		 */
		if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
		    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
			cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
			return 0;
		}
		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
		/* Try the SOL GPIO as a second RDDM path before giving up
		 * and scheduling generic recovery.
		 */
		if (!cnss_pci_assert_host_sol(pci_priv))
			return 0;
		cnss_pci_dump_debug_reg(pci_priv);
		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
				       CNSS_REASON_DEFAULT);
		return ret;
	}

	if (pci_priv->is_smmu_fault) {
		cnss_pci_mhi_reg_dump(pci_priv);
		pci_priv->is_smmu_fault = false;
	}

	/* Arm the timeout that fires if the RDDM never arrives. */
	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
		mod_timer(&pci_priv->dev_rddm_timer,
			  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
	}

	return 0;
}
5030 
5031 static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
5032 				  struct cnss_dump_seg *dump_seg,
5033 				  enum cnss_fw_dump_type type, int seg_no,
5034 				  void *va, dma_addr_t dma, size_t size)
5035 {
5036 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5037 	struct device *dev = &pci_priv->pci_dev->dev;
5038 	phys_addr_t pa;
5039 
5040 	dump_seg->address = dma;
5041 	dump_seg->v_address = va;
5042 	dump_seg->size = size;
5043 	dump_seg->type = type;
5044 
5045 	cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
5046 		    seg_no, va, &dma, size);
5047 
5048 	if (cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
5049 		return;
5050 
5051 	cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size);
5052 }
5053 
5054 static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv,
5055 				     struct cnss_dump_seg *dump_seg,
5056 				     enum cnss_fw_dump_type type, int seg_no,
5057 				     void *va, dma_addr_t dma, size_t size)
5058 {
5059 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5060 	struct device *dev = &pci_priv->pci_dev->dev;
5061 	phys_addr_t pa;
5062 
5063 	cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS);
5064 	cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size);
5065 }
5066 
5067 int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv,
5068 				enum cnss_driver_status status, void *data)
5069 {
5070 	struct cnss_uevent_data uevent_data;
5071 	struct cnss_wlan_driver *driver_ops;
5072 
5073 	driver_ops = pci_priv->driver_ops;
5074 	if (!driver_ops || !driver_ops->update_event) {
5075 		cnss_pr_dbg("Hang event driver ops is NULL\n");
5076 		return -EINVAL;
5077 	}
5078 
5079 	cnss_pr_dbg("Calling driver uevent: %d\n", status);
5080 
5081 	uevent_data.status = status;
5082 	uevent_data.data = data;
5083 
5084 	return driver_ops->update_event(pci_priv->pci_dev, &uevent_data);
5085 }
5086 
5087 static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv)
5088 {
5089 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5090 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
5091 	struct cnss_hang_event hang_event;
5092 	void *hang_data_va = NULL;
5093 	u64 offset = 0;
5094 	u16 length = 0;
5095 	int i = 0;
5096 
5097 	if (!fw_mem || !plat_priv->fw_mem_seg_len)
5098 		return;
5099 
5100 	memset(&hang_event, 0, sizeof(hang_event));
5101 	switch (pci_priv->device_id) {
5102 	case QCA6390_DEVICE_ID:
5103 		offset = HST_HANG_DATA_OFFSET;
5104 		length = HANG_DATA_LENGTH;
5105 		break;
5106 	case QCA6490_DEVICE_ID:
5107 		/* Fallback to hard-coded values if hang event params not
5108 		 * present in QMI. Once all the firmware branches have the
5109 		 * fix to send params over QMI, this can be removed.
5110 		 */
5111 		if (plat_priv->hang_event_data_len) {
5112 			offset = plat_priv->hang_data_addr_offset;
5113 			length = plat_priv->hang_event_data_len;
5114 		} else {
5115 			offset = HSP_HANG_DATA_OFFSET;
5116 			length = HANG_DATA_LENGTH;
5117 		}
5118 		break;
5119 	case KIWI_DEVICE_ID:
5120 	case MANGO_DEVICE_ID:
5121 		offset = plat_priv->hang_data_addr_offset;
5122 		length = plat_priv->hang_event_data_len;
5123 		break;
5124 	default:
5125 		cnss_pr_err("Skip Hang Event Data as unsupported Device ID received: %d\n",
5126 			    pci_priv->device_id);
5127 		return;
5128 	}
5129 
5130 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
5131 		if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 &&
5132 		    fw_mem[i].va) {
5133 			/* The offset must be < (fw_mem size- hangdata length) */
5134 			if (!(offset <= fw_mem[i].size - length))
5135 				goto exit;
5136 
5137 			hang_data_va = fw_mem[i].va + offset;
5138 			hang_event.hang_event_data = kmemdup(hang_data_va,
5139 							     length,
5140 							     GFP_ATOMIC);
5141 			if (!hang_event.hang_event_data) {
5142 				cnss_pr_dbg("Hang data memory alloc failed\n");
5143 				return;
5144 			}
5145 			hang_event.hang_event_data_len = length;
5146 			break;
5147 		}
5148 	}
5149 
5150 	cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event);
5151 
5152 	kfree(hang_event.hang_event_data);
5153 	hang_event.hang_event_data = NULL;
5154 	return;
5155 exit:
5156 	cnss_pr_dbg("Invalid hang event params, offset:0x%x, length:0x%x\n",
5157 		    plat_priv->hang_data_addr_offset,
5158 		    plat_priv->hang_event_data_len);
5159 }
5160 
/**
 * cnss_pci_collect_dump_info() - Collect FW image/RDDM dumps after a crash
 * @pci_priv: driver PCI bus context pointer
 * @in_panic: true when invoked from the kernel panic path
 *
 * Downloads the RDDM image from the device over MHI, then records FW image,
 * RDDM and (contiguous) remote-heap segments into the v2 ramdump table so
 * user space can collect them. Signals rddm_complete when done, even if the
 * dump table was not set up.
 */
void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_dump_data *dump_data =
		&plat_priv->ramdump_info_v2.dump_data;
	struct cnss_dump_seg *dump_seg =
		plat_priv->ramdump_info_v2.dump_data_vaddr;
	struct image_info *fw_image, *rddm_image;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	int ret, i, j;

	/* Send the hang-data uevent only for a device-error notification
	 * outside of panic context.
	 */
	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
	    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
		cnss_pci_send_hang_event(pci_priv);

	if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
		cnss_pr_dbg("RAM dump is already collected, skip\n");
		return;
	}

	if (!cnss_is_device_powered_on(plat_priv)) {
		cnss_pr_dbg("Device is already powered off, skip\n");
		return;
	}

	if (!in_panic) {
		/* Normal path: take the bus lock and make sure the link is
		 * up, resuming the bus if it is merely suspended (-EACCES).
		 */
		mutex_lock(&pci_priv->bus_lock);
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret) {
			if (ret != -EACCES) {
				mutex_unlock(&pci_priv->bus_lock);
				return;
			}
			if (cnss_pci_resume_bus(pci_priv)) {
				mutex_unlock(&pci_priv->bus_lock);
				return;
			}
		}
		mutex_unlock(&pci_priv->bus_lock);
	} else {
		/* Panic path: no locking (we may not be able to sleep). */
		if (cnss_pci_check_link_status(pci_priv))
			return;
		/* Inside panic handler, reduce timeout for RDDM to avoid
		 * unnecessary hypervisor watchdog bite.
		 */
		pci_priv->mhi_ctrl->timeout_ms /= 2;
	}

	/* Snapshot debug registers before pulling the RDDM image */
	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);
	cnss_pci_dump_misc_reg(pci_priv);
	cnss_pci_dump_shadow_reg(pci_priv);

	cnss_rddm_trigger_debug(pci_priv);
	ret = mhi_download_rddm_image(pci_priv->mhi_ctrl, in_panic);
	if (ret) {
		cnss_fatal_err("Failed to download RDDM image, err = %d\n",
			       ret);
		/* If SOL assertion is available, it kicks the device and a
		 * later retry may succeed; otherwise dump what we can.
		 */
		if (!cnss_pci_assert_host_sol(pci_priv))
			return;
		cnss_rddm_trigger_check(pci_priv);
		cnss_pci_dump_debug_reg(pci_priv);
		return;
	}
	cnss_rddm_trigger_check(pci_priv);
	fw_image = pci_priv->mhi_ctrl->fbc_image;
	rddm_image = pci_priv->mhi_ctrl->rddm_image;
	dump_data->nentries = 0;

	if (plat_priv->qdss_mem_seg_len)
		cnss_pci_dump_qdss_reg(pci_priv);
	cnss_mhi_dump_sfr(pci_priv);

	if (!dump_seg) {
		cnss_pr_warn("FW image dump collection not setup");
		goto skip_dump;
	}

	cnss_pr_dbg("Collect FW image dump segment, nentries %d\n",
		    fw_image->entries);

	for (i = 0; i < fw_image->entries; i++) {
		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
				      fw_image->mhi_buf[i].buf,
				      fw_image->mhi_buf[i].dma_addr,
				      fw_image->mhi_buf[i].len);
		dump_seg++;
	}

	dump_data->nentries += fw_image->entries;

	cnss_pr_dbg("Collect RDDM image dump segment, nentries %d\n",
		    rddm_image->entries);

	for (i = 0; i < rddm_image->entries; i++) {
		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
				      rddm_image->mhi_buf[i].buf,
				      rddm_image->mhi_buf[i].dma_addr,
				      rddm_image->mhi_buf[i].len);
		dump_seg++;
	}

	dump_data->nentries += rddm_image->entries;

	/* j counts only the remote-heap segments actually added, so the
	 * remove path (cnss_pci_clear_dump_info) can mirror this walk.
	 */
	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
			if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
				cnss_pr_dbg("Collect remote heap dump segment\n");
				cnss_pci_add_dump_seg(pci_priv, dump_seg,
						      CNSS_FW_REMOTE_HEAP, j,
						      fw_mem[i].va,
						      fw_mem[i].pa,
						      fw_mem[i].size);
				dump_seg++;
				dump_data->nentries++;
				j++;
			} else {
				cnss_pr_dbg("Skip remote heap dumps as it is non-contiguous\n");
			}
		}
	}

	if (dump_data->nentries > 0)
		plat_priv->ramdump_info_v2.dump_data_valid = true;

	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);

skip_dump:
	complete(&plat_priv->rddm_complete);
}
5291 
/**
 * cnss_pci_clear_dump_info() - Tear down the dump table built at crash time
 * @pci_priv: driver PCI bus context pointer
 *
 * Walks the same segments in the same order as cnss_pci_collect_dump_info()
 * (FW image, RDDM image, then contiguous remote-heap segments), removing each
 * from the minidump table, then invalidates the v2 ramdump data.
 */
void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_dump_seg *dump_seg =
		plat_priv->ramdump_info_v2.dump_data_vaddr;
	struct image_info *fw_image, *rddm_image;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	int i, j;

	if (!dump_seg)
		return;

	fw_image = pci_priv->mhi_ctrl->fbc_image;
	rddm_image = pci_priv->mhi_ctrl->rddm_image;

	for (i = 0; i < fw_image->entries; i++) {
		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
					 fw_image->mhi_buf[i].buf,
					 fw_image->mhi_buf[i].dma_addr,
					 fw_image->mhi_buf[i].len);
		dump_seg++;
	}

	for (i = 0; i < rddm_image->entries; i++) {
		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
					 rddm_image->mhi_buf[i].buf,
					 rddm_image->mhi_buf[i].dma_addr,
					 rddm_image->mhi_buf[i].len);
		dump_seg++;
	}

	/* Only contiguous DDR segments were added, so only those are removed;
	 * j mirrors the segment numbering used by the add path.
	 */
	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR &&
		    (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
			cnss_pci_remove_dump_seg(pci_priv, dump_seg,
						 CNSS_FW_REMOTE_HEAP, j,
						 fw_mem[i].va, fw_mem[i].pa,
						 fw_mem[i].size);
			dump_seg++;
			j++;
		}
	}

	plat_priv->ramdump_info_v2.dump_data.nentries = 0;
	plat_priv->ramdump_info_v2.dump_data_valid = false;
}
5338 
5339 void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv)
5340 {
5341 	if (!pci_priv)
5342 		return;
5343 
5344 	cnss_device_crashed(&pci_priv->pci_dev->dev);
5345 }
5346 
5347 static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl)
5348 {
5349 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
5350 
5351 	return cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_MHI);
5352 }
5353 
5354 static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl)
5355 {
5356 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
5357 
5358 	cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_MHI);
5359 }
5360 
5361 void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
5362 				 char *prefix_name, char *name)
5363 {
5364 	struct cnss_plat_data *plat_priv;
5365 
5366 	if (!pci_priv)
5367 		return;
5368 
5369 	plat_priv = pci_priv->plat_priv;
5370 
5371 	if (!plat_priv->use_fw_path_with_prefix) {
5372 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
5373 		return;
5374 	}
5375 
5376 	switch (pci_priv->device_id) {
5377 	case QCA6390_DEVICE_ID:
5378 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
5379 			  QCA6390_PATH_PREFIX "%s", name);
5380 		break;
5381 	case QCA6490_DEVICE_ID:
5382 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
5383 			  QCA6490_PATH_PREFIX "%s", name);
5384 		break;
5385 	case KIWI_DEVICE_ID:
5386 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
5387 			  KIWI_PATH_PREFIX "%s", name);
5388 		break;
5389 	case MANGO_DEVICE_ID:
5390 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
5391 			  MANGO_PATH_PREFIX "%s", name);
5392 		break;
5393 	default:
5394 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
5395 		break;
5396 	}
5397 
5398 	cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
5399 }
5400 
5401 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
5402 {
5403 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5404 
5405 	switch (pci_priv->device_id) {
5406 	case QCA6390_DEVICE_ID:
5407 		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
5408 			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
5409 				    pci_priv->device_id,
5410 				    plat_priv->device_version.major_version);
5411 			return -EINVAL;
5412 		}
5413 		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
5414 					    FW_V2_FILE_NAME);
5415 		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
5416 			 FW_V2_FILE_NAME);
5417 		break;
5418 	case QCA6490_DEVICE_ID:
5419 		switch (plat_priv->device_version.major_version) {
5420 		case FW_V2_NUMBER:
5421 				cnss_pci_add_fw_prefix_name(pci_priv,
5422 							    plat_priv->firmware_name,
5423 							    FW_V2_FILE_NAME);
5424 				snprintf(plat_priv->fw_fallback_name,
5425 					 MAX_FIRMWARE_NAME_LEN,
5426 					 FW_V2_FILE_NAME);
5427 			break;
5428 		default:
5429 			cnss_pci_add_fw_prefix_name(pci_priv,
5430 						    plat_priv->firmware_name,
5431 						    DEFAULT_FW_FILE_NAME);
5432 			snprintf(plat_priv->fw_fallback_name,
5433 				 MAX_FIRMWARE_NAME_LEN,
5434 				 DEFAULT_FW_FILE_NAME);
5435 			break;
5436 		}
5437 		break;
5438 	case KIWI_DEVICE_ID:
5439 	case MANGO_DEVICE_ID:
5440 		switch (plat_priv->device_version.major_version) {
5441 		case FW_V2_NUMBER:
5442 			/*
5443 			 * kiwiv2 using seprate fw binary for MM and FTM mode,
5444 			 * platform driver loads corresponding binary according
5445 			 * to current mode indicated by wlan driver. Otherwise
5446 			 * use default binary.
5447 			 * Mission mode using same binary name as before,
5448 			 * if seprate binary is not there, fall back to default.
5449 			 */
5450 			if (plat_priv->driver_mode == CNSS_MISSION) {
5451 				cnss_pci_add_fw_prefix_name(pci_priv,
5452 							    plat_priv->firmware_name,
5453 							    FW_V2_FILE_NAME);
5454 				cnss_pci_add_fw_prefix_name(pci_priv,
5455 							    plat_priv->fw_fallback_name,
5456 							    FW_V2_FILE_NAME);
5457 			} else if (plat_priv->driver_mode == CNSS_FTM) {
5458 				cnss_pci_add_fw_prefix_name(pci_priv,
5459 							    plat_priv->firmware_name,
5460 							    FW_V2_FTM_FILE_NAME);
5461 				cnss_pci_add_fw_prefix_name(pci_priv,
5462 							    plat_priv->fw_fallback_name,
5463 							    FW_V2_FILE_NAME);
5464 			} else {
5465 				/*
5466 				 * Since during cold boot calibration phase,
5467 				 * wlan driver has not registered, so default
5468 				 * fw binary will be used.
5469 				 */
5470 				cnss_pci_add_fw_prefix_name(pci_priv,
5471 							    plat_priv->firmware_name,
5472 							    FW_V2_FILE_NAME);
5473 				snprintf(plat_priv->fw_fallback_name,
5474 					 MAX_FIRMWARE_NAME_LEN,
5475 					 FW_V2_FILE_NAME);
5476 			}
5477 			break;
5478 		default:
5479 			cnss_pci_add_fw_prefix_name(pci_priv,
5480 						    plat_priv->firmware_name,
5481 						    DEFAULT_FW_FILE_NAME);
5482 			snprintf(plat_priv->fw_fallback_name,
5483 				 MAX_FIRMWARE_NAME_LEN,
5484 				 DEFAULT_FW_FILE_NAME);
5485 			break;
5486 		}
5487 		break;
5488 	default:
5489 		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
5490 					    DEFAULT_FW_FILE_NAME);
5491 		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
5492 			 DEFAULT_FW_FILE_NAME);
5493 		break;
5494 	}
5495 
5496 	cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
5497 		    plat_priv->firmware_name, plat_priv->fw_fallback_name);
5498 
5499 	return 0;
5500 }
5501 
5502 static char *cnss_mhi_notify_status_to_str(enum mhi_callback status)
5503 {
5504 	switch (status) {
5505 	case MHI_CB_IDLE:
5506 		return "IDLE";
5507 	case MHI_CB_EE_RDDM:
5508 		return "RDDM";
5509 	case MHI_CB_SYS_ERROR:
5510 		return "SYS_ERROR";
5511 	case MHI_CB_FATAL_ERROR:
5512 		return "FATAL_ERROR";
5513 	case MHI_CB_EE_MISSION_MODE:
5514 		return "MISSION_MODE";
5515 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
5516 	case MHI_CB_FALLBACK_IMG:
5517 		return "FW_FALLBACK";
5518 #endif
5519 	default:
5520 		return "UNKNOWN";
5521 	}
5522 };
5523 
5524 static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
5525 {
5526 	struct cnss_pci_data *pci_priv =
5527 		from_timer(pci_priv, t, dev_rddm_timer);
5528 	enum mhi_ee_type mhi_ee;
5529 
5530 	if (!pci_priv)
5531 		return;
5532 
5533 	cnss_fatal_err("Timeout waiting for RDDM notification\n");
5534 
5535 	if (!cnss_pci_assert_host_sol(pci_priv))
5536 		return;
5537 
5538 	mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
5539 	if (mhi_ee == MHI_EE_PBL)
5540 		cnss_pr_err("Unable to collect ramdumps due to abrupt reset\n");
5541 
5542 	if (mhi_ee == MHI_EE_RDDM) {
5543 		cnss_pr_info("Device MHI EE is RDDM, try to collect dump\n");
5544 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5545 				       CNSS_REASON_RDDM);
5546 	} else {
5547 		cnss_mhi_debug_reg_dump(pci_priv);
5548 		cnss_pci_soc_scratch_reg_dump(pci_priv);
5549 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5550 				       CNSS_REASON_TIMEOUT);
5551 	}
5552 }
5553 
5554 static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
5555 {
5556 	struct cnss_pci_data *pci_priv =
5557 		from_timer(pci_priv, t, boot_debug_timer);
5558 
5559 	if (!pci_priv)
5560 		return;
5561 
5562 	if (cnss_pci_check_link_status(pci_priv))
5563 		return;
5564 
5565 	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
5566 		return;
5567 
5568 	if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
5569 		return;
5570 
5571 	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE))
5572 		return;
5573 
5574 	cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
5575 		    BOOT_DEBUG_TIMEOUT_MS / 1000);
5576 	cnss_mhi_debug_reg_dump(pci_priv);
5577 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5578 	cnss_pci_dump_bl_sram_mem(pci_priv);
5579 
5580 	mod_timer(&pci_priv->boot_debug_timer,
5581 		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
5582 }
5583 
/* Common handling for an MHI SYS_ERROR (also used for the device SOL IRQ):
 * mark the device as erred, stop the FW boot timer, arm the RDDM timeout
 * timer, and notify the WLAN driver that firmware is down.
 */
static int cnss_pci_handle_mhi_sys_err(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	/* Suppress QMI errors expected while the device is going down */
	cnss_ignore_qmi_failure(true);
	set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
	del_timer(&plat_priv->fw_boot_timer);
	/* If RDDM does not arrive within DEV_RDDM_TIMEOUT, the timer
	 * handler escalates to recovery.
	 */
	mod_timer(&pci_priv->dev_rddm_timer,
		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);

	return 0;
}
5597 
/* Device SOL IRQ is treated exactly like an MHI SYS_ERROR. */
int cnss_pci_handle_dev_sol_irq(struct cnss_pci_data *pci_priv)
{
	return cnss_pci_handle_mhi_sys_err(pci_priv);
}
5602 
/* MHI core status callback: translate MHI events into CNSS driver state
 * updates and, for error events, schedule recovery with a matching reason.
 */
static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl,
				   enum mhi_callback reason)
{
	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
	struct cnss_plat_data *plat_priv;
	enum cnss_recovery_reason cnss_reason;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL");
		return;
	}

	plat_priv = pci_priv->plat_priv;

	/* IDLE fires frequently; avoid log noise for it */
	if (reason != MHI_CB_IDLE)
		cnss_pr_dbg("MHI status cb is called with reason %s(%d)\n",
			    cnss_mhi_notify_status_to_str(reason), reason);

	/* Cases that 'return' need no recovery; cases that 'break' fall
	 * through to cnss_schedule_recovery() with cnss_reason set.
	 */
	switch (reason) {
	case MHI_CB_IDLE:
	case MHI_CB_EE_MISSION_MODE:
		return;
	case MHI_CB_FATAL_ERROR:
		cnss_ignore_qmi_failure(true);
		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
		del_timer(&plat_priv->fw_boot_timer);
		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
		cnss_reason = CNSS_REASON_DEFAULT;
		break;
	case MHI_CB_SYS_ERROR:
		/* Arms the RDDM timeout timer instead of recovering now */
		cnss_pci_handle_mhi_sys_err(pci_priv);
		return;
	case MHI_CB_EE_RDDM:
		cnss_ignore_qmi_failure(true);
		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
		del_timer(&plat_priv->fw_boot_timer);
		/* RDDM arrived; cancel the pending RDDM timeout */
		del_timer(&pci_priv->dev_rddm_timer);
		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
		cnss_reason = CNSS_REASON_RDDM;
		break;
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	case MHI_CB_FALLBACK_IMG:
		/* for kiwi_v2 binary fallback is used, skip path fallback here */
		if (!(pci_priv->device_id == KIWI_DEVICE_ID &&
		      plat_priv->device_version.major_version == FW_V2_NUMBER)) {
			plat_priv->use_fw_path_with_prefix = false;
			cnss_pci_update_fw_name(pci_priv);
		}
		return;
#endif
	default:
		cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
		return;
	}

	cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason);
}
5660 
5661 static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
5662 {
5663 	int ret, num_vectors, i;
5664 	u32 user_base_data, base_vector;
5665 	int *irq;
5666 	unsigned int msi_data;
5667 	bool is_one_msi = false;
5668 
5669 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5670 					   MHI_MSI_NAME, &num_vectors,
5671 					   &user_base_data, &base_vector);
5672 	if (ret)
5673 		return ret;
5674 
5675 	if (cnss_pci_is_one_msi(pci_priv)) {
5676 		is_one_msi = true;
5677 		num_vectors = cnss_pci_get_one_msi_mhi_irq_array_size(pci_priv);
5678 	}
5679 	cnss_pr_dbg("Number of assigned MSI for MHI is %d, base vector is %d\n",
5680 		    num_vectors, base_vector);
5681 
5682 	irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
5683 	if (!irq)
5684 		return -ENOMEM;
5685 
5686 	for (i = 0; i < num_vectors; i++) {
5687 		msi_data = base_vector;
5688 		if (!is_one_msi)
5689 			msi_data += i;
5690 		irq[i] = cnss_get_msi_irq(&pci_priv->pci_dev->dev, msi_data);
5691 	}
5692 
5693 	pci_priv->mhi_ctrl->irq = irq;
5694 	pci_priv->mhi_ctrl->nr_irqs = num_vectors;
5695 
5696 	return 0;
5697 }
5698 
5699 static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
5700 			     struct mhi_link_info *link_info)
5701 {
5702 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
5703 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5704 	int ret = 0;
5705 
5706 	cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
5707 		    link_info->target_link_speed,
5708 		    link_info->target_link_width);
5709 
5710 	/* It has to set target link speed here before setting link bandwidth
5711 	 * when device requests link speed change. This can avoid setting link
5712 	 * bandwidth getting rejected if requested link speed is higher than
5713 	 * current one.
5714 	 */
5715 	ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num,
5716 					  link_info->target_link_speed);
5717 	if (ret)
5718 		cnss_pr_err("Failed to set target link speed to 0x%x, err = %d\n",
5719 			    link_info->target_link_speed, ret);
5720 
5721 	ret = cnss_pci_set_link_bandwidth(pci_priv,
5722 					  link_info->target_link_speed,
5723 					  link_info->target_link_width);
5724 
5725 	if (ret) {
5726 		cnss_pr_err("Failed to set link bandwidth, err = %d\n", ret);
5727 		return ret;
5728 	}
5729 
5730 	pci_priv->def_link_speed = link_info->target_link_speed;
5731 	pci_priv->def_link_width = link_info->target_link_width;
5732 
5733 	return 0;
5734 }
5735 
5736 static int cnss_mhi_read_reg(struct mhi_controller *mhi_ctrl,
5737 			     void __iomem *addr, u32 *out)
5738 {
5739 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
5740 
5741 	u32 tmp = readl_relaxed(addr);
5742 
5743 	/* Unexpected value, query the link status */
5744 	if (PCI_INVALID_READ(tmp) &&
5745 	    cnss_pci_check_link_status(pci_priv))
5746 		return -EIO;
5747 
5748 	*out = tmp;
5749 
5750 	return 0;
5751 }
5752 
/* MHI register-write callback: plain relaxed MMIO write, no ordering
 * guarantees beyond what the MHI core requires.
 */
static void cnss_mhi_write_reg(struct mhi_controller *mhi_ctrl,
			       void __iomem *addr, u32 val)
{
	writel_relaxed(val, addr);
}
5758 
5759 static int cnss_get_mhi_soc_info(struct cnss_plat_data *plat_priv,
5760 				 struct mhi_controller *mhi_ctrl)
5761 {
5762 	int ret = 0;
5763 
5764 	ret = mhi_get_soc_info(mhi_ctrl);
5765 	if (ret)
5766 		goto exit;
5767 
5768 	plat_priv->device_version.family_number = mhi_ctrl->family_number;
5769 	plat_priv->device_version.device_number = mhi_ctrl->device_number;
5770 	plat_priv->device_version.major_version = mhi_ctrl->major_version;
5771 	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
5772 
5773 	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
5774 		    plat_priv->device_version.family_number,
5775 		    plat_priv->device_version.device_number,
5776 		    plat_priv->device_version.major_version,
5777 		    plat_priv->device_version.minor_version);
5778 
5779 	/* Only keep lower 4 bits as real device major version */
5780 	plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK;
5781 
5782 exit:
5783 	return ret;
5784 }
5785 
/**
 * cnss_pci_register_mhi() - Allocate, configure and register the MHI
 * controller for this PCI device
 * @pci_priv: driver PCI bus context pointer
 *
 * Skipped entirely for QCA6174 (no MHI). On failure, unwinds in reverse
 * order of setup (controller unregister -> IRQ array free -> controller
 * free).
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct mhi_controller *mhi_ctrl;
	phys_addr_t bar_start;
	const struct mhi_controller_config *cnss_mhi_config =
						&cnss_mhi_config_default;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	mhi_ctrl = mhi_alloc_controller();
	if (!mhi_ctrl) {
		cnss_pr_err("Invalid MHI controller context\n");
		return -EINVAL;
	}

	pci_priv->mhi_ctrl = mhi_ctrl;
	mhi_ctrl->cntrl_dev = &pci_dev->dev;

	mhi_ctrl->fw_image = plat_priv->firmware_name;
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	mhi_ctrl->fallback_fw_image = plat_priv->fw_fallback_name;
#endif

	/* MHI register space lives in BAR0 */
	mhi_ctrl->regs = pci_priv->bar;
	mhi_ctrl->reg_len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
	bar_start = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
	cnss_pr_dbg("BAR starts at %pa, length is %x\n",
		    &bar_start, mhi_ctrl->reg_len);

	ret = cnss_pci_get_mhi_msi(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
		goto free_mhi_ctrl;
	}

	if (cnss_pci_is_one_msi(pci_priv))
		mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;

	/* IOVA range: bounded by SMMU stage-1 window when enabled,
	 * otherwise by the device's DMA mask.
	 */
	if (pci_priv->smmu_s1_enable) {
		mhi_ctrl->iova_start = pci_priv->smmu_iova_start;
		mhi_ctrl->iova_stop = pci_priv->smmu_iova_start +
					pci_priv->smmu_iova_len;
	} else {
		mhi_ctrl->iova_start = 0;
		mhi_ctrl->iova_stop = pci_priv->dma_bit_mask;
	}

	mhi_ctrl->status_cb = cnss_mhi_notify_status;
	mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
	mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
	mhi_ctrl->read_reg = cnss_mhi_read_reg;
	mhi_ctrl->write_reg = cnss_mhi_write_reg;

	mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
	if (!mhi_ctrl->rddm_size)
		mhi_ctrl->rddm_size = RAMDUMP_SIZE_DEFAULT;
	mhi_ctrl->sbl_size = SZ_512K;
	mhi_ctrl->seg_len = SZ_512K;
	mhi_ctrl->fbc_download = true;

	ret = cnss_get_mhi_soc_info(plat_priv, mhi_ctrl);
	if (ret)
		goto free_mhi_irq;

	/* Satellite config only supported on KIWI V2 and later chipset */
	if (plat_priv->device_id <= QCA6490_DEVICE_ID ||
	    (plat_priv->device_id == KIWI_DEVICE_ID &&
	     plat_priv->device_version.major_version == 1))
		cnss_mhi_config = &cnss_mhi_config_no_satellite;

	ret = mhi_register_controller(mhi_ctrl, cnss_mhi_config);
	if (ret) {
		cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
		goto free_mhi_irq;
	}

	/* MHI satellite driver only needs to connect when DRV is supported */
	if (cnss_pci_is_drv_supported(pci_priv))
		cnss_mhi_controller_set_base(pci_priv, bar_start);

	/* BW scale CB needs to be set after registering MHI per requirement */
	cnss_mhi_controller_set_bw_scale_cb(pci_priv, cnss_mhi_bw_scale);

	/* Needs soc info from cnss_get_mhi_soc_info() above */
	ret = cnss_pci_update_fw_name(pci_priv);
	if (ret)
		goto unreg_mhi;

	return 0;

unreg_mhi:
	mhi_unregister_controller(mhi_ctrl);
free_mhi_irq:
	kfree(mhi_ctrl->irq);
free_mhi_ctrl:
	mhi_free_controller(mhi_ctrl);

	return ret;
}
5888 
/* Tear down the MHI controller registered by cnss_pci_register_mhi().
 * No-op for QCA6174 (which never registers MHI). Order matters: unregister
 * from the MHI bus first, then free the IRQ array and the controller.
 */
static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
{
	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return;

	mhi_unregister_controller(mhi_ctrl);
	kfree(mhi_ctrl->irq);
	mhi_ctrl->irq = NULL;
	mhi_free_controller(mhi_ctrl);
	pci_priv->mhi_ctrl = NULL;
}
5902 
/* Select the per-chip register access sequences used by the misc-register
 * dump paths, and perform any chip-specific one-time register setup.
 * Chips not listed here get no misc-register configuration.
 */
static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
{
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6390;
		pci_priv->wcss_reg = wcss_reg_access_seq;
		pci_priv->pcie_reg = pcie_reg_access_seq;
		pci_priv->wlaon_reg = wlaon_reg_access_seq;
		pci_priv->syspm_reg = syspm_reg_access_seq;

		/* Configure WDOG register with specific value so that we can
		 * know if HW is in the process of WDOG reset recovery or not
		 * when reading the registers.
		 */
		cnss_pci_reg_write
		(pci_priv,
		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
		break;
	case QCA6490_DEVICE_ID:
		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6490;
		pci_priv->wlaon_reg = wlaon_reg_access_seq;
		break;
	default:
		return;
	}
}
5930 
5931 #if !IS_ENABLED(CONFIG_ARCH_QCOM)
/* Stub for non-QCOM platforms: no reserved-memory device setup is needed. */
static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
{
	return 0;
}
5936 
/* IRQ handler for the WLAN PCI wake GPIO (non-MSM platforms): aborts any
 * in-progress system suspend and, when the device was runtime-suspended
 * with wake-interrupt monitoring armed, requests a runtime resume.
 */
static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
{
	struct cnss_pci_data *pci_priv = data;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	enum rpm_status status;
	struct device *dev;

	pci_priv->wake_counter++;
	cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
		    pci_priv->wake_irq, pci_priv->wake_counter);

	/* Make sure abort current suspend */
	cnss_pm_stay_awake(plat_priv);
	cnss_pm_relax(plat_priv);
	/* Above two pm* API calls will abort system suspend only when
	 * plat_dev->dev->ws is initiated by device_init_wakeup() API, and
	 * calling pm_system_wakeup() is just to guarantee system suspend
	 * can be aborted if it is not initiated in any case.
	 */
	pm_system_wakeup();

	dev = &pci_priv->pci_dev->dev;
	status = dev->power.runtime_status;

	/* Resume the PCI device if it is (going) runtime-suspended, or if
	 * wake-interrupt monitoring was armed on auto-suspend.
	 */
	if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
	     cnss_pci_get_auto_suspended(pci_priv)) ||
	    (status == RPM_SUSPENDING || status == RPM_SUSPENDED)) {
		cnss_pci_set_monitor_wake_intr(pci_priv, false);
		cnss_pci_pm_request_resume(pci_priv);
	}

	return IRQ_HANDLED;
}
5970 
5971 /**
5972  * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
5973  * @pci_priv: driver PCI bus context pointer
5974  *
5975  * This function initializes WLAN PCI wake GPIO and corresponding
5976  * interrupt. It should be used in non-MSM platforms whose PCIe
5977  * root complex driver doesn't handle the GPIO.
5978  *
5979  * Return: 0 for success or skip, negative value for error
5980  */
5981 static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
5982 {
5983 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5984 	struct device *dev = &plat_priv->plat_dev->dev;
5985 	int ret = 0;
5986 
5987 	pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
5988 						"wlan-pci-wake-gpio", 0);
5989 	if (pci_priv->wake_gpio < 0)
5990 		goto out;
5991 
5992 	cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
5993 		    pci_priv->wake_gpio);
5994 
5995 	ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
5996 	if (ret) {
5997 		cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
5998 			    ret);
5999 		goto out;
6000 	}
6001 
6002 	gpio_direction_input(pci_priv->wake_gpio);
6003 	pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);
6004 
6005 	ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
6006 			  IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
6007 	if (ret) {
6008 		cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
6009 		goto free_gpio;
6010 	}
6011 
6012 	ret = enable_irq_wake(pci_priv->wake_irq);
6013 	if (ret) {
6014 		cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
6015 		goto free_irq;
6016 	}
6017 
6018 	return 0;
6019 
6020 free_irq:
6021 	free_irq(pci_priv->wake_irq, pci_priv);
6022 free_gpio:
6023 	gpio_free(pci_priv->wake_gpio);
6024 out:
6025 	return ret;
6026 }
6027 
/* Undo cnss_pci_wake_gpio_init(); a negative wake_gpio means init was
 * skipped (no DT property), so there is nothing to release.
 */
static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->wake_gpio < 0)
		return;

	disable_irq_wake(pci_priv->wake_irq);
	free_irq(pci_priv->wake_irq, pci_priv);
	gpio_free(pci_priv->wake_gpio);
}
6037 #endif
6038 
/* Setting to use this cnss_pm_domain ops will let PM framework override the
 * ops from dev->bus->pm which is pci_dev_pm_ops from pci-driver.c. This ops
 * has to take care everything device driver needed which is currently done
 * from pci_dev_pm_ops.
 *
 * Installed on the device in cnss_pci_probe() only when
 * plat_priv->use_pm_domain is set; otherwise the regular cnss_pm_ops
 * attached to cnss_pci_driver below is used.
 */
static struct dev_pm_domain cnss_pm_domain = {
	.ops = {
		SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
					      cnss_pci_resume_noirq)
		SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend,
				   cnss_pci_runtime_resume,
				   cnss_pci_runtime_idle)
	}
};
6054 
6055 static int cnss_pci_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
6056 {
6057 	struct device_node *child;
6058 	u32 id, i;
6059 	int id_n, ret;
6060 
6061 	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG)
6062 		return 0;
6063 
6064 	if (!plat_priv->device_id) {
6065 		cnss_pr_err("Invalid device id\n");
6066 		return -EINVAL;
6067 	}
6068 
6069 	for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
6070 					 child) {
6071 		if (strcmp(child->name, "chip_cfg"))
6072 			continue;
6073 
6074 		id_n = of_property_count_u32_elems(child, "supported-ids");
6075 		if (id_n <= 0) {
6076 			cnss_pr_err("Device id is NOT set\n");
6077 			return -EINVAL;
6078 		}
6079 
6080 		for (i = 0; i < id_n; i++) {
6081 			ret = of_property_read_u32_index(child,
6082 							 "supported-ids",
6083 							 i, &id);
6084 			if (ret) {
6085 				cnss_pr_err("Failed to read supported ids\n");
6086 				return -EINVAL;
6087 			}
6088 
6089 			if (id == plat_priv->device_id) {
6090 				plat_priv->dev_node = child;
6091 				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
6092 					    child->name, i, id);
6093 				return 0;
6094 			}
6095 		}
6096 	}
6097 
6098 	return -EINVAL;
6099 }
6100 
6101 #ifdef CONFIG_CNSS2_CONDITIONAL_POWEROFF
6102 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
6103 {
6104 	bool suspend_pwroff;
6105 
6106 	switch (pci_dev->device) {
6107 	case QCA6390_DEVICE_ID:
6108 	case QCA6490_DEVICE_ID:
6109 		suspend_pwroff = false;
6110 		break;
6111 	default:
6112 		suspend_pwroff = true;
6113 	}
6114 
6115 	return suspend_pwroff;
6116 }
6117 #else
/* Without CONFIG_CNSS2_CONDITIONAL_POWEROFF, always allow the PCI link
 * suspend and device power-off performed at the end of probe.
 */
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
{
	return true;
}
6122 #endif
6123 
6124 static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev)
6125 {
6126 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
6127 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
6128 	int ret = 0;
6129 	bool suspend_pwroff = cnss_should_suspend_pwroff(pci_dev);
6130 
6131 	if (suspend_pwroff) {
6132 		ret = cnss_suspend_pci_link(pci_priv);
6133 		if (ret)
6134 			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
6135 				    ret);
6136 		cnss_power_off_device(plat_priv);
6137 	} else {
6138 		cnss_pr_dbg("bus suspend and dev power off disabled for device [0x%x]\n",
6139 			    pci_dev->device);
6140 	}
6141 }
6142 
/**
 * cnss_pci_probe() - PCI core probe callback for supported WLAN devices
 * @pci_dev: PCI device handle supplied by the PCI core
 * @id: matched entry from cnss_pci_id_table
 *
 * Allocates the per-device PCI context, links it with the platform context,
 * powers on the device and brings up subsystem/ramdump/SMMU/PCI-event/bus/
 * MSI/MHI support in order, followed by chip-specific setup. Failures
 * unwind the already-acquired resources in reverse order through the goto
 * ladder at the bottom; the ordering there mirrors the setup sequence and
 * must be kept in sync with it.
 *
 * Return: 0 on success, negative error code on failure
 */
static int cnss_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	int ret = 0;
	struct cnss_pci_data *pci_priv;
	/* Platform context is looked up globally (NULL dev), i.e. this
	 * assumes a single cnss platform device instance.
	 */
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
	struct device *dev = &pci_dev->dev;

	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x\n",
		    id->vendor, pci_dev->device);

	/* devm allocation: freed automatically when the device is unbound */
	pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
	if (!pci_priv) {
		ret = -ENOMEM;
		goto out;
	}

	/* Cross-link the bus and platform contexts before any setup step
	 * so that helpers reached below can navigate both ways.
	 */
	pci_priv->pci_link_state = PCI_LINK_UP;
	pci_priv->plat_priv = plat_priv;
	pci_priv->pci_dev = pci_dev;
	pci_priv->pci_device_id = id;
	pci_priv->device_id = pci_dev->device;
	cnss_set_pci_priv(pci_dev, pci_priv);
	plat_priv->device_id = pci_dev->device;
	plat_priv->bus_priv = pci_priv;
	mutex_init(&pci_priv->bus_lock);
	/* Optionally override dev->bus->pm with cnss_pm_domain (see its
	 * comment above).
	 */
	if (plat_priv->use_pm_domain)
		dev->pm_domain = &cnss_pm_domain;

	ret = cnss_pci_get_dev_cfg_node(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
		goto reset_ctx;
	}

	ret = cnss_dev_specific_power_on(plat_priv);
	if (ret)
		goto reset_ctx;

	cnss_pci_of_reserved_mem_device_init(pci_priv);

	ret = cnss_register_subsys(plat_priv);
	if (ret)
		goto reset_ctx;

	ret = cnss_register_ramdump(plat_priv);
	if (ret)
		goto unregister_subsys;

	ret = cnss_pci_init_smmu(pci_priv);
	if (ret)
		goto unregister_ramdump;

	ret = cnss_reg_pci_event(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
		goto deinit_smmu;
	}

	ret = cnss_pci_enable_bus(pci_priv);
	if (ret)
		goto dereg_pci_event;

	ret = cnss_pci_enable_msi(pci_priv);
	if (ret)
		goto disable_bus;

	ret = cnss_pci_register_mhi(pci_priv);
	if (ret)
		goto disable_msi;

	/* Chip-specific setup; cnss_pci_remove() undoes the per-chip pieces
	 * symmetrically for the QCA6290+ family.
	 */
	switch (pci_dev->device) {
	case QCA6174_DEVICE_ID:
		pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
				     &pci_priv->revision_id);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
		cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
		timer_setup(&pci_priv->dev_rddm_timer,
			    cnss_dev_rddm_timeout_hdlr, 0);
		timer_setup(&pci_priv->boot_debug_timer,
			    cnss_boot_debug_timeout_hdlr, 0);
		INIT_DELAYED_WORK(&pci_priv->time_sync_work,
				  cnss_pci_time_sync_work_hdlr);
		cnss_pci_get_link_status(pci_priv);
		cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false);
		/* Return value intentionally ignored; wake GPIO is optional */
		cnss_pci_wake_gpio_init(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown PCI device found: 0x%x\n",
			    pci_dev->device);
		ret = -ENODEV;
		goto unreg_mhi;
	}

	cnss_pci_config_regs(pci_priv);
	/* NOTE(review): on emulation builds this returns ret == 0 via "out"
	 * and skips both cnss_pci_suspend_pwroff() and the
	 * CNSS_PCI_PROBE_DONE flag — confirm the latter is intentional.
	 */
	if (EMULATION_HW)
		goto out;
	cnss_pci_suspend_pwroff(pci_dev);
	set_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);

	return 0;

unreg_mhi:
	cnss_pci_unregister_mhi(pci_priv);
disable_msi:
	cnss_pci_disable_msi(pci_priv);
disable_bus:
	cnss_pci_disable_bus(pci_priv);
dereg_pci_event:
	cnss_dereg_pci_event(pci_priv);
deinit_smmu:
	cnss_pci_deinit_smmu(pci_priv);
unregister_ramdump:
	cnss_unregister_ramdump(plat_priv);
unregister_subsys:
	cnss_unregister_subsys(plat_priv);
reset_ctx:
	plat_priv->bus_priv = NULL;
out:
	return ret;
}
6269 
6270 static void cnss_pci_remove(struct pci_dev *pci_dev)
6271 {
6272 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
6273 	struct cnss_plat_data *plat_priv =
6274 		cnss_bus_dev_to_plat_priv(&pci_dev->dev);
6275 
6276 	clear_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
6277 	cnss_pci_unregister_driver_hdlr(pci_priv);
6278 	cnss_pci_free_m3_mem(pci_priv);
6279 	cnss_pci_free_fw_mem(pci_priv);
6280 	cnss_pci_free_qdss_mem(pci_priv);
6281 
6282 	switch (pci_dev->device) {
6283 	case QCA6290_DEVICE_ID:
6284 	case QCA6390_DEVICE_ID:
6285 	case QCA6490_DEVICE_ID:
6286 	case KIWI_DEVICE_ID:
6287 	case MANGO_DEVICE_ID:
6288 		cnss_pci_wake_gpio_deinit(pci_priv);
6289 		del_timer(&pci_priv->boot_debug_timer);
6290 		del_timer(&pci_priv->dev_rddm_timer);
6291 		break;
6292 	default:
6293 		break;
6294 	}
6295 
6296 	cnss_pci_unregister_mhi(pci_priv);
6297 	cnss_pci_disable_msi(pci_priv);
6298 	cnss_pci_disable_bus(pci_priv);
6299 	cnss_dereg_pci_event(pci_priv);
6300 	cnss_pci_deinit_smmu(pci_priv);
6301 	if (plat_priv) {
6302 		cnss_unregister_ramdump(plat_priv);
6303 		cnss_unregister_subsys(plat_priv);
6304 		plat_priv->bus_priv = NULL;
6305 	} else {
6306 		cnss_pr_err("Plat_priv is null, Unable to unregister ramdump,subsys\n");
6307 	}
6308 }
6309 
/* PCI IDs this driver binds to; exported to userspace/modprobe via
 * MODULE_DEVICE_TABLE for automatic module loading.
 */
static const struct pci_device_id cnss_pci_id_table[] = {
	{ QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ KIWI_VENDOR_ID, KIWI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ MANGO_VENDOR_ID, MANGO_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 } /* required last entry */
};
MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
6320 
/* Default PM callbacks attached to cnss_pci_driver; mirrors the ops in
 * cnss_pm_domain above, which takes precedence when use_pm_domain is set.
 */
static const struct dev_pm_ops cnss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
				      cnss_pci_resume_noirq)
	SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
			   cnss_pci_runtime_idle)
};
6328 
/* PCI driver instance registered from cnss_pci_init() and unregistered
 * from cnss_pci_deinit().
 */
struct pci_driver cnss_pci_driver = {
	.name     = "cnss_pci",
	.id_table = cnss_pci_id_table,
	.probe    = cnss_pci_probe,
	.remove   = cnss_pci_remove,
	.driver = {
		.pm = &cnss_pm_ops,
	},
};
6338 
6339 static int cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
6340 {
6341 	int ret, retry = 0;
6342 
6343 	/* Always set initial target PCIe link speed to Gen2 for QCA6490 device
6344 	 * since there may be link issues if it boots up with Gen3 link speed.
6345 	 * Device is able to change it later at any time. It will be rejected
6346 	 * if requested speed is higher than the one specified in PCIe DT.
6347 	 */
6348 	if (plat_priv->device_id == QCA6490_DEVICE_ID) {
6349 		ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
6350 						  PCI_EXP_LNKSTA_CLS_5_0GB);
6351 		if (ret && ret != -EPROBE_DEFER)
6352 			cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen2, err = %d\n",
6353 				    rc_num, ret);
6354 	}
6355 
6356 	cnss_pr_dbg("Trying to enumerate with PCIe RC%x\n", rc_num);
6357 retry:
6358 	ret = _cnss_pci_enumerate(plat_priv, rc_num);
6359 	if (ret) {
6360 		if (ret == -EPROBE_DEFER) {
6361 			cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
6362 			goto out;
6363 		}
6364 		cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
6365 			    rc_num, ret);
6366 		if (retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
6367 			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
6368 			goto retry;
6369 		} else {
6370 			goto out;
6371 		}
6372 	}
6373 
6374 	plat_priv->rc_num = rc_num;
6375 
6376 out:
6377 	return ret;
6378 }
6379 
6380 int cnss_pci_init(struct cnss_plat_data *plat_priv)
6381 {
6382 	struct device *dev = &plat_priv->plat_dev->dev;
6383 	const __be32 *prop;
6384 	int ret = 0, prop_len = 0, rc_count, i;
6385 
6386 	prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
6387 	if (!prop || !prop_len) {
6388 		cnss_pr_err("Failed to get PCIe RC number from DT\n");
6389 		goto out;
6390 	}
6391 
6392 	rc_count = prop_len / sizeof(__be32);
6393 	for (i = 0; i < rc_count; i++) {
6394 		ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
6395 		if (!ret)
6396 			break;
6397 		else if (ret == -EPROBE_DEFER || (ret && i == rc_count - 1))
6398 			goto out;
6399 	}
6400 
6401 	ret = pci_register_driver(&cnss_pci_driver);
6402 	if (ret) {
6403 		cnss_pr_err("Failed to register to PCI framework, err = %d\n",
6404 			    ret);
6405 		goto out;
6406 	}
6407 
6408 	if (!plat_priv->bus_priv) {
6409 		cnss_pr_err("Failed to probe PCI driver\n");
6410 		ret = -ENODEV;
6411 		goto unreg_pci;
6412 	}
6413 
6414 	return 0;
6415 
6416 unreg_pci:
6417 	pci_unregister_driver(&cnss_pci_driver);
6418 out:
6419 	return ret;
6420 }
6421 
/* Counterpart of cnss_pci_init(): unbind and unregister the PCI driver
 * (remove callbacks run for any bound device).
 */
void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
{
	pci_unregister_driver(&cnss_pci_driver);
}
6426