xref: /wlan-dirver/platform/cnss2/pci.c (revision 36aaccd8879694233fe249b5e3fcd96a7ad01856)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/completion.h>
8 #include <linux/io.h>
9 #include <linux/irq.h>
10 #include <linux/memblock.h>
11 #include <linux/module.h>
12 #include <linux/msi.h>
13 #include <linux/of.h>
14 #include <linux/of_gpio.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/suspend.h>
17 #include <linux/version.h>
18 #include <linux/sched.h>
19 #include "main.h"
20 #include "bus.h"
21 #include "debug.h"
22 #include "pci.h"
23 #include "pci_platform.h"
24 #include "reg.h"
25 
26 #define PCI_LINK_UP			1
27 #define PCI_LINK_DOWN			0
28 
29 #define SAVE_PCI_CONFIG_SPACE		1
30 #define RESTORE_PCI_CONFIG_SPACE	0
31 
32 #define PCI_BAR_NUM			0
33 #define PCI_INVALID_READ(val)		((val) == U32_MAX)
34 
35 #define PCI_DMA_MASK_32_BIT		DMA_BIT_MASK(32)
36 #define PCI_DMA_MASK_36_BIT		DMA_BIT_MASK(36)
37 #define PCI_DMA_MASK_64_BIT		DMA_BIT_MASK(64)
38 
39 #define MHI_NODE_NAME			"qcom,mhi"
40 #define MHI_MSI_NAME			"MHI"
41 
42 #define QCA6390_PATH_PREFIX		"qca6390/"
43 #define QCA6490_PATH_PREFIX		"qca6490/"
44 #define QCN7605_PATH_PREFIX             "qcn7605/"
45 #define KIWI_PATH_PREFIX		"kiwi/"
46 #define MANGO_PATH_PREFIX		"mango/"
47 #define PEACH_PATH_PREFIX		"peach/"
48 #define DEFAULT_PHY_M3_FILE_NAME	"m3.bin"
49 #define DEFAULT_AUX_FILE_NAME		"aux_ucode.elf"
50 #define DEFAULT_PHY_UCODE_FILE_NAME	"phy_ucode.elf"
51 #define TME_PATCH_FILE_NAME_1_0		"tmel_peach_10.elf"
52 #define TME_PATCH_FILE_NAME_2_0		"tmel_peach_20.elf"
53 #define PHY_UCODE_V2_FILE_NAME		"phy_ucode20.elf"
54 #define DEFAULT_FW_FILE_NAME		"amss.bin"
55 #define FW_V2_FILE_NAME			"amss20.bin"
56 #define DEVICE_MAJOR_VERSION_MASK	0xF
57 
58 #define WAKE_MSI_NAME			"WAKE"
59 
60 #define DEV_RDDM_TIMEOUT		5000
61 #define WAKE_EVENT_TIMEOUT		5000
62 
63 #ifdef CONFIG_CNSS_EMULATION
64 #define EMULATION_HW			1
65 #else
66 #define EMULATION_HW			0
67 #endif
68 
69 #define RAMDUMP_SIZE_DEFAULT		0x420000
70 #define CNSS_256KB_SIZE			0x40000
71 #define DEVICE_RDDM_COOKIE		0xCAFECACE
72 
73 static bool cnss_driver_registered;
74 
75 static DEFINE_SPINLOCK(pci_link_down_lock);
76 static DEFINE_SPINLOCK(pci_reg_window_lock);
77 static DEFINE_SPINLOCK(time_sync_lock);
78 
79 #define MHI_TIMEOUT_OVERWRITE_MS	(plat_priv->ctrl_params.mhi_timeout)
80 #define MHI_M2_TIMEOUT_MS		(plat_priv->ctrl_params.mhi_m2_timeout)
81 
82 #define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US	1000
83 #define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US	2000
84 
85 #define RDDM_LINK_RECOVERY_RETRY		20
86 #define RDDM_LINK_RECOVERY_RETRY_DELAY_MS	20
87 
88 #define FORCE_WAKE_DELAY_MIN_US			4000
89 #define FORCE_WAKE_DELAY_MAX_US			6000
90 #define FORCE_WAKE_DELAY_TIMEOUT_US		60000
91 
92 #define REG_RETRY_MAX_TIMES		3
93 
94 #define MHI_SUSPEND_RETRY_MAX_TIMES		3
95 #define MHI_SUSPEND_RETRY_DELAY_US		5000
96 
97 #define BOOT_DEBUG_TIMEOUT_MS			7000
98 
99 #define HANG_DATA_LENGTH		384
100 #define HST_HANG_DATA_OFFSET		((3 * 1024 * 1024) - HANG_DATA_LENGTH)
101 #define HSP_HANG_DATA_OFFSET		((2 * 1024 * 1024) - HANG_DATA_LENGTH)
102 #define GNO_HANG_DATA_OFFSET		(0x7d000 - HANG_DATA_LENGTH)
103 
104 #define AFC_SLOT_SIZE                   0x1000
105 #define AFC_MAX_SLOT                    2
106 #define AFC_MEM_SIZE                    (AFC_SLOT_SIZE * AFC_MAX_SLOT)
107 #define AFC_AUTH_STATUS_OFFSET          1
108 #define AFC_AUTH_SUCCESS                1
109 #define AFC_AUTH_ERROR                  0
110 
111 static const struct mhi_channel_config cnss_mhi_channels[] = {
112 	{
113 		.num = 0,
114 		.name = "LOOPBACK",
115 		.num_elements = 32,
116 		.event_ring = 1,
117 		.dir = DMA_TO_DEVICE,
118 		.ee_mask = 0x4,
119 		.pollcfg = 0,
120 		.doorbell = MHI_DB_BRST_DISABLE,
121 		.lpm_notify = false,
122 		.offload_channel = false,
123 		.doorbell_mode_switch = false,
124 		.auto_queue = false,
125 	},
126 	{
127 		.num = 1,
128 		.name = "LOOPBACK",
129 		.num_elements = 32,
130 		.event_ring = 1,
131 		.dir = DMA_FROM_DEVICE,
132 		.ee_mask = 0x4,
133 		.pollcfg = 0,
134 		.doorbell = MHI_DB_BRST_DISABLE,
135 		.lpm_notify = false,
136 		.offload_channel = false,
137 		.doorbell_mode_switch = false,
138 		.auto_queue = false,
139 	},
140 	{
141 		.num = 4,
142 		.name = "DIAG",
143 		.num_elements = 64,
144 		.event_ring = 1,
145 		.dir = DMA_TO_DEVICE,
146 		.ee_mask = 0x4,
147 		.pollcfg = 0,
148 		.doorbell = MHI_DB_BRST_DISABLE,
149 		.lpm_notify = false,
150 		.offload_channel = false,
151 		.doorbell_mode_switch = false,
152 		.auto_queue = false,
153 	},
154 	{
155 		.num = 5,
156 		.name = "DIAG",
157 		.num_elements = 64,
158 		.event_ring = 1,
159 		.dir = DMA_FROM_DEVICE,
160 		.ee_mask = 0x4,
161 		.pollcfg = 0,
162 		.doorbell = MHI_DB_BRST_DISABLE,
163 		.lpm_notify = false,
164 		.offload_channel = false,
165 		.doorbell_mode_switch = false,
166 		.auto_queue = false,
167 	},
168 	{
169 		.num = 20,
170 		.name = "IPCR",
171 		.num_elements = 64,
172 		.event_ring = 1,
173 		.dir = DMA_TO_DEVICE,
174 		.ee_mask = 0x4,
175 		.pollcfg = 0,
176 		.doorbell = MHI_DB_BRST_DISABLE,
177 		.lpm_notify = false,
178 		.offload_channel = false,
179 		.doorbell_mode_switch = false,
180 		.auto_queue = false,
181 	},
182 	{
183 		.num = 21,
184 		.name = "IPCR",
185 		.num_elements = 64,
186 		.event_ring = 1,
187 		.dir = DMA_FROM_DEVICE,
188 		.ee_mask = 0x4,
189 		.pollcfg = 0,
190 		.doorbell = MHI_DB_BRST_DISABLE,
191 		.lpm_notify = false,
192 		.offload_channel = false,
193 		.doorbell_mode_switch = false,
194 		.auto_queue = true,
195 	},
196 /* All MHI satellite config to be at the end of data struct */
197 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
198 	{
199 		.num = 50,
200 		.name = "ADSP_0",
201 		.num_elements = 64,
202 		.event_ring = 3,
203 		.dir = DMA_BIDIRECTIONAL,
204 		.ee_mask = 0x4,
205 		.pollcfg = 0,
206 		.doorbell = MHI_DB_BRST_DISABLE,
207 		.lpm_notify = false,
208 		.offload_channel = true,
209 		.doorbell_mode_switch = false,
210 		.auto_queue = false,
211 	},
212 	{
213 		.num = 51,
214 		.name = "ADSP_1",
215 		.num_elements = 64,
216 		.event_ring = 3,
217 		.dir = DMA_BIDIRECTIONAL,
218 		.ee_mask = 0x4,
219 		.pollcfg = 0,
220 		.doorbell = MHI_DB_BRST_DISABLE,
221 		.lpm_notify = false,
222 		.offload_channel = true,
223 		.doorbell_mode_switch = false,
224 		.auto_queue = false,
225 	},
226 	{
227 		.num = 70,
228 		.name = "ADSP_2",
229 		.num_elements = 64,
230 		.event_ring = 3,
231 		.dir = DMA_BIDIRECTIONAL,
232 		.ee_mask = 0x4,
233 		.pollcfg = 0,
234 		.doorbell = MHI_DB_BRST_DISABLE,
235 		.lpm_notify = false,
236 		.offload_channel = true,
237 		.doorbell_mode_switch = false,
238 		.auto_queue = false,
239 	},
240 	{
241 		.num = 71,
242 		.name = "ADSP_3",
243 		.num_elements = 64,
244 		.event_ring = 3,
245 		.dir = DMA_BIDIRECTIONAL,
246 		.ee_mask = 0x4,
247 		.pollcfg = 0,
248 		.doorbell = MHI_DB_BRST_DISABLE,
249 		.lpm_notify = false,
250 		.offload_channel = true,
251 		.doorbell_mode_switch = false,
252 		.auto_queue = false,
253 	},
254 #endif
255 };
256 
257 static const struct mhi_channel_config cnss_mhi_channels_no_diag[] = {
258 	{
259 		.num = 0,
260 		.name = "LOOPBACK",
261 		.num_elements = 32,
262 		.event_ring = 1,
263 		.dir = DMA_TO_DEVICE,
264 		.ee_mask = 0x4,
265 		.pollcfg = 0,
266 		.doorbell = MHI_DB_BRST_DISABLE,
267 		.lpm_notify = false,
268 		.offload_channel = false,
269 		.doorbell_mode_switch = false,
270 		.auto_queue = false,
271 	},
272 	{
273 		.num = 1,
274 		.name = "LOOPBACK",
275 		.num_elements = 32,
276 		.event_ring = 1,
277 		.dir = DMA_FROM_DEVICE,
278 		.ee_mask = 0x4,
279 		.pollcfg = 0,
280 		.doorbell = MHI_DB_BRST_DISABLE,
281 		.lpm_notify = false,
282 		.offload_channel = false,
283 		.doorbell_mode_switch = false,
284 		.auto_queue = false,
285 	},
286 	{
287 		.num = 20,
288 		.name = "IPCR",
289 		.num_elements = 64,
290 		.event_ring = 1,
291 		.dir = DMA_TO_DEVICE,
292 		.ee_mask = 0x4,
293 		.pollcfg = 0,
294 		.doorbell = MHI_DB_BRST_DISABLE,
295 		.lpm_notify = false,
296 		.offload_channel = false,
297 		.doorbell_mode_switch = false,
298 		.auto_queue = false,
299 	},
300 	{
301 		.num = 21,
302 		.name = "IPCR",
303 		.num_elements = 64,
304 		.event_ring = 1,
305 		.dir = DMA_FROM_DEVICE,
306 		.ee_mask = 0x4,
307 		.pollcfg = 0,
308 		.doorbell = MHI_DB_BRST_DISABLE,
309 		.lpm_notify = false,
310 		.offload_channel = false,
311 		.doorbell_mode_switch = false,
312 		.auto_queue = true,
313 	},
314 /* All MHI satellite config to be at the end of data struct */
315 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
316 	{
317 		.num = 50,
318 		.name = "ADSP_0",
319 		.num_elements = 64,
320 		.event_ring = 3,
321 		.dir = DMA_BIDIRECTIONAL,
322 		.ee_mask = 0x4,
323 		.pollcfg = 0,
324 		.doorbell = MHI_DB_BRST_DISABLE,
325 		.lpm_notify = false,
326 		.offload_channel = true,
327 		.doorbell_mode_switch = false,
328 		.auto_queue = false,
329 	},
330 	{
331 		.num = 51,
332 		.name = "ADSP_1",
333 		.num_elements = 64,
334 		.event_ring = 3,
335 		.dir = DMA_BIDIRECTIONAL,
336 		.ee_mask = 0x4,
337 		.pollcfg = 0,
338 		.doorbell = MHI_DB_BRST_DISABLE,
339 		.lpm_notify = false,
340 		.offload_channel = true,
341 		.doorbell_mode_switch = false,
342 		.auto_queue = false,
343 	},
344 	{
345 		.num = 70,
346 		.name = "ADSP_2",
347 		.num_elements = 64,
348 		.event_ring = 3,
349 		.dir = DMA_BIDIRECTIONAL,
350 		.ee_mask = 0x4,
351 		.pollcfg = 0,
352 		.doorbell = MHI_DB_BRST_DISABLE,
353 		.lpm_notify = false,
354 		.offload_channel = true,
355 		.doorbell_mode_switch = false,
356 		.auto_queue = false,
357 	},
358 	{
359 		.num = 71,
360 		.name = "ADSP_3",
361 		.num_elements = 64,
362 		.event_ring = 3,
363 		.dir = DMA_BIDIRECTIONAL,
364 		.ee_mask = 0x4,
365 		.pollcfg = 0,
366 		.doorbell = MHI_DB_BRST_DISABLE,
367 		.lpm_notify = false,
368 		.offload_channel = true,
369 		.doorbell_mode_switch = false,
370 		.auto_queue = false,
371 	},
372 #endif
373 };
374 
375 static const struct mhi_channel_config cnss_mhi_channels_genoa[] = {
376 	{
377 		.num = 0,
378 		.name = "LOOPBACK",
379 		.num_elements = 32,
380 		.event_ring = 1,
381 		.dir = DMA_TO_DEVICE,
382 		.ee_mask = 0x4,
383 		.pollcfg = 0,
384 		.doorbell = MHI_DB_BRST_DISABLE,
385 		.lpm_notify = false,
386 		.offload_channel = false,
387 		.doorbell_mode_switch = false,
388 		.auto_queue = false,
389 	},
390 	{
391 		.num = 1,
392 		.name = "LOOPBACK",
393 		.num_elements = 32,
394 		.event_ring = 1,
395 		.dir = DMA_FROM_DEVICE,
396 		.ee_mask = 0x4,
397 		.pollcfg = 0,
398 		.doorbell = MHI_DB_BRST_DISABLE,
399 		.lpm_notify = false,
400 		.offload_channel = false,
401 		.doorbell_mode_switch = false,
402 		.auto_queue = false,
403 	},
404 	{
405 		.num = 4,
406 		.name = "DIAG",
407 		.num_elements = 64,
408 		.event_ring = 1,
409 		.dir = DMA_TO_DEVICE,
410 		.ee_mask = 0x4,
411 		.pollcfg = 0,
412 		.doorbell = MHI_DB_BRST_DISABLE,
413 		.lpm_notify = false,
414 		.offload_channel = false,
415 		.doorbell_mode_switch = false,
416 		.auto_queue = false,
417 	},
418 	{
419 		.num = 5,
420 		.name = "DIAG",
421 		.num_elements = 64,
422 		.event_ring = 1,
423 		.dir = DMA_FROM_DEVICE,
424 		.ee_mask = 0x4,
425 		.pollcfg = 0,
426 		.doorbell = MHI_DB_BRST_DISABLE,
427 		.lpm_notify = false,
428 		.offload_channel = false,
429 		.doorbell_mode_switch = false,
430 		.auto_queue = false,
431 	},
432 	{
433 		.num = 16,
434 		.name = "IPCR",
435 		.num_elements = 64,
436 		.event_ring = 1,
437 		.dir = DMA_TO_DEVICE,
438 		.ee_mask = 0x4,
439 		.pollcfg = 0,
440 		.doorbell = MHI_DB_BRST_DISABLE,
441 		.lpm_notify = false,
442 		.offload_channel = false,
443 		.doorbell_mode_switch = false,
444 		.auto_queue = false,
445 	},
446 	{
447 		.num = 17,
448 		.name = "IPCR",
449 		.num_elements = 64,
450 		.event_ring = 1,
451 		.dir = DMA_FROM_DEVICE,
452 		.ee_mask = 0x4,
453 		.pollcfg = 0,
454 		.doorbell = MHI_DB_BRST_DISABLE,
455 		.lpm_notify = false,
456 		.offload_channel = false,
457 		.doorbell_mode_switch = false,
458 		.auto_queue = true,
459 	},
460 };
461 
462 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
463 static struct mhi_event_config cnss_mhi_events[] = {
464 #else
465 static const struct mhi_event_config cnss_mhi_events[] = {
466 #endif
467 	{
468 		.num_elements = 32,
469 		.irq_moderation_ms = 0,
470 		.irq = 1,
471 		.mode = MHI_DB_BRST_DISABLE,
472 		.data_type = MHI_ER_CTRL,
473 		.priority = 0,
474 		.hardware_event = false,
475 		.client_managed = false,
476 		.offload_channel = false,
477 	},
478 	{
479 		.num_elements = 256,
480 		.irq_moderation_ms = 0,
481 		.irq = 2,
482 		.mode = MHI_DB_BRST_DISABLE,
483 		.priority = 1,
484 		.hardware_event = false,
485 		.client_managed = false,
486 		.offload_channel = false,
487 	},
488 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
489 	{
490 		.num_elements = 32,
491 		.irq_moderation_ms = 0,
492 		.irq = 1,
493 		.mode = MHI_DB_BRST_DISABLE,
494 		.data_type = MHI_ER_BW_SCALE,
495 		.priority = 2,
496 		.hardware_event = false,
497 		.client_managed = false,
498 		.offload_channel = false,
499 	},
500 #endif
501 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
502 	{
503 		.num_elements = 256,
504 		.irq_moderation_ms = 0,
505 		.irq = 2,
506 		.mode = MHI_DB_BRST_DISABLE,
507 		.data_type = MHI_ER_DATA,
508 		.priority = 1,
509 		.hardware_event = false,
510 		.client_managed = true,
511 		.offload_channel = true,
512 	},
513 #endif
514 };
515 
516 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
517 #define CNSS_MHI_SATELLITE_CH_CFG_COUNT 4
518 #define CNSS_MHI_SATELLITE_EVT_COUNT 1
519 #else
520 #define CNSS_MHI_SATELLITE_CH_CFG_COUNT 0
521 #define CNSS_MHI_SATELLITE_EVT_COUNT 0
522 #endif
523 
524 static const struct mhi_controller_config cnss_mhi_config_no_diag = {
525 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
526 	.max_channels = 72,
527 #else
528 	.max_channels = 32,
529 #endif
530 	.timeout_ms = 10000,
531 	.use_bounce_buf = false,
532 	.buf_len = 0x8000,
533 	.num_channels = ARRAY_SIZE(cnss_mhi_channels_no_diag),
534 	.ch_cfg = cnss_mhi_channels_no_diag,
535 	.num_events = ARRAY_SIZE(cnss_mhi_events),
536 	.event_cfg = cnss_mhi_events,
537 	.m2_no_db = true,
538 };
539 
540 static const struct mhi_controller_config cnss_mhi_config_default = {
541 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
542 	.max_channels = 72,
543 #else
544 	.max_channels = 32,
545 #endif
546 	.timeout_ms = 10000,
547 	.use_bounce_buf = false,
548 	.buf_len = 0x8000,
549 	.num_channels = ARRAY_SIZE(cnss_mhi_channels),
550 	.ch_cfg = cnss_mhi_channels,
551 	.num_events = ARRAY_SIZE(cnss_mhi_events),
552 	.event_cfg = cnss_mhi_events,
553 	.m2_no_db = true,
554 };
555 
556 static const struct mhi_controller_config cnss_mhi_config_genoa = {
557 	.max_channels = 32,
558 	.timeout_ms = 10000,
559 	.use_bounce_buf = false,
560 	.buf_len = 0x8000,
561 	.num_channels = ARRAY_SIZE(cnss_mhi_channels_genoa),
562 	.ch_cfg = cnss_mhi_channels_genoa,
563 	.num_events = ARRAY_SIZE(cnss_mhi_events) -
564 		CNSS_MHI_SATELLITE_EVT_COUNT,
565 	.event_cfg = cnss_mhi_events,
566 	.m2_no_db = true,
567 #if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
568 	.bhie_offset = 0x0324,
569 #endif
570 };
571 
572 static const struct mhi_controller_config cnss_mhi_config_no_satellite = {
573 	.max_channels = 32,
574 	.timeout_ms = 10000,
575 	.use_bounce_buf = false,
576 	.buf_len = 0x8000,
577 	.num_channels = ARRAY_SIZE(cnss_mhi_channels) -
578 			CNSS_MHI_SATELLITE_CH_CFG_COUNT,
579 	.ch_cfg = cnss_mhi_channels,
580 	.num_events = ARRAY_SIZE(cnss_mhi_events) -
581 			CNSS_MHI_SATELLITE_EVT_COUNT,
582 	.event_cfg = cnss_mhi_events,
583 	.m2_no_db = true,
584 };
585 
586 static struct cnss_pci_reg ce_src[] = {
587 	{ "SRC_RING_BASE_LSB", CE_SRC_RING_BASE_LSB_OFFSET },
588 	{ "SRC_RING_BASE_MSB", CE_SRC_RING_BASE_MSB_OFFSET },
589 	{ "SRC_RING_ID", CE_SRC_RING_ID_OFFSET },
590 	{ "SRC_RING_MISC", CE_SRC_RING_MISC_OFFSET },
591 	{ "SRC_CTRL", CE_SRC_CTRL_OFFSET },
592 	{ "SRC_R0_CE_CH_SRC_IS", CE_SRC_R0_CE_CH_SRC_IS_OFFSET },
593 	{ "SRC_RING_HP", CE_SRC_RING_HP_OFFSET },
594 	{ "SRC_RING_TP", CE_SRC_RING_TP_OFFSET },
595 	{ NULL },
596 };
597 
598 static struct cnss_pci_reg ce_dst[] = {
599 	{ "DEST_RING_BASE_LSB", CE_DEST_RING_BASE_LSB_OFFSET },
600 	{ "DEST_RING_BASE_MSB", CE_DEST_RING_BASE_MSB_OFFSET },
601 	{ "DEST_RING_ID", CE_DEST_RING_ID_OFFSET },
602 	{ "DEST_RING_MISC", CE_DEST_RING_MISC_OFFSET },
603 	{ "DEST_CTRL", CE_DEST_CTRL_OFFSET },
604 	{ "CE_CH_DST_IS", CE_CH_DST_IS_OFFSET },
605 	{ "CE_CH_DEST_CTRL2", CE_CH_DEST_CTRL2_OFFSET },
606 	{ "DEST_RING_HP", CE_DEST_RING_HP_OFFSET },
607 	{ "DEST_RING_TP", CE_DEST_RING_TP_OFFSET },
608 	{ "STATUS_RING_BASE_LSB", CE_STATUS_RING_BASE_LSB_OFFSET },
609 	{ "STATUS_RING_BASE_MSB", CE_STATUS_RING_BASE_MSB_OFFSET },
610 	{ "STATUS_RING_ID", CE_STATUS_RING_ID_OFFSET },
611 	{ "STATUS_RING_MISC", CE_STATUS_RING_MISC_OFFSET },
612 	{ "STATUS_RING_HP", CE_STATUS_RING_HP_OFFSET },
613 	{ "STATUS_RING_TP", CE_STATUS_RING_TP_OFFSET },
614 	{ NULL },
615 };
616 
617 static struct cnss_pci_reg ce_cmn[] = {
618 	{ "GXI_ERR_INTS", CE_COMMON_GXI_ERR_INTS },
619 	{ "GXI_ERR_STATS", CE_COMMON_GXI_ERR_STATS },
620 	{ "GXI_WDOG_STATUS", CE_COMMON_GXI_WDOG_STATUS },
621 	{ "TARGET_IE_0", CE_COMMON_TARGET_IE_0 },
622 	{ "TARGET_IE_1", CE_COMMON_TARGET_IE_1 },
623 	{ NULL },
624 };
625 
626 static struct cnss_pci_reg qdss_csr[] = {
627 	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
628 	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
629 	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
630 	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
631 	{ NULL },
632 };
633 
634 static struct cnss_pci_reg pci_scratch[] = {
635 	{ "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG },
636 	{ "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG },
637 	{ "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG },
638 	{ NULL },
639 };
640 
641 static struct cnss_pci_reg pci_bhi_debug[] = {
642 	{ "PCIE_BHIE_DEBUG_0", PCIE_PCIE_BHIE_DEBUG_0 },
643 	{ "PCIE_BHIE_DEBUG_1", PCIE_PCIE_BHIE_DEBUG_1 },
644 	{ "PCIE_BHIE_DEBUG_2", PCIE_PCIE_BHIE_DEBUG_2 },
645 	{ "PCIE_BHIE_DEBUG_3", PCIE_PCIE_BHIE_DEBUG_3 },
646 	{ "PCIE_BHIE_DEBUG_4", PCIE_PCIE_BHIE_DEBUG_4 },
647 	{ "PCIE_BHIE_DEBUG_5", PCIE_PCIE_BHIE_DEBUG_5 },
648 	{ "PCIE_BHIE_DEBUG_6", PCIE_PCIE_BHIE_DEBUG_6 },
649 	{ "PCIE_BHIE_DEBUG_7", PCIE_PCIE_BHIE_DEBUG_7 },
650 	{ "PCIE_BHIE_DEBUG_8", PCIE_PCIE_BHIE_DEBUG_8 },
651 	{ "PCIE_BHIE_DEBUG_9", PCIE_PCIE_BHIE_DEBUG_9 },
652 	{ "PCIE_BHIE_DEBUG_10", PCIE_PCIE_BHIE_DEBUG_10 },
653 	{ NULL },
654 };
655 
656 /* First field of the structure is the device bit mask. Use
657  * enum cnss_pci_reg_mask as reference for the value.
658  */
659 static struct cnss_misc_reg wcss_reg_access_seq[] = {
660 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
661 	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
662 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
663 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
664 	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
665 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
666 	{1, 0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
667 	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
668 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
669 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
670 	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
671 	{1, 1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
672 	{1, 0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
673 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
674 	{1, 1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
675 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
676 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
677 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
678 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
679 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
680 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
681 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
682 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
683 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
684 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
685 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
686 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
687 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
688 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
689 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
690 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
691 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
692 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
693 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
694 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
695 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
696 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
697 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
698 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
699 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
700 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
701 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
702 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
703 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
704 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
705 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
706 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
707 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
708 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
709 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
710 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
711 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
712 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
713 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
714 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
715 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
716 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
717 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
718 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
719 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
720 	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
721 	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
722 	{1, 0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
723 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
724 };
725 
726 static struct cnss_misc_reg pcie_reg_access_seq[] = {
727 	{1, 0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
728 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
729 	{1, 1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
730 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
731 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
732 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
733 	{1, 0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
734 	{1, 0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
735 	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
736 	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
737 	{1, 0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
738 	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
739 	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
740 	{1, 0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
741 	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
742 	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
743 	{1, 0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
744 	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
745 	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
746 	{1, 0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
747 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
748 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
749 	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
750 	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
751 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
752 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
753 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
754 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
755 	{1, 0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
756 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
757 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
758 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
759 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
760 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
761 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
762 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
763 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
764 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
765 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
766 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
767 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
768 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
769 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
770 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
771 	{1, 0, QCA6390_PCIE_PCIE_BHI_EXECENV_REG, 0},
772 };
773 
774 static struct cnss_misc_reg wlaon_reg_access_seq[] = {
775 	{3, 0, WLAON_SOC_POWER_CTRL, 0},
776 	{3, 0, WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
777 	{3, 0, WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
778 	{3, 0, WLAON_SW_COLD_RESET, 0},
779 	{3, 0, WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
780 	{3, 0, WLAON_GDSC_DELAY_SETTING, 0},
781 	{3, 0, WLAON_GDSC_DELAY_SETTING2, 0},
782 	{3, 0, WLAON_WL_PWR_STATUS_REG, 0},
783 	{3, 0, WLAON_WL_AON_DBG_CFG_REG, 0},
784 	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP0_REG, 0},
785 	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP1_REG, 0},
786 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL0, 0},
787 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL1, 0},
788 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL2, 0},
789 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL3, 0},
790 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL4, 0},
791 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5, 0},
792 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5_1, 0},
793 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6, 0},
794 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6_1, 0},
795 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL7, 0},
796 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8, 0},
797 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8_1, 0},
798 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9, 0},
799 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9_1, 0},
800 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL10, 0},
801 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL11, 0},
802 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL12, 0},
803 	{2, 0, WLAON_WL_AON_APM_OVERRIDE_REG, 0},
804 	{2, 0, WLAON_WL_AON_CXPC_REG, 0},
805 	{2, 0, WLAON_WL_AON_APM_STATUS0, 0},
806 	{2, 0, WLAON_WL_AON_APM_STATUS1, 0},
807 	{2, 0, WLAON_WL_AON_APM_STATUS2, 0},
808 	{2, 0, WLAON_WL_AON_APM_STATUS3, 0},
809 	{2, 0, WLAON_WL_AON_APM_STATUS4, 0},
810 	{2, 0, WLAON_WL_AON_APM_STATUS5, 0},
811 	{2, 0, WLAON_WL_AON_APM_STATUS6, 0},
812 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL1, 0},
813 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL6, 0},
814 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL7, 0},
815 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL3, 0},
816 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL4, 0},
817 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL5, 0},
818 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL8, 0},
819 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL2, 0},
820 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL9, 0},
821 	{3, 0, WLAON_RTC_CLK_CAL_CTRL1, 0},
822 	{3, 0, WLAON_RTC_CLK_CAL_CTRL2, 0},
823 	{3, 0, WLAON_RTC_CLK_CAL_CTRL3, 0},
824 	{3, 0, WLAON_RTC_CLK_CAL_CTRL4, 0},
825 	{3, 0, WLAON_RTC_CLK_CAL_CTRL5, 0},
826 	{3, 0, WLAON_RTC_CLK_CAL_CTRL6, 0},
827 	{3, 0, WLAON_RTC_CLK_CAL_CTRL7, 0},
828 	{3, 0, WLAON_RTC_CLK_CAL_CTRL8, 0},
829 	{3, 0, WLAON_RTC_CLK_CAL_CTRL9, 0},
830 	{3, 0, WLAON_WCSSAON_CONFIG_REG, 0},
831 	{3, 0, WLAON_WLAN_OEM_DEBUG_REG, 0},
832 	{3, 0, WLAON_WLAN_RAM_DUMP_REG, 0},
833 	{3, 0, WLAON_QDSS_WCSS_REG, 0},
834 	{3, 0, WLAON_QDSS_WCSS_ACK, 0},
835 	{3, 0, WLAON_WL_CLK_CNTL_KDF_REG, 0},
836 	{3, 0, WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
837 	{3, 0, WLAON_QFPROM_PWR_CTRL_REG, 0},
838 	{3, 0, WLAON_DLY_CONFIG, 0},
839 	{3, 0, WLAON_WLAON_Q6_IRQ_REG, 0},
840 	{3, 0, WLAON_PCIE_INTF_SW_CFG_REG, 0},
841 	{3, 0, WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
842 	{3, 0, WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
843 	{3, 0, WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
844 	{3, 0, WLAON_Q6_COOKIE_BIT, 0},
845 	{3, 0, WLAON_WARM_SW_ENTRY, 0},
846 	{3, 0, WLAON_RESET_DBG_SW_ENTRY, 0},
847 	{3, 0, WLAON_WL_PMUNOC_CFG_REG, 0},
848 	{3, 0, WLAON_RESET_CAUSE_CFG_REG, 0},
849 	{3, 0, WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
850 	{3, 0, WLAON_DEBUG, 0},
851 	{3, 0, WLAON_SOC_PARAMETERS, 0},
852 	{3, 0, WLAON_WLPM_SIGNAL, 0},
853 	{3, 0, WLAON_SOC_RESET_CAUSE_REG, 0},
854 	{3, 0, WLAON_WAKEUP_PCIE_SOC_REG, 0},
855 	{3, 0, WLAON_PBL_STACK_CANARY, 0},
856 	{3, 0, WLAON_MEM_TOT_NUM_GRP_REG, 0},
857 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
858 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
859 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
860 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
861 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
862 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
863 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
864 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
865 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
866 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
867 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
868 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
869 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
870 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
871 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
872 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
873 	{3, 0, WLAON_MEM_CNT_SEL_REG, 0},
874 	{3, 0, WLAON_MEM_NO_EXTBHS_REG, 0},
875 	{3, 0, WLAON_MEM_DEBUG_REG, 0},
876 	{3, 0, WLAON_MEM_DEBUG_BUS_REG, 0},
877 	{3, 0, WLAON_MEM_REDUN_CFG_REG, 0},
878 	{3, 0, WLAON_WL_AON_SPARE2, 0},
879 	{3, 0, WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
880 	{3, 0, WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
881 	{3, 0, WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
882 	{3, 0, WLAON_WLPM_CHICKEN_BITS, 0},
883 	{3, 0, WLAON_PCIE_PHY_PWR_REG, 0},
884 	{3, 0, WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
885 	{3, 0, WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
886 	{3, 0, WLAON_POWERCTRL_PMU_REG, 0},
887 	{3, 0, WLAON_POWERCTRL_MEM_REG, 0},
888 	{3, 0, WLAON_PCIE_PWR_CTRL_REG, 0},
889 	{3, 0, WLAON_SOC_PWR_PROFILE_REG, 0},
890 	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
891 	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
892 	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
893 	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
894 	{3, 0, WLAON_MEM_SVS_CFG_REG, 0},
895 	{3, 0, WLAON_CMN_AON_MISC_REG, 0},
896 	{3, 0, WLAON_INTR_STATUS, 0},
897 	{2, 0, WLAON_INTR_ENABLE, 0},
898 	{2, 0, WLAON_NOC_DBG_BUS_SEL_REG, 0},
899 	{2, 0, WLAON_NOC_DBG_BUS_REG, 0},
900 	{2, 0, WLAON_WL_CTRL_MISC_REG, 0},
901 	{2, 0, WLAON_DBG_STATUS0, 0},
902 	{2, 0, WLAON_DBG_STATUS1, 0},
903 	{2, 0, WLAON_TIMERSYNC_OFFSET_L, 0},
904 	{2, 0, WLAON_TIMERSYNC_OFFSET_H, 0},
905 	{2, 0, WLAON_PMU_LDO_SETTLE_REG, 0},
906 };
907 
908 static struct cnss_misc_reg syspm_reg_access_seq[] = {
909 	{1, 0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
910 	{1, 0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
911 	{1, 0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
912 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
913 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
914 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
915 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
916 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
917 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
918 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
919 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
920 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
921 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
922 };
923 
924 static struct cnss_print_optimize print_optimize;
925 
926 #define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
927 #define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
928 #define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
929 #define SYSPM_REG_SIZE ARRAY_SIZE(syspm_reg_access_seq)
930 
931 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv);
932 static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev);
933 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev);
934 static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
935 				       enum cnss_bus_event_type type,
936 				       void *data);
937 
938 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
939 static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
940 {
941 	mhi_debug_reg_dump(pci_priv->mhi_ctrl);
942 }
943 
944 static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
945 {
946 	mhi_dump_sfr(pci_priv->mhi_ctrl);
947 }
948 
949 static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
950 				      u32 cookie)
951 {
952 	return mhi_scan_rddm_cookie(pci_priv->mhi_ctrl, cookie);
953 }
954 
955 static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
956 				    bool notify_clients)
957 {
958 	return mhi_pm_fast_suspend(pci_priv->mhi_ctrl, notify_clients);
959 }
960 
961 static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
962 				   bool notify_clients)
963 {
964 	return mhi_pm_fast_resume(pci_priv->mhi_ctrl, notify_clients);
965 }
966 
967 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
968 				       u32 timeout)
969 {
970 	return mhi_set_m2_timeout_ms(pci_priv->mhi_ctrl, timeout);
971 }
972 
973 static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
974 					   int timeout_us, bool in_panic)
975 {
976 	return mhi_device_get_sync_atomic(pci_priv->mhi_ctrl->mhi_dev,
977 					  timeout_us, in_panic);
978 }
979 
980 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
981 static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
982 {
983 	return mhi_host_notify_db_disable_trace(pci_priv->mhi_ctrl);
984 }
985 #endif
986 
987 static void
988 cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
989 				    int (*cb)(struct mhi_controller *mhi_ctrl,
990 					      struct mhi_link_info *link_info))
991 {
992 	mhi_controller_set_bw_scale_cb(pci_priv->mhi_ctrl, cb);
993 }
994 
995 static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
996 {
997 	return mhi_force_reset(pci_priv->mhi_ctrl);
998 }
999 
1000 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
1001 				  phys_addr_t base)
1002 {
1003 	return mhi_controller_set_base(pci_priv->mhi_ctrl, base);
1004 }
1005 #else
1006 static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
1007 {
1008 }
1009 
1010 static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
1011 {
1012 }
1013 
1014 static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
1015 				      u32 cookie)
1016 {
1017 	return false;
1018 }
1019 
1020 static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
1021 				    bool notify_clients)
1022 {
1023 	return -EOPNOTSUPP;
1024 }
1025 
1026 static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
1027 				   bool notify_clients)
1028 {
1029 	return -EOPNOTSUPP;
1030 }
1031 
1032 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
1033 				       u32 timeout)
1034 {
1035 }
1036 
1037 static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
1038 					   int timeout_us, bool in_panic)
1039 {
1040 	return -EOPNOTSUPP;
1041 }
1042 
1043 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
1044 static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
1045 {
1046 	return -EOPNOTSUPP;
1047 }
1048 #endif
1049 
1050 static void
1051 cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
1052 				    int (*cb)(struct mhi_controller *mhi_ctrl,
1053 					      struct mhi_link_info *link_info))
1054 {
1055 }
1056 
1057 static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
1058 {
1059 	return -EOPNOTSUPP;
1060 }
1061 
1062 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
1063 				  phys_addr_t base)
1064 {
1065 }
1066 #endif /* CONFIG_MHI_BUS_MISC */
1067 
1068 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
1069 #define CNSS_MHI_WAKE_TIMEOUT		500000
1070 
1071 static void cnss_record_smmu_fault_timestamp(struct cnss_pci_data *pci_priv,
1072 					     enum cnss_smmu_fault_time id)
1073 {
1074 	if (id >= SMMU_CB_MAX)
1075 		return;
1076 
1077 	pci_priv->smmu_fault_timestamp[id] = sched_clock();
1078 }
1079 
1080 static void cnss_pci_smmu_fault_handler_irq(struct iommu_domain *domain,
1081 					    void *handler_token)
1082 {
1083 	struct cnss_pci_data *pci_priv = handler_token;
1084 	int ret = 0;
1085 
1086 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_ENTRY);
1087 	ret = cnss_mhi_device_get_sync_atomic(pci_priv,
1088 					      CNSS_MHI_WAKE_TIMEOUT, true);
1089 	if (ret < 0) {
1090 		cnss_pr_err("Failed to bring mhi in M0 state, ret %d\n", ret);
1091 		return;
1092 	}
1093 
1094 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_DOORBELL_RING);
1095 	ret = cnss_mhi_host_notify_db_disable_trace(pci_priv);
1096 	if (ret < 0)
1097 		cnss_pr_err("Fail to notify wlan fw to stop trace collection, ret %d\n", ret);
1098 
1099 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_EXIT);
1100 }
1101 
1102 void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
1103 {
1104 	qcom_iommu_set_fault_handler_irq(pci_priv->iommu_domain,
1105 					 cnss_pci_smmu_fault_handler_irq, pci_priv);
1106 }
1107 #else
1108 void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
1109 {
1110 }
1111 #endif
1112 
1113 int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
1114 {
1115 	u16 device_id;
1116 
1117 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1118 		cnss_pr_dbg("%ps: PCIe link is in suspend state\n",
1119 			    (void *)_RET_IP_);
1120 		return -EACCES;
1121 	}
1122 
1123 	if (pci_priv->pci_link_down_ind) {
1124 		cnss_pr_err("%ps: PCIe link is down\n", (void *)_RET_IP_);
1125 		return -EIO;
1126 	}
1127 
1128 	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
1129 	if (device_id != pci_priv->device_id)  {
1130 		cnss_fatal_err("%ps: PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
1131 			       (void *)_RET_IP_, device_id,
1132 			       pci_priv->device_id);
1133 		return -EIO;
1134 	}
1135 
1136 	return 0;
1137 }
1138 
1139 static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
1140 {
1141 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1142 
1143 	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
1144 	u32 window_enable = WINDOW_ENABLE_BIT | window;
1145 	u32 val;
1146 
1147 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
1148 		window_enable = QCN7605_WINDOW_ENABLE_BIT | window;
1149 
1150 	if (plat_priv->device_id == PEACH_DEVICE_ID) {
1151 		writel_relaxed(window_enable, pci_priv->bar +
1152 			       PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
1153 	} else {
1154 		writel_relaxed(window_enable, pci_priv->bar +
1155 			       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
1156 	}
1157 
1158 	if (window != pci_priv->remap_window) {
1159 		pci_priv->remap_window = window;
1160 		cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
1161 			    window_enable);
1162 	}
1163 
1164 	/* Read it back to make sure the write has taken effect */
1165 	if (plat_priv->device_id == PEACH_DEVICE_ID) {
1166 		val = readl_relaxed(pci_priv->bar +
1167 			PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
1168 	} else {
1169 		val = readl_relaxed(pci_priv->bar +
1170 			QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
1171 	}
1172 	if (val != window_enable) {
1173 		cnss_pr_err("Failed to config window register to 0x%x, current value: 0x%x\n",
1174 			    window_enable, val);
1175 		if (!cnss_pci_check_link_status(pci_priv) &&
1176 		    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
1177 			CNSS_ASSERT(0);
1178 	}
1179 }
1180 
1181 static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
1182 			     u32 offset, u32 *val)
1183 {
1184 	int ret;
1185 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1186 
1187 	if (!in_interrupt() && !irqs_disabled()) {
1188 		ret = cnss_pci_check_link_status(pci_priv);
1189 		if (ret)
1190 			return ret;
1191 	}
1192 
1193 	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
1194 	    offset < MAX_UNWINDOWED_ADDRESS) {
1195 		*val = readl_relaxed(pci_priv->bar + offset);
1196 		return 0;
1197 	}
1198 
1199 	/* If in panic, assumption is kernel panic handler will hold all threads
1200 	 * and interrupts. Further pci_reg_window_lock could be held before
1201 	 * panic. So only lock during normal operation.
1202 	 */
1203 	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
1204 		cnss_pci_select_window(pci_priv, offset);
1205 		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
1206 				     (offset & WINDOW_RANGE_MASK));
1207 	} else {
1208 		spin_lock_bh(&pci_reg_window_lock);
1209 		cnss_pci_select_window(pci_priv, offset);
1210 		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
1211 				     (offset & WINDOW_RANGE_MASK));
1212 		spin_unlock_bh(&pci_reg_window_lock);
1213 	}
1214 
1215 	return 0;
1216 }
1217 
1218 static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
1219 			      u32 val)
1220 {
1221 	int ret;
1222 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1223 
1224 	if (!in_interrupt() && !irqs_disabled()) {
1225 		ret = cnss_pci_check_link_status(pci_priv);
1226 		if (ret)
1227 			return ret;
1228 	}
1229 
1230 	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
1231 	    offset < MAX_UNWINDOWED_ADDRESS) {
1232 		writel_relaxed(val, pci_priv->bar + offset);
1233 		return 0;
1234 	}
1235 
1236 	/* Same constraint as PCI register read in panic */
1237 	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
1238 		cnss_pci_select_window(pci_priv, offset);
1239 		writel_relaxed(val, pci_priv->bar + WINDOW_START +
1240 			  (offset & WINDOW_RANGE_MASK));
1241 	} else {
1242 		spin_lock_bh(&pci_reg_window_lock);
1243 		cnss_pci_select_window(pci_priv, offset);
1244 		writel_relaxed(val, pci_priv->bar + WINDOW_START +
1245 			  (offset & WINDOW_RANGE_MASK));
1246 		spin_unlock_bh(&pci_reg_window_lock);
1247 	}
1248 
1249 	return 0;
1250 }
1251 
1252 static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv)
1253 {
1254 	struct device *dev = &pci_priv->pci_dev->dev;
1255 	int ret;
1256 
1257 	ret = cnss_pci_force_wake_request_sync(dev,
1258 					       FORCE_WAKE_DELAY_TIMEOUT_US);
1259 	if (ret) {
1260 		if (ret != -EAGAIN)
1261 			cnss_pr_err("Failed to request force wake\n");
1262 		return ret;
1263 	}
1264 
1265 	/* If device's M1 state-change event races here, it can be ignored,
1266 	 * as the device is expected to immediately move from M2 to M0
1267 	 * without entering low power state.
1268 	 */
1269 	if (cnss_pci_is_device_awake(dev) != true)
1270 		cnss_pr_warn("MHI not in M0, while reg still accessible\n");
1271 
1272 	return 0;
1273 }
1274 
1275 static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
1276 {
1277 	struct device *dev = &pci_priv->pci_dev->dev;
1278 	int ret;
1279 
1280 	ret = cnss_pci_force_wake_release(dev);
1281 	if (ret && ret != -EAGAIN)
1282 		cnss_pr_err("Failed to release force wake\n");
1283 
1284 	return ret;
1285 }
1286 
1287 #if IS_ENABLED(CONFIG_INTERCONNECT)
1288 /**
1289  * cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
1290  * @plat_priv: Platform private data struct
1291  * @bw: bandwidth
1292  * @save: toggle flag to save bandwidth to current_bw_vote
1293  *
1294  * Setup bandwidth votes for configured interconnect paths
1295  *
1296  * Return: 0 for success
1297  */
1298 static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
1299 				    u32 bw, bool save)
1300 {
1301 	int ret = 0;
1302 	struct cnss_bus_bw_info *bus_bw_info;
1303 
1304 	if (!plat_priv->icc.path_count)
1305 		return -EOPNOTSUPP;
1306 
1307 	if (bw >= plat_priv->icc.bus_bw_cfg_count) {
1308 		cnss_pr_err("Invalid bus bandwidth Type: %d", bw);
1309 		return -EINVAL;
1310 	}
1311 
1312 	cnss_pr_buf("Bandwidth vote to %d, save %d\n", bw, save);
1313 
1314 	list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
1315 		ret = icc_set_bw(bus_bw_info->icc_path,
1316 				 bus_bw_info->cfg_table[bw].avg_bw,
1317 				 bus_bw_info->cfg_table[bw].peak_bw);
1318 		if (ret) {
1319 			cnss_pr_err("Could not set BW Cfg: %d, err = %d ICC Path: %s Val: %d %d\n",
1320 				    bw, ret, bus_bw_info->icc_name,
1321 				    bus_bw_info->cfg_table[bw].avg_bw,
1322 				    bus_bw_info->cfg_table[bw].peak_bw);
1323 			break;
1324 		}
1325 	}
1326 	if (ret == 0 && save)
1327 		plat_priv->icc.current_bw_vote = bw;
1328 	return ret;
1329 }
1330 
1331 int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
1332 {
1333 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1334 
1335 	if (!plat_priv)
1336 		return -ENODEV;
1337 
1338 	if (bandwidth < 0)
1339 		return -EINVAL;
1340 
1341 	return cnss_setup_bus_bandwidth(plat_priv, (u32)bandwidth, true);
1342 }
1343 #else
1344 static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
1345 				    u32 bw, bool save)
1346 {
1347 	return 0;
1348 }
1349 
1350 int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
1351 {
1352 	return 0;
1353 }
1354 #endif
1355 EXPORT_SYMBOL(cnss_request_bus_bandwidth);
1356 
1357 int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
1358 			    u32 *val, bool raw_access)
1359 {
1360 	int ret = 0;
1361 	bool do_force_wake_put = true;
1362 
1363 	if (raw_access) {
1364 		ret = cnss_pci_reg_read(pci_priv, offset, val);
1365 		goto out;
1366 	}
1367 
1368 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1369 	if (ret)
1370 		goto out;
1371 
1372 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1373 	if (ret < 0)
1374 		goto runtime_pm_put;
1375 
1376 	ret = cnss_pci_force_wake_get(pci_priv);
1377 	if (ret)
1378 		do_force_wake_put = false;
1379 
1380 	ret = cnss_pci_reg_read(pci_priv, offset, val);
1381 	if (ret) {
1382 		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
1383 			    offset, ret);
1384 		goto force_wake_put;
1385 	}
1386 
1387 force_wake_put:
1388 	if (do_force_wake_put)
1389 		cnss_pci_force_wake_put(pci_priv);
1390 runtime_pm_put:
1391 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1392 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1393 out:
1394 	return ret;
1395 }
1396 
1397 int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
1398 			     u32 val, bool raw_access)
1399 {
1400 	int ret = 0;
1401 	bool do_force_wake_put = true;
1402 
1403 	if (raw_access) {
1404 		ret = cnss_pci_reg_write(pci_priv, offset, val);
1405 		goto out;
1406 	}
1407 
1408 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1409 	if (ret)
1410 		goto out;
1411 
1412 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1413 	if (ret < 0)
1414 		goto runtime_pm_put;
1415 
1416 	ret = cnss_pci_force_wake_get(pci_priv);
1417 	if (ret)
1418 		do_force_wake_put = false;
1419 
1420 	ret = cnss_pci_reg_write(pci_priv, offset, val);
1421 	if (ret) {
1422 		cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
1423 			    val, offset, ret);
1424 		goto force_wake_put;
1425 	}
1426 
1427 force_wake_put:
1428 	if (do_force_wake_put)
1429 		cnss_pci_force_wake_put(pci_priv);
1430 runtime_pm_put:
1431 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1432 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1433 out:
1434 	return ret;
1435 }
1436 
1437 static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
1438 {
1439 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1440 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1441 	bool link_down_or_recovery;
1442 
1443 	if (!plat_priv)
1444 		return -ENODEV;
1445 
1446 	link_down_or_recovery = pci_priv->pci_link_down_ind ||
1447 		(test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
1448 
1449 	if (save) {
1450 		if (link_down_or_recovery) {
1451 			pci_priv->saved_state = NULL;
1452 		} else {
1453 			pci_save_state(pci_dev);
1454 			pci_priv->saved_state = pci_store_saved_state(pci_dev);
1455 		}
1456 	} else {
1457 		if (link_down_or_recovery) {
1458 			pci_load_saved_state(pci_dev, pci_priv->default_state);
1459 			pci_restore_state(pci_dev);
1460 		} else if (pci_priv->saved_state) {
1461 			pci_load_and_free_saved_state(pci_dev,
1462 						      &pci_priv->saved_state);
1463 			pci_restore_state(pci_dev);
1464 		}
1465 	}
1466 
1467 	return 0;
1468 }
1469 
1470 static int cnss_update_supported_link_info(struct cnss_pci_data *pci_priv)
1471 {
1472 	int ret = 0;
1473 	struct pci_dev *root_port;
1474 	struct device_node *root_of_node;
1475 	struct cnss_plat_data *plat_priv;
1476 
1477 	if (!pci_priv)
1478 		return -EINVAL;
1479 
1480 	if (pci_priv->device_id != KIWI_DEVICE_ID)
1481 		return ret;
1482 
1483 	plat_priv = pci_priv->plat_priv;
1484 	root_port = pcie_find_root_port(pci_priv->pci_dev);
1485 
1486 	if (!root_port) {
1487 		cnss_pr_err("PCIe root port is null\n");
1488 		return -EINVAL;
1489 	}
1490 
1491 	root_of_node = root_port->dev.of_node;
1492 	if (root_of_node && root_of_node->parent) {
1493 		ret = of_property_read_u32(root_of_node->parent,
1494 					   "qcom,target-link-speed",
1495 					   &plat_priv->supported_link_speed);
1496 		if (!ret)
1497 			cnss_pr_dbg("Supported PCIe Link Speed: %d\n",
1498 				    plat_priv->supported_link_speed);
1499 		else
1500 			plat_priv->supported_link_speed = 0;
1501 	}
1502 
1503 	return ret;
1504 }
1505 
1506 static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
1507 {
1508 	u16 link_status;
1509 	int ret;
1510 
1511 	ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
1512 					&link_status);
1513 	if (ret)
1514 		return ret;
1515 
1516 	cnss_pr_dbg("Get PCI link status register: %u\n", link_status);
1517 
1518 	pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
1519 	pci_priv->def_link_width =
1520 		(link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
1521 	pci_priv->cur_link_speed = pci_priv->def_link_speed;
1522 
1523 	cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
1524 		    pci_priv->def_link_speed, pci_priv->def_link_width);
1525 
1526 	return 0;
1527 }
1528 
1529 static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv)
1530 {
1531 	u32 reg_offset, val;
1532 	int i;
1533 
1534 	switch (pci_priv->device_id) {
1535 	case QCA6390_DEVICE_ID:
1536 	case QCA6490_DEVICE_ID:
1537 	case KIWI_DEVICE_ID:
1538 	case MANGO_DEVICE_ID:
1539 	case PEACH_DEVICE_ID:
1540 		break;
1541 	default:
1542 		return;
1543 	}
1544 
1545 	if (in_interrupt() || irqs_disabled())
1546 		return;
1547 
1548 	if (cnss_pci_check_link_status(pci_priv))
1549 		return;
1550 
1551 	cnss_pr_dbg("Start to dump SOC Scratch registers\n");
1552 
1553 	for (i = 0; pci_scratch[i].name; i++) {
1554 		reg_offset = pci_scratch[i].offset;
1555 		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
1556 			return;
1557 		cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n",
1558 			    pci_scratch[i].name, val);
1559 	}
1560 }
1561 
1562 static void cnss_pci_soc_reset_cause_reg_dump(struct cnss_pci_data *pci_priv)
1563 {
1564 	u32 val;
1565 
1566 	switch (pci_priv->device_id) {
1567 	case PEACH_DEVICE_ID:
1568 		break;
1569 	default:
1570 		return;
1571 	}
1572 
1573 	if (in_interrupt() || irqs_disabled())
1574 		return;
1575 
1576 	if (cnss_pci_check_link_status(pci_priv))
1577 		return;
1578 
1579 	cnss_pr_dbg("Start to dump SOC Reset Cause registers\n");
1580 
1581 	if (cnss_pci_reg_read(pci_priv, WLAON_SOC_RESET_CAUSE_SHADOW_REG,
1582 			      &val))
1583 		return;
1584 	cnss_pr_dbg("WLAON_SOC_RESET_CAUSE_SHADOW_REG = 0x%x\n",
1585 		     val);
1586 
1587 }
1588 
1589 static void cnss_pci_bhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
1590 {
1591 	u32 reg_offset, val;
1592 	int i;
1593 
1594 	switch (pci_priv->device_id) {
1595 	case PEACH_DEVICE_ID:
1596 		break;
1597 	default:
1598 		return;
1599 	}
1600 
1601 	if (cnss_pci_check_link_status(pci_priv))
1602 		return;
1603 
1604 	cnss_pr_dbg("Start to dump PCIE BHIE DEBUG registers\n");
1605 
1606 	for (i = 0; pci_bhi_debug[i].name; i++) {
1607 		reg_offset = pci_bhi_debug[i].offset;
1608 		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
1609 			return;
1610 		cnss_pr_dbg("PCIE__%s = 0x%x\n",
1611 			     pci_bhi_debug[i].name, val);
1612 	}
1613 }
1614 
1615 int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
1616 {
1617 	int ret = 0;
1618 
1619 	if (!pci_priv)
1620 		return -ENODEV;
1621 
1622 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1623 		cnss_pr_info("PCI link is already suspended\n");
1624 		goto out;
1625 	}
1626 
1627 	pci_clear_master(pci_priv->pci_dev);
1628 
1629 	ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
1630 	if (ret)
1631 		goto out;
1632 
1633 	pci_disable_device(pci_priv->pci_dev);
1634 
1635 	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
1636 		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
1637 		if (ret)
1638 			cnss_pr_err("Failed to set D3Hot, err =  %d\n", ret);
1639 	}
1640 
1641 	/* Always do PCIe L2 suspend during power off/PCIe link recovery */
1642 	pci_priv->drv_connected_last = 0;
1643 
1644 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
1645 	if (ret)
1646 		goto out;
1647 
1648 	pci_priv->pci_link_state = PCI_LINK_DOWN;
1649 
1650 	return 0;
1651 out:
1652 	return ret;
1653 }
1654 
1655 int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
1656 {
1657 	int ret = 0;
1658 
1659 	if (!pci_priv)
1660 		return -ENODEV;
1661 
1662 	if (pci_priv->pci_link_state == PCI_LINK_UP) {
1663 		cnss_pr_info("PCI link is already resumed\n");
1664 		goto out;
1665 	}
1666 
1667 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
1668 	if (ret) {
1669 		ret = -EAGAIN;
1670 		cnss_pci_update_link_event(pci_priv,
1671 					   BUS_EVENT_PCI_LINK_RESUME_FAIL, NULL);
1672 		goto out;
1673 	}
1674 
1675 	pci_priv->pci_link_state = PCI_LINK_UP;
1676 
1677 	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
1678 		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D0);
1679 		if (ret) {
1680 			cnss_pr_err("Failed to set D0, err = %d\n", ret);
1681 			goto out;
1682 		}
1683 	}
1684 
1685 	ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
1686 	if (ret)
1687 		goto out;
1688 
1689 	ret = pci_enable_device(pci_priv->pci_dev);
1690 	if (ret) {
1691 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
1692 		goto out;
1693 	}
1694 
1695 	pci_set_master(pci_priv->pci_dev);
1696 
1697 	if (pci_priv->pci_link_down_ind)
1698 		pci_priv->pci_link_down_ind = false;
1699 
1700 	return 0;
1701 out:
1702 	return ret;
1703 }
1704 
1705 static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
1706 				       enum cnss_bus_event_type type,
1707 				       void *data)
1708 {
1709 	struct cnss_bus_event bus_event;
1710 
1711 	bus_event.etype = type;
1712 	bus_event.event_data = data;
1713 	cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
1714 }
1715 
1716 void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
1717 {
1718 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1719 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1720 	unsigned long flags;
1721 
1722 	if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
1723 		     &plat_priv->ctrl_params.quirks))
1724 		panic("cnss: PCI link is down\n");
1725 
1726 	spin_lock_irqsave(&pci_link_down_lock, flags);
1727 	if (pci_priv->pci_link_down_ind) {
1728 		cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
1729 		spin_unlock_irqrestore(&pci_link_down_lock, flags);
1730 		return;
1731 	}
1732 	pci_priv->pci_link_down_ind = true;
1733 	spin_unlock_irqrestore(&pci_link_down_lock, flags);
1734 
1735 	if (pci_priv->mhi_ctrl) {
1736 		/* Notify MHI about link down*/
1737 		mhi_report_error(pci_priv->mhi_ctrl);
1738 	}
1739 
1740 	if (pci_dev->device == QCA6174_DEVICE_ID)
1741 		disable_irq_nosync(pci_dev->irq);
1742 
1743 	/* Notify bus related event. Now for all supported chips.
1744 	 * Here PCIe LINK_DOWN notification taken care.
1745 	 * uevent buffer can be extended later, to cover more bus info.
1746 	 */
1747 	cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL);
1748 
1749 	cnss_fatal_err("PCI link down, schedule recovery\n");
1750 	reinit_completion(&pci_priv->wake_event_complete);
1751 	cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
1752 }
1753 
1754 int cnss_pci_link_down(struct device *dev)
1755 {
1756 	struct pci_dev *pci_dev = to_pci_dev(dev);
1757 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1758 	struct cnss_plat_data *plat_priv = NULL;
1759 	int ret;
1760 
1761 	if (!pci_priv) {
1762 		cnss_pr_err("pci_priv is NULL\n");
1763 		return -EINVAL;
1764 	}
1765 
1766 	plat_priv = pci_priv->plat_priv;
1767 	if (!plat_priv) {
1768 		cnss_pr_err("plat_priv is NULL\n");
1769 		return -ENODEV;
1770 	}
1771 
1772 	if (pci_priv->pci_link_down_ind) {
1773 		cnss_pr_dbg("PCI link down recovery is already in progress\n");
1774 		return -EBUSY;
1775 	}
1776 
1777 	if (pci_priv->drv_connected_last &&
1778 	    of_property_read_bool(plat_priv->plat_dev->dev.of_node,
1779 				  "cnss-enable-self-recovery"))
1780 		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);
1781 
1782 	cnss_pr_err("PCI link down is detected by drivers\n");
1783 
1784 	ret = cnss_pci_assert_perst(pci_priv);
1785 	if (ret)
1786 		cnss_pci_handle_linkdown(pci_priv);
1787 
1788 	return ret;
1789 }
1790 EXPORT_SYMBOL(cnss_pci_link_down);
1791 
1792 int cnss_pci_get_reg_dump(struct device *dev, uint8_t *buffer, uint32_t len)
1793 {
1794 	struct pci_dev *pci_dev = to_pci_dev(dev);
1795 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1796 
1797 	if (!pci_priv) {
1798 		cnss_pr_err("pci_priv is NULL\n");
1799 		return -ENODEV;
1800 	}
1801 
1802 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1803 		cnss_pr_dbg("No PCIe reg dump since PCIe is suspended(D3)\n");
1804 		return -EACCES;
1805 	}
1806 
1807 	cnss_pr_dbg("Start to get PCIe reg dump\n");
1808 
1809 	return _cnss_pci_get_reg_dump(pci_priv, buffer, len);
1810 }
1811 EXPORT_SYMBOL(cnss_pci_get_reg_dump);
1812 
1813 int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv)
1814 {
1815 	struct cnss_plat_data *plat_priv;
1816 
1817 	if (!pci_priv) {
1818 		cnss_pr_err("pci_priv is NULL\n");
1819 		return -ENODEV;
1820 	}
1821 
1822 	plat_priv = pci_priv->plat_priv;
1823 	if (!plat_priv) {
1824 		cnss_pr_err("plat_priv is NULL\n");
1825 		return -ENODEV;
1826 	}
1827 
1828 	return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) |
1829 		pci_priv->pci_link_down_ind;
1830 }
1831 
1832 int cnss_pci_is_device_down(struct device *dev)
1833 {
1834 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
1835 
1836 	return cnss_pcie_is_device_down(pci_priv);
1837 }
1838 EXPORT_SYMBOL(cnss_pci_is_device_down);
1839 
1840 int cnss_pci_shutdown_cleanup(struct cnss_pci_data *pci_priv)
1841 {
1842 	int ret;
1843 
1844 	if (!pci_priv) {
1845 		cnss_pr_err("pci_priv is NULL\n");
1846 		return -ENODEV;
1847 	}
1848 
1849 	ret = del_timer(&pci_priv->dev_rddm_timer);
1850 	cnss_pr_dbg("%s RDDM timer deleted", ret ? "Active" : "Inactive");
1851 	return ret;
1852 }
1853 
1854 void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
1855 {
1856 	spin_lock_bh(&pci_reg_window_lock);
1857 }
1858 EXPORT_SYMBOL(cnss_pci_lock_reg_window);
1859 
1860 void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
1861 {
1862 	spin_unlock_bh(&pci_reg_window_lock);
1863 }
1864 EXPORT_SYMBOL(cnss_pci_unlock_reg_window);
1865 
1866 int cnss_get_pci_slot(struct device *dev)
1867 {
1868 	struct pci_dev *pci_dev = to_pci_dev(dev);
1869 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1870 	struct cnss_plat_data *plat_priv = NULL;
1871 
1872 	if (!pci_priv) {
1873 		cnss_pr_err("pci_priv is NULL\n");
1874 		return -EINVAL;
1875 	}
1876 
1877 	plat_priv = pci_priv->plat_priv;
1878 	if (!plat_priv) {
1879 		cnss_pr_err("plat_priv is NULL\n");
1880 		return -ENODEV;
1881 	}
1882 
1883 	return plat_priv->rc_num;
1884 }
1885 EXPORT_SYMBOL(cnss_get_pci_slot);
1886 
1887 /**
1888  * cnss_pci_dump_bl_sram_mem - Dump WLAN device bootloader debug log
1889  * @pci_priv: driver PCI bus context pointer
1890  *
1891  * Dump primary and secondary bootloader debug log data. For SBL check the
1892  * log struct address and size for validity.
1893  *
1894  * Return: None
1895  */
1896 static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
1897 {
1898 	enum mhi_ee_type ee;
1899 	u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
1900 	u32 pbl_log_sram_start;
1901 	u32 pbl_stage, sbl_log_start, sbl_log_size;
1902 	u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
1903 	u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
1904 	u32 sbl_log_def_start = SRAM_START;
1905 	u32 sbl_log_def_end = SRAM_END;
1906 	int i;
1907 
1908 	cnss_pci_soc_reset_cause_reg_dump(pci_priv);
1909 
1910 	switch (pci_priv->device_id) {
1911 	case QCA6390_DEVICE_ID:
1912 		pbl_log_sram_start = QCA6390_DEBUG_PBL_LOG_SRAM_START;
1913 		pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1914 		sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1915 		break;
1916 	case QCA6490_DEVICE_ID:
1917 		pbl_log_sram_start = QCA6490_DEBUG_PBL_LOG_SRAM_START;
1918 		pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1919 		sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1920 		break;
1921 	case KIWI_DEVICE_ID:
1922 		pbl_bootstrap_status_reg = KIWI_PBL_BOOTSTRAP_STATUS;
1923 		pbl_log_sram_start = KIWI_DEBUG_PBL_LOG_SRAM_START;
1924 		pbl_log_max_size = KIWI_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1925 		sbl_log_max_size = KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1926 		break;
1927 	case MANGO_DEVICE_ID:
1928 		pbl_bootstrap_status_reg = MANGO_PBL_BOOTSTRAP_STATUS;
1929 		pbl_log_sram_start = MANGO_DEBUG_PBL_LOG_SRAM_START;
1930 		pbl_log_max_size = MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1931 		sbl_log_max_size = MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1932 		break;
1933 	case PEACH_DEVICE_ID:
1934 		pbl_bootstrap_status_reg = PEACH_PBL_BOOTSTRAP_STATUS;
1935 		pbl_log_sram_start = PEACH_DEBUG_PBL_LOG_SRAM_START;
1936 		pbl_log_max_size = PEACH_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1937 		sbl_log_max_size = PEACH_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1938 		break;
1939 	default:
1940 		return;
1941 	}
1942 
1943 	if (cnss_pci_check_link_status(pci_priv))
1944 		return;
1945 
1946 	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
1947 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
1948 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
1949 	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
1950 	cnss_pci_reg_read(pci_priv, pbl_bootstrap_status_reg,
1951 			  &pbl_bootstrap_status);
1952 	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x\n",
1953 		    pbl_stage, sbl_log_start, sbl_log_size);
1954 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
1955 		    pbl_wlan_boot_cfg, pbl_bootstrap_status);
1956 
1957 	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
1958 	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
1959 		cnss_pr_err("Avoid Dumping PBL log data in Mission mode\n");
1960 		return;
1961 	}
1962 
1963 	cnss_pr_dbg("Dumping PBL log data\n");
1964 	for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
1965 		mem_addr = pbl_log_sram_start + i;
1966 		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
1967 			break;
1968 		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
1969 	}
1970 
1971 	sbl_log_size = (sbl_log_size > sbl_log_max_size ?
1972 			sbl_log_max_size : sbl_log_size);
1973 	if (sbl_log_start < sbl_log_def_start ||
1974 	    sbl_log_start > sbl_log_def_end ||
1975 	    (sbl_log_start + sbl_log_size) > sbl_log_def_end) {
1976 		cnss_pr_err("Invalid SBL log data\n");
1977 		return;
1978 	}
1979 
1980 	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
1981 	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
1982 		cnss_pr_err("Avoid Dumping SBL log data in Mission mode\n");
1983 		return;
1984 	}
1985 
1986 	cnss_pr_dbg("Dumping SBL log data\n");
1987 	for (i = 0; i < sbl_log_size; i += sizeof(val)) {
1988 		mem_addr = sbl_log_start + i;
1989 		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
1990 			break;
1991 		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
1992 	}
1993 }
1994 
1995 #ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
1996 static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
1997 {
1998 }
1999 #else
2000 static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
2001 {
2002 	struct cnss_plat_data *plat_priv;
2003 	u32 i, mem_addr;
2004 	u32 *dump_ptr;
2005 
2006 	plat_priv = pci_priv->plat_priv;
2007 
2008 	if (plat_priv->device_id != QCA6490_DEVICE_ID ||
2009 	    cnss_get_host_build_type() != QMI_HOST_BUILD_TYPE_PRIMARY_V01)
2010 		return;
2011 
2012 	if (!plat_priv->sram_dump) {
2013 		cnss_pr_err("SRAM dump memory is not allocated\n");
2014 		return;
2015 	}
2016 
2017 	if (cnss_pci_check_link_status(pci_priv))
2018 		return;
2019 
2020 	cnss_pr_dbg("Dumping SRAM at 0x%lx\n", plat_priv->sram_dump);
2021 
2022 	for (i = 0; i < SRAM_DUMP_SIZE; i += sizeof(u32)) {
2023 		mem_addr = SRAM_START + i;
2024 		dump_ptr = (u32 *)(plat_priv->sram_dump + i);
2025 		if (cnss_pci_reg_read(pci_priv, mem_addr, dump_ptr)) {
2026 			cnss_pr_err("SRAM Dump failed at 0x%x\n", mem_addr);
2027 			break;
2028 		}
2029 		/* Relinquish CPU after dumping 256KB chunks*/
2030 		if (!(i % CNSS_256KB_SIZE))
2031 			cond_resched();
2032 	}
2033 }
2034 #endif
2035 
2036 static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
2037 {
2038 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2039 
2040 	cnss_fatal_err("MHI power up returns timeout\n");
2041 
2042 	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE) ||
2043 	    cnss_get_dev_sol_value(plat_priv) > 0) {
2044 		/* Wait for RDDM if RDDM cookie is set or device SOL GPIO is
2045 		 * high. If RDDM times out, PBL/SBL error region may have been
2046 		 * erased so no need to dump them either.
2047 		 */
2048 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
2049 		    !pci_priv->pci_link_down_ind) {
2050 			mod_timer(&pci_priv->dev_rddm_timer,
2051 				  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
2052 		}
2053 	} else {
2054 		cnss_pr_dbg("RDDM cookie is not set and device SOL is low\n");
2055 		cnss_mhi_debug_reg_dump(pci_priv);
2056 		cnss_pci_bhi_debug_reg_dump(pci_priv);
2057 		cnss_pci_soc_scratch_reg_dump(pci_priv);
2058 		/* Dump PBL/SBL error log if RDDM cookie is not set */
2059 		cnss_pci_dump_bl_sram_mem(pci_priv);
2060 		cnss_pci_dump_sram(pci_priv);
2061 		return -ETIMEDOUT;
2062 	}
2063 
2064 	return 0;
2065 }
2066 
2067 static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
2068 {
2069 	switch (mhi_state) {
2070 	case CNSS_MHI_INIT:
2071 		return "INIT";
2072 	case CNSS_MHI_DEINIT:
2073 		return "DEINIT";
2074 	case CNSS_MHI_POWER_ON:
2075 		return "POWER_ON";
2076 	case CNSS_MHI_POWERING_OFF:
2077 		return "POWERING_OFF";
2078 	case CNSS_MHI_POWER_OFF:
2079 		return "POWER_OFF";
2080 	case CNSS_MHI_FORCE_POWER_OFF:
2081 		return "FORCE_POWER_OFF";
2082 	case CNSS_MHI_SUSPEND:
2083 		return "SUSPEND";
2084 	case CNSS_MHI_RESUME:
2085 		return "RESUME";
2086 	case CNSS_MHI_TRIGGER_RDDM:
2087 		return "TRIGGER_RDDM";
2088 	case CNSS_MHI_RDDM_DONE:
2089 		return "RDDM_DONE";
2090 	default:
2091 		return "UNKNOWN";
2092 	}
2093 };
2094 
2095 static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
2096 					enum cnss_mhi_state mhi_state)
2097 {
2098 	switch (mhi_state) {
2099 	case CNSS_MHI_INIT:
2100 		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
2101 			return 0;
2102 		break;
2103 	case CNSS_MHI_DEINIT:
2104 	case CNSS_MHI_POWER_ON:
2105 		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
2106 		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
2107 			return 0;
2108 		break;
2109 	case CNSS_MHI_FORCE_POWER_OFF:
2110 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
2111 			return 0;
2112 		break;
2113 	case CNSS_MHI_POWER_OFF:
2114 	case CNSS_MHI_SUSPEND:
2115 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
2116 		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
2117 			return 0;
2118 		break;
2119 	case CNSS_MHI_RESUME:
2120 		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
2121 			return 0;
2122 		break;
2123 	case CNSS_MHI_TRIGGER_RDDM:
2124 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
2125 		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
2126 			return 0;
2127 		break;
2128 	case CNSS_MHI_RDDM_DONE:
2129 		return 0;
2130 	default:
2131 		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
2132 			    cnss_mhi_state_to_str(mhi_state), mhi_state);
2133 	}
2134 
2135 	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
2136 		    cnss_mhi_state_to_str(mhi_state), mhi_state,
2137 		    pci_priv->mhi_state);
2138 	if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
2139 		CNSS_ASSERT(0);
2140 
2141 	return -EINVAL;
2142 }
2143 
2144 static int cnss_rddm_trigger_debug(struct cnss_pci_data *pci_priv)
2145 {
2146 	int read_val, ret;
2147 
2148 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
2149 		return -EOPNOTSUPP;
2150 
2151 	if (cnss_pci_check_link_status(pci_priv))
2152 		return -EINVAL;
2153 
2154 	cnss_pr_err("Write GCC Spare with ACE55 Pattern");
2155 	cnss_pci_reg_write(pci_priv, GCC_GCC_SPARE_REG_1, 0xACE55);
2156 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
2157 	cnss_pr_err("Read back GCC Spare: 0x%x, ret: %d", read_val, ret);
2158 	ret = cnss_pci_reg_read(pci_priv, GCC_PRE_ARES_DEBUG_TIMER_VAL,
2159 				&read_val);
2160 	cnss_pr_err("Warm reset allowed check: 0x%x, ret: %d", read_val, ret);
2161 	return ret;
2162 }
2163 
2164 static int cnss_rddm_trigger_check(struct cnss_pci_data *pci_priv)
2165 {
2166 	int read_val, ret;
2167 	u32 pbl_stage, sbl_log_start, sbl_log_size, pbl_wlan_boot_cfg;
2168 
2169 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
2170 		return -EOPNOTSUPP;
2171 
2172 	if (cnss_pci_check_link_status(pci_priv))
2173 		return -EINVAL;
2174 
2175 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
2176 	cnss_pr_err("Read GCC spare to check reset status: 0x%x, ret: %d",
2177 		    read_val, ret);
2178 
2179 	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
2180 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
2181 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
2182 	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
2183 	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x \n",
2184 		    pbl_stage, sbl_log_start, sbl_log_size);
2185 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x\n", pbl_wlan_boot_cfg);
2186 
2187 	return ret;
2188 }
2189 
2190 static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
2191 				       enum cnss_mhi_state mhi_state)
2192 {
2193 	switch (mhi_state) {
2194 	case CNSS_MHI_INIT:
2195 		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
2196 		break;
2197 	case CNSS_MHI_DEINIT:
2198 		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
2199 		break;
2200 	case CNSS_MHI_POWER_ON:
2201 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2202 		break;
2203 	case CNSS_MHI_POWERING_OFF:
2204 		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
2205 		break;
2206 	case CNSS_MHI_POWER_OFF:
2207 	case CNSS_MHI_FORCE_POWER_OFF:
2208 		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2209 		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
2210 		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
2211 		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
2212 		break;
2213 	case CNSS_MHI_SUSPEND:
2214 		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
2215 		break;
2216 	case CNSS_MHI_RESUME:
2217 		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
2218 		break;
2219 	case CNSS_MHI_TRIGGER_RDDM:
2220 		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
2221 		break;
2222 	case CNSS_MHI_RDDM_DONE:
2223 		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
2224 		break;
2225 	default:
2226 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
2227 	}
2228 }
2229 
2230 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
2231 static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
2232 {
2233 	return mhi_pm_resume_force(pci_priv->mhi_ctrl);
2234 }
2235 #else
2236 static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
2237 {
2238 	return mhi_pm_resume(pci_priv->mhi_ctrl);
2239 }
2240 #endif
2241 
2242 static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
2243 				  enum cnss_mhi_state mhi_state)
2244 {
2245 	int ret = 0, retry = 0;
2246 
2247 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
2248 		return 0;
2249 
2250 	if (mhi_state < 0) {
2251 		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
2252 		return -EINVAL;
2253 	}
2254 
2255 	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
2256 	if (ret)
2257 		goto out;
2258 
2259 	cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
2260 		     cnss_mhi_state_to_str(mhi_state), mhi_state);
2261 
2262 	switch (mhi_state) {
2263 	case CNSS_MHI_INIT:
2264 		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
2265 		break;
2266 	case CNSS_MHI_DEINIT:
2267 		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
2268 		ret = 0;
2269 		break;
2270 	case CNSS_MHI_POWER_ON:
2271 		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
2272 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
2273 		/* Only set img_pre_alloc when power up succeeds */
2274 		if (!ret && !pci_priv->mhi_ctrl->img_pre_alloc) {
2275 			cnss_pr_dbg("Notify MHI to use already allocated images\n");
2276 			pci_priv->mhi_ctrl->img_pre_alloc = true;
2277 		}
2278 #endif
2279 		break;
2280 	case CNSS_MHI_POWER_OFF:
2281 		mhi_power_down(pci_priv->mhi_ctrl, true);
2282 		ret = 0;
2283 		break;
2284 	case CNSS_MHI_FORCE_POWER_OFF:
2285 		mhi_power_down(pci_priv->mhi_ctrl, false);
2286 		ret = 0;
2287 		break;
2288 	case CNSS_MHI_SUSPEND:
2289 retry_mhi_suspend:
2290 		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
2291 		if (pci_priv->drv_connected_last)
2292 			ret = cnss_mhi_pm_fast_suspend(pci_priv, true);
2293 		else
2294 			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
2295 		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2296 		if (ret == -EBUSY && retry++ < MHI_SUSPEND_RETRY_MAX_TIMES) {
2297 			cnss_pr_vdbg("Retry MHI suspend #%d\n", retry);
2298 			usleep_range(MHI_SUSPEND_RETRY_DELAY_US,
2299 				     MHI_SUSPEND_RETRY_DELAY_US + 1000);
2300 			goto retry_mhi_suspend;
2301 		}
2302 		break;
2303 	case CNSS_MHI_RESUME:
2304 		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
2305 		if (pci_priv->drv_connected_last) {
2306 			ret = cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
2307 			if (ret) {
2308 				mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2309 				break;
2310 			}
2311 			ret = cnss_mhi_pm_fast_resume(pci_priv, true);
2312 			cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
2313 		} else {
2314 			if (pci_priv->device_id == QCA6390_DEVICE_ID)
2315 				ret = cnss_mhi_pm_force_resume(pci_priv);
2316 			else
2317 				ret = mhi_pm_resume(pci_priv->mhi_ctrl);
2318 		}
2319 		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2320 		break;
2321 	case CNSS_MHI_TRIGGER_RDDM:
2322 		cnss_rddm_trigger_debug(pci_priv);
2323 		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
2324 		if (ret) {
2325 			cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
2326 			cnss_rddm_trigger_check(pci_priv);
2327 		}
2328 		break;
2329 	case CNSS_MHI_RDDM_DONE:
2330 		break;
2331 	default:
2332 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
2333 		ret = -EINVAL;
2334 	}
2335 
2336 	if (ret)
2337 		goto out;
2338 
2339 	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);
2340 
2341 	return 0;
2342 
2343 out:
2344 	cnss_pr_err("Failed to set MHI state: %s(%d), err = %d\n",
2345 		    cnss_mhi_state_to_str(mhi_state), mhi_state, ret);
2346 	return ret;
2347 }
2348 
2349 static int cnss_pci_config_msi_addr(struct cnss_pci_data *pci_priv)
2350 {
2351 	int ret = 0;
2352 	struct pci_dev *pci_dev = pci_priv->pci_dev;
2353 	struct cnss_plat_data *plat_priv;
2354 
2355 	if (!pci_dev)
2356 		return -ENODEV;
2357 
2358 	if (!pci_dev->msix_enabled)
2359 		return ret;
2360 
2361 	plat_priv = pci_priv->plat_priv;
2362 	if (!plat_priv) {
2363 		cnss_pr_err("plat_priv is NULL\n");
2364 		return -ENODEV;
2365 	}
2366 
2367 	ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
2368 				   "msix-match-addr",
2369 				   &pci_priv->msix_addr);
2370 	cnss_pr_dbg("MSI-X Match address is 0x%X\n",
2371 		    pci_priv->msix_addr);
2372 
2373 	return ret;
2374 }
2375 
2376 static int cnss_pci_config_msi_data(struct cnss_pci_data *pci_priv)
2377 {
2378 	struct msi_desc *msi_desc;
2379 	struct cnss_msi_config *msi_config;
2380 	struct pci_dev *pci_dev = pci_priv->pci_dev;
2381 
2382 	msi_config = pci_priv->msi_config;
2383 
2384 	if (pci_dev->msix_enabled) {
2385 		pci_priv->msi_ep_base_data = msi_config->users[0].base_vector;
2386 		cnss_pr_dbg("MSI-X base data is %d\n",
2387 			    pci_priv->msi_ep_base_data);
2388 		return 0;
2389 	}
2390 
2391 	msi_desc = irq_get_msi_desc(pci_dev->irq);
2392 	if (!msi_desc) {
2393 		cnss_pr_err("msi_desc is NULL!\n");
2394 		return -EINVAL;
2395 	}
2396 
2397 	pci_priv->msi_ep_base_data = msi_desc->msg.data;
2398 	cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);
2399 
2400 	return 0;
2401 }
2402 
2403 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
2404 #define PLC_PCIE_NAME_LEN		14
2405 
2406 static struct cnss_plat_data *
2407 cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
2408 {
2409 	int plat_env_count = cnss_get_plat_env_count();
2410 	struct cnss_plat_data *plat_env;
2411 	struct cnss_pci_data *pci_priv;
2412 	int i = 0;
2413 
2414 	if (!driver_ops) {
2415 		cnss_pr_err("No cnss driver\n");
2416 		return NULL;
2417 	}
2418 
2419 	for (i = 0; i < plat_env_count; i++) {
2420 		plat_env = cnss_get_plat_env(i);
2421 		if (!plat_env)
2422 			continue;
2423 		if (driver_ops->name && plat_env->pld_bus_ops_name) {
2424 			/* driver_ops->name = PLD_PCIE_OPS_NAME
2425 			 * #ifdef MULTI_IF_NAME
2426 			 * #define PLD_PCIE_OPS_NAME "pld_pcie_" MULTI_IF_NAME
2427 			 * #else
2428 			 * #define PLD_PCIE_OPS_NAME "pld_pcie"
2429 			 * #endif
2430 			 */
2431 			if (memcmp(driver_ops->name,
2432 				   plat_env->pld_bus_ops_name,
2433 				   PLC_PCIE_NAME_LEN) == 0)
2434 				return plat_env;
2435 		}
2436 	}
2437 
2438 	cnss_pr_vdbg("Invalid cnss driver name from ko %s\n", driver_ops->name);
2439 	/* in the dual wlan card case, the pld_bus_ops_name from dts
2440 	 * and driver_ops-> name from ko should match, otherwise
2441 	 * wlanhost driver don't know which plat_env it can use;
2442 	 * if doesn't find the match one, then get first available
2443 	 * instance insteadly.
2444 	 */
2445 
2446 	for (i = 0; i < plat_env_count; i++) {
2447 		plat_env = cnss_get_plat_env(i);
2448 
2449 		if (!plat_env)
2450 			continue;
2451 
2452 		pci_priv = plat_env->bus_priv;
2453 		if (!pci_priv) {
2454 			cnss_pr_err("pci_priv is NULL\n");
2455 			continue;
2456 		}
2457 
2458 		if (driver_ops == pci_priv->driver_ops)
2459 			return plat_env;
2460 	}
2461 	/* Doesn't find the existing instance,
2462 	 * so return the fist empty instance
2463 	 */
2464 	for (i = 0; i < plat_env_count; i++) {
2465 		plat_env = cnss_get_plat_env(i);
2466 
2467 		if (!plat_env)
2468 			continue;
2469 		pci_priv = plat_env->bus_priv;
2470 		if (!pci_priv) {
2471 			cnss_pr_err("pci_priv is NULL\n");
2472 			continue;
2473 		}
2474 
2475 		if (!pci_priv->driver_ops)
2476 			return plat_env;
2477 	}
2478 
2479 	return NULL;
2480 }
2481 
2482 static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
2483 {
2484 	int ret = 0;
2485 	u32 scratch = QCA6390_PCIE_SOC_PCIE_REG_PCIE_SCRATCH_2_SOC_PCIE_REG;
2486 	struct cnss_plat_data *plat_priv;
2487 
2488 	if (!pci_priv) {
2489 		cnss_pr_err("pci_priv is NULL\n");
2490 		return -ENODEV;
2491 	}
2492 
2493 	plat_priv = pci_priv->plat_priv;
2494 	/**
2495 	 * in the single wlan chipset case, plat_priv->qrtr_node_id always is 0,
2496 	 * wlan fw will use the hardcode 7 as the qrtr node id.
2497 	 * in the dual Hastings case, we will read qrtr node id
2498 	 * from device tree and pass to get plat_priv->qrtr_node_id,
2499 	 * which always is not zero. And then store this new value
2500 	 * to pcie register, wlan fw will read out this qrtr node id
2501 	 * from this register and overwrite to the hardcode one
2502 	 * while do initialization for ipc router.
2503 	 * without this change, two Hastings will use the same
2504 	 * qrtr node instance id, which will mess up qmi message
2505 	 * exchange. According to qrtr spec, every node should
2506 	 * have unique qrtr node id
2507 	 */
2508 	if (plat_priv->device_id == QCA6390_DEVICE_ID &&
2509 	    plat_priv->qrtr_node_id) {
2510 		u32 val;
2511 
2512 		cnss_pr_dbg("write 0x%x to SCRATCH REG\n",
2513 			    plat_priv->qrtr_node_id);
2514 		ret = cnss_pci_reg_write(pci_priv, scratch,
2515 					 plat_priv->qrtr_node_id);
2516 		if (ret) {
2517 			cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
2518 				    scratch, ret);
2519 			goto out;
2520 		}
2521 
2522 		ret = cnss_pci_reg_read(pci_priv, scratch, &val);
2523 		if (ret) {
2524 			cnss_pr_err("Failed to read SCRATCH REG");
2525 			goto out;
2526 		}
2527 
2528 		if (val != plat_priv->qrtr_node_id) {
2529 			cnss_pr_err("qrtr node id write to register doesn't match with readout value");
2530 			return -ERANGE;
2531 		}
2532 	}
2533 out:
2534 	return ret;
2535 }
2536 #else
2537 static struct cnss_plat_data *
2538 cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
2539 {
2540 	return cnss_bus_dev_to_plat_priv(NULL);
2541 }
2542 
2543 static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
2544 {
2545 	return 0;
2546 }
2547 #endif
2548 
2549 int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
2550 {
2551 	int ret = 0;
2552 	struct cnss_plat_data *plat_priv;
2553 	unsigned int timeout = 0;
2554 	int retry = 0;
2555 
2556 	if (!pci_priv) {
2557 		cnss_pr_err("pci_priv is NULL\n");
2558 		return -ENODEV;
2559 	}
2560 
2561 	plat_priv = pci_priv->plat_priv;
2562 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2563 		return 0;
2564 
2565 	if (MHI_TIMEOUT_OVERWRITE_MS)
2566 		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
2567 	cnss_mhi_set_m2_timeout_ms(pci_priv, MHI_M2_TIMEOUT_MS);
2568 
2569 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
2570 	if (ret)
2571 		return ret;
2572 
2573 	timeout = pci_priv->mhi_ctrl->timeout_ms;
2574 	/* For non-perf builds the timeout is 10 (default) * 6 seconds */
2575 	if (cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
2576 		pci_priv->mhi_ctrl->timeout_ms *= 6;
2577 	else /* For perf builds the timeout is 10 (default) * 3 seconds */
2578 		pci_priv->mhi_ctrl->timeout_ms *= 3;
2579 
2580 retry:
2581 	ret = cnss_pci_store_qrtr_node_id(pci_priv);
2582 	if (ret) {
2583 		if (retry++ < REG_RETRY_MAX_TIMES)
2584 			goto retry;
2585 		else
2586 			return ret;
2587 	}
2588 
2589 	/* Start the timer to dump MHI/PBL/SBL debug data periodically */
2590 	mod_timer(&pci_priv->boot_debug_timer,
2591 		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
2592 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
2593 	del_timer_sync(&pci_priv->boot_debug_timer);
2594 	if (ret == 0)
2595 		cnss_wlan_adsp_pc_enable(pci_priv, false);
2596 
2597 	pci_priv->mhi_ctrl->timeout_ms = timeout;
2598 
2599 	if (ret == -ETIMEDOUT) {
2600 		/* This is a special case needs to be handled that if MHI
2601 		 * power on returns -ETIMEDOUT, controller needs to take care
2602 		 * the cleanup by calling MHI power down. Force to set the bit
2603 		 * for driver internal MHI state to make sure it can be handled
2604 		 * properly later.
2605 		 */
2606 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2607 		ret = cnss_pci_handle_mhi_poweron_timeout(pci_priv);
2608 	} else if (!ret) {
2609 		/* kernel may allocate a dummy vector before request_irq and
2610 		 * then allocate a real vector when request_irq is called.
2611 		 * So get msi_data here again to avoid spurious interrupt
2612 		 * as msi_data will configured to srngs.
2613 		 */
2614 		if (cnss_pci_is_one_msi(pci_priv))
2615 			ret = cnss_pci_config_msi_data(pci_priv);
2616 	}
2617 
2618 	return ret;
2619 }
2620 
2621 static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
2622 {
2623 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2624 
2625 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2626 		return;
2627 
2628 	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) {
2629 		cnss_pr_dbg("MHI is already powered off\n");
2630 		return;
2631 	}
2632 	cnss_wlan_adsp_pc_enable(pci_priv, true);
2633 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
2634 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);
2635 
2636 	if (!pci_priv->pci_link_down_ind)
2637 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
2638 	else
2639 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
2640 }
2641 
2642 static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
2643 {
2644 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2645 
2646 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2647 		return;
2648 
2649 	if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) {
2650 		cnss_pr_dbg("MHI is already deinited\n");
2651 		return;
2652 	}
2653 
2654 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
2655 }
2656 
2657 static void cnss_pci_set_wlaon_pwr_ctrl(struct cnss_pci_data *pci_priv,
2658 					bool set_vddd4blow, bool set_shutdown,
2659 					bool do_force_wake)
2660 {
2661 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2662 	int ret;
2663 	u32 val;
2664 
2665 	if (!plat_priv->set_wlaon_pwr_ctrl)
2666 		return;
2667 
2668 	if (pci_priv->pci_link_state == PCI_LINK_DOWN ||
2669 	    pci_priv->pci_link_down_ind)
2670 		return;
2671 
2672 	if (do_force_wake)
2673 		if (cnss_pci_force_wake_get(pci_priv))
2674 			return;
2675 
2676 	ret = cnss_pci_reg_read(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, &val);
2677 	if (ret) {
2678 		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
2679 			    WLAON_QFPROM_PWR_CTRL_REG, ret);
2680 		goto force_wake_put;
2681 	}
2682 
2683 	cnss_pr_dbg("Read register offset 0x%x, val = 0x%x\n",
2684 		    WLAON_QFPROM_PWR_CTRL_REG, val);
2685 
2686 	if (set_vddd4blow)
2687 		val |= QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
2688 	else
2689 		val &= ~QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
2690 
2691 	if (set_shutdown)
2692 		val |= QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
2693 	else
2694 		val &= ~QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
2695 
2696 	ret = cnss_pci_reg_write(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, val);
2697 	if (ret) {
2698 		cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
2699 			    WLAON_QFPROM_PWR_CTRL_REG, ret);
2700 		goto force_wake_put;
2701 	}
2702 
2703 	cnss_pr_dbg("Write val 0x%x to register offset 0x%x\n", val,
2704 		    WLAON_QFPROM_PWR_CTRL_REG);
2705 
2706 	if (set_shutdown)
2707 		usleep_range(WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US,
2708 			     WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US);
2709 
2710 force_wake_put:
2711 	if (do_force_wake)
2712 		cnss_pci_force_wake_put(pci_priv);
2713 }
2714 
2715 static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
2716 					 u64 *time_us)
2717 {
2718 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2719 	u32 low, high;
2720 	u64 device_ticks;
2721 
2722 	if (!plat_priv->device_freq_hz) {
2723 		cnss_pr_err("Device time clock frequency is not valid\n");
2724 		return -EINVAL;
2725 	}
2726 
2727 	switch (pci_priv->device_id) {
2728 	case KIWI_DEVICE_ID:
2729 	case MANGO_DEVICE_ID:
2730 	case PEACH_DEVICE_ID:
2731 		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_LOW, &low);
2732 		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_HIGH, &high);
2733 		break;
2734 	default:
2735 		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL3, &low);
2736 		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL4, &high);
2737 		break;
2738 	}
2739 
2740 	device_ticks = (u64)high << 32 | low;
2741 	do_div(device_ticks, plat_priv->device_freq_hz / 100000);
2742 	*time_us = device_ticks * 10;
2743 
2744 	return 0;
2745 }
2746 
2747 static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv)
2748 {
2749 	switch (pci_priv->device_id) {
2750 	case KIWI_DEVICE_ID:
2751 	case MANGO_DEVICE_ID:
2752 	case PEACH_DEVICE_ID:
2753 		return;
2754 	default:
2755 		break;
2756 	}
2757 
2758 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2759 			   TIME_SYNC_ENABLE);
2760 }
2761 
2762 static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv)
2763 {
2764 	switch (pci_priv->device_id) {
2765 	case KIWI_DEVICE_ID:
2766 	case MANGO_DEVICE_ID:
2767 	case PEACH_DEVICE_ID:
2768 		return;
2769 	default:
2770 		break;
2771 	}
2772 
2773 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2774 			   TIME_SYNC_CLEAR);
2775 }
2776 
2777 
2778 static void cnss_pci_time_sync_reg_update(struct cnss_pci_data *pci_priv,
2779 					  u32 low, u32 high)
2780 {
2781 	u32 time_reg_low;
2782 	u32 time_reg_high;
2783 
2784 	switch (pci_priv->device_id) {
2785 	case KIWI_DEVICE_ID:
2786 	case MANGO_DEVICE_ID:
2787 	case PEACH_DEVICE_ID:
2788 		/* Use the next two shadow registers after host's usage */
2789 		time_reg_low = PCIE_SHADOW_REG_VALUE_0 +
2790 				(pci_priv->plat_priv->num_shadow_regs_v3 *
2791 				 SHADOW_REG_LEN_BYTES);
2792 		time_reg_high = time_reg_low + SHADOW_REG_LEN_BYTES;
2793 		break;
2794 	default:
2795 		time_reg_low = PCIE_SHADOW_REG_VALUE_34;
2796 		time_reg_high = PCIE_SHADOW_REG_VALUE_35;
2797 		break;
2798 	}
2799 
2800 	cnss_pci_reg_write(pci_priv, time_reg_low, low);
2801 	cnss_pci_reg_write(pci_priv, time_reg_high, high);
2802 
2803 	cnss_pci_reg_read(pci_priv, time_reg_low, &low);
2804 	cnss_pci_reg_read(pci_priv, time_reg_high, &high);
2805 
2806 	cnss_pr_dbg("Updated time sync regs [0x%x] = 0x%x, [0x%x] = 0x%x\n",
2807 		    time_reg_low, low, time_reg_high, high);
2808 }
2809 
2810 static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
2811 {
2812 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2813 	struct device *dev = &pci_priv->pci_dev->dev;
2814 	unsigned long flags = 0;
2815 	u64 host_time_us, device_time_us, offset;
2816 	u32 low, high;
2817 	int ret;
2818 
2819 	ret = cnss_pci_prevent_l1(dev);
2820 	if (ret)
2821 		goto out;
2822 
2823 	ret = cnss_pci_force_wake_get(pci_priv);
2824 	if (ret)
2825 		goto allow_l1;
2826 
2827 	spin_lock_irqsave(&time_sync_lock, flags);
2828 	cnss_pci_clear_time_sync_counter(pci_priv);
2829 	cnss_pci_enable_time_sync_counter(pci_priv);
2830 	host_time_us = cnss_get_host_timestamp(plat_priv);
2831 	ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
2832 	cnss_pci_clear_time_sync_counter(pci_priv);
2833 	spin_unlock_irqrestore(&time_sync_lock, flags);
2834 	if (ret)
2835 		goto force_wake_put;
2836 
2837 	if (host_time_us < device_time_us) {
2838 		cnss_pr_err("Host time (%llu us) is smaller than device time (%llu us), stop\n",
2839 			    host_time_us, device_time_us);
2840 		ret = -EINVAL;
2841 		goto force_wake_put;
2842 	}
2843 
2844 	offset = host_time_us - device_time_us;
2845 	cnss_pr_dbg("Host time = %llu us, device time = %llu us, offset = %llu us\n",
2846 		    host_time_us, device_time_us, offset);
2847 
2848 	low = offset & 0xFFFFFFFF;
2849 	high = offset >> 32;
2850 
2851 	cnss_pci_time_sync_reg_update(pci_priv, low, high);
2852 
2853 force_wake_put:
2854 	cnss_pci_force_wake_put(pci_priv);
2855 allow_l1:
2856 	cnss_pci_allow_l1(dev);
2857 out:
2858 	return ret;
2859 }
2860 
2861 static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
2862 {
2863 	struct cnss_pci_data *pci_priv =
2864 		container_of(work, struct cnss_pci_data, time_sync_work.work);
2865 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2866 	unsigned int time_sync_period_ms =
2867 		plat_priv->ctrl_params.time_sync_period;
2868 
2869 	if (test_bit(DISABLE_TIME_SYNC, &plat_priv->ctrl_params.quirks)) {
2870 		cnss_pr_dbg("Time sync is disabled\n");
2871 		return;
2872 	}
2873 
2874 	if (!time_sync_period_ms) {
2875 		cnss_pr_dbg("Skip time sync as time period is 0\n");
2876 		return;
2877 	}
2878 
2879 	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
2880 		return;
2881 
2882 	if (cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS) < 0)
2883 		goto runtime_pm_put;
2884 
2885 	mutex_lock(&pci_priv->bus_lock);
2886 	cnss_pci_update_timestamp(pci_priv);
2887 	mutex_unlock(&pci_priv->bus_lock);
2888 	schedule_delayed_work(&pci_priv->time_sync_work,
2889 			      msecs_to_jiffies(time_sync_period_ms));
2890 
2891 runtime_pm_put:
2892 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
2893 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
2894 }
2895 
2896 static int cnss_pci_start_time_sync_update(struct cnss_pci_data *pci_priv)
2897 {
2898 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2899 
2900 	switch (pci_priv->device_id) {
2901 	case QCA6390_DEVICE_ID:
2902 	case QCA6490_DEVICE_ID:
2903 	case KIWI_DEVICE_ID:
2904 	case MANGO_DEVICE_ID:
2905 	case PEACH_DEVICE_ID:
2906 		break;
2907 	default:
2908 		return -EOPNOTSUPP;
2909 	}
2910 
2911 	if (!plat_priv->device_freq_hz) {
2912 		cnss_pr_dbg("Device time clock frequency is not valid, skip time sync\n");
2913 		return -EINVAL;
2914 	}
2915 
2916 	cnss_pci_time_sync_work_hdlr(&pci_priv->time_sync_work.work);
2917 
2918 	return 0;
2919 }
2920 
2921 static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
2922 {
2923 	switch (pci_priv->device_id) {
2924 	case QCA6390_DEVICE_ID:
2925 	case QCA6490_DEVICE_ID:
2926 	case KIWI_DEVICE_ID:
2927 	case MANGO_DEVICE_ID:
2928 	case PEACH_DEVICE_ID:
2929 		break;
2930 	default:
2931 		return;
2932 	}
2933 
2934 	cancel_delayed_work_sync(&pci_priv->time_sync_work);
2935 }
2936 
2937 int cnss_pci_set_therm_cdev_state(struct cnss_pci_data *pci_priv,
2938 				  unsigned long thermal_state,
2939 				  int tcdev_id)
2940 {
2941 	if (!pci_priv) {
2942 		cnss_pr_err("pci_priv is NULL!\n");
2943 		return -ENODEV;
2944 	}
2945 
2946 	if (!pci_priv->driver_ops || !pci_priv->driver_ops->set_therm_cdev_state) {
2947 		cnss_pr_err("driver_ops or set_therm_cdev_state is NULL\n");
2948 		return -EINVAL;
2949 	}
2950 
2951 	return pci_priv->driver_ops->set_therm_cdev_state(pci_priv->pci_dev,
2952 							 thermal_state,
2953 							 tcdev_id);
2954 }
2955 
2956 int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
2957 				     unsigned int time_sync_period)
2958 {
2959 	struct cnss_plat_data *plat_priv;
2960 
2961 	if (!pci_priv)
2962 		return -ENODEV;
2963 
2964 	plat_priv = pci_priv->plat_priv;
2965 
2966 	cnss_pci_stop_time_sync_update(pci_priv);
2967 	plat_priv->ctrl_params.time_sync_period = time_sync_period;
2968 	cnss_pci_start_time_sync_update(pci_priv);
2969 	cnss_pr_dbg("WLAN time sync period %u ms\n",
2970 		    plat_priv->ctrl_params.time_sync_period);
2971 
2972 	return 0;
2973 }
2974 
2975 int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
2976 {
2977 	int ret = 0;
2978 	struct cnss_plat_data *plat_priv;
2979 
2980 	if (!pci_priv)
2981 		return -ENODEV;
2982 
2983 	plat_priv = pci_priv->plat_priv;
2984 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
2985 		cnss_pr_err("Reboot is in progress, skip driver probe\n");
2986 		return -EINVAL;
2987 	}
2988 
2989 	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
2990 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2991 		cnss_pr_dbg("Skip driver probe\n");
2992 		goto out;
2993 	}
2994 
2995 	if (!pci_priv->driver_ops) {
2996 		cnss_pr_err("driver_ops is NULL\n");
2997 		ret = -EINVAL;
2998 		goto out;
2999 	}
3000 
3001 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
3002 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
3003 		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
3004 						   pci_priv->pci_device_id);
3005 		if (ret) {
3006 			cnss_pr_err("Failed to reinit host driver, err = %d\n",
3007 				    ret);
3008 			goto out;
3009 		}
3010 		complete(&plat_priv->recovery_complete);
3011 	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
3012 		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
3013 						  pci_priv->pci_device_id);
3014 		if (ret) {
3015 			cnss_pr_err("Failed to probe host driver, err = %d\n",
3016 				    ret);
3017 			complete_all(&plat_priv->power_up_complete);
3018 			goto out;
3019 		}
3020 		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3021 		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
3022 		cnss_pci_free_blob_mem(pci_priv);
3023 		complete_all(&plat_priv->power_up_complete);
3024 	} else if (test_bit(CNSS_DRIVER_IDLE_RESTART,
3025 			    &plat_priv->driver_state)) {
3026 		ret = pci_priv->driver_ops->idle_restart(pci_priv->pci_dev,
3027 			pci_priv->pci_device_id);
3028 		if (ret) {
3029 			cnss_pr_err("Failed to idle restart host driver, err = %d\n",
3030 				    ret);
3031 			plat_priv->power_up_error = ret;
3032 			complete_all(&plat_priv->power_up_complete);
3033 			goto out;
3034 		}
3035 		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
3036 		complete_all(&plat_priv->power_up_complete);
3037 	} else {
3038 		complete(&plat_priv->power_up_complete);
3039 	}
3040 
3041 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
3042 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
3043 		__pm_relax(plat_priv->recovery_ws);
3044 	}
3045 
3046 	cnss_pci_start_time_sync_update(pci_priv);
3047 
3048 	return 0;
3049 
3050 out:
3051 	return ret;
3052 }
3053 
3054 int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
3055 {
3056 	struct cnss_plat_data *plat_priv;
3057 	int ret;
3058 
3059 	if (!pci_priv)
3060 		return -ENODEV;
3061 
3062 	plat_priv = pci_priv->plat_priv;
3063 
3064 	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
3065 	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
3066 	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
3067 		cnss_pr_dbg("Skip driver remove\n");
3068 		return 0;
3069 	}
3070 
3071 	if (!pci_priv->driver_ops) {
3072 		cnss_pr_err("driver_ops is NULL\n");
3073 		return -EINVAL;
3074 	}
3075 
3076 	cnss_pci_stop_time_sync_update(pci_priv);
3077 
3078 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
3079 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
3080 		complete(&plat_priv->rddm_complete);
3081 		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
3082 	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
3083 		pci_priv->driver_ops->remove(pci_priv->pci_dev);
3084 		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
3085 	} else if (test_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
3086 			    &plat_priv->driver_state)) {
3087 		ret = pci_priv->driver_ops->idle_shutdown(pci_priv->pci_dev);
3088 		if (ret == -EAGAIN) {
3089 			clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
3090 				  &plat_priv->driver_state);
3091 			return ret;
3092 		}
3093 	}
3094 
3095 	plat_priv->get_info_cb_ctx = NULL;
3096 	plat_priv->get_info_cb = NULL;
3097 
3098 	return 0;
3099 }
3100 
3101 int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
3102 				      int modem_current_status)
3103 {
3104 	struct cnss_wlan_driver *driver_ops;
3105 
3106 	if (!pci_priv)
3107 		return -ENODEV;
3108 
3109 	driver_ops = pci_priv->driver_ops;
3110 	if (!driver_ops || !driver_ops->modem_status)
3111 		return -EINVAL;
3112 
3113 	driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);
3114 
3115 	return 0;
3116 }
3117 
3118 int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
3119 			   enum cnss_driver_status status)
3120 {
3121 	struct cnss_wlan_driver *driver_ops;
3122 
3123 	if (!pci_priv)
3124 		return -ENODEV;
3125 
3126 	driver_ops = pci_priv->driver_ops;
3127 	if (!driver_ops || !driver_ops->update_status)
3128 		return -EINVAL;
3129 
3130 	cnss_pr_dbg("Update driver status: %d\n", status);
3131 
3132 	driver_ops->update_status(pci_priv->pci_dev, status);
3133 
3134 	return 0;
3135 }
3136 
3137 static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
3138 				   struct cnss_misc_reg *misc_reg,
3139 				   u32 misc_reg_size,
3140 				   char *reg_name)
3141 {
3142 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3143 	bool do_force_wake_put = true;
3144 	int i;
3145 
3146 	if (!misc_reg)
3147 		return;
3148 
3149 	if (in_interrupt() || irqs_disabled())
3150 		return;
3151 
3152 	if (cnss_pci_check_link_status(pci_priv))
3153 		return;
3154 
3155 	if (cnss_pci_force_wake_get(pci_priv)) {
3156 		/* Continue to dump when device has entered RDDM already */
3157 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3158 			return;
3159 		do_force_wake_put = false;
3160 	}
3161 
3162 	cnss_pr_dbg("Start to dump %s registers\n", reg_name);
3163 
3164 	for (i = 0; i < misc_reg_size; i++) {
3165 		if (!test_bit(pci_priv->misc_reg_dev_mask,
3166 			      &misc_reg[i].dev_mask))
3167 			continue;
3168 
3169 		if (misc_reg[i].wr) {
3170 			if (misc_reg[i].offset ==
3171 			    QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
3172 			    i >= 1)
3173 				misc_reg[i].val =
3174 				QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
3175 				misc_reg[i - 1].val;
3176 			if (cnss_pci_reg_write(pci_priv,
3177 					       misc_reg[i].offset,
3178 					       misc_reg[i].val))
3179 				goto force_wake_put;
3180 			cnss_pr_vdbg("Write 0x%X to 0x%X\n",
3181 				     misc_reg[i].val,
3182 				     misc_reg[i].offset);
3183 
3184 		} else {
3185 			if (cnss_pci_reg_read(pci_priv,
3186 					      misc_reg[i].offset,
3187 					      &misc_reg[i].val))
3188 				goto force_wake_put;
3189 		}
3190 	}
3191 
3192 force_wake_put:
3193 	if (do_force_wake_put)
3194 		cnss_pci_force_wake_put(pci_priv);
3195 }
3196 
3197 static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
3198 {
3199 	if (in_interrupt() || irqs_disabled())
3200 		return;
3201 
3202 	if (cnss_pci_check_link_status(pci_priv))
3203 		return;
3204 
3205 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
3206 			       WCSS_REG_SIZE, "wcss");
3207 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
3208 			       PCIE_REG_SIZE, "pcie");
3209 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
3210 			       WLAON_REG_SIZE, "wlaon");
3211 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->syspm_reg,
3212 			       SYSPM_REG_SIZE, "syspm");
3213 }
3214 
3215 static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
3216 {
3217 	int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
3218 	u32 reg_offset;
3219 	bool do_force_wake_put = true;
3220 
3221 	if (in_interrupt() || irqs_disabled())
3222 		return;
3223 
3224 	if (cnss_pci_check_link_status(pci_priv))
3225 		return;
3226 
3227 	if (!pci_priv->debug_reg) {
3228 		pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
3229 						   sizeof(*pci_priv->debug_reg)
3230 						   * array_size, GFP_KERNEL);
3231 		if (!pci_priv->debug_reg)
3232 			return;
3233 	}
3234 
3235 	if (cnss_pci_force_wake_get(pci_priv))
3236 		do_force_wake_put = false;
3237 
3238 	cnss_pr_dbg("Start to dump shadow registers\n");
3239 
3240 	for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
3241 		reg_offset = PCIE_SHADOW_REG_VALUE_0 + i * 4;
3242 		pci_priv->debug_reg[j].offset = reg_offset;
3243 		if (cnss_pci_reg_read(pci_priv, reg_offset,
3244 				      &pci_priv->debug_reg[j].val))
3245 			goto force_wake_put;
3246 	}
3247 
3248 	for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
3249 		reg_offset = PCIE_SHADOW_REG_INTER_0 + i * 4;
3250 		pci_priv->debug_reg[j].offset = reg_offset;
3251 		if (cnss_pci_reg_read(pci_priv, reg_offset,
3252 				      &pci_priv->debug_reg[j].val))
3253 			goto force_wake_put;
3254 	}
3255 
3256 force_wake_put:
3257 	if (do_force_wake_put)
3258 		cnss_pci_force_wake_put(pci_priv);
3259 }
3260 
3261 static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
3262 {
3263 	int ret = 0;
3264 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3265 
3266 	ret = cnss_power_on_device(plat_priv, false);
3267 	if (ret) {
3268 		cnss_pr_err("Failed to power on device, err = %d\n", ret);
3269 		goto out;
3270 	}
3271 
3272 	ret = cnss_resume_pci_link(pci_priv);
3273 	if (ret) {
3274 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
3275 		goto power_off;
3276 	}
3277 
3278 	ret = cnss_pci_call_driver_probe(pci_priv);
3279 	if (ret)
3280 		goto suspend_link;
3281 
3282 	return 0;
3283 suspend_link:
3284 	cnss_suspend_pci_link(pci_priv);
3285 power_off:
3286 	cnss_power_off_device(plat_priv);
3287 out:
3288 	return ret;
3289 }
3290 
3291 static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
3292 {
3293 	int ret = 0;
3294 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3295 
3296 	cnss_pci_pm_runtime_resume(pci_priv);
3297 
3298 	ret = cnss_pci_call_driver_remove(pci_priv);
3299 	if (ret == -EAGAIN)
3300 		goto out;
3301 
3302 	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
3303 				   CNSS_BUS_WIDTH_NONE);
3304 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3305 	cnss_pci_set_auto_suspended(pci_priv, 0);
3306 
3307 	ret = cnss_suspend_pci_link(pci_priv);
3308 	if (ret)
3309 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
3310 
3311 	cnss_power_off_device(plat_priv);
3312 
3313 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3314 	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
3315 
3316 out:
3317 	return ret;
3318 }
3319 
3320 static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
3321 {
3322 	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
3323 		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
3324 }
3325 
3326 static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
3327 {
3328 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3329 	struct cnss_ramdump_info *ramdump_info;
3330 
3331 	ramdump_info = &plat_priv->ramdump_info;
3332 	if (!ramdump_info->ramdump_size)
3333 		return -EINVAL;
3334 
3335 	return cnss_do_ramdump(plat_priv);
3336 }
3337 
3338 static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
3339 {
3340 	int ret = 0;
3341 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3342 	unsigned int timeout;
3343 	int retry = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
3344 	int sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;
3345 
3346 	if (plat_priv->ramdump_info_v2.dump_data_valid) {
3347 		cnss_pci_clear_dump_info(pci_priv);
3348 		cnss_pci_power_off_mhi(pci_priv);
3349 		cnss_suspend_pci_link(pci_priv);
3350 		cnss_pci_deinit_mhi(pci_priv);
3351 		cnss_power_off_device(plat_priv);
3352 	}
3353 
3354 	/* Clear QMI send usage count during every power up */
3355 	pci_priv->qmi_send_usage_count = 0;
3356 
3357 	plat_priv->power_up_error = 0;
3358 retry:
3359 	ret = cnss_power_on_device(plat_priv, false);
3360 	if (ret) {
3361 		cnss_pr_err("Failed to power on device, err = %d\n", ret);
3362 		goto out;
3363 	}
3364 
3365 	ret = cnss_resume_pci_link(pci_priv);
3366 	if (ret) {
3367 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
3368 		cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
3369 			    cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
3370 		if (test_bit(IGNORE_PCI_LINK_FAILURE,
3371 			     &plat_priv->ctrl_params.quirks)) {
3372 			cnss_pr_dbg("Ignore PCI link resume failure\n");
3373 			ret = 0;
3374 			goto out;
3375 		}
3376 		if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
3377 			cnss_power_off_device(plat_priv);
3378 			/* Force toggle BT_EN GPIO low */
3379 			if (retry == POWER_ON_RETRY_MAX_TIMES) {
3380 				cnss_pr_dbg("Retry #%d. Set BT_EN GPIO(%u) low\n",
3381 					    retry, bt_en_gpio);
3382 				if (bt_en_gpio >= 0)
3383 					gpio_direction_output(bt_en_gpio, 0);
3384 				cnss_pr_dbg("BT_EN GPIO val: %d\n",
3385 					    gpio_get_value(bt_en_gpio));
3386 			}
3387 			cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
3388 			cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
3389 				    cnss_get_input_gpio_value(plat_priv,
3390 							      sw_ctrl_gpio));
3391 			msleep(POWER_ON_RETRY_DELAY_MS * retry);
3392 			goto retry;
3393 		}
3394 		/* Assert when it reaches maximum retries */
3395 		CNSS_ASSERT(0);
3396 		goto power_off;
3397 	}
3398 
3399 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
3400 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);
3401 
3402 	ret = cnss_pci_start_mhi(pci_priv);
3403 	if (ret) {
3404 		cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
3405 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
3406 		    !pci_priv->pci_link_down_ind && timeout) {
3407 			/* Start recovery directly for MHI start failures */
3408 			cnss_schedule_recovery(&pci_priv->pci_dev->dev,
3409 					       CNSS_REASON_DEFAULT);
3410 		}
3411 		return 0;
3412 	}
3413 
3414 	if (test_bit(USE_CORE_ONLY_FW, &plat_priv->ctrl_params.quirks)) {
3415 		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
3416 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
3417 		return 0;
3418 	}
3419 
3420 	cnss_set_pin_connect_status(plat_priv);
3421 
3422 	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
3423 		ret = cnss_pci_call_driver_probe(pci_priv);
3424 		if (ret)
3425 			goto stop_mhi;
3426 	} else if (timeout) {
3427 		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state))
3428 			timeout += WLAN_COLD_BOOT_CAL_TIMEOUT;
3429 		else
3430 			timeout += WLAN_MISSION_MODE_TIMEOUT;
3431 		mod_timer(&plat_priv->fw_boot_timer,
3432 			  jiffies + msecs_to_jiffies(timeout));
3433 	}
3434 
3435 	return 0;
3436 
3437 stop_mhi:
3438 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, true);
3439 	cnss_pci_power_off_mhi(pci_priv);
3440 	cnss_suspend_pci_link(pci_priv);
3441 	cnss_pci_deinit_mhi(pci_priv);
3442 power_off:
3443 	cnss_power_off_device(plat_priv);
3444 out:
3445 	return ret;
3446 }
3447 
3448 static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
3449 {
3450 	int ret = 0;
3451 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3452 	int do_force_wake = true;
3453 
3454 	cnss_pci_pm_runtime_resume(pci_priv);
3455 
3456 	ret = cnss_pci_call_driver_remove(pci_priv);
3457 	if (ret == -EAGAIN)
3458 		goto out;
3459 
3460 	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
3461 				   CNSS_BUS_WIDTH_NONE);
3462 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3463 	cnss_pci_set_auto_suspended(pci_priv, 0);
3464 
3465 	if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
3466 	     test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
3467 	     test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
3468 	     test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) ||
3469 	     test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) &&
3470 	    test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
3471 		del_timer(&pci_priv->dev_rddm_timer);
3472 		cnss_pci_collect_dump_info(pci_priv, false);
3473 
3474 		if (!plat_priv->recovery_enabled)
3475 			CNSS_ASSERT(0);
3476 	}
3477 
3478 	if (!cnss_is_device_powered_on(plat_priv)) {
3479 		cnss_pr_dbg("Device is already powered off, ignore\n");
3480 		goto skip_power_off;
3481 	}
3482 
3483 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3484 		do_force_wake = false;
3485 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, do_force_wake);
3486 
3487 	/* FBC image will be freed after powering off MHI, so skip
3488 	 * if RAM dump data is still valid.
3489 	 */
3490 	if (plat_priv->ramdump_info_v2.dump_data_valid)
3491 		goto skip_power_off;
3492 
3493 	cnss_pci_power_off_mhi(pci_priv);
3494 	ret = cnss_suspend_pci_link(pci_priv);
3495 	if (ret)
3496 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
3497 	cnss_pci_deinit_mhi(pci_priv);
3498 	cnss_power_off_device(plat_priv);
3499 
3500 skip_power_off:
3501 	pci_priv->remap_window = 0;
3502 
3503 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
3504 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
3505 	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
3506 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
3507 		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
3508 		pci_priv->pci_link_down_ind = false;
3509 	}
3510 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3511 	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
3512 	memset(&print_optimize, 0, sizeof(print_optimize));
3513 
3514 out:
3515 	return ret;
3516 }
3517 
3518 static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
3519 {
3520 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3521 
3522 	set_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
3523 	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
3524 		    plat_priv->driver_state);
3525 
3526 	cnss_pci_collect_dump_info(pci_priv, true);
3527 	clear_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
3528 }
3529 
3530 static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
3531 {
3532 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3533 	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3534 	struct cnss_dump_data *dump_data = &info_v2->dump_data;
3535 	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
3536 	int ret = 0;
3537 
3538 	if (!info_v2->dump_data_valid || !dump_seg ||
3539 	    dump_data->nentries == 0)
3540 		return 0;
3541 
3542 	ret = cnss_do_elf_ramdump(plat_priv);
3543 
3544 	cnss_pci_clear_dump_info(pci_priv);
3545 	cnss_pci_power_off_mhi(pci_priv);
3546 	cnss_suspend_pci_link(pci_priv);
3547 	cnss_pci_deinit_mhi(pci_priv);
3548 	cnss_power_off_device(plat_priv);
3549 
3550 	return ret;
3551 }
3552 
3553 int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
3554 {
3555 	int ret = 0;
3556 
3557 	if (!pci_priv) {
3558 		cnss_pr_err("pci_priv is NULL\n");
3559 		return -ENODEV;
3560 	}
3561 
3562 	switch (pci_priv->device_id) {
3563 	case QCA6174_DEVICE_ID:
3564 		ret = cnss_qca6174_powerup(pci_priv);
3565 		break;
3566 	case QCA6290_DEVICE_ID:
3567 	case QCA6390_DEVICE_ID:
3568 	case QCN7605_DEVICE_ID:
3569 	case QCA6490_DEVICE_ID:
3570 	case KIWI_DEVICE_ID:
3571 	case MANGO_DEVICE_ID:
3572 	case PEACH_DEVICE_ID:
3573 		ret = cnss_qca6290_powerup(pci_priv);
3574 		break;
3575 	default:
3576 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3577 			    pci_priv->device_id);
3578 		ret = -ENODEV;
3579 	}
3580 
3581 	return ret;
3582 }
3583 
3584 int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
3585 {
3586 	int ret = 0;
3587 
3588 	if (!pci_priv) {
3589 		cnss_pr_err("pci_priv is NULL\n");
3590 		return -ENODEV;
3591 	}
3592 
3593 	switch (pci_priv->device_id) {
3594 	case QCA6174_DEVICE_ID:
3595 		ret = cnss_qca6174_shutdown(pci_priv);
3596 		break;
3597 	case QCA6290_DEVICE_ID:
3598 	case QCA6390_DEVICE_ID:
3599 	case QCN7605_DEVICE_ID:
3600 	case QCA6490_DEVICE_ID:
3601 	case KIWI_DEVICE_ID:
3602 	case MANGO_DEVICE_ID:
3603 	case PEACH_DEVICE_ID:
3604 		ret = cnss_qca6290_shutdown(pci_priv);
3605 		break;
3606 	default:
3607 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3608 			    pci_priv->device_id);
3609 		ret = -ENODEV;
3610 	}
3611 
3612 	return ret;
3613 }
3614 
3615 int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
3616 {
3617 	int ret = 0;
3618 
3619 	if (!pci_priv) {
3620 		cnss_pr_err("pci_priv is NULL\n");
3621 		return -ENODEV;
3622 	}
3623 
3624 	switch (pci_priv->device_id) {
3625 	case QCA6174_DEVICE_ID:
3626 		cnss_qca6174_crash_shutdown(pci_priv);
3627 		break;
3628 	case QCA6290_DEVICE_ID:
3629 	case QCA6390_DEVICE_ID:
3630 	case QCN7605_DEVICE_ID:
3631 	case QCA6490_DEVICE_ID:
3632 	case KIWI_DEVICE_ID:
3633 	case MANGO_DEVICE_ID:
3634 	case PEACH_DEVICE_ID:
3635 		cnss_qca6290_crash_shutdown(pci_priv);
3636 		break;
3637 	default:
3638 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3639 			    pci_priv->device_id);
3640 		ret = -ENODEV;
3641 	}
3642 
3643 	return ret;
3644 }
3645 
3646 int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
3647 {
3648 	int ret = 0;
3649 
3650 	if (!pci_priv) {
3651 		cnss_pr_err("pci_priv is NULL\n");
3652 		return -ENODEV;
3653 	}
3654 
3655 	switch (pci_priv->device_id) {
3656 	case QCA6174_DEVICE_ID:
3657 		ret = cnss_qca6174_ramdump(pci_priv);
3658 		break;
3659 	case QCA6290_DEVICE_ID:
3660 	case QCA6390_DEVICE_ID:
3661 	case QCN7605_DEVICE_ID:
3662 	case QCA6490_DEVICE_ID:
3663 	case KIWI_DEVICE_ID:
3664 	case MANGO_DEVICE_ID:
3665 	case PEACH_DEVICE_ID:
3666 		ret = cnss_qca6290_ramdump(pci_priv);
3667 		break;
3668 	default:
3669 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3670 			    pci_priv->device_id);
3671 		ret = -ENODEV;
3672 	}
3673 
3674 	return ret;
3675 }
3676 
3677 int cnss_pci_is_drv_connected(struct device *dev)
3678 {
3679 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
3680 
3681 	if (!pci_priv)
3682 		return -ENODEV;
3683 
3684 	return pci_priv->drv_connected_last;
3685 }
3686 EXPORT_SYMBOL(cnss_pci_is_drv_connected);
3687 
3688 static void cnss_wlan_reg_driver_work(struct work_struct *work)
3689 {
3690 	struct cnss_plat_data *plat_priv =
3691 	container_of(work, struct cnss_plat_data, wlan_reg_driver_work.work);
3692 	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
3693 	struct cnss_cal_info *cal_info;
3694 	unsigned int timeout;
3695 
3696 	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state))
3697 		return;
3698 
3699 	if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
3700 		goto reg_driver;
3701 	} else {
3702 		if (plat_priv->charger_mode) {
3703 			cnss_pr_err("Ignore calibration timeout in charger mode\n");
3704 			return;
3705 		}
3706 		if (!test_bit(CNSS_IN_COLD_BOOT_CAL,
3707 			      &plat_priv->driver_state)) {
3708 			timeout = cnss_get_timeout(plat_priv,
3709 						   CNSS_TIMEOUT_CALIBRATION);
3710 			cnss_pr_dbg("File system not ready to start calibration. Wait for %ds..\n",
3711 				    timeout / 1000);
3712 			schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
3713 					      msecs_to_jiffies(timeout));
3714 			return;
3715 		}
3716 
3717 		del_timer(&plat_priv->fw_boot_timer);
3718 		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) &&
3719 		    !test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3720 			cnss_pr_err("Timeout waiting for calibration to complete\n");
3721 			CNSS_ASSERT(0);
3722 		}
3723 		cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
3724 		if (!cal_info)
3725 			return;
3726 		cal_info->cal_status = CNSS_CAL_TIMEOUT;
3727 		cnss_driver_event_post(plat_priv,
3728 				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
3729 				       0, cal_info);
3730 	}
3731 reg_driver:
3732 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3733 		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
3734 		return;
3735 	}
3736 	reinit_completion(&plat_priv->power_up_complete);
3737 	cnss_driver_event_post(plat_priv,
3738 			       CNSS_DRIVER_EVENT_REGISTER_DRIVER,
3739 			       CNSS_EVENT_SYNC_UNKILLABLE,
3740 			       pci_priv->driver_ops);
3741 }
3742 
3743 int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
3744 {
3745 	int ret = 0;
3746 	struct cnss_plat_data *plat_priv;
3747 	struct cnss_pci_data *pci_priv;
3748 	const struct pci_device_id *id_table = driver_ops->id_table;
3749 	unsigned int timeout;
3750 
3751 	if (!cnss_check_driver_loading_allowed()) {
3752 		cnss_pr_info("No cnss2 dtsi entry present");
3753 		return -ENODEV;
3754 	}
3755 
3756 	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
3757 
3758 	if (!plat_priv) {
3759 		cnss_pr_buf("plat_priv is not ready for register driver\n");
3760 		return -EAGAIN;
3761 	}
3762 
3763 	pci_priv = plat_priv->bus_priv;
3764 	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
3765 		while (id_table && id_table->device) {
3766 			if (plat_priv->device_id == id_table->device) {
3767 				if (plat_priv->device_id == KIWI_DEVICE_ID &&
3768 				    driver_ops->chip_version != 2) {
3769 					cnss_pr_err("WLAN HW disabled. kiwi_v2 only supported\n");
3770 					return -ENODEV;
3771 				}
3772 				cnss_pr_info("WLAN register driver deferred for device ID: 0x%x due to HW disable\n",
3773 					     id_table->device);
3774 				plat_priv->driver_ops = driver_ops;
3775 				return 0;
3776 			}
3777 			id_table++;
3778 		}
3779 		return -ENODEV;
3780 	}
3781 
3782 	if (!test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state)) {
3783 		cnss_pr_info("pci probe not yet done for register driver\n");
3784 		return -EAGAIN;
3785 	}
3786 
3787 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
3788 		cnss_pr_err("Driver has already registered\n");
3789 		return -EEXIST;
3790 	}
3791 
3792 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3793 		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
3794 		return -EINVAL;
3795 	}
3796 
3797 	if (!id_table || !pci_dev_present(id_table)) {
3798 		/* id_table pointer will move from pci_dev_present(),
3799 		 * so check again using local pointer.
3800 		 */
3801 		id_table = driver_ops->id_table;
3802 		while (id_table && id_table->vendor) {
3803 			cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
3804 				     id_table->device);
3805 			id_table++;
3806 		}
3807 		cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
3808 			    pci_priv->device_id);
3809 		return -ENODEV;
3810 	}
3811 
3812 	if (driver_ops->chip_version != CNSS_CHIP_VER_ANY &&
3813 	    driver_ops->chip_version != plat_priv->device_version.major_version) {
3814 		cnss_pr_err("Driver built for chip ver 0x%x, enumerated ver 0x%x, reject unsupported driver\n",
3815 			    driver_ops->chip_version,
3816 			    plat_priv->device_version.major_version);
3817 		return -ENODEV;
3818 	}
3819 	set_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state);
3820 
3821 	if (!plat_priv->cbc_enabled ||
3822 	    test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
3823 		goto register_driver;
3824 
3825 	pci_priv->driver_ops = driver_ops;
3826 	/* If Cold Boot Calibration is enabled, it is the 1st step in init
3827 	 * sequence.CBC is done on file system_ready trigger. Qcacld will be
3828 	 * loaded from vendor_modprobe.sh at early boot and must be deferred
3829 	 * until CBC is complete
3830 	 */
3831 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION);
3832 	INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work,
3833 			  cnss_wlan_reg_driver_work);
3834 	schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
3835 			      msecs_to_jiffies(timeout));
3836 	cnss_pr_info("WLAN register driver deferred for Calibration\n");
3837 	return 0;
3838 register_driver:
3839 	reinit_completion(&plat_priv->power_up_complete);
3840 	ret = cnss_driver_event_post(plat_priv,
3841 				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
3842 				     CNSS_EVENT_SYNC_UNKILLABLE,
3843 				     driver_ops);
3844 
3845 	return ret;
3846 }
3847 EXPORT_SYMBOL(cnss_wlan_register_driver);
3848 
3849 void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
3850 {
3851 	struct cnss_plat_data *plat_priv;
3852 	int ret = 0;
3853 	unsigned int timeout;
3854 
3855 	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
3856 	if (!plat_priv) {
3857 		cnss_pr_err("plat_priv is NULL\n");
3858 		return;
3859 	}
3860 
3861 	mutex_lock(&plat_priv->driver_ops_lock);
3862 
3863 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
3864 		goto skip_wait_power_up;
3865 
3866 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG);
3867 	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
3868 					  msecs_to_jiffies(timeout));
3869 	if (!ret) {
3870 		cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n",
3871 			    timeout);
3872 		CNSS_ASSERT(0);
3873 	}
3874 
3875 skip_wait_power_up:
3876 	if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
3877 	    !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3878 		goto skip_wait_recovery;
3879 
3880 	reinit_completion(&plat_priv->recovery_complete);
3881 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
3882 	ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
3883 					  msecs_to_jiffies(timeout));
3884 	if (!ret) {
3885 		cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
3886 			    timeout);
3887 		CNSS_ASSERT(0);
3888 	}
3889 
3890 skip_wait_recovery:
3891 	cnss_driver_event_post(plat_priv,
3892 			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
3893 			       CNSS_EVENT_SYNC_UNKILLABLE, NULL);
3894 
3895 	mutex_unlock(&plat_priv->driver_ops_lock);
3896 }
3897 EXPORT_SYMBOL(cnss_wlan_unregister_driver);
3898 
3899 int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
3900 				  void *data)
3901 {
3902 	int ret = 0;
3903 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3904 
3905 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3906 		cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n");
3907 		return -EINVAL;
3908 	}
3909 
3910 	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3911 	pci_priv->driver_ops = data;
3912 
3913 	ret = cnss_pci_dev_powerup(pci_priv);
3914 	if (ret) {
3915 		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3916 		pci_priv->driver_ops = NULL;
3917 	} else {
3918 		set_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3919 	}
3920 
3921 	return ret;
3922 }
3923 
3924 int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
3925 {
3926 	struct cnss_plat_data *plat_priv;
3927 
3928 	if (!pci_priv)
3929 		return -EINVAL;
3930 
3931 	plat_priv = pci_priv->plat_priv;
3932 	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3933 	cnss_pci_dev_shutdown(pci_priv);
3934 	pci_priv->driver_ops = NULL;
3935 	clear_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3936 
3937 	return 0;
3938 }
3939 
3940 static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
3941 {
3942 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3943 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3944 	int ret = 0;
3945 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3946 
3947 	pm_message_t state = { .event = PM_EVENT_SUSPEND };
3948 
3949 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
3950 	    driver_ops && driver_ops->suspend) {
3951 		ret = driver_ops->suspend(pci_dev, state);
3952 		if (ret) {
3953 			cnss_pr_err("Failed to suspend host driver, err = %d\n",
3954 				    ret);
3955 			ret = -EAGAIN;
3956 		}
3957 	}
3958 
3959 	return ret;
3960 }
3961 
3962 static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
3963 {
3964 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3965 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3966 	int ret = 0;
3967 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3968 
3969 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
3970 	    driver_ops && driver_ops->resume) {
3971 		ret = driver_ops->resume(pci_dev);
3972 		if (ret)
3973 			cnss_pr_err("Failed to resume host driver, err = %d\n",
3974 				    ret);
3975 	}
3976 
3977 	return ret;
3978 }
3979 
3980 int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
3981 {
3982 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3983 	int ret = 0;
3984 
3985 	if (pci_priv->pci_link_state == PCI_LINK_DOWN)
3986 		goto out;
3987 
3988 	if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
3989 		ret = -EAGAIN;
3990 		goto out;
3991 	}
3992 
3993 	if (pci_priv->drv_connected_last)
3994 		goto skip_disable_pci;
3995 
3996 	pci_clear_master(pci_dev);
3997 	cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
3998 	pci_disable_device(pci_dev);
3999 
4000 	ret = pci_set_power_state(pci_dev, PCI_D3hot);
4001 	if (ret)
4002 		cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
4003 
4004 skip_disable_pci:
4005 	if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
4006 		ret = -EAGAIN;
4007 		goto resume_mhi;
4008 	}
4009 	pci_priv->pci_link_state = PCI_LINK_DOWN;
4010 
4011 	return 0;
4012 
4013 resume_mhi:
4014 	if (!pci_is_enabled(pci_dev))
4015 		if (pci_enable_device(pci_dev))
4016 			cnss_pr_err("Failed to enable PCI device\n");
4017 	if (pci_priv->saved_state)
4018 		cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
4019 	pci_set_master(pci_dev);
4020 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
4021 out:
4022 	return ret;
4023 }
4024 
4025 int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
4026 {
4027 	struct pci_dev *pci_dev = pci_priv->pci_dev;
4028 	int ret = 0;
4029 
4030 	if (pci_priv->pci_link_state == PCI_LINK_UP)
4031 		goto out;
4032 
4033 	if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
4034 		cnss_fatal_err("Failed to resume PCI link from suspend\n");
4035 		cnss_pci_link_down(&pci_dev->dev);
4036 		ret = -EAGAIN;
4037 		goto out;
4038 	}
4039 
4040 	pci_priv->pci_link_state = PCI_LINK_UP;
4041 
4042 	if (pci_priv->drv_connected_last)
4043 		goto skip_enable_pci;
4044 
4045 	ret = pci_enable_device(pci_dev);
4046 	if (ret) {
4047 		cnss_pr_err("Failed to enable PCI device, err = %d\n",
4048 			    ret);
4049 		goto out;
4050 	}
4051 
4052 	if (pci_priv->saved_state)
4053 		cnss_set_pci_config_space(pci_priv,
4054 					  RESTORE_PCI_CONFIG_SPACE);
4055 	pci_set_master(pci_dev);
4056 
4057 skip_enable_pci:
4058 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
4059 out:
4060 	return ret;
4061 }
4062 
4063 static int cnss_pci_suspend(struct device *dev)
4064 {
4065 	int ret = 0;
4066 	struct pci_dev *pci_dev = to_pci_dev(dev);
4067 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4068 	struct cnss_plat_data *plat_priv;
4069 
4070 	if (!pci_priv)
4071 		goto out;
4072 
4073 	plat_priv = pci_priv->plat_priv;
4074 	if (!plat_priv)
4075 		goto out;
4076 
4077 	if (!cnss_is_device_powered_on(plat_priv))
4078 		goto out;
4079 
4080 	/* No mhi state bit set if only finish pcie enumeration,
4081 	 * so test_bit is not applicable to check if it is INIT state.
4082 	 */
4083 	if (pci_priv->mhi_state == CNSS_MHI_INIT) {
4084 		bool suspend = cnss_should_suspend_pwroff(pci_dev);
4085 
4086 		/* Do PCI link suspend and power off in the LPM case
4087 		 * if chipset didn't do that after pcie enumeration.
4088 		 */
4089 		if (!suspend) {
4090 			ret = cnss_suspend_pci_link(pci_priv);
4091 			if (ret)
4092 				cnss_pr_err("Failed to suspend PCI link, err = %d\n",
4093 					    ret);
4094 			cnss_power_off_device(plat_priv);
4095 			goto out;
4096 		}
4097 	}
4098 
4099 	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
4100 	    pci_priv->drv_supported) {
4101 		pci_priv->drv_connected_last =
4102 			cnss_pci_get_drv_connected(pci_priv);
4103 		if (!pci_priv->drv_connected_last) {
4104 			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
4105 			ret = -EAGAIN;
4106 			goto out;
4107 		}
4108 	}
4109 
4110 	set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
4111 
4112 	ret = cnss_pci_suspend_driver(pci_priv);
4113 	if (ret)
4114 		goto clear_flag;
4115 
4116 	if (!pci_priv->disable_pc) {
4117 		mutex_lock(&pci_priv->bus_lock);
4118 		ret = cnss_pci_suspend_bus(pci_priv);
4119 		mutex_unlock(&pci_priv->bus_lock);
4120 		if (ret)
4121 			goto resume_driver;
4122 	}
4123 
4124 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
4125 
4126 	return 0;
4127 
4128 resume_driver:
4129 	cnss_pci_resume_driver(pci_priv);
4130 clear_flag:
4131 	pci_priv->drv_connected_last = 0;
4132 	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
4133 out:
4134 	return ret;
4135 }
4136 
4137 static int cnss_pci_resume(struct device *dev)
4138 {
4139 	int ret = 0;
4140 	struct pci_dev *pci_dev = to_pci_dev(dev);
4141 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4142 	struct cnss_plat_data *plat_priv;
4143 
4144 	if (!pci_priv)
4145 		goto out;
4146 
4147 	plat_priv = pci_priv->plat_priv;
4148 	if (!plat_priv)
4149 		goto out;
4150 
4151 	if (pci_priv->pci_link_down_ind)
4152 		goto out;
4153 
4154 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4155 		goto out;
4156 
4157 	if (!pci_priv->disable_pc) {
4158 		mutex_lock(&pci_priv->bus_lock);
4159 		ret = cnss_pci_resume_bus(pci_priv);
4160 		mutex_unlock(&pci_priv->bus_lock);
4161 		if (ret)
4162 			goto out;
4163 	}
4164 
4165 	ret = cnss_pci_resume_driver(pci_priv);
4166 
4167 	pci_priv->drv_connected_last = 0;
4168 	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
4169 
4170 out:
4171 	return ret;
4172 }
4173 
4174 static int cnss_pci_suspend_noirq(struct device *dev)
4175 {
4176 	int ret = 0;
4177 	struct pci_dev *pci_dev = to_pci_dev(dev);
4178 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4179 	struct cnss_wlan_driver *driver_ops;
4180 	struct cnss_plat_data *plat_priv;
4181 
4182 	if (!pci_priv)
4183 		goto out;
4184 
4185 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4186 		goto out;
4187 
4188 	driver_ops = pci_priv->driver_ops;
4189 	plat_priv = pci_priv->plat_priv;
4190 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
4191 	    driver_ops && driver_ops->suspend_noirq)
4192 		ret = driver_ops->suspend_noirq(pci_dev);
4193 
4194 	if (pci_priv->disable_pc && !pci_dev->state_saved &&
4195 	    !pci_priv->plat_priv->use_pm_domain)
4196 		pci_save_state(pci_dev);
4197 
4198 out:
4199 	return ret;
4200 }
4201 
4202 static int cnss_pci_resume_noirq(struct device *dev)
4203 {
4204 	int ret = 0;
4205 	struct pci_dev *pci_dev = to_pci_dev(dev);
4206 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4207 	struct cnss_wlan_driver *driver_ops;
4208 	struct cnss_plat_data *plat_priv;
4209 
4210 	if (!pci_priv)
4211 		goto out;
4212 
4213 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4214 		goto out;
4215 
4216 	plat_priv = pci_priv->plat_priv;
4217 	driver_ops = pci_priv->driver_ops;
4218 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
4219 	    driver_ops && driver_ops->resume_noirq &&
4220 	    !pci_priv->pci_link_down_ind)
4221 		ret = driver_ops->resume_noirq(pci_dev);
4222 
4223 out:
4224 	return ret;
4225 }
4226 
4227 static int cnss_pci_runtime_suspend(struct device *dev)
4228 {
4229 	int ret = 0;
4230 	struct pci_dev *pci_dev = to_pci_dev(dev);
4231 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4232 	struct cnss_plat_data *plat_priv;
4233 	struct cnss_wlan_driver *driver_ops;
4234 
4235 	if (!pci_priv)
4236 		return -EAGAIN;
4237 
4238 	plat_priv = pci_priv->plat_priv;
4239 	if (!plat_priv)
4240 		return -EAGAIN;
4241 
4242 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4243 		return -EAGAIN;
4244 
4245 	if (pci_priv->pci_link_down_ind) {
4246 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
4247 		return -EAGAIN;
4248 	}
4249 
4250 	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
4251 	    pci_priv->drv_supported) {
4252 		pci_priv->drv_connected_last =
4253 			cnss_pci_get_drv_connected(pci_priv);
4254 		if (!pci_priv->drv_connected_last) {
4255 			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
4256 			return -EAGAIN;
4257 		}
4258 	}
4259 
4260 	cnss_pr_vdbg("Runtime suspend start\n");
4261 
4262 	driver_ops = pci_priv->driver_ops;
4263 	if (driver_ops && driver_ops->runtime_ops &&
4264 	    driver_ops->runtime_ops->runtime_suspend)
4265 		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
4266 	else
4267 		ret = cnss_auto_suspend(dev);
4268 
4269 	if (ret)
4270 		pci_priv->drv_connected_last = 0;
4271 
4272 	cnss_pr_vdbg("Runtime suspend status: %d\n", ret);
4273 
4274 	return ret;
4275 }
4276 
4277 static int cnss_pci_runtime_resume(struct device *dev)
4278 {
4279 	int ret = 0;
4280 	struct pci_dev *pci_dev = to_pci_dev(dev);
4281 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4282 	struct cnss_wlan_driver *driver_ops;
4283 
4284 	if (!pci_priv)
4285 		return -EAGAIN;
4286 
4287 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4288 		return -EAGAIN;
4289 
4290 	if (pci_priv->pci_link_down_ind) {
4291 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
4292 		return -EAGAIN;
4293 	}
4294 
4295 	cnss_pr_vdbg("Runtime resume start\n");
4296 
4297 	driver_ops = pci_priv->driver_ops;
4298 	if (driver_ops && driver_ops->runtime_ops &&
4299 	    driver_ops->runtime_ops->runtime_resume)
4300 		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
4301 	else
4302 		ret = cnss_auto_resume(dev);
4303 
4304 	cnss_pr_vdbg("Runtime resume status: %d\n", ret);
4305 
4306 	return ret;
4307 }
4308 
4309 static int cnss_pci_runtime_idle(struct device *dev)
4310 {
4311 	cnss_pr_vdbg("Runtime idle\n");
4312 
4313 	pm_request_autosuspend(dev);
4314 
4315 	return -EBUSY;
4316 }
4317 
4318 int cnss_wlan_pm_control(struct device *dev, bool vote)
4319 {
4320 	struct pci_dev *pci_dev = to_pci_dev(dev);
4321 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4322 	int ret = 0;
4323 
4324 	if (!pci_priv)
4325 		return -ENODEV;
4326 
4327 	ret = cnss_pci_disable_pc(pci_priv, vote);
4328 	if (ret)
4329 		return ret;
4330 
4331 	pci_priv->disable_pc = vote;
4332 	cnss_pr_dbg("%s PCIe power collapse\n", vote ? "disable" : "enable");
4333 
4334 	return 0;
4335 }
4336 EXPORT_SYMBOL(cnss_wlan_pm_control);
4337 
4338 static void cnss_pci_pm_runtime_get_record(struct cnss_pci_data *pci_priv,
4339 					   enum cnss_rtpm_id id)
4340 {
4341 	if (id >= RTPM_ID_MAX)
4342 		return;
4343 
4344 	atomic_inc(&pci_priv->pm_stats.runtime_get);
4345 	atomic_inc(&pci_priv->pm_stats.runtime_get_id[id]);
4346 	pci_priv->pm_stats.runtime_get_timestamp_id[id] =
4347 		cnss_get_host_timestamp(pci_priv->plat_priv);
4348 }
4349 
4350 static void cnss_pci_pm_runtime_put_record(struct cnss_pci_data *pci_priv,
4351 					   enum cnss_rtpm_id id)
4352 {
4353 	if (id >= RTPM_ID_MAX)
4354 		return;
4355 
4356 	atomic_inc(&pci_priv->pm_stats.runtime_put);
4357 	atomic_inc(&pci_priv->pm_stats.runtime_put_id[id]);
4358 	pci_priv->pm_stats.runtime_put_timestamp_id[id] =
4359 		cnss_get_host_timestamp(pci_priv->plat_priv);
4360 }
4361 
4362 void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
4363 {
4364 	struct device *dev;
4365 
4366 	if (!pci_priv)
4367 		return;
4368 
4369 	dev = &pci_priv->pci_dev->dev;
4370 
4371 	cnss_pr_dbg("Runtime PM usage count: %d\n",
4372 		    atomic_read(&dev->power.usage_count));
4373 }
4374 
4375 int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
4376 {
4377 	struct device *dev;
4378 	enum rpm_status status;
4379 
4380 	if (!pci_priv)
4381 		return -ENODEV;
4382 
4383 	dev = &pci_priv->pci_dev->dev;
4384 
4385 	status = dev->power.runtime_status;
4386 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4387 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4388 			     (void *)_RET_IP_);
4389 
4390 	return pm_request_resume(dev);
4391 }
4392 
4393 int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
4394 {
4395 	struct device *dev;
4396 	enum rpm_status status;
4397 
4398 	if (!pci_priv)
4399 		return -ENODEV;
4400 
4401 	dev = &pci_priv->pci_dev->dev;
4402 
4403 	status = dev->power.runtime_status;
4404 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4405 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4406 			     (void *)_RET_IP_);
4407 
4408 	return pm_runtime_resume(dev);
4409 }
4410 
4411 int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv,
4412 			    enum cnss_rtpm_id id)
4413 {
4414 	struct device *dev;
4415 	enum rpm_status status;
4416 
4417 	if (!pci_priv)
4418 		return -ENODEV;
4419 
4420 	dev = &pci_priv->pci_dev->dev;
4421 
4422 	status = dev->power.runtime_status;
4423 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4424 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4425 			     (void *)_RET_IP_);
4426 
4427 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4428 
4429 	return pm_runtime_get(dev);
4430 }
4431 
4432 int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv,
4433 				 enum cnss_rtpm_id id)
4434 {
4435 	struct device *dev;
4436 	enum rpm_status status;
4437 
4438 	if (!pci_priv)
4439 		return -ENODEV;
4440 
4441 	dev = &pci_priv->pci_dev->dev;
4442 
4443 	status = dev->power.runtime_status;
4444 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4445 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4446 			     (void *)_RET_IP_);
4447 
4448 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4449 
4450 	return pm_runtime_get_sync(dev);
4451 }
4452 
4453 void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv,
4454 				      enum cnss_rtpm_id id)
4455 {
4456 	if (!pci_priv)
4457 		return;
4458 
4459 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4460 	pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
4461 }
4462 
4463 int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv,
4464 					enum cnss_rtpm_id id)
4465 {
4466 	struct device *dev;
4467 
4468 	if (!pci_priv)
4469 		return -ENODEV;
4470 
4471 	dev = &pci_priv->pci_dev->dev;
4472 
4473 	if (atomic_read(&dev->power.usage_count) == 0) {
4474 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
4475 		return -EINVAL;
4476 	}
4477 
4478 	cnss_pci_pm_runtime_put_record(pci_priv, id);
4479 
4480 	return pm_runtime_put_autosuspend(&pci_priv->pci_dev->dev);
4481 }
4482 
4483 void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv,
4484 				    enum cnss_rtpm_id id)
4485 {
4486 	struct device *dev;
4487 
4488 	if (!pci_priv)
4489 		return;
4490 
4491 	dev = &pci_priv->pci_dev->dev;
4492 
4493 	if (atomic_read(&dev->power.usage_count) == 0) {
4494 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
4495 		return;
4496 	}
4497 
4498 	cnss_pci_pm_runtime_put_record(pci_priv, id);
4499 	pm_runtime_put_noidle(&pci_priv->pci_dev->dev);
4500 }
4501 
4502 void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv)
4503 {
4504 	if (!pci_priv)
4505 		return;
4506 
4507 	pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev);
4508 }
4509 
4510 int cnss_auto_suspend(struct device *dev)
4511 {
4512 	int ret = 0;
4513 	struct pci_dev *pci_dev = to_pci_dev(dev);
4514 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4515 	struct cnss_plat_data *plat_priv;
4516 
4517 	if (!pci_priv)
4518 		return -ENODEV;
4519 
4520 	plat_priv = pci_priv->plat_priv;
4521 	if (!plat_priv)
4522 		return -ENODEV;
4523 
4524 	mutex_lock(&pci_priv->bus_lock);
4525 	if (!pci_priv->qmi_send_usage_count) {
4526 		ret = cnss_pci_suspend_bus(pci_priv);
4527 		if (ret) {
4528 			mutex_unlock(&pci_priv->bus_lock);
4529 			return ret;
4530 		}
4531 	}
4532 
4533 	cnss_pci_set_auto_suspended(pci_priv, 1);
4534 	mutex_unlock(&pci_priv->bus_lock);
4535 
4536 	cnss_pci_set_monitor_wake_intr(pci_priv, true);
4537 
4538 	/* For suspend temporarily set bandwidth vote to NONE and dont save in
4539 	 * current_bw_vote as in resume path we should vote for last used
4540 	 * bandwidth vote. Also ignore error if bw voting is not setup.
4541 	 */
4542 	cnss_setup_bus_bandwidth(plat_priv, CNSS_BUS_WIDTH_NONE, false);
4543 	return 0;
4544 }
4545 EXPORT_SYMBOL(cnss_auto_suspend);
4546 
4547 int cnss_auto_resume(struct device *dev)
4548 {
4549 	int ret = 0;
4550 	struct pci_dev *pci_dev = to_pci_dev(dev);
4551 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4552 	struct cnss_plat_data *plat_priv;
4553 
4554 	if (!pci_priv)
4555 		return -ENODEV;
4556 
4557 	plat_priv = pci_priv->plat_priv;
4558 	if (!plat_priv)
4559 		return -ENODEV;
4560 
4561 	mutex_lock(&pci_priv->bus_lock);
4562 	ret = cnss_pci_resume_bus(pci_priv);
4563 	if (ret) {
4564 		mutex_unlock(&pci_priv->bus_lock);
4565 		return ret;
4566 	}
4567 
4568 	cnss_pci_set_auto_suspended(pci_priv, 0);
4569 	mutex_unlock(&pci_priv->bus_lock);
4570 
4571 	cnss_request_bus_bandwidth(dev, plat_priv->icc.current_bw_vote);
4572 	pci_priv->drv_connected_last = 0;
4573 
4574 	return 0;
4575 }
4576 EXPORT_SYMBOL(cnss_auto_resume);
4577 
4578 int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us)
4579 {
4580 	struct pci_dev *pci_dev = to_pci_dev(dev);
4581 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4582 	struct cnss_plat_data *plat_priv;
4583 	struct mhi_controller *mhi_ctrl;
4584 
4585 	if (!pci_priv)
4586 		return -ENODEV;
4587 
4588 	switch (pci_priv->device_id) {
4589 	case QCA6390_DEVICE_ID:
4590 	case QCA6490_DEVICE_ID:
4591 	case KIWI_DEVICE_ID:
4592 	case MANGO_DEVICE_ID:
4593 	case PEACH_DEVICE_ID:
4594 		break;
4595 	default:
4596 		return 0;
4597 	}
4598 
4599 	mhi_ctrl = pci_priv->mhi_ctrl;
4600 	if (!mhi_ctrl)
4601 		return -EINVAL;
4602 
4603 	plat_priv = pci_priv->plat_priv;
4604 	if (!plat_priv)
4605 		return -ENODEV;
4606 
4607 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4608 		return -EAGAIN;
4609 
4610 	if (timeout_us) {
4611 		/* Busy wait for timeout_us */
4612 		return cnss_mhi_device_get_sync_atomic(pci_priv,
4613 						       timeout_us, false);
4614 	} else {
4615 		/* Sleep wait for mhi_ctrl->timeout_ms */
4616 		return mhi_device_get_sync(mhi_ctrl->mhi_dev);
4617 	}
4618 }
4619 EXPORT_SYMBOL(cnss_pci_force_wake_request_sync);
4620 
4621 int cnss_pci_force_wake_request(struct device *dev)
4622 {
4623 	struct pci_dev *pci_dev = to_pci_dev(dev);
4624 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4625 	struct cnss_plat_data *plat_priv;
4626 	struct mhi_controller *mhi_ctrl;
4627 
4628 	if (!pci_priv)
4629 		return -ENODEV;
4630 
4631 	switch (pci_priv->device_id) {
4632 	case QCA6390_DEVICE_ID:
4633 	case QCA6490_DEVICE_ID:
4634 	case KIWI_DEVICE_ID:
4635 	case MANGO_DEVICE_ID:
4636 	case PEACH_DEVICE_ID:
4637 		break;
4638 	default:
4639 		return 0;
4640 	}
4641 
4642 	mhi_ctrl = pci_priv->mhi_ctrl;
4643 	if (!mhi_ctrl)
4644 		return -EINVAL;
4645 
4646 	plat_priv = pci_priv->plat_priv;
4647 	if (!plat_priv)
4648 		return -ENODEV;
4649 
4650 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4651 		return -EAGAIN;
4652 
4653 	mhi_device_get(mhi_ctrl->mhi_dev);
4654 
4655 	return 0;
4656 }
4657 EXPORT_SYMBOL(cnss_pci_force_wake_request);
4658 
4659 int cnss_pci_is_device_awake(struct device *dev)
4660 {
4661 	struct pci_dev *pci_dev = to_pci_dev(dev);
4662 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4663 	struct mhi_controller *mhi_ctrl;
4664 
4665 	if (!pci_priv)
4666 		return -ENODEV;
4667 
4668 	switch (pci_priv->device_id) {
4669 	case QCA6390_DEVICE_ID:
4670 	case QCA6490_DEVICE_ID:
4671 	case KIWI_DEVICE_ID:
4672 	case MANGO_DEVICE_ID:
4673 	case PEACH_DEVICE_ID:
4674 		break;
4675 	default:
4676 		return 0;
4677 	}
4678 
4679 	mhi_ctrl = pci_priv->mhi_ctrl;
4680 	if (!mhi_ctrl)
4681 		return -EINVAL;
4682 
4683 	return (mhi_ctrl->dev_state == MHI_STATE_M0);
4684 }
4685 EXPORT_SYMBOL(cnss_pci_is_device_awake);
4686 
4687 int cnss_pci_force_wake_release(struct device *dev)
4688 {
4689 	struct pci_dev *pci_dev = to_pci_dev(dev);
4690 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4691 	struct cnss_plat_data *plat_priv;
4692 	struct mhi_controller *mhi_ctrl;
4693 
4694 	if (!pci_priv)
4695 		return -ENODEV;
4696 
4697 	switch (pci_priv->device_id) {
4698 	case QCA6390_DEVICE_ID:
4699 	case QCA6490_DEVICE_ID:
4700 	case KIWI_DEVICE_ID:
4701 	case MANGO_DEVICE_ID:
4702 	case PEACH_DEVICE_ID:
4703 		break;
4704 	default:
4705 		return 0;
4706 	}
4707 
4708 	mhi_ctrl = pci_priv->mhi_ctrl;
4709 	if (!mhi_ctrl)
4710 		return -EINVAL;
4711 
4712 	plat_priv = pci_priv->plat_priv;
4713 	if (!plat_priv)
4714 		return -ENODEV;
4715 
4716 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4717 		return -EAGAIN;
4718 
4719 	mhi_device_put(mhi_ctrl->mhi_dev);
4720 
4721 	return 0;
4722 }
4723 EXPORT_SYMBOL(cnss_pci_force_wake_release);
4724 
4725 int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
4726 {
4727 	int ret = 0;
4728 
4729 	if (!pci_priv)
4730 		return -ENODEV;
4731 
4732 	mutex_lock(&pci_priv->bus_lock);
4733 	if (cnss_pci_get_auto_suspended(pci_priv) &&
4734 	    !pci_priv->qmi_send_usage_count)
4735 		ret = cnss_pci_resume_bus(pci_priv);
4736 	pci_priv->qmi_send_usage_count++;
4737 	cnss_pr_buf("Increased QMI send usage count to %d\n",
4738 		    pci_priv->qmi_send_usage_count);
4739 	mutex_unlock(&pci_priv->bus_lock);
4740 
4741 	return ret;
4742 }
4743 
4744 int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
4745 {
4746 	int ret = 0;
4747 
4748 	if (!pci_priv)
4749 		return -ENODEV;
4750 
4751 	mutex_lock(&pci_priv->bus_lock);
4752 	if (pci_priv->qmi_send_usage_count)
4753 		pci_priv->qmi_send_usage_count--;
4754 	cnss_pr_buf("Decreased QMI send usage count to %d\n",
4755 		    pci_priv->qmi_send_usage_count);
4756 	if (cnss_pci_get_auto_suspended(pci_priv) &&
4757 	    !pci_priv->qmi_send_usage_count &&
4758 	    !cnss_pcie_is_device_down(pci_priv))
4759 		ret = cnss_pci_suspend_bus(pci_priv);
4760 	mutex_unlock(&pci_priv->bus_lock);
4761 
4762 	return ret;
4763 }
4764 
4765 int cnss_send_buffer_to_afcmem(struct device *dev, const uint8_t *afcdb,
4766 			       uint32_t len, uint8_t slotid)
4767 {
4768 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4769 	struct cnss_fw_mem *fw_mem;
4770 	void *mem = NULL;
4771 	int i, ret;
4772 	u32 *status;
4773 
4774 	if (!plat_priv)
4775 		return -EINVAL;
4776 
4777 	fw_mem = plat_priv->fw_mem;
4778 	if (slotid >= AFC_MAX_SLOT) {
4779 		cnss_pr_err("Invalid slot id %d\n", slotid);
4780 		ret = -EINVAL;
4781 		goto err;
4782 	}
4783 	if (len > AFC_SLOT_SIZE) {
4784 		cnss_pr_err("len %d greater than slot size", len);
4785 		ret = -EINVAL;
4786 		goto err;
4787 	}
4788 
4789 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4790 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4791 			mem = fw_mem[i].va;
4792 			status = mem + (slotid * AFC_SLOT_SIZE);
4793 			break;
4794 		}
4795 	}
4796 
4797 	if (!mem) {
4798 		cnss_pr_err("AFC mem is not available\n");
4799 		ret = -ENOMEM;
4800 		goto err;
4801 	}
4802 
4803 	memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len);
4804 	if (len < AFC_SLOT_SIZE)
4805 		memset(mem + (slotid * AFC_SLOT_SIZE) + len,
4806 		       0, AFC_SLOT_SIZE - len);
4807 	status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);
4808 
4809 	return 0;
4810 err:
4811 	return ret;
4812 }
4813 EXPORT_SYMBOL(cnss_send_buffer_to_afcmem);
4814 
4815 int cnss_reset_afcmem(struct device *dev, uint8_t slotid)
4816 {
4817 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4818 	struct cnss_fw_mem *fw_mem;
4819 	void *mem = NULL;
4820 	int i, ret;
4821 
4822 	if (!plat_priv)
4823 		return -EINVAL;
4824 
4825 	fw_mem = plat_priv->fw_mem;
4826 	if (slotid >= AFC_MAX_SLOT) {
4827 		cnss_pr_err("Invalid slot id %d\n", slotid);
4828 		ret = -EINVAL;
4829 		goto err;
4830 	}
4831 
4832 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4833 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4834 			mem = fw_mem[i].va;
4835 			break;
4836 		}
4837 	}
4838 
4839 	if (!mem) {
4840 		cnss_pr_err("AFC mem is not available\n");
4841 		ret = -ENOMEM;
4842 		goto err;
4843 	}
4844 
4845 	memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);
4846 	return 0;
4847 
4848 err:
4849 	return ret;
4850 }
4851 EXPORT_SYMBOL(cnss_reset_afcmem);
4852 
4853 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
4854 {
4855 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4856 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
4857 	struct device *dev = &pci_priv->pci_dev->dev;
4858 	int i;
4859 
4860 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4861 		if (!fw_mem[i].va && fw_mem[i].size) {
4862 retry:
4863 			fw_mem[i].va =
4864 				dma_alloc_attrs(dev, fw_mem[i].size,
4865 						&fw_mem[i].pa, GFP_KERNEL,
4866 						fw_mem[i].attrs);
4867 
4868 			if (!fw_mem[i].va) {
4869 				if ((fw_mem[i].attrs &
4870 				    DMA_ATTR_FORCE_CONTIGUOUS)) {
4871 					fw_mem[i].attrs &=
4872 						~DMA_ATTR_FORCE_CONTIGUOUS;
4873 
4874 					cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n",
4875 						    fw_mem[i].type);
4876 					goto retry;
4877 				}
4878 				cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
4879 					    fw_mem[i].size, fw_mem[i].type);
4880 				CNSS_ASSERT(0);
4881 				return -ENOMEM;
4882 			}
4883 		}
4884 	}
4885 
4886 	return 0;
4887 }
4888 
4889 static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
4890 {
4891 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4892 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
4893 	struct device *dev = &pci_priv->pci_dev->dev;
4894 	int i;
4895 
4896 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4897 		if (fw_mem[i].va && fw_mem[i].size) {
4898 			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
4899 				    fw_mem[i].va, &fw_mem[i].pa,
4900 				    fw_mem[i].size, fw_mem[i].type);
4901 			dma_free_attrs(dev, fw_mem[i].size,
4902 				       fw_mem[i].va, fw_mem[i].pa,
4903 				       fw_mem[i].attrs);
4904 			fw_mem[i].va = NULL;
4905 			fw_mem[i].pa = 0;
4906 			fw_mem[i].size = 0;
4907 			fw_mem[i].type = 0;
4908 		}
4909 	}
4910 
4911 	plat_priv->fw_mem_seg_len = 0;
4912 }
4913 
4914 int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
4915 {
4916 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4917 	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
4918 	int i, j;
4919 
4920 	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
4921 		if (!qdss_mem[i].va && qdss_mem[i].size) {
4922 			qdss_mem[i].va =
4923 				dma_alloc_coherent(&pci_priv->pci_dev->dev,
4924 						   qdss_mem[i].size,
4925 						   &qdss_mem[i].pa,
4926 						   GFP_KERNEL);
4927 			if (!qdss_mem[i].va) {
4928 				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chuck-ID: %d\n",
4929 					    qdss_mem[i].size,
4930 					    qdss_mem[i].type, i);
4931 				break;
4932 			}
4933 		}
4934 	}
4935 
4936 	/* Best-effort allocation for QDSS trace */
4937 	if (i < plat_priv->qdss_mem_seg_len) {
4938 		for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
4939 			qdss_mem[j].type = 0;
4940 			qdss_mem[j].size = 0;
4941 		}
4942 		plat_priv->qdss_mem_seg_len = i;
4943 	}
4944 
4945 	return 0;
4946 }
4947 
4948 void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
4949 {
4950 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4951 	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
4952 	int i;
4953 
4954 	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
4955 		if (qdss_mem[i].va && qdss_mem[i].size) {
4956 			cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
4957 				    &qdss_mem[i].pa, qdss_mem[i].size,
4958 				    qdss_mem[i].type);
4959 			dma_free_coherent(&pci_priv->pci_dev->dev,
4960 					  qdss_mem[i].size, qdss_mem[i].va,
4961 					  qdss_mem[i].pa);
4962 			qdss_mem[i].va = NULL;
4963 			qdss_mem[i].pa = 0;
4964 			qdss_mem[i].size = 0;
4965 			qdss_mem[i].type = 0;
4966 		}
4967 	}
4968 	plat_priv->qdss_mem_seg_len = 0;
4969 }
4970 
4971 int cnss_pci_load_tme_patch(struct cnss_pci_data *pci_priv)
4972 {
4973 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4974 	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
4975 	char filename[MAX_FIRMWARE_NAME_LEN];
4976 	char *tme_patch_filename = NULL;
4977 	const struct firmware *fw_entry;
4978 	int ret = 0;
4979 
4980 	switch (pci_priv->device_id) {
4981 	case PEACH_DEVICE_ID:
4982 		if (plat_priv->device_version.major_version == FW_V1_NUMBER)
4983 			tme_patch_filename = TME_PATCH_FILE_NAME_1_0;
4984 		else if (plat_priv->device_version.major_version == FW_V2_NUMBER)
4985 			tme_patch_filename = TME_PATCH_FILE_NAME_2_0;
4986 		break;
4987 	case QCA6174_DEVICE_ID:
4988 	case QCA6290_DEVICE_ID:
4989 	case QCA6390_DEVICE_ID:
4990 	case QCA6490_DEVICE_ID:
4991 	case KIWI_DEVICE_ID:
4992 	case MANGO_DEVICE_ID:
4993 	default:
4994 		cnss_pr_dbg("TME-L not supported for device ID: (0x%x)\n",
4995 			    pci_priv->device_id);
4996 		return 0;
4997 	}
4998 
4999 	if (!tme_lite_mem->va && !tme_lite_mem->size) {
5000 		scnprintf(filename, MAX_FIRMWARE_NAME_LEN, "%s", tme_patch_filename);
5001 
5002 		ret = firmware_request_nowarn(&fw_entry, filename,
5003 					      &pci_priv->pci_dev->dev);
5004 		if (ret) {
5005 			cnss_pr_err("Failed to load TME-L patch: %s, ret: %d\n",
5006 				    filename, ret);
5007 			return ret;
5008 		}
5009 
5010 		tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
5011 						fw_entry->size, &tme_lite_mem->pa,
5012 						GFP_KERNEL);
5013 		if (!tme_lite_mem->va) {
5014 			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
5015 				    fw_entry->size);
5016 			release_firmware(fw_entry);
5017 			return -ENOMEM;
5018 		}
5019 
5020 		memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size);
5021 		tme_lite_mem->size = fw_entry->size;
5022 		release_firmware(fw_entry);
5023 	}
5024 
5025 	return 0;
5026 }
5027 
5028 static void cnss_pci_free_tme_lite_mem(struct cnss_pci_data *pci_priv)
5029 {
5030 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5031 	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
5032 
5033 	if (tme_lite_mem->va && tme_lite_mem->size) {
5034 		cnss_pr_dbg("Freeing memory for TME patch, va: 0x%pK, pa: %pa, size: 0x%zx\n",
5035 			    tme_lite_mem->va, &tme_lite_mem->pa, tme_lite_mem->size);
5036 		dma_free_coherent(&pci_priv->pci_dev->dev, tme_lite_mem->size,
5037 				  tme_lite_mem->va, tme_lite_mem->pa);
5038 	}
5039 
5040 	tme_lite_mem->va = NULL;
5041 	tme_lite_mem->pa = 0;
5042 	tme_lite_mem->size = 0;
5043 }
5044 
5045 int cnss_pci_load_tme_opt_file(struct cnss_pci_data *pci_priv,
5046 				enum wlfw_tme_lite_file_type_v01 file)
5047 {
5048 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5049 	struct cnss_fw_mem *tme_lite_mem = NULL;
5050 	char filename[MAX_FIRMWARE_NAME_LEN];
5051 	char *tme_opt_filename = NULL;
5052 	const struct firmware *fw_entry;
5053 	int ret = 0;
5054 
5055 	switch (pci_priv->device_id) {
5056 	case PEACH_DEVICE_ID:
5057 		if (file == WLFW_TME_LITE_OEM_FUSE_FILE_V01) {
5058 			tme_opt_filename = TME_OEM_FUSE_FILE_NAME;
5059 			tme_lite_mem = &plat_priv->tme_opt_file_mem[0];
5060 		} else if (file == WLFW_TME_LITE_RPR_FILE_V01) {
5061 			tme_opt_filename = TME_RPR_FILE_NAME;
5062 			tme_lite_mem = &plat_priv->tme_opt_file_mem[1];
5063 		} else if (file == WLFW_TME_LITE_DPR_FILE_V01) {
5064 			tme_opt_filename = TME_DPR_FILE_NAME;
5065 			tme_lite_mem = &plat_priv->tme_opt_file_mem[2];
5066 		}
5067 		break;
5068 	case QCA6174_DEVICE_ID:
5069 	case QCA6290_DEVICE_ID:
5070 	case QCA6390_DEVICE_ID:
5071 	case QCA6490_DEVICE_ID:
5072 	case KIWI_DEVICE_ID:
5073 	case MANGO_DEVICE_ID:
5074 	default:
5075 		cnss_pr_dbg("TME-L opt file: %s not supported for device ID: (0x%x)\n",
5076 			    tme_opt_filename, pci_priv->device_id);
5077 		return 0;
5078 	}
5079 
5080 	if (!tme_lite_mem->va && !tme_lite_mem->size) {
5081 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
5082 					    tme_opt_filename);
5083 
5084 		ret = firmware_request_nowarn(&fw_entry, filename,
5085 					      &pci_priv->pci_dev->dev);
5086 		if (ret) {
5087 			cnss_pr_err("Failed to load TME-L opt file: %s, ret: %d\n",
5088 				    filename, ret);
5089 			return ret;
5090 		}
5091 
5092 		tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
5093 						fw_entry->size, &tme_lite_mem->pa,
5094 						GFP_KERNEL);
5095 		if (!tme_lite_mem->va) {
5096 			cnss_pr_err("Failed to allocate memory for TME-L opt file %s,size: 0x%zx\n",
5097 				    filename, fw_entry->size);
5098 			release_firmware(fw_entry);
5099 			return -ENOMEM;
5100 		}
5101 
5102 		memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size);
5103 		tme_lite_mem->size = fw_entry->size;
5104 		release_firmware(fw_entry);
5105 	}
5106 
5107 	return 0;
5108 }
5109 
5110 static void cnss_pci_free_tme_opt_file_mem(struct cnss_pci_data *pci_priv)
5111 {
5112 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5113 	struct cnss_fw_mem *tme_opt_file_mem = plat_priv->tme_opt_file_mem;
5114 	int i = 0;
5115 
5116 	for (i = 0; i < QMI_WLFW_MAX_TME_OPT_FILE_NUM; i++) {
5117 		if (tme_opt_file_mem[i].va && tme_opt_file_mem[i].size) {
5118 			cnss_pr_dbg("Free memory for TME opt file,va:0x%pK, pa:%pa, size:0x%zx\n",
5119 				tme_opt_file_mem[i].va, &tme_opt_file_mem[i].pa,
5120 				tme_opt_file_mem[i].size);
5121 			dma_free_coherent(&pci_priv->pci_dev->dev, tme_opt_file_mem[i].size,
5122 				tme_opt_file_mem[i].va, tme_opt_file_mem[i].pa);
5123 		}
5124 		tme_opt_file_mem[i].va = NULL;
5125 		tme_opt_file_mem[i].pa = 0;
5126 		tme_opt_file_mem[i].size = 0;
5127 	}
5128 }
5129 
5130 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
5131 {
5132 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5133 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
5134 	char filename[MAX_FIRMWARE_NAME_LEN];
5135 	char *phy_filename = DEFAULT_PHY_UCODE_FILE_NAME;
5136 	const struct firmware *fw_entry;
5137 	int ret = 0;
5138 
5139 	/* Use forward compatibility here since for any recent device
5140 	 * it should use DEFAULT_PHY_UCODE_FILE_NAME.
5141 	 */
5142 	switch (pci_priv->device_id) {
5143 	case QCA6174_DEVICE_ID:
5144 		cnss_pr_err("Invalid device ID (0x%x) to load phy image\n",
5145 			    pci_priv->device_id);
5146 		return -EINVAL;
5147 	case QCA6290_DEVICE_ID:
5148 	case QCA6390_DEVICE_ID:
5149 	case QCA6490_DEVICE_ID:
5150 		phy_filename = DEFAULT_PHY_M3_FILE_NAME;
5151 		break;
5152 	case KIWI_DEVICE_ID:
5153 	case MANGO_DEVICE_ID:
5154 	case PEACH_DEVICE_ID:
5155 		switch (plat_priv->device_version.major_version) {
5156 		case FW_V2_NUMBER:
5157 			phy_filename = PHY_UCODE_V2_FILE_NAME;
5158 			break;
5159 		default:
5160 			break;
5161 		}
5162 		break;
5163 	default:
5164 		break;
5165 	}
5166 
5167 	if (!m3_mem->va && !m3_mem->size) {
5168 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
5169 					    phy_filename);
5170 
5171 		ret = firmware_request_nowarn(&fw_entry, filename,
5172 					      &pci_priv->pci_dev->dev);
5173 		if (ret) {
5174 			cnss_pr_err("Failed to load M3 image: %s\n", filename);
5175 			return ret;
5176 		}
5177 
5178 		m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
5179 						fw_entry->size, &m3_mem->pa,
5180 						GFP_KERNEL);
5181 		if (!m3_mem->va) {
5182 			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
5183 				    fw_entry->size);
5184 			release_firmware(fw_entry);
5185 			return -ENOMEM;
5186 		}
5187 
5188 		memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
5189 		m3_mem->size = fw_entry->size;
5190 		release_firmware(fw_entry);
5191 	}
5192 
5193 	return 0;
5194 }
5195 
5196 static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
5197 {
5198 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5199 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
5200 
5201 	if (m3_mem->va && m3_mem->size) {
5202 		cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
5203 			    m3_mem->va, &m3_mem->pa, m3_mem->size);
5204 		dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
5205 				  m3_mem->va, m3_mem->pa);
5206 	}
5207 
5208 	m3_mem->va = NULL;
5209 	m3_mem->pa = 0;
5210 	m3_mem->size = 0;
5211 }
5212 
5213 #ifdef CONFIG_FREE_M3_BLOB_MEM
5214 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
5215 {
5216 	cnss_pci_free_m3_mem(pci_priv);
5217 }
5218 #else
5219 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
5220 {
5221 }
5222 #endif
5223 
5224 int cnss_pci_load_aux(struct cnss_pci_data *pci_priv)
5225 {
5226 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5227 	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
5228 	char filename[MAX_FIRMWARE_NAME_LEN];
5229 	char *aux_filename = DEFAULT_AUX_FILE_NAME;
5230 	const struct firmware *fw_entry;
5231 	int ret = 0;
5232 
5233 	if (!aux_mem->va && !aux_mem->size) {
5234 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
5235 					    aux_filename);
5236 
5237 		ret = firmware_request_nowarn(&fw_entry, filename,
5238 					      &pci_priv->pci_dev->dev);
5239 		if (ret) {
5240 			cnss_pr_err("Failed to load AUX image: %s\n", filename);
5241 			return ret;
5242 		}
5243 
5244 		aux_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
5245 						fw_entry->size, &aux_mem->pa,
5246 						GFP_KERNEL);
5247 		if (!aux_mem->va) {
5248 			cnss_pr_err("Failed to allocate memory for AUX, size: 0x%zx\n",
5249 				    fw_entry->size);
5250 			release_firmware(fw_entry);
5251 			return -ENOMEM;
5252 		}
5253 
5254 		memcpy(aux_mem->va, fw_entry->data, fw_entry->size);
5255 		aux_mem->size = fw_entry->size;
5256 		release_firmware(fw_entry);
5257 	}
5258 
5259 	return 0;
5260 }
5261 
5262 static void cnss_pci_free_aux_mem(struct cnss_pci_data *pci_priv)
5263 {
5264 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5265 	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
5266 
5267 	if (aux_mem->va && aux_mem->size) {
5268 		cnss_pr_dbg("Freeing memory for AUX, va: 0x%pK, pa: %pa, size: 0x%zx\n",
5269 			    aux_mem->va, &aux_mem->pa, aux_mem->size);
5270 		dma_free_coherent(&pci_priv->pci_dev->dev, aux_mem->size,
5271 				  aux_mem->va, aux_mem->pa);
5272 	}
5273 
5274 	aux_mem->va = NULL;
5275 	aux_mem->pa = 0;
5276 	aux_mem->size = 0;
5277 }
5278 
5279 void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
5280 {
5281 	struct cnss_plat_data *plat_priv;
5282 
5283 	if (!pci_priv)
5284 		return;
5285 
5286 	cnss_fatal_err("Timeout waiting for FW ready indication\n");
5287 
5288 	plat_priv = pci_priv->plat_priv;
5289 	if (!plat_priv)
5290 		return;
5291 
5292 	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
5293 		cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n");
5294 		return;
5295 	}
5296 
5297 	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5298 			       CNSS_REASON_TIMEOUT);
5299 }
5300 
5301 static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
5302 {
5303 	pci_priv->iommu_domain = NULL;
5304 }
5305 
5306 int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5307 {
5308 	if (!pci_priv)
5309 		return -ENODEV;
5310 
5311 	if (!pci_priv->smmu_iova_len)
5312 		return -EINVAL;
5313 
5314 	*addr = pci_priv->smmu_iova_start;
5315 	*size = pci_priv->smmu_iova_len;
5316 
5317 	return 0;
5318 }
5319 
5320 int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5321 {
5322 	if (!pci_priv)
5323 		return -ENODEV;
5324 
5325 	if (!pci_priv->smmu_iova_ipa_len)
5326 		return -EINVAL;
5327 
5328 	*addr = pci_priv->smmu_iova_ipa_start;
5329 	*size = pci_priv->smmu_iova_ipa_len;
5330 
5331 	return 0;
5332 }
5333 
5334 bool cnss_pci_is_smmu_s1_enabled(struct cnss_pci_data *pci_priv)
5335 {
5336 	if (pci_priv)
5337 		return pci_priv->smmu_s1_enable;
5338 
5339 	return false;
5340 }
5341 struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
5342 {
5343 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5344 
5345 	if (!pci_priv)
5346 		return NULL;
5347 
5348 	return pci_priv->iommu_domain;
5349 }
5350 EXPORT_SYMBOL(cnss_smmu_get_domain);
5351 
5352 int cnss_smmu_map(struct device *dev,
5353 		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
5354 {
5355 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5356 	struct cnss_plat_data *plat_priv;
5357 	unsigned long iova;
5358 	size_t len;
5359 	int ret = 0;
5360 	int flag = IOMMU_READ | IOMMU_WRITE;
5361 	struct pci_dev *root_port;
5362 	struct device_node *root_of_node;
5363 	bool dma_coherent = false;
5364 
5365 	if (!pci_priv)
5366 		return -ENODEV;
5367 
5368 	if (!iova_addr) {
5369 		cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
5370 			    &paddr, size);
5371 		return -EINVAL;
5372 	}
5373 
5374 	plat_priv = pci_priv->plat_priv;
5375 
5376 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
5377 	iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE);
5378 
5379 	if (pci_priv->iommu_geometry &&
5380 	    iova >= pci_priv->smmu_iova_ipa_start +
5381 		    pci_priv->smmu_iova_ipa_len) {
5382 		cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5383 			    iova,
5384 			    &pci_priv->smmu_iova_ipa_start,
5385 			    pci_priv->smmu_iova_ipa_len);
5386 		return -ENOMEM;
5387 	}
5388 
5389 	if (!test_bit(DISABLE_IO_COHERENCY,
5390 		      &plat_priv->ctrl_params.quirks)) {
5391 		root_port = pcie_find_root_port(pci_priv->pci_dev);
5392 		if (!root_port) {
5393 			cnss_pr_err("Root port is null, so dma_coherent is disabled\n");
5394 		} else {
5395 			root_of_node = root_port->dev.of_node;
5396 			if (root_of_node && root_of_node->parent) {
5397 				dma_coherent =
5398 				    of_property_read_bool(root_of_node->parent,
5399 							  "dma-coherent");
5400 			cnss_pr_dbg("dma-coherent is %s\n",
5401 				    dma_coherent ? "enabled" : "disabled");
5402 			if (dma_coherent)
5403 				flag |= IOMMU_CACHE;
5404 			}
5405 		}
5406 	}
5407 
5408 	cnss_pr_dbg("IOMMU map: iova %lx, len %zu\n", iova, len);
5409 
5410 	ret = cnss_iommu_map(pci_priv->iommu_domain, iova,
5411 			     rounddown(paddr, PAGE_SIZE), len, flag);
5412 	if (ret) {
5413 		cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
5414 		return ret;
5415 	}
5416 
5417 	pci_priv->smmu_iova_ipa_current = iova + len;
5418 	*iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
5419 	cnss_pr_dbg("IOMMU map: iova_addr %lx\n", *iova_addr);
5420 
5421 	return 0;
5422 }
5423 EXPORT_SYMBOL(cnss_smmu_map);
5424 
5425 int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
5426 {
5427 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5428 	unsigned long iova;
5429 	size_t unmapped;
5430 	size_t len;
5431 
5432 	if (!pci_priv)
5433 		return -ENODEV;
5434 
5435 	iova = rounddown(iova_addr, PAGE_SIZE);
5436 	len = roundup(size + iova_addr - iova, PAGE_SIZE);
5437 
5438 	if (iova >= pci_priv->smmu_iova_ipa_start +
5439 		    pci_priv->smmu_iova_ipa_len) {
5440 		cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5441 			    iova,
5442 			    &pci_priv->smmu_iova_ipa_start,
5443 			    pci_priv->smmu_iova_ipa_len);
5444 		return -ENOMEM;
5445 	}
5446 
5447 	cnss_pr_dbg("IOMMU unmap: iova %lx, len %zu\n", iova, len);
5448 
5449 	unmapped = iommu_unmap(pci_priv->iommu_domain, iova, len);
5450 	if (unmapped != len) {
5451 		cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n",
5452 			    unmapped, len);
5453 		return -EINVAL;
5454 	}
5455 
5456 	pci_priv->smmu_iova_ipa_current = iova;
5457 	return 0;
5458 }
5459 EXPORT_SYMBOL(cnss_smmu_unmap);
5460 
5461 int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
5462 {
5463 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5464 	struct cnss_plat_data *plat_priv;
5465 
5466 	if (!pci_priv)
5467 		return -ENODEV;
5468 
5469 	plat_priv = pci_priv->plat_priv;
5470 	if (!plat_priv)
5471 		return -ENODEV;
5472 
5473 	info->va = pci_priv->bar;
5474 	info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
5475 	info->chip_id = plat_priv->chip_info.chip_id;
5476 	info->chip_family = plat_priv->chip_info.chip_family;
5477 	info->board_id = plat_priv->board_info.board_id;
5478 	info->soc_id = plat_priv->soc_info.soc_id;
5479 	info->fw_version = plat_priv->fw_version_info.fw_version;
5480 	strlcpy(info->fw_build_timestamp,
5481 		plat_priv->fw_version_info.fw_build_timestamp,
5482 		sizeof(info->fw_build_timestamp));
5483 	memcpy(&info->device_version, &plat_priv->device_version,
5484 	       sizeof(info->device_version));
5485 	memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info,
5486 	       sizeof(info->dev_mem_info));
5487 	memcpy(&info->fw_build_id, &plat_priv->fw_build_id,
5488 	       sizeof(info->fw_build_id));
5489 
5490 	return 0;
5491 }
5492 EXPORT_SYMBOL(cnss_get_soc_info);
5493 
5494 int cnss_pci_get_user_msi_assignment(struct cnss_pci_data *pci_priv,
5495 				     char *user_name,
5496 				     int *num_vectors,
5497 				     u32 *user_base_data,
5498 				     u32 *base_vector)
5499 {
5500 	return cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5501 					    user_name,
5502 					    num_vectors,
5503 					    user_base_data,
5504 					    base_vector);
5505 }
5506 
5507 static int cnss_pci_irq_set_affinity_hint(struct cnss_pci_data *pci_priv,
5508 					  unsigned int vec,
5509 					  const struct cpumask *cpumask)
5510 {
5511 	int ret;
5512 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5513 
5514 	ret = irq_set_affinity_hint(pci_irq_vector(pci_dev, vec),
5515 				    cpumask);
5516 
5517 	return ret;
5518 }
5519 
5520 static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
5521 {
5522 	int ret = 0;
5523 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5524 	int num_vectors;
5525 	struct cnss_msi_config *msi_config;
5526 
5527 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
5528 		return 0;
5529 
5530 	if (cnss_pci_is_force_one_msi(pci_priv)) {
5531 		ret = cnss_pci_get_one_msi_assignment(pci_priv);
5532 		cnss_pr_dbg("force one msi\n");
5533 	} else {
5534 		ret = cnss_pci_get_msi_assignment(pci_priv);
5535 	}
5536 	if (ret) {
5537 		cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
5538 		goto out;
5539 	}
5540 
5541 	msi_config = pci_priv->msi_config;
5542 	if (!msi_config) {
5543 		cnss_pr_err("msi_config is NULL!\n");
5544 		ret = -EINVAL;
5545 		goto out;
5546 	}
5547 
5548 	num_vectors = pci_alloc_irq_vectors(pci_dev,
5549 					    msi_config->total_vectors,
5550 					    msi_config->total_vectors,
5551 					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
5552 	if ((num_vectors != msi_config->total_vectors) &&
5553 	    !cnss_pci_fallback_one_msi(pci_priv, &num_vectors)) {
5554 		cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d",
5555 			    msi_config->total_vectors, num_vectors);
5556 		if (num_vectors >= 0)
5557 			ret = -EINVAL;
5558 		goto reset_msi_config;
5559 	}
5560 
5561 	/* With VT-d disabled on x86 platform, only one pci irq vector is
5562 	 * allocated. Once suspend the irq may be migrated to CPU0 if it was
5563 	 * affine to other CPU with one new msi vector re-allocated.
5564 	 * The observation cause the issue about no irq handler for vector
5565 	 * once resume.
5566 	 * The fix is to set irq vector affinity to CPU0 before calling
5567 	 * request_irq to avoid the irq migration.
5568 	 */
5569 	if (cnss_pci_is_one_msi(pci_priv)) {
5570 		ret = cnss_pci_irq_set_affinity_hint(pci_priv,
5571 						     0,
5572 						     cpumask_of(0));
5573 		if (ret) {
5574 			cnss_pr_err("Failed to affinize irq vector to CPU0\n");
5575 			goto free_msi_vector;
5576 		}
5577 	}
5578 
5579 	if (cnss_pci_config_msi_addr(pci_priv)) {
5580 		ret = -EINVAL;
5581 		goto free_msi_vector;
5582 	}
5583 
5584 	if (cnss_pci_config_msi_data(pci_priv)) {
5585 		ret = -EINVAL;
5586 		goto free_msi_vector;
5587 	}
5588 
5589 	return 0;
5590 
5591 free_msi_vector:
5592 	if (cnss_pci_is_one_msi(pci_priv))
5593 		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5594 	pci_free_irq_vectors(pci_priv->pci_dev);
5595 reset_msi_config:
5596 	pci_priv->msi_config = NULL;
5597 out:
5598 	return ret;
5599 }
5600 
5601 static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
5602 {
5603 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
5604 		return;
5605 
5606 	if (cnss_pci_is_one_msi(pci_priv))
5607 		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5608 
5609 	pci_free_irq_vectors(pci_priv->pci_dev);
5610 }
5611 
5612 int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
5613 				 int *num_vectors, u32 *user_base_data,
5614 				 u32 *base_vector)
5615 {
5616 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5617 	struct cnss_msi_config *msi_config;
5618 	int idx;
5619 
5620 	if (!pci_priv)
5621 		return -ENODEV;
5622 
5623 	msi_config = pci_priv->msi_config;
5624 	if (!msi_config) {
5625 		cnss_pr_err("MSI is not supported.\n");
5626 		return -EINVAL;
5627 	}
5628 
5629 	for (idx = 0; idx < msi_config->total_users; idx++) {
5630 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
5631 			*num_vectors = msi_config->users[idx].num_vectors;
5632 			*user_base_data = msi_config->users[idx].base_vector
5633 				+ pci_priv->msi_ep_base_data;
5634 			*base_vector = msi_config->users[idx].base_vector;
5635 			/*Add only single print for each user*/
5636 			if (print_optimize.msi_log_chk[idx]++)
5637 				goto skip_print;
5638 
5639 			cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
5640 				    user_name, *num_vectors, *user_base_data,
5641 				    *base_vector);
5642 skip_print:
5643 			return 0;
5644 		}
5645 	}
5646 
5647 	cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
5648 
5649 	return -EINVAL;
5650 }
5651 EXPORT_SYMBOL(cnss_get_user_msi_assignment);
5652 
5653 int cnss_get_msi_irq(struct device *dev, unsigned int vector)
5654 {
5655 	struct pci_dev *pci_dev = to_pci_dev(dev);
5656 	int irq_num;
5657 
5658 	irq_num = pci_irq_vector(pci_dev, vector);
5659 	cnss_pr_dbg("Get IRQ number %d for vector index %d\n", irq_num, vector);
5660 
5661 	return irq_num;
5662 }
5663 EXPORT_SYMBOL(cnss_get_msi_irq);
5664 
5665 bool cnss_is_one_msi(struct device *dev)
5666 {
5667 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5668 
5669 	if (!pci_priv)
5670 		return false;
5671 
5672 	return cnss_pci_is_one_msi(pci_priv);
5673 }
5674 EXPORT_SYMBOL(cnss_is_one_msi);
5675 
5676 void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
5677 			  u32 *msi_addr_high)
5678 {
5679 	struct pci_dev *pci_dev = to_pci_dev(dev);
5680 	struct cnss_pci_data *pci_priv;
5681 	u16 control;
5682 
5683 	if (!pci_dev)
5684 		return;
5685 
5686 	pci_priv = cnss_get_pci_priv(pci_dev);
5687 	if (!pci_priv)
5688 		return;
5689 
5690 	if (pci_dev->msix_enabled) {
5691 		*msi_addr_low = pci_priv->msix_addr;
5692 		*msi_addr_high = 0;
5693 		if (!print_optimize.msi_addr_chk++)
5694 			cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
5695 				    *msi_addr_low, *msi_addr_high);
5696 		return;
5697 	}
5698 
5699 	pci_read_config_word(pci_dev, pci_dev->msi_cap + PCI_MSI_FLAGS,
5700 			     &control);
5701 	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
5702 			      msi_addr_low);
5703 	/* Return MSI high address only when device supports 64-bit MSI */
5704 	if (control & PCI_MSI_FLAGS_64BIT)
5705 		pci_read_config_dword(pci_dev,
5706 				      pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
5707 				      msi_addr_high);
5708 	else
5709 		*msi_addr_high = 0;
5710 	 /*Add only single print as the address is constant*/
5711 	 if (!print_optimize.msi_addr_chk++)
5712 		cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
5713 			    *msi_addr_low, *msi_addr_high);
5714 }
5715 EXPORT_SYMBOL(cnss_get_msi_address);
5716 
5717 u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
5718 {
5719 	int ret, num_vectors;
5720 	u32 user_base_data, base_vector;
5721 
5722 	if (!pci_priv)
5723 		return -ENODEV;
5724 
5725 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5726 					   WAKE_MSI_NAME, &num_vectors,
5727 					   &user_base_data, &base_vector);
5728 	if (ret) {
5729 		cnss_pr_err("WAKE MSI is not valid\n");
5730 		return 0;
5731 	}
5732 
5733 	return user_base_data;
5734 }
5735 
5736 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
5737 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
5738 {
5739 	return dma_set_mask(&pci_dev->dev, mask);
5740 }
5741 
5742 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
5743 	u64 mask)
5744 {
5745 	return dma_set_coherent_mask(&pci_dev->dev, mask);
5746 }
5747 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5748 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
5749 {
5750 	return pci_set_dma_mask(pci_dev, mask);
5751 }
5752 
5753 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
5754 	u64 mask)
5755 {
5756 	return pci_set_consistent_dma_mask(pci_dev, mask);
5757 }
5758 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5759 
5760 static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
5761 {
5762 	int ret = 0;
5763 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5764 	u16 device_id;
5765 
5766 	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
5767 	if (device_id != pci_priv->pci_device_id->device)  {
5768 		cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
5769 			    device_id, pci_priv->pci_device_id->device);
5770 		ret = -EIO;
5771 		goto out;
5772 	}
5773 
5774 	ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
5775 	if (ret) {
5776 		pr_err("Failed to assign PCI resource, err = %d\n", ret);
5777 		goto out;
5778 	}
5779 
5780 	ret = pci_enable_device(pci_dev);
5781 	if (ret) {
5782 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
5783 		goto out;
5784 	}
5785 
5786 	ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
5787 	if (ret) {
5788 		cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
5789 		goto disable_device;
5790 	}
5791 
5792 	switch (device_id) {
5793 	case QCA6174_DEVICE_ID:
5794 	case QCN7605_DEVICE_ID:
5795 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5796 		break;
5797 	case QCA6390_DEVICE_ID:
5798 	case QCA6490_DEVICE_ID:
5799 	case KIWI_DEVICE_ID:
5800 	case MANGO_DEVICE_ID:
5801 	case PEACH_DEVICE_ID:
5802 		pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
5803 		break;
5804 	default:
5805 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5806 		break;
5807 	}
5808 
5809 	cnss_pr_dbg("Set PCI DMA MASK (0x%llx)\n", pci_priv->dma_bit_mask);
5810 
5811 	ret = cnss_pci_set_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5812 	if (ret) {
5813 		cnss_pr_err("Failed to set PCI DMA mask, err = %d\n", ret);
5814 		goto release_region;
5815 	}
5816 
5817 	ret = cnss_pci_set_coherent_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5818 	if (ret) {
5819 		cnss_pr_err("Failed to set PCI coherent DMA mask, err = %d\n",
5820 			    ret);
5821 		goto release_region;
5822 	}
5823 
5824 	pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
5825 	if (!pci_priv->bar) {
5826 		cnss_pr_err("Failed to do PCI IO map!\n");
5827 		ret = -EIO;
5828 		goto release_region;
5829 	}
5830 
5831 	/* Save default config space without BME enabled */
5832 	pci_save_state(pci_dev);
5833 	pci_priv->default_state = pci_store_saved_state(pci_dev);
5834 
5835 	pci_set_master(pci_dev);
5836 
5837 	return 0;
5838 
5839 release_region:
5840 	pci_release_region(pci_dev, PCI_BAR_NUM);
5841 disable_device:
5842 	pci_disable_device(pci_dev);
5843 out:
5844 	return ret;
5845 }
5846 
5847 static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
5848 {
5849 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5850 
5851 	pci_clear_master(pci_dev);
5852 	pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
5853 	pci_load_and_free_saved_state(pci_dev, &pci_priv->default_state);
5854 
5855 	if (pci_priv->bar) {
5856 		pci_iounmap(pci_dev, pci_priv->bar);
5857 		pci_priv->bar = NULL;
5858 	}
5859 
5860 	pci_release_region(pci_dev, PCI_BAR_NUM);
5861 	if (pci_is_enabled(pci_dev))
5862 		pci_disable_device(pci_dev);
5863 }
5864 
5865 static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
5866 {
5867 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5868 	int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
5869 	gfp_t gfp = GFP_KERNEL;
5870 	u32 reg_offset;
5871 
5872 	if (in_interrupt() || irqs_disabled())
5873 		gfp = GFP_ATOMIC;
5874 
5875 	if (!plat_priv->qdss_reg) {
5876 		plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
5877 						   sizeof(*plat_priv->qdss_reg)
5878 						   * array_size, gfp);
5879 		if (!plat_priv->qdss_reg)
5880 			return;
5881 	}
5882 
5883 	cnss_pr_dbg("Start to dump qdss registers\n");
5884 
5885 	for (i = 0; qdss_csr[i].name; i++) {
5886 		reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
5887 		if (cnss_pci_reg_read(pci_priv, reg_offset,
5888 				      &plat_priv->qdss_reg[i]))
5889 			return;
5890 		cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
5891 			    plat_priv->qdss_reg[i]);
5892 	}
5893 }
5894 
5895 static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
5896 				 enum cnss_ce_index ce)
5897 {
5898 	int i;
5899 	u32 ce_base = ce * CE_REG_INTERVAL;
5900 	u32 reg_offset, src_ring_base, dst_ring_base, cmn_base, val;
5901 
5902 	switch (pci_priv->device_id) {
5903 	case QCA6390_DEVICE_ID:
5904 		src_ring_base = QCA6390_CE_SRC_RING_REG_BASE;
5905 		dst_ring_base = QCA6390_CE_DST_RING_REG_BASE;
5906 		cmn_base = QCA6390_CE_COMMON_REG_BASE;
5907 		break;
5908 	case QCA6490_DEVICE_ID:
5909 		src_ring_base = QCA6490_CE_SRC_RING_REG_BASE;
5910 		dst_ring_base = QCA6490_CE_DST_RING_REG_BASE;
5911 		cmn_base = QCA6490_CE_COMMON_REG_BASE;
5912 		break;
5913 	default:
5914 		return;
5915 	}
5916 
5917 	switch (ce) {
5918 	case CNSS_CE_09:
5919 	case CNSS_CE_10:
5920 		for (i = 0; ce_src[i].name; i++) {
5921 			reg_offset = src_ring_base + ce_base + ce_src[i].offset;
5922 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5923 				return;
5924 			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
5925 				    ce, ce_src[i].name, reg_offset, val);
5926 		}
5927 
5928 		for (i = 0; ce_dst[i].name; i++) {
5929 			reg_offset = dst_ring_base + ce_base + ce_dst[i].offset;
5930 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5931 				return;
5932 			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
5933 				    ce, ce_dst[i].name, reg_offset, val);
5934 		}
5935 		break;
5936 	case CNSS_CE_COMMON:
5937 		for (i = 0; ce_cmn[i].name; i++) {
5938 			reg_offset = cmn_base  + ce_cmn[i].offset;
5939 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5940 				return;
5941 			cnss_pr_dbg("CE_COMMON_%s[0x%x] = 0x%x\n",
5942 				    ce_cmn[i].name, reg_offset, val);
5943 		}
5944 		break;
5945 	default:
5946 		cnss_pr_err("Unsupported CE[%d] registers dump\n", ce);
5947 	}
5948 }
5949 
5950 static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv)
5951 {
5952 	if (cnss_pci_check_link_status(pci_priv))
5953 		return;
5954 
5955 	cnss_pr_dbg("Start to dump debug registers\n");
5956 
5957 	cnss_mhi_debug_reg_dump(pci_priv);
5958 	cnss_pci_bhi_debug_reg_dump(pci_priv);
5959 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5960 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON);
5961 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09);
5962 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
5963 }
5964 
5965 static int cnss_pci_assert_host_sol(struct cnss_pci_data *pci_priv)
5966 {
5967 	int ret;
5968 
5969 	ret = cnss_get_host_sol_value(pci_priv->plat_priv);
5970 	if (ret) {
5971 		if (ret < 0) {
5972 			cnss_pr_dbg("Host SOL functionality is not enabled\n");
5973 			return ret;
5974 		} else {
5975 			cnss_pr_dbg("Host SOL is already high\n");
5976 			/*
5977 			 * Return success if HOST SOL is already high.
5978 			 * This will indicate caller that a HOST SOL is
5979 			 * already asserted from some other thread and
5980 			 * no further action required from the caller.
5981 			 */
5982 			return 0;
5983 		}
5984 	}
5985 
5986 	cnss_pr_dbg("Assert host SOL GPIO to retry RDDM, expecting link down\n");
5987 	cnss_set_host_sol_value(pci_priv->plat_priv, 1);
5988 
5989 	return 0;
5990 }
5991 
5992 static void cnss_pci_mhi_reg_dump(struct cnss_pci_data *pci_priv)
5993 {
5994 	if (!cnss_pci_check_link_status(pci_priv))
5995 		cnss_mhi_debug_reg_dump(pci_priv);
5996 
5997 	cnss_pci_bhi_debug_reg_dump(pci_priv);
5998 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5999 	cnss_pci_dump_misc_reg(pci_priv);
6000 	cnss_pci_dump_shadow_reg(pci_priv);
6001 }
6002 
6003 int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
6004 {
6005 	int ret;
6006 	int retry = 0;
6007 	enum mhi_ee_type mhi_ee;
6008 
6009 	switch (pci_priv->device_id) {
6010 	case QCA6390_DEVICE_ID:
6011 	case QCA6490_DEVICE_ID:
6012 	case KIWI_DEVICE_ID:
6013 	case MANGO_DEVICE_ID:
6014 	case PEACH_DEVICE_ID:
6015 		break;
6016 	default:
6017 		return -EOPNOTSUPP;
6018 	}
6019 
6020 	/* Always wait here to avoid missing WAKE assert for RDDM
6021 	 * before link recovery
6022 	 */
6023 	ret = wait_for_completion_timeout(&pci_priv->wake_event_complete,
6024 					  msecs_to_jiffies(WAKE_EVENT_TIMEOUT));
6025 	if (!ret)
6026 		cnss_pr_err("Timeout waiting for wake event after link down\n");
6027 
6028 	ret = cnss_suspend_pci_link(pci_priv);
6029 	if (ret)
6030 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
6031 
6032 	ret = cnss_resume_pci_link(pci_priv);
6033 	if (ret) {
6034 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
6035 		del_timer(&pci_priv->dev_rddm_timer);
6036 		return ret;
6037 	}
6038 
6039 retry:
6040 	/*
6041 	 * After PCIe link resumes, 20 to 400 ms delay is observerved
6042 	 * before device moves to RDDM.
6043 	 */
6044 	msleep(RDDM_LINK_RECOVERY_RETRY_DELAY_MS);
6045 	mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
6046 	if (mhi_ee == MHI_EE_RDDM) {
6047 		del_timer(&pci_priv->dev_rddm_timer);
6048 		cnss_pr_info("Device in RDDM after link recovery, try to collect dump\n");
6049 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6050 				       CNSS_REASON_RDDM);
6051 		return 0;
6052 	} else if (retry++ < RDDM_LINK_RECOVERY_RETRY) {
6053 		cnss_pr_dbg("Wait for RDDM after link recovery, retry #%d, Device EE: %d\n",
6054 			    retry, mhi_ee);
6055 		goto retry;
6056 	}
6057 
6058 	if (!cnss_pci_assert_host_sol(pci_priv))
6059 		return 0;
6060 	cnss_mhi_debug_reg_dump(pci_priv);
6061 	cnss_pci_bhi_debug_reg_dump(pci_priv);
6062 	cnss_pci_soc_scratch_reg_dump(pci_priv);
6063 	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6064 			       CNSS_REASON_TIMEOUT);
6065 	return 0;
6066 }
6067 
6068 int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
6069 {
6070 	int ret;
6071 	struct cnss_plat_data *plat_priv;
6072 
6073 	if (!pci_priv)
6074 		return -ENODEV;
6075 
6076 	plat_priv = pci_priv->plat_priv;
6077 	if (!plat_priv)
6078 		return -ENODEV;
6079 
6080 	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
6081 	    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
6082 		return -EINVAL;
6083 	/*
6084 	 * Call pm_runtime_get_sync insteat of auto_resume to get
6085 	 * reference and make sure runtime_suspend wont get called.
6086 	 */
6087 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
6088 	if (ret < 0)
6089 		goto runtime_pm_put;
6090 	/*
6091 	 * In some scenarios, cnss_pci_pm_runtime_get_sync
6092 	 * might not resume PCI bus. For those cases do auto resume.
6093 	 */
6094 	cnss_auto_resume(&pci_priv->pci_dev->dev);
6095 
6096 	if (!pci_priv->is_smmu_fault)
6097 		cnss_pci_mhi_reg_dump(pci_priv);
6098 
6099 	/* If link is still down here, directly trigger link down recovery */
6100 	ret = cnss_pci_check_link_status(pci_priv);
6101 	if (ret) {
6102 		cnss_pci_link_down(&pci_priv->pci_dev->dev);
6103 		cnss_pci_pm_runtime_mark_last_busy(pci_priv);
6104 		cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
6105 		return 0;
6106 	}
6107 
6108 	/*
6109 	 * Fist try MHI SYS_ERR, if fails try HOST SOL and return.
6110 	 * If SOL is not enabled try HOST Reset Rquest after MHI
6111 	 * SYS_ERRR fails.
6112 	 */
6113 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
6114 	if (ret) {
6115 		if (pci_priv->is_smmu_fault) {
6116 			cnss_pci_mhi_reg_dump(pci_priv);
6117 			pci_priv->is_smmu_fault = false;
6118 		}
6119 		if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
6120 		    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
6121 			cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
6122 			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
6123 			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
6124 			return 0;
6125 		}
6126 		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
6127 		if (!cnss_pci_assert_host_sol(pci_priv)) {
6128 			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
6129 			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
6130 			return 0;
6131 		}
6132 
6133 		cnss_pr_dbg("Sending Host Reset Req\n");
6134 		if (!cnss_mhi_force_reset(pci_priv)) {
6135 			ret = 0;
6136 			goto mhi_reg_dump;
6137 		}
6138 
6139 		cnss_pci_dump_debug_reg(pci_priv);
6140 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6141 				       CNSS_REASON_DEFAULT);
6142 		ret = 0;
6143 		goto runtime_pm_put;
6144 	}
6145 
6146 mhi_reg_dump:
6147 	if (pci_priv->is_smmu_fault) {
6148 		cnss_pci_mhi_reg_dump(pci_priv);
6149 		pci_priv->is_smmu_fault = false;
6150 	}
6151 
6152 	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
6153 		mod_timer(&pci_priv->dev_rddm_timer,
6154 			  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
6155 	}
6156 
6157 runtime_pm_put:
6158 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
6159 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
6160 	return ret;
6161 }
6162 
6163 static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
6164 				  struct cnss_dump_seg *dump_seg,
6165 				  enum cnss_fw_dump_type type, int seg_no,
6166 				  void *va, dma_addr_t dma, size_t size)
6167 {
6168 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6169 	struct device *dev = &pci_priv->pci_dev->dev;
6170 	phys_addr_t pa;
6171 
6172 	dump_seg->address = dma;
6173 	dump_seg->v_address = va;
6174 	dump_seg->size = size;
6175 	dump_seg->type = type;
6176 
6177 	cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
6178 		    seg_no, va, &dma, size);
6179 
6180 	if (type == CNSS_FW_CAL || cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
6181 		return;
6182 
6183 	cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size);
6184 }
6185 
6186 static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv,
6187 				     struct cnss_dump_seg *dump_seg,
6188 				     enum cnss_fw_dump_type type, int seg_no,
6189 				     void *va, dma_addr_t dma, size_t size)
6190 {
6191 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6192 	struct device *dev = &pci_priv->pci_dev->dev;
6193 	phys_addr_t pa;
6194 
6195 	cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS);
6196 	cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size);
6197 }
6198 
6199 int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv,
6200 				enum cnss_driver_status status, void *data)
6201 {
6202 	struct cnss_uevent_data uevent_data;
6203 	struct cnss_wlan_driver *driver_ops;
6204 
6205 	driver_ops = pci_priv->driver_ops;
6206 	if (!driver_ops || !driver_ops->update_event) {
6207 		cnss_pr_dbg("Hang event driver ops is NULL\n");
6208 		return -EINVAL;
6209 	}
6210 
6211 	cnss_pr_dbg("Calling driver uevent: %d\n", status);
6212 
6213 	uevent_data.status = status;
6214 	uevent_data.data = data;
6215 
6216 	return driver_ops->update_event(pci_priv->pci_dev, &uevent_data);
6217 }
6218 
6219 static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv)
6220 {
6221 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6222 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6223 	struct cnss_hang_event hang_event;
6224 	void *hang_data_va = NULL;
6225 	u64 offset = 0;
6226 	u16 length = 0;
6227 	int i = 0;
6228 
6229 	if (!fw_mem || !plat_priv->fw_mem_seg_len)
6230 		return;
6231 
6232 	memset(&hang_event, 0, sizeof(hang_event));
6233 	switch (pci_priv->device_id) {
6234 	case QCA6390_DEVICE_ID:
6235 		offset = HST_HANG_DATA_OFFSET;
6236 		length = HANG_DATA_LENGTH;
6237 		break;
6238 	case QCA6490_DEVICE_ID:
6239 		/* Fallback to hard-coded values if hang event params not
6240 		 * present in QMI. Once all the firmware branches have the
6241 		 * fix to send params over QMI, this can be removed.
6242 		 */
6243 		if (plat_priv->hang_event_data_len) {
6244 			offset = plat_priv->hang_data_addr_offset;
6245 			length = plat_priv->hang_event_data_len;
6246 		} else {
6247 			offset = HSP_HANG_DATA_OFFSET;
6248 			length = HANG_DATA_LENGTH;
6249 		}
6250 		break;
6251 	case KIWI_DEVICE_ID:
6252 	case MANGO_DEVICE_ID:
6253 	case PEACH_DEVICE_ID:
6254 		offset = plat_priv->hang_data_addr_offset;
6255 		length = plat_priv->hang_event_data_len;
6256 		break;
6257 	case QCN7605_DEVICE_ID:
6258 		offset = GNO_HANG_DATA_OFFSET;
6259 		length = HANG_DATA_LENGTH;
6260 		break;
6261 	default:
6262 		cnss_pr_err("Skip Hang Event Data as unsupported Device ID received: %d\n",
6263 			    pci_priv->device_id);
6264 		return;
6265 	}
6266 
6267 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
6268 		if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 &&
6269 		    fw_mem[i].va) {
6270 			/* The offset must be < (fw_mem size- hangdata length) */
6271 			if (!(offset <= fw_mem[i].size - length))
6272 				goto exit;
6273 
6274 			hang_data_va = fw_mem[i].va + offset;
6275 			hang_event.hang_event_data = kmemdup(hang_data_va,
6276 							     length,
6277 							     GFP_ATOMIC);
6278 			if (!hang_event.hang_event_data) {
6279 				cnss_pr_dbg("Hang data memory alloc failed\n");
6280 				return;
6281 			}
6282 			hang_event.hang_event_data_len = length;
6283 			break;
6284 		}
6285 	}
6286 
6287 	cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event);
6288 
6289 	kfree(hang_event.hang_event_data);
6290 	hang_event.hang_event_data = NULL;
6291 	return;
6292 exit:
6293 	cnss_pr_dbg("Invalid hang event params, offset:0x%x, length:0x%x\n",
6294 		    plat_priv->hang_data_addr_offset,
6295 		    plat_priv->hang_event_data_len);
6296 }
6297 
6298 #ifdef CONFIG_CNSS2_SSR_DRIVER_DUMP
6299 void cnss_pci_collect_host_dump_info(struct cnss_pci_data *pci_priv)
6300 {
6301 	struct cnss_ssr_driver_dump_entry *ssr_entry;
6302 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6303 	size_t num_entries_loaded = 0;
6304 	int x;
6305 	int ret = -1;
6306 
6307 	ssr_entry = kmalloc(sizeof(*ssr_entry) * CNSS_HOST_DUMP_TYPE_MAX, GFP_KERNEL);
6308 	if (!ssr_entry) {
6309 		cnss_pr_err("ssr_entry malloc failed");
6310 		return;
6311 	}
6312 
6313 	if (pci_priv->driver_ops &&
6314 	    pci_priv->driver_ops->collect_driver_dump) {
6315 		ret = pci_priv->driver_ops->collect_driver_dump(pci_priv->pci_dev,
6316 								ssr_entry,
6317 								&num_entries_loaded);
6318 	}
6319 
6320 	if (!ret) {
6321 		for (x = 0; x < num_entries_loaded; x++) {
6322 			cnss_pr_info("Idx:%d, ptr: %p, name: %s, size: %d\n",
6323 				     x, ssr_entry[x].buffer_pointer,
6324 				     ssr_entry[x].region_name,
6325 				     ssr_entry[x].buffer_size);
6326 		}
6327 
6328 		cnss_do_host_ramdump(plat_priv, ssr_entry, num_entries_loaded);
6329 	} else {
6330 		cnss_pr_info("Host SSR elf dump collection feature disabled\n");
6331 	}
6332 
6333 	kfree(ssr_entry);
6334 }
6335 #endif
6336 
6337 void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
6338 {
6339 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6340 	struct cnss_dump_data *dump_data =
6341 		&plat_priv->ramdump_info_v2.dump_data;
6342 	struct cnss_dump_seg *dump_seg =
6343 		plat_priv->ramdump_info_v2.dump_data_vaddr;
6344 	struct image_info *fw_image, *rddm_image;
6345 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6346 	int ret, i, j;
6347 
6348 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
6349 	    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
6350 		cnss_pci_send_hang_event(pci_priv);
6351 
6352 	if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
6353 		cnss_pr_dbg("RAM dump is already collected, skip\n");
6354 		return;
6355 	}
6356 
6357 	if (!cnss_is_device_powered_on(plat_priv)) {
6358 		cnss_pr_dbg("Device is already powered off, skip\n");
6359 		return;
6360 	}
6361 
6362 	if (!in_panic) {
6363 		mutex_lock(&pci_priv->bus_lock);
6364 		ret = cnss_pci_check_link_status(pci_priv);
6365 		if (ret) {
6366 			if (ret != -EACCES) {
6367 				mutex_unlock(&pci_priv->bus_lock);
6368 				return;
6369 			}
6370 			if (cnss_pci_resume_bus(pci_priv)) {
6371 				mutex_unlock(&pci_priv->bus_lock);
6372 				return;
6373 			}
6374 		}
6375 		mutex_unlock(&pci_priv->bus_lock);
6376 	} else {
6377 		if (cnss_pci_check_link_status(pci_priv))
6378 			return;
6379 		/* Inside panic handler, reduce timeout for RDDM to avoid
6380 		 * unnecessary hypervisor watchdog bite.
6381 		 */
6382 		pci_priv->mhi_ctrl->timeout_ms /= 2;
6383 	}
6384 
6385 	cnss_mhi_debug_reg_dump(pci_priv);
6386 	cnss_pci_bhi_debug_reg_dump(pci_priv);
6387 	cnss_pci_soc_scratch_reg_dump(pci_priv);
6388 	cnss_pci_dump_misc_reg(pci_priv);
6389 	cnss_rddm_trigger_debug(pci_priv);
6390 	ret = mhi_download_rddm_image(pci_priv->mhi_ctrl, in_panic);
6391 	if (ret) {
6392 		cnss_fatal_err("Failed to download RDDM image, err = %d\n",
6393 			       ret);
6394 		if (!cnss_pci_assert_host_sol(pci_priv))
6395 			return;
6396 		cnss_rddm_trigger_check(pci_priv);
6397 		cnss_pci_dump_debug_reg(pci_priv);
6398 		return;
6399 	}
6400 	cnss_rddm_trigger_check(pci_priv);
6401 	fw_image = pci_priv->mhi_ctrl->fbc_image;
6402 	rddm_image = pci_priv->mhi_ctrl->rddm_image;
6403 	dump_data->nentries = 0;
6404 
6405 	if (plat_priv->qdss_mem_seg_len)
6406 		cnss_pci_dump_qdss_reg(pci_priv);
6407 	cnss_mhi_dump_sfr(pci_priv);
6408 
6409 	if (!dump_seg) {
6410 		cnss_pr_warn("FW image dump collection not setup");
6411 		goto skip_dump;
6412 	}
6413 
6414 	cnss_pr_dbg("Collect FW image dump segment, nentries %d\n",
6415 		    fw_image->entries);
6416 
6417 	for (i = 0; i < fw_image->entries; i++) {
6418 		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6419 				      fw_image->mhi_buf[i].buf,
6420 				      fw_image->mhi_buf[i].dma_addr,
6421 				      fw_image->mhi_buf[i].len);
6422 		dump_seg++;
6423 	}
6424 
6425 	dump_data->nentries += fw_image->entries;
6426 
6427 	cnss_pr_dbg("Collect RDDM image dump segment, nentries %d\n",
6428 		    rddm_image->entries);
6429 
6430 	for (i = 0; i < rddm_image->entries; i++) {
6431 		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6432 				      rddm_image->mhi_buf[i].buf,
6433 				      rddm_image->mhi_buf[i].dma_addr,
6434 				      rddm_image->mhi_buf[i].len);
6435 		dump_seg++;
6436 	}
6437 
6438 	dump_data->nentries += rddm_image->entries;
6439 
6440 	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6441 		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
6442 			if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
6443 				cnss_pr_dbg("Collect remote heap dump segment\n");
6444 				cnss_pci_add_dump_seg(pci_priv, dump_seg,
6445 						      CNSS_FW_REMOTE_HEAP, j,
6446 						      fw_mem[i].va,
6447 						      fw_mem[i].pa,
6448 						      fw_mem[i].size);
6449 				dump_seg++;
6450 				dump_data->nentries++;
6451 				j++;
6452 			} else {
6453 				cnss_pr_dbg("Skip remote heap dumps as it is non-contiguous\n");
6454 			}
6455 		} else if (fw_mem[i].type == CNSS_MEM_CAL_V01) {
6456 				cnss_pr_dbg("Collect CAL memory dump segment\n");
6457 				cnss_pci_add_dump_seg(pci_priv, dump_seg,
6458 						      CNSS_FW_CAL, j,
6459 						      fw_mem[i].va,
6460 						      fw_mem[i].pa,
6461 						      fw_mem[i].size);
6462 				dump_seg++;
6463 				dump_data->nentries++;
6464 				j++;
6465 		}
6466 	}
6467 
6468 	if (dump_data->nentries > 0)
6469 		plat_priv->ramdump_info_v2.dump_data_valid = true;
6470 
6471 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);
6472 
6473 skip_dump:
6474 	complete(&plat_priv->rddm_complete);
6475 }
6476 
6477 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
6478 {
6479 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6480 	struct cnss_dump_seg *dump_seg =
6481 		plat_priv->ramdump_info_v2.dump_data_vaddr;
6482 	struct image_info *fw_image, *rddm_image;
6483 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6484 	int i, j;
6485 
6486 	if (!dump_seg)
6487 		return;
6488 
6489 	fw_image = pci_priv->mhi_ctrl->fbc_image;
6490 	rddm_image = pci_priv->mhi_ctrl->rddm_image;
6491 
6492 	for (i = 0; i < fw_image->entries; i++) {
6493 		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6494 					 fw_image->mhi_buf[i].buf,
6495 					 fw_image->mhi_buf[i].dma_addr,
6496 					 fw_image->mhi_buf[i].len);
6497 		dump_seg++;
6498 	}
6499 
6500 	for (i = 0; i < rddm_image->entries; i++) {
6501 		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6502 					 rddm_image->mhi_buf[i].buf,
6503 					 rddm_image->mhi_buf[i].dma_addr,
6504 					 rddm_image->mhi_buf[i].len);
6505 		dump_seg++;
6506 	}
6507 
6508 	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6509 		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR &&
6510 		    (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
6511 			cnss_pci_remove_dump_seg(pci_priv, dump_seg,
6512 						 CNSS_FW_REMOTE_HEAP, j,
6513 						 fw_mem[i].va, fw_mem[i].pa,
6514 						 fw_mem[i].size);
6515 			dump_seg++;
6516 			j++;
6517 		} else if (fw_mem[i].type == CNSS_MEM_CAL_V01) {
6518 			cnss_pci_remove_dump_seg(pci_priv, dump_seg,
6519 						 CNSS_FW_CAL, j,
6520 						 fw_mem[i].va, fw_mem[i].pa,
6521 						 fw_mem[i].size);
6522 			dump_seg++;
6523 			j++;
6524 		}
6525 	}
6526 
6527 	plat_priv->ramdump_info_v2.dump_data.nentries = 0;
6528 	plat_priv->ramdump_info_v2.dump_data_valid = false;
6529 }
6530 
6531 void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv)
6532 {
6533 	struct cnss_plat_data *plat_priv;
6534 
6535 	if (!pci_priv) {
6536 		cnss_pr_err("pci_priv is NULL\n");
6537 		return;
6538 	}
6539 
6540 	plat_priv = pci_priv->plat_priv;
6541 	if (!plat_priv) {
6542 		cnss_pr_err("plat_priv is NULL\n");
6543 		return;
6544 	}
6545 
6546 	if (plat_priv->recovery_enabled)
6547 		cnss_pci_collect_host_dump_info(pci_priv);
6548 
6549 	/* Call recovery handler in the DRIVER_RECOVERY event context
6550 	 * instead of scheduling work. In that way complete recovery
6551 	 * will be done as part of DRIVER_RECOVERY event and get
6552 	 * serialized with other events.
6553 	 */
6554 	cnss_recovery_handler(plat_priv);
6555 }
6556 
6557 static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl)
6558 {
6559 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6560 
6561 	return cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_MHI);
6562 }
6563 
6564 static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl)
6565 {
6566 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6567 
6568 	cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_MHI);
6569 }
6570 
6571 void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
6572 				 char *prefix_name, char *name)
6573 {
6574 	struct cnss_plat_data *plat_priv;
6575 
6576 	if (!pci_priv)
6577 		return;
6578 
6579 	plat_priv = pci_priv->plat_priv;
6580 
6581 	if (!plat_priv->use_fw_path_with_prefix) {
6582 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6583 		return;
6584 	}
6585 
6586 	switch (pci_priv->device_id) {
6587 	case QCN7605_DEVICE_ID:
6588 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6589 			  QCN7605_PATH_PREFIX "%s", name);
6590 		break;
6591 	case QCA6390_DEVICE_ID:
6592 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6593 			  QCA6390_PATH_PREFIX "%s", name);
6594 		break;
6595 	case QCA6490_DEVICE_ID:
6596 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6597 			  QCA6490_PATH_PREFIX "%s", name);
6598 		break;
6599 	case KIWI_DEVICE_ID:
6600 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6601 			  KIWI_PATH_PREFIX "%s", name);
6602 		break;
6603 	case MANGO_DEVICE_ID:
6604 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6605 			  MANGO_PATH_PREFIX "%s", name);
6606 		break;
6607 	case PEACH_DEVICE_ID:
6608 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6609 			  PEACH_PATH_PREFIX "%s", name);
6610 		break;
6611 	default:
6612 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6613 		break;
6614 	}
6615 
6616 	cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
6617 }
6618 
6619 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
6620 {
6621 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6622 
6623 	switch (pci_priv->device_id) {
6624 	case QCA6390_DEVICE_ID:
6625 		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
6626 			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
6627 				    pci_priv->device_id,
6628 				    plat_priv->device_version.major_version);
6629 			return -EINVAL;
6630 		}
6631 		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
6632 					    FW_V2_FILE_NAME);
6633 		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
6634 			 FW_V2_FILE_NAME);
6635 		break;
6636 	case QCA6490_DEVICE_ID:
6637 	case KIWI_DEVICE_ID:
6638 	case MANGO_DEVICE_ID:
6639 	case PEACH_DEVICE_ID:
6640 		switch (plat_priv->device_version.major_version) {
6641 		case FW_V2_NUMBER:
6642 				cnss_pci_add_fw_prefix_name(pci_priv,
6643 							    plat_priv->firmware_name,
6644 							    FW_V2_FILE_NAME);
6645 				snprintf(plat_priv->fw_fallback_name,
6646 					 MAX_FIRMWARE_NAME_LEN,
6647 					 FW_V2_FILE_NAME);
6648 			break;
6649 		default:
6650 			cnss_pci_add_fw_prefix_name(pci_priv,
6651 						    plat_priv->firmware_name,
6652 						    DEFAULT_FW_FILE_NAME);
6653 			snprintf(plat_priv->fw_fallback_name,
6654 				 MAX_FIRMWARE_NAME_LEN,
6655 				 DEFAULT_FW_FILE_NAME);
6656 			break;
6657 		}
6658 		break;
6659 	default:
6660 		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
6661 					    DEFAULT_FW_FILE_NAME);
6662 		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
6663 			 DEFAULT_FW_FILE_NAME);
6664 		break;
6665 	}
6666 
6667 	cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
6668 		    plat_priv->firmware_name, plat_priv->fw_fallback_name);
6669 
6670 	return 0;
6671 }
6672 
6673 static char *cnss_mhi_notify_status_to_str(enum mhi_callback status)
6674 {
6675 	switch (status) {
6676 	case MHI_CB_IDLE:
6677 		return "IDLE";
6678 	case MHI_CB_EE_RDDM:
6679 		return "RDDM";
6680 	case MHI_CB_SYS_ERROR:
6681 		return "SYS_ERROR";
6682 	case MHI_CB_FATAL_ERROR:
6683 		return "FATAL_ERROR";
6684 	case MHI_CB_EE_MISSION_MODE:
6685 		return "MISSION_MODE";
6686 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
6687 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
6688 	case MHI_CB_FALLBACK_IMG:
6689 		return "FW_FALLBACK";
6690 #endif
6691 	default:
6692 		return "UNKNOWN";
6693 	}
6694 };
6695 
6696 static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
6697 {
6698 	struct cnss_pci_data *pci_priv =
6699 		from_timer(pci_priv, t, dev_rddm_timer);
6700 	enum mhi_ee_type mhi_ee;
6701 
6702 	if (!pci_priv)
6703 		return;
6704 
6705 	cnss_fatal_err("Timeout waiting for RDDM notification\n");
6706 
6707 	mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
6708 	if (mhi_ee == MHI_EE_PBL)
6709 		cnss_pr_err("Device MHI EE is PBL, unable to collect dump\n");
6710 
6711 	if (mhi_ee == MHI_EE_RDDM) {
6712 		cnss_pr_info("Device MHI EE is RDDM, try to collect dump\n");
6713 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6714 				       CNSS_REASON_RDDM);
6715 	} else {
6716 		if (!cnss_pci_assert_host_sol(pci_priv))
6717 			return;
6718 		cnss_mhi_debug_reg_dump(pci_priv);
6719 		cnss_pci_bhi_debug_reg_dump(pci_priv);
6720 		cnss_pci_soc_scratch_reg_dump(pci_priv);
6721 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6722 				       CNSS_REASON_TIMEOUT);
6723 	}
6724 }
6725 
6726 static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
6727 {
6728 	struct cnss_pci_data *pci_priv =
6729 		from_timer(pci_priv, t, boot_debug_timer);
6730 
6731 	if (!pci_priv)
6732 		return;
6733 
6734 	if (cnss_pci_check_link_status(pci_priv))
6735 		return;
6736 
6737 	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
6738 		return;
6739 
6740 	if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
6741 		return;
6742 
6743 	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE))
6744 		return;
6745 
6746 	cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
6747 		    BOOT_DEBUG_TIMEOUT_MS / 1000);
6748 	cnss_mhi_debug_reg_dump(pci_priv);
6749 	cnss_pci_bhi_debug_reg_dump(pci_priv);
6750 	cnss_pci_soc_scratch_reg_dump(pci_priv);
6751 	cnss_pci_dump_bl_sram_mem(pci_priv);
6752 
6753 	mod_timer(&pci_priv->boot_debug_timer,
6754 		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
6755 }
6756 
6757 static int cnss_pci_handle_mhi_sys_err(struct cnss_pci_data *pci_priv)
6758 {
6759 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6760 
6761 	cnss_ignore_qmi_failure(true);
6762 	set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6763 	del_timer(&plat_priv->fw_boot_timer);
6764 	reinit_completion(&pci_priv->wake_event_complete);
6765 	mod_timer(&pci_priv->dev_rddm_timer,
6766 		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
6767 	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6768 
6769 	return 0;
6770 }
6771 
6772 int cnss_pci_handle_dev_sol_irq(struct cnss_pci_data *pci_priv)
6773 {
6774 	return cnss_pci_handle_mhi_sys_err(pci_priv);
6775 }
6776 
6777 static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl,
6778 				   enum mhi_callback reason)
6779 {
6780 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6781 	struct cnss_plat_data *plat_priv;
6782 	enum cnss_recovery_reason cnss_reason;
6783 
6784 	if (!pci_priv) {
6785 		cnss_pr_err("pci_priv is NULL");
6786 		return;
6787 	}
6788 
6789 	plat_priv = pci_priv->plat_priv;
6790 
6791 	if (reason != MHI_CB_IDLE)
6792 		cnss_pr_dbg("MHI status cb is called with reason %s(%d)\n",
6793 			    cnss_mhi_notify_status_to_str(reason), reason);
6794 
6795 	switch (reason) {
6796 	case MHI_CB_IDLE:
6797 	case MHI_CB_EE_MISSION_MODE:
6798 		return;
6799 	case MHI_CB_FATAL_ERROR:
6800 		cnss_ignore_qmi_failure(true);
6801 		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6802 		del_timer(&plat_priv->fw_boot_timer);
6803 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6804 		cnss_reason = CNSS_REASON_DEFAULT;
6805 		break;
6806 	case MHI_CB_SYS_ERROR:
6807 		cnss_pci_handle_mhi_sys_err(pci_priv);
6808 		return;
6809 	case MHI_CB_EE_RDDM:
6810 		cnss_ignore_qmi_failure(true);
6811 		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6812 		del_timer(&plat_priv->fw_boot_timer);
6813 		del_timer(&pci_priv->dev_rddm_timer);
6814 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6815 		cnss_reason = CNSS_REASON_RDDM;
6816 		break;
6817 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
6818 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
6819 	case MHI_CB_FALLBACK_IMG:
6820 		plat_priv->use_fw_path_with_prefix = false;
6821 		cnss_pci_update_fw_name(pci_priv);
6822 		return;
6823 #endif
6824 
6825 	default:
6826 		cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
6827 		return;
6828 	}
6829 
6830 	cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason);
6831 }
6832 
6833 static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
6834 {
6835 	int ret, num_vectors, i;
6836 	u32 user_base_data, base_vector;
6837 	int *irq;
6838 	unsigned int msi_data;
6839 	bool is_one_msi = false;
6840 
6841 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
6842 					   MHI_MSI_NAME, &num_vectors,
6843 					   &user_base_data, &base_vector);
6844 	if (ret)
6845 		return ret;
6846 
6847 	if (cnss_pci_is_one_msi(pci_priv)) {
6848 		is_one_msi = true;
6849 		num_vectors = cnss_pci_get_one_msi_mhi_irq_array_size(pci_priv);
6850 	}
6851 	cnss_pr_dbg("Number of assigned MSI for MHI is %d, base vector is %d\n",
6852 		    num_vectors, base_vector);
6853 
6854 	irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
6855 	if (!irq)
6856 		return -ENOMEM;
6857 
6858 	for (i = 0; i < num_vectors; i++) {
6859 		msi_data = base_vector;
6860 		if (!is_one_msi)
6861 			msi_data += i;
6862 		irq[i] = cnss_get_msi_irq(&pci_priv->pci_dev->dev, msi_data);
6863 	}
6864 
6865 	pci_priv->mhi_ctrl->irq = irq;
6866 	pci_priv->mhi_ctrl->nr_irqs = num_vectors;
6867 
6868 	return 0;
6869 }
6870 
6871 static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
6872 			     struct mhi_link_info *link_info)
6873 {
6874 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6875 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6876 	int ret = 0;
6877 
6878 	cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
6879 		    link_info->target_link_speed,
6880 		    link_info->target_link_width);
6881 
6882 	/* It has to set target link speed here before setting link bandwidth
6883 	 * when device requests link speed change. This can avoid setting link
6884 	 * bandwidth getting rejected if requested link speed is higher than
6885 	 * current one.
6886 	 */
6887 	ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num,
6888 					  link_info->target_link_speed);
6889 	if (ret)
6890 		cnss_pr_err("Failed to set target link speed to 0x%x, err = %d\n",
6891 			    link_info->target_link_speed, ret);
6892 
6893 	ret = cnss_pci_set_link_bandwidth(pci_priv,
6894 					  link_info->target_link_speed,
6895 					  link_info->target_link_width);
6896 
6897 	if (ret) {
6898 		cnss_pr_err("Failed to set link bandwidth, err = %d\n", ret);
6899 		return ret;
6900 	}
6901 
6902 	pci_priv->def_link_speed = link_info->target_link_speed;
6903 	pci_priv->def_link_width = link_info->target_link_width;
6904 
6905 	return 0;
6906 }
6907 
6908 static int cnss_mhi_read_reg(struct mhi_controller *mhi_ctrl,
6909 			     void __iomem *addr, u32 *out)
6910 {
6911 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6912 
6913 	u32 tmp = readl_relaxed(addr);
6914 
6915 	/* Unexpected value, query the link status */
6916 	if (PCI_INVALID_READ(tmp) &&
6917 	    cnss_pci_check_link_status(pci_priv))
6918 		return -EIO;
6919 
6920 	*out = tmp;
6921 
6922 	return 0;
6923 }
6924 
6925 static void cnss_mhi_write_reg(struct mhi_controller *mhi_ctrl,
6926 			       void __iomem *addr, u32 val)
6927 {
6928 	writel_relaxed(val, addr);
6929 }
6930 
6931 static int cnss_get_mhi_soc_info(struct cnss_plat_data *plat_priv,
6932 				 struct mhi_controller *mhi_ctrl)
6933 {
6934 	int ret = 0;
6935 
6936 	ret = mhi_get_soc_info(mhi_ctrl);
6937 	if (ret)
6938 		goto exit;
6939 
6940 	plat_priv->device_version.family_number = mhi_ctrl->family_number;
6941 	plat_priv->device_version.device_number = mhi_ctrl->device_number;
6942 	plat_priv->device_version.major_version = mhi_ctrl->major_version;
6943 	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
6944 
6945 	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
6946 		    plat_priv->device_version.family_number,
6947 		    plat_priv->device_version.device_number,
6948 		    plat_priv->device_version.major_version,
6949 		    plat_priv->device_version.minor_version);
6950 
6951 	/* Only keep lower 4 bits as real device major version */
6952 	plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK;
6953 
6954 exit:
6955 	return ret;
6956 }
6957 
6958 static bool cnss_is_tme_supported(struct cnss_pci_data *pci_priv)
6959 {
6960 	if (!pci_priv) {
6961 		cnss_pr_dbg("pci_priv is NULL");
6962 		return false;
6963 	}
6964 
6965 	switch (pci_priv->device_id) {
6966 	case PEACH_DEVICE_ID:
6967 		return true;
6968 	default:
6969 		return false;
6970 	}
6971 }
6972 
6973 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
6974 {
6975 	int ret = 0;
6976 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6977 	struct pci_dev *pci_dev = pci_priv->pci_dev;
6978 	struct mhi_controller *mhi_ctrl;
6979 	phys_addr_t bar_start;
6980 	const struct mhi_controller_config *cnss_mhi_config =
6981 						&cnss_mhi_config_default;
6982 
6983 	ret = cnss_qmi_init(plat_priv);
6984 	if (ret)
6985 		return -EINVAL;
6986 
6987 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
6988 		return 0;
6989 
6990 	mhi_ctrl = mhi_alloc_controller();
6991 	if (!mhi_ctrl) {
6992 		cnss_pr_err("Invalid MHI controller context\n");
6993 		return -EINVAL;
6994 	}
6995 
6996 	pci_priv->mhi_ctrl = mhi_ctrl;
6997 	mhi_ctrl->cntrl_dev = &pci_dev->dev;
6998 
6999 	mhi_ctrl->fw_image = plat_priv->firmware_name;
7000 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
7001 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
7002 	mhi_ctrl->fallback_fw_image = plat_priv->fw_fallback_name;
7003 #endif
7004 
7005 	mhi_ctrl->regs = pci_priv->bar;
7006 	mhi_ctrl->reg_len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
7007 	bar_start = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
7008 	cnss_pr_dbg("BAR starts at %pa, length is %x\n",
7009 		    &bar_start, mhi_ctrl->reg_len);
7010 
7011 	ret = cnss_pci_get_mhi_msi(pci_priv);
7012 	if (ret) {
7013 		cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
7014 		goto free_mhi_ctrl;
7015 	}
7016 
7017 	if (cnss_pci_is_one_msi(pci_priv))
7018 		mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
7019 
7020 	if (pci_priv->smmu_s1_enable) {
7021 		mhi_ctrl->iova_start = pci_priv->smmu_iova_start;
7022 		mhi_ctrl->iova_stop = pci_priv->smmu_iova_start +
7023 					pci_priv->smmu_iova_len;
7024 	} else {
7025 		mhi_ctrl->iova_start = 0;
7026 		mhi_ctrl->iova_stop = pci_priv->dma_bit_mask;
7027 	}
7028 
7029 	mhi_ctrl->status_cb = cnss_mhi_notify_status;
7030 	mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
7031 	mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
7032 	mhi_ctrl->read_reg = cnss_mhi_read_reg;
7033 	mhi_ctrl->write_reg = cnss_mhi_write_reg;
7034 
7035 	mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
7036 	if (!mhi_ctrl->rddm_size)
7037 		mhi_ctrl->rddm_size = RAMDUMP_SIZE_DEFAULT;
7038 
7039 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
7040 		mhi_ctrl->sbl_size = SZ_256K;
7041 	else
7042 		mhi_ctrl->sbl_size = SZ_512K;
7043 
7044 	mhi_ctrl->seg_len = SZ_512K;
7045 	mhi_ctrl->fbc_download = true;
7046 
7047 	ret = cnss_get_mhi_soc_info(plat_priv, mhi_ctrl);
7048 	if (ret)
7049 		goto free_mhi_irq;
7050 
7051 	/* Satellite config only supported on KIWI V2 and later chipset */
7052 	if (plat_priv->device_id <= QCA6490_DEVICE_ID ||
7053 			(plat_priv->device_id == KIWI_DEVICE_ID &&
7054 			 plat_priv->device_version.major_version == 1)) {
7055 		if (plat_priv->device_id == QCN7605_DEVICE_ID)
7056 			cnss_mhi_config = &cnss_mhi_config_genoa;
7057 		else
7058 			cnss_mhi_config = &cnss_mhi_config_no_satellite;
7059 	}
7060 
7061 	/* DIAG no longer supported on PEACH and later chipset */
7062 	if (plat_priv->device_id >= PEACH_DEVICE_ID) {
7063 		cnss_mhi_config = &cnss_mhi_config_no_diag;
7064 	}
7065 
7066 	mhi_ctrl->tme_supported_image = cnss_is_tme_supported(pci_priv);
7067 
7068 	ret = mhi_register_controller(mhi_ctrl, cnss_mhi_config);
7069 	if (ret) {
7070 		cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
7071 		goto free_mhi_irq;
7072 	}
7073 
7074 	/* MHI satellite driver only needs to connect when DRV is supported */
7075 	if (cnss_pci_get_drv_supported(pci_priv))
7076 		cnss_mhi_controller_set_base(pci_priv, bar_start);
7077 
7078 	cnss_get_bwscal_info(plat_priv);
7079 	cnss_pr_dbg("no_bwscale: %d\n", plat_priv->no_bwscale);
7080 
7081 	/* BW scale CB needs to be set after registering MHI per requirement */
7082 	if (!plat_priv->no_bwscale)
7083 		cnss_mhi_controller_set_bw_scale_cb(pci_priv,
7084 						    cnss_mhi_bw_scale);
7085 
7086 	ret = cnss_pci_update_fw_name(pci_priv);
7087 	if (ret)
7088 		goto unreg_mhi;
7089 
7090 	return 0;
7091 
7092 unreg_mhi:
7093 	mhi_unregister_controller(mhi_ctrl);
7094 free_mhi_irq:
7095 	kfree(mhi_ctrl->irq);
7096 free_mhi_ctrl:
7097 	mhi_free_controller(mhi_ctrl);
7098 
7099 	return ret;
7100 }
7101 
7102 static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
7103 {
7104 	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
7105 
7106 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
7107 		return;
7108 
7109 	mhi_unregister_controller(mhi_ctrl);
7110 	kfree(mhi_ctrl->irq);
7111 	mhi_ctrl->irq = NULL;
7112 	mhi_free_controller(mhi_ctrl);
7113 	pci_priv->mhi_ctrl = NULL;
7114 }
7115 
7116 static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
7117 {
7118 	switch (pci_priv->device_id) {
7119 	case QCA6390_DEVICE_ID:
7120 		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6390;
7121 		pci_priv->wcss_reg = wcss_reg_access_seq;
7122 		pci_priv->pcie_reg = pcie_reg_access_seq;
7123 		pci_priv->wlaon_reg = wlaon_reg_access_seq;
7124 		pci_priv->syspm_reg = syspm_reg_access_seq;
7125 
7126 		/* Configure WDOG register with specific value so that we can
7127 		 * know if HW is in the process of WDOG reset recovery or not
7128 		 * when reading the registers.
7129 		 */
7130 		cnss_pci_reg_write
7131 		(pci_priv,
7132 		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
7133 		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
7134 		break;
7135 	case QCA6490_DEVICE_ID:
7136 		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6490;
7137 		pci_priv->wlaon_reg = wlaon_reg_access_seq;
7138 		break;
7139 	default:
7140 		return;
7141 	}
7142 }
7143 
7144 #if !IS_ENABLED(CONFIG_ARCH_QCOM)
7145 static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
7146 {
7147 	return 0;
7148 }
7149 
7150 static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
7151 {
7152 	struct cnss_pci_data *pci_priv = data;
7153 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
7154 	enum rpm_status status;
7155 	struct device *dev;
7156 
7157 	pci_priv->wake_counter++;
7158 	cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
7159 		    pci_priv->wake_irq, pci_priv->wake_counter);
7160 
7161 	/* Make sure abort current suspend */
7162 	cnss_pm_stay_awake(plat_priv);
7163 	cnss_pm_relax(plat_priv);
7164 	/* Above two pm* API calls will abort system suspend only when
7165 	 * plat_dev->dev->ws is initiated by device_init_wakeup() API, and
7166 	 * calling pm_system_wakeup() is just to guarantee system suspend
7167 	 * can be aborted if it is not initiated in any case.
7168 	 */
7169 	pm_system_wakeup();
7170 
7171 	dev = &pci_priv->pci_dev->dev;
7172 	status = dev->power.runtime_status;
7173 
7174 	if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
7175 	     cnss_pci_get_auto_suspended(pci_priv)) ||
7176 	    (status == RPM_SUSPENDING || status == RPM_SUSPENDED)) {
7177 		cnss_pci_set_monitor_wake_intr(pci_priv, false);
7178 		cnss_pci_pm_request_resume(pci_priv);
7179 	}
7180 
7181 	return IRQ_HANDLED;
7182 }
7183 
7184 /**
7185  * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
7186  * @pci_priv: driver PCI bus context pointer
7187  *
7188  * This function initializes WLAN PCI wake GPIO and corresponding
7189  * interrupt. It should be used in non-MSM platforms whose PCIe
7190  * root complex driver doesn't handle the GPIO.
7191  *
7192  * Return: 0 for success or skip, negative value for error
7193  */
7194 static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
7195 {
7196 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
7197 	struct device *dev = &plat_priv->plat_dev->dev;
7198 	int ret = 0;
7199 
7200 	pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
7201 						"wlan-pci-wake-gpio", 0);
7202 	if (pci_priv->wake_gpio < 0)
7203 		goto out;
7204 
7205 	cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
7206 		    pci_priv->wake_gpio);
7207 
7208 	ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
7209 	if (ret) {
7210 		cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
7211 			    ret);
7212 		goto out;
7213 	}
7214 
7215 	gpio_direction_input(pci_priv->wake_gpio);
7216 	pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);
7217 
7218 	ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
7219 			  IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
7220 	if (ret) {
7221 		cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
7222 		goto free_gpio;
7223 	}
7224 
7225 	ret = enable_irq_wake(pci_priv->wake_irq);
7226 	if (ret) {
7227 		cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
7228 		goto free_irq;
7229 	}
7230 
7231 	return 0;
7232 
7233 free_irq:
7234 	free_irq(pci_priv->wake_irq, pci_priv);
7235 free_gpio:
7236 	gpio_free(pci_priv->wake_gpio);
7237 out:
7238 	return ret;
7239 }
7240 
7241 static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
7242 {
7243 	if (pci_priv->wake_gpio < 0)
7244 		return;
7245 
7246 	disable_irq_wake(pci_priv->wake_irq);
7247 	free_irq(pci_priv->wake_irq, pci_priv);
7248 	gpio_free(pci_priv->wake_gpio);
7249 }
7250 #endif
7251 
7252 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
7253 static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
7254 {
7255 	int ret = 0;
7256 
7257 	/* in the dual wlan card case, if call pci_register_driver after
7258 	 * finishing the first pcie device enumeration, it will cause
7259 	 * the cnss_pci_probe called in advance with the second wlan card,
7260 	 * and the sequence like this:
7261 	 * enter msm_pcie_enumerate -> pci_bus_add_devices -> cnss_pci_probe
7262 	 * -> exit msm_pcie_enumerate.
7263 	 * But the correct sequence we expected is like this:
7264 	 * enter msm_pcie_enumerate -> pci_bus_add_devices  ->
7265 	 * exit msm_pcie_enumerate -> cnss_pci_probe.
7266 	 * And this unexpected sequence will make the second wlan card do
7267 	 * pcie link suspend while the pcie enumeration not finished.
7268 	 * So need to add below logical to avoid doing pcie link suspend
7269 	 * if the enumeration has not finish.
7270 	 */
7271 	plat_priv->enumerate_done = true;
7272 
7273 	/* Now enumeration is finished, try to suspend PCIe link */
7274 	if (plat_priv->bus_priv) {
7275 		struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
7276 		struct pci_dev *pci_dev = pci_priv->pci_dev;
7277 
7278 		switch (pci_dev->device) {
7279 		case QCA6390_DEVICE_ID:
7280 			cnss_pci_set_wlaon_pwr_ctrl(pci_priv,
7281 						    false,
7282 						    true,
7283 						    false);
7284 
7285 			cnss_pci_suspend_pwroff(pci_dev);
7286 			break;
7287 		default:
7288 			cnss_pr_err("Unknown PCI device found: 0x%x\n",
7289 				    pci_dev->device);
7290 			ret = -ENODEV;
7291 		}
7292 	}
7293 
7294 	return ret;
7295 }
7296 #else
7297 static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
7298 {
7299 	return 0;
7300 }
7301 #endif
7302 
7303 /* Setting to use this cnss_pm_domain ops will let PM framework override the
7304  * ops from dev->bus->pm which is pci_dev_pm_ops from pci-driver.c. This ops
7305  * has to take care everything device driver needed which is currently done
7306  * from pci_dev_pm_ops.
7307  */
7308 static struct dev_pm_domain cnss_pm_domain = {
7309 	.ops = {
7310 		SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
7311 		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
7312 					      cnss_pci_resume_noirq)
7313 		SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend,
7314 				   cnss_pci_runtime_resume,
7315 				   cnss_pci_runtime_idle)
7316 	}
7317 };
7318 
7319 static int cnss_pci_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
7320 {
7321 	struct device_node *child;
7322 	u32 id, i;
7323 	int id_n, ret;
7324 
7325 	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG)
7326 		return 0;
7327 
7328 	if (!plat_priv->device_id) {
7329 		cnss_pr_err("Invalid device id\n");
7330 		return -EINVAL;
7331 	}
7332 
7333 	for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
7334 					 child) {
7335 		if (strcmp(child->name, "chip_cfg"))
7336 			continue;
7337 
7338 		id_n = of_property_count_u32_elems(child, "supported-ids");
7339 		if (id_n <= 0) {
7340 			cnss_pr_err("Device id is NOT set\n");
7341 			return -EINVAL;
7342 		}
7343 
7344 		for (i = 0; i < id_n; i++) {
7345 			ret = of_property_read_u32_index(child,
7346 							 "supported-ids",
7347 							 i, &id);
7348 			if (ret) {
7349 				cnss_pr_err("Failed to read supported ids\n");
7350 				return -EINVAL;
7351 			}
7352 
7353 			if (id == plat_priv->device_id) {
7354 				plat_priv->dev_node = child;
7355 				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
7356 					    child->name, i, id);
7357 				return 0;
7358 			}
7359 		}
7360 	}
7361 
7362 	return -EINVAL;
7363 }
7364 
7365 #ifdef CONFIG_CNSS2_CONDITIONAL_POWEROFF
7366 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
7367 {
7368 	bool suspend_pwroff;
7369 
7370 	switch (pci_dev->device) {
7371 	case QCA6390_DEVICE_ID:
7372 	case QCA6490_DEVICE_ID:
7373 		suspend_pwroff = false;
7374 		break;
7375 	default:
7376 		suspend_pwroff = true;
7377 	}
7378 
7379 	return suspend_pwroff;
7380 }
7381 #else
7382 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
7383 {
7384 	return true;
7385 }
7386 #endif
7387 
7388 static int cnss_pci_set_gen2_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7389 {
7390 	int ret;
7391 
7392 	/* Always set initial target PCIe link speed to Gen2 for QCA6490 device
7393 	 * since there may be link issues if it boots up with Gen3 link speed.
7394 	 * Device is able to change it later at any time. It will be rejected
7395 	 * if requested speed is higher than the one specified in PCIe DT.
7396 	 */
7397 	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
7398 					  PCI_EXP_LNKSTA_CLS_5_0GB);
7399 	if (ret && ret != -EPROBE_DEFER)
7400 		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen2, err = %d\n",
7401 				rc_num, ret);
7402 
7403 	return ret;
7404 }
7405 
7406 #ifdef CONFIG_CNSS2_ENUM_WITH_LOW_SPEED
7407 static void
7408 cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7409 {
7410 	int ret;
7411 
7412 	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
7413 					  PCI_EXP_LNKSTA_CLS_2_5GB);
7414 	if (ret)
7415 		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen1, err = %d\n",
7416 			     rc_num, ret);
7417 }
7418 
7419 static void
7420 cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
7421 {
7422 	int ret;
7423 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
7424 
7425 	/* if not Genoa, do not restore rc speed */
7426 	if (pci_priv->device_id == QCA6490_DEVICE_ID) {
7427 		cnss_pci_set_gen2_speed(plat_priv, plat_priv->rc_num);
7428 	} else if (pci_priv->device_id != QCN7605_DEVICE_ID) {
7429 		/* The request 0 will reset maximum GEN speed to default */
7430 		ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num, 0);
7431 		if (ret)
7432 			cnss_pr_err("Failed to reset max PCIe RC%x link speed to default, err = %d\n",
7433 				     plat_priv->rc_num, ret);
7434 	}
7435 }
7436 
7437 static void
7438 cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
7439 {
7440 	int ret;
7441 
7442 	/* suspend/resume will trigger retain to re-establish link speed */
7443 	ret = cnss_suspend_pci_link(pci_priv);
7444 	if (ret)
7445 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
7446 
7447 	ret = cnss_resume_pci_link(pci_priv);
7448 	if (ret)
7449 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
7450 
7451 	cnss_pci_get_link_status(pci_priv);
7452 }
7453 #else
7454 static void
7455 cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7456 {
7457 }
7458 
7459 static void
7460 cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
7461 {
7462 }
7463 
7464 static void
7465 cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
7466 {
7467 }
7468 #endif
7469 
7470 static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev)
7471 {
7472 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
7473 	int rc_num = pci_dev->bus->domain_nr;
7474 	struct cnss_plat_data *plat_priv;
7475 	int ret = 0;
7476 	bool suspend_pwroff = cnss_should_suspend_pwroff(pci_dev);
7477 
7478 	plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);
7479 
7480 	if (suspend_pwroff) {
7481 		ret = cnss_suspend_pci_link(pci_priv);
7482 		if (ret)
7483 			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
7484 				    ret);
7485 		cnss_power_off_device(plat_priv);
7486 	} else {
7487 		cnss_pr_dbg("bus suspend and dev power off disabled for device [0x%x]\n",
7488 			    pci_dev->device);
7489 		cnss_pci_link_retrain_trigger(pci_priv);
7490 	}
7491 }
7492 
7493 static int cnss_pci_probe(struct pci_dev *pci_dev,
7494 			  const struct pci_device_id *id)
7495 {
7496 	int ret = 0;
7497 	struct cnss_pci_data *pci_priv;
7498 	struct device *dev = &pci_dev->dev;
7499 	int rc_num = pci_dev->bus->domain_nr;
7500 	struct cnss_plat_data *plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);
7501 
7502 	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x rc_num %d\n",
7503 		    id->vendor, pci_dev->device, rc_num);
7504 	if (!plat_priv) {
7505 		cnss_pr_err("Find match plat_priv with rc number failure\n");
7506 		ret = -ENODEV;
7507 		goto out;
7508 	}
7509 
7510 	pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
7511 	if (!pci_priv) {
7512 		ret = -ENOMEM;
7513 		goto out;
7514 	}
7515 
7516 	pci_priv->pci_link_state = PCI_LINK_UP;
7517 	pci_priv->plat_priv = plat_priv;
7518 	pci_priv->pci_dev = pci_dev;
7519 	pci_priv->pci_device_id = id;
7520 	pci_priv->device_id = pci_dev->device;
7521 	cnss_set_pci_priv(pci_dev, pci_priv);
7522 	plat_priv->device_id = pci_dev->device;
7523 	plat_priv->bus_priv = pci_priv;
7524 	mutex_init(&pci_priv->bus_lock);
7525 	if (plat_priv->use_pm_domain)
7526 		dev->pm_domain = &cnss_pm_domain;
7527 
7528 	cnss_pci_restore_rc_speed(pci_priv);
7529 
7530 	ret = cnss_pci_get_dev_cfg_node(plat_priv);
7531 	if (ret) {
7532 		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
7533 		goto reset_ctx;
7534 	}
7535 
7536 	cnss_get_sleep_clk_supported(plat_priv);
7537 
7538 	ret = cnss_dev_specific_power_on(plat_priv);
7539 	if (ret < 0)
7540 		goto reset_ctx;
7541 
7542 	cnss_pci_of_reserved_mem_device_init(pci_priv);
7543 
7544 	ret = cnss_register_subsys(plat_priv);
7545 	if (ret)
7546 		goto reset_ctx;
7547 
7548 	ret = cnss_register_ramdump(plat_priv);
7549 	if (ret)
7550 		goto unregister_subsys;
7551 
7552 	ret = cnss_pci_init_smmu(pci_priv);
7553 	if (ret)
7554 		goto unregister_ramdump;
7555 
7556 	/* update drv support flag */
7557 	cnss_pci_update_drv_supported(pci_priv);
7558 
7559 	cnss_update_supported_link_info(pci_priv);
7560 
7561 	init_completion(&pci_priv->wake_event_complete);
7562 
7563 	ret = cnss_reg_pci_event(pci_priv);
7564 	if (ret) {
7565 		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
7566 		goto deinit_smmu;
7567 	}
7568 
7569 	ret = cnss_pci_enable_bus(pci_priv);
7570 	if (ret)
7571 		goto dereg_pci_event;
7572 
7573 	ret = cnss_pci_enable_msi(pci_priv);
7574 	if (ret)
7575 		goto disable_bus;
7576 
7577 	ret = cnss_pci_register_mhi(pci_priv);
7578 	if (ret)
7579 		goto disable_msi;
7580 
7581 	switch (pci_dev->device) {
7582 	case QCA6174_DEVICE_ID:
7583 		pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
7584 				     &pci_priv->revision_id);
7585 		break;
7586 	case QCA6290_DEVICE_ID:
7587 	case QCA6390_DEVICE_ID:
7588 	case QCN7605_DEVICE_ID:
7589 	case QCA6490_DEVICE_ID:
7590 	case KIWI_DEVICE_ID:
7591 	case MANGO_DEVICE_ID:
7592 	case PEACH_DEVICE_ID:
7593 		if ((cnss_is_dual_wlan_enabled() &&
7594 		     plat_priv->enumerate_done) || !cnss_is_dual_wlan_enabled())
7595 			cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false,
7596 						    false);
7597 
7598 		timer_setup(&pci_priv->dev_rddm_timer,
7599 			    cnss_dev_rddm_timeout_hdlr, 0);
7600 		timer_setup(&pci_priv->boot_debug_timer,
7601 			    cnss_boot_debug_timeout_hdlr, 0);
7602 		INIT_DELAYED_WORK(&pci_priv->time_sync_work,
7603 				  cnss_pci_time_sync_work_hdlr);
7604 		cnss_pci_get_link_status(pci_priv);
7605 		cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false);
7606 		cnss_pci_wake_gpio_init(pci_priv);
7607 		break;
7608 	default:
7609 		cnss_pr_err("Unknown PCI device found: 0x%x\n",
7610 			    pci_dev->device);
7611 		ret = -ENODEV;
7612 		goto unreg_mhi;
7613 	}
7614 
7615 	cnss_pci_config_regs(pci_priv);
7616 	if (EMULATION_HW)
7617 		goto out;
7618 	if (cnss_is_dual_wlan_enabled() && !plat_priv->enumerate_done)
7619 		goto probe_done;
7620 	cnss_pci_suspend_pwroff(pci_dev);
7621 
7622 probe_done:
7623 	set_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
7624 
7625 	return 0;
7626 
7627 unreg_mhi:
7628 	cnss_pci_unregister_mhi(pci_priv);
7629 disable_msi:
7630 	cnss_pci_disable_msi(pci_priv);
7631 disable_bus:
7632 	cnss_pci_disable_bus(pci_priv);
7633 dereg_pci_event:
7634 	cnss_dereg_pci_event(pci_priv);
7635 deinit_smmu:
7636 	cnss_pci_deinit_smmu(pci_priv);
7637 unregister_ramdump:
7638 	cnss_unregister_ramdump(plat_priv);
7639 unregister_subsys:
7640 	cnss_unregister_subsys(plat_priv);
7641 reset_ctx:
7642 	plat_priv->bus_priv = NULL;
7643 out:
7644 	return ret;
7645 }
7646 
7647 static void cnss_pci_remove(struct pci_dev *pci_dev)
7648 {
7649 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
7650 	struct cnss_plat_data *plat_priv =
7651 		cnss_bus_dev_to_plat_priv(&pci_dev->dev);
7652 
7653 	clear_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
7654 	cnss_pci_unregister_driver_hdlr(pci_priv);
7655 	cnss_pci_free_aux_mem(pci_priv);
7656 	cnss_pci_free_tme_lite_mem(pci_priv);
7657 	cnss_pci_free_tme_opt_file_mem(pci_priv);
7658 	cnss_pci_free_m3_mem(pci_priv);
7659 	cnss_pci_free_fw_mem(pci_priv);
7660 	cnss_pci_free_qdss_mem(pci_priv);
7661 
7662 	switch (pci_dev->device) {
7663 	case QCA6290_DEVICE_ID:
7664 	case QCA6390_DEVICE_ID:
7665 	case QCN7605_DEVICE_ID:
7666 	case QCA6490_DEVICE_ID:
7667 	case KIWI_DEVICE_ID:
7668 	case MANGO_DEVICE_ID:
7669 	case PEACH_DEVICE_ID:
7670 		cnss_pci_wake_gpio_deinit(pci_priv);
7671 		del_timer(&pci_priv->boot_debug_timer);
7672 		del_timer(&pci_priv->dev_rddm_timer);
7673 		break;
7674 	default:
7675 		break;
7676 	}
7677 
7678 	cnss_pci_unregister_mhi(pci_priv);
7679 	cnss_pci_disable_msi(pci_priv);
7680 	cnss_pci_disable_bus(pci_priv);
7681 	cnss_dereg_pci_event(pci_priv);
7682 	cnss_pci_deinit_smmu(pci_priv);
7683 	if (plat_priv) {
7684 		cnss_unregister_ramdump(plat_priv);
7685 		cnss_unregister_subsys(plat_priv);
7686 		plat_priv->bus_priv = NULL;
7687 	} else {
7688 		cnss_pr_err("Plat_priv is null, Unable to unregister ramdump,subsys\n");
7689 	}
7690 }
7691 
7692 static const struct pci_device_id cnss_pci_id_table[] = {
7693 	{ QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7694 	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7695 	{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7696 	{ QCN7605_VENDOR_ID, QCN7605_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7697 	{ QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7698 	{ KIWI_VENDOR_ID, KIWI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7699 	{ MANGO_VENDOR_ID, MANGO_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7700 	{ PEACH_VENDOR_ID, PEACH_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7701 	{ 0 }
7702 };
7703 MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
7704 
7705 static const struct dev_pm_ops cnss_pm_ops = {
7706 	SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
7707 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
7708 				      cnss_pci_resume_noirq)
7709 	SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
7710 			   cnss_pci_runtime_idle)
7711 };
7712 
7713 static struct pci_driver cnss_pci_driver = {
7714 	.name     = "cnss_pci",
7715 	.id_table = cnss_pci_id_table,
7716 	.probe    = cnss_pci_probe,
7717 	.remove   = cnss_pci_remove,
7718 	.driver = {
7719 		.pm = &cnss_pm_ops,
7720 	},
7721 };
7722 
7723 static int cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
7724 {
7725 	int ret, retry = 0;
7726 
7727 	if (plat_priv->device_id == QCA6490_DEVICE_ID) {
7728 		cnss_pci_set_gen2_speed(plat_priv, rc_num);
7729 	} else {
7730 		cnss_pci_downgrade_rc_speed(plat_priv, rc_num);
7731 	}
7732 
7733 	cnss_pr_dbg("Trying to enumerate with PCIe RC%x\n", rc_num);
7734 retry:
7735 	ret = _cnss_pci_enumerate(plat_priv, rc_num);
7736 	if (ret) {
7737 		if (ret == -EPROBE_DEFER) {
7738 			cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
7739 			goto out;
7740 		}
7741 		cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
7742 			    rc_num, ret);
7743 		if (retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
7744 			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
7745 			goto retry;
7746 		} else {
7747 			goto out;
7748 		}
7749 	}
7750 
7751 	plat_priv->rc_num = rc_num;
7752 
7753 out:
7754 	return ret;
7755 }
7756 
7757 int cnss_pci_init(struct cnss_plat_data *plat_priv)
7758 {
7759 	struct device *dev = &plat_priv->plat_dev->dev;
7760 	const __be32 *prop;
7761 	int ret = 0, prop_len = 0, rc_count, i;
7762 
7763 	prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
7764 	if (!prop || !prop_len) {
7765 		cnss_pr_err("Failed to get PCIe RC number from DT\n");
7766 		goto out;
7767 	}
7768 
7769 	rc_count = prop_len / sizeof(__be32);
7770 	for (i = 0; i < rc_count; i++) {
7771 		ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
7772 		if (!ret)
7773 			break;
7774 		else if (ret == -EPROBE_DEFER || (ret && i == rc_count - 1))
7775 			goto out;
7776 	}
7777 
7778 	ret = cnss_try_suspend(plat_priv);
7779 	if (ret) {
7780 		cnss_pr_err("Failed to suspend, ret: %d\n", ret);
7781 		goto out;
7782 	}
7783 
7784 	if (!cnss_driver_registered) {
7785 		ret = pci_register_driver(&cnss_pci_driver);
7786 		if (ret) {
7787 			cnss_pr_err("Failed to register to PCI framework, err = %d\n",
7788 				    ret);
7789 			goto out;
7790 		}
7791 		if (!plat_priv->bus_priv) {
7792 			cnss_pr_err("Failed to probe PCI driver\n");
7793 			ret = -ENODEV;
7794 			goto unreg_pci;
7795 		}
7796 		cnss_driver_registered = true;
7797 	}
7798 
7799 	return 0;
7800 
7801 unreg_pci:
7802 	pci_unregister_driver(&cnss_pci_driver);
7803 out:
7804 	return ret;
7805 }
7806 
7807 void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
7808 {
7809 	if (cnss_driver_registered) {
7810 		pci_unregister_driver(&cnss_pci_driver);
7811 		cnss_driver_registered = false;
7812 	}
7813 }
7814