xref: /wlan-dirver/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c (revision d72ae0d21602f2772219b8a8ede203f5f730cb49)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <linux/platform_device.h>
21 #include <linux/pci.h>
22 #include "cds_api.h"
23 #include "qdf_status.h"
24 #include "qdf_lock.h"
25 #include "cds_sched.h"
26 #include "osdep.h"
27 #include "hif.h"
28 #include "htc.h"
29 #include "epping_main.h"
30 #include "osif_sync.h"
31 #include "wlan_hdd_main.h"
32 #include "wlan_hdd_power.h"
33 #include "wlan_logging_sock_svc.h"
34 #include "wma_api.h"
35 #include "wlan_hdd_napi.h"
36 #include "wlan_policy_mgr_api.h"
37 #include "qwlan_version.h"
38 #include "bmi.h"
39 #include <ol_defines.h>
40 #include "cdp_txrx_bus.h"
41 #include "cdp_txrx_misc.h"
42 #include "pld_common.h"
43 #include "wlan_hdd_driver_ops.h"
44 #include "wlan_ipa_ucfg_api.h"
45 #include "wlan_hdd_debugfs.h"
46 #include "cfg_ucfg_api.h"
47 #include <linux/suspend.h>
48 #include <qdf_notifier.h>
49 #include <qdf_hang_event_notifier.h>
50 #include "wlan_hdd_thermal.h"
51 #include "wlan_dp_ucfg_api.h"
52 #include "qdf_ssr_driver_dump.h"
53 #include "wlan_hdd_ioctl.h"
54 
55 #ifdef MODULE
56 #ifdef WLAN_WEAR_CHIPSET
57 #define WLAN_MODULE_NAME  "wlan"
58 #else
59 #define WLAN_MODULE_NAME  module_name(THIS_MODULE)
60 #endif
61 #else
62 #define WLAN_MODULE_NAME  "wlan"
63 #endif
64 
65 #define SSR_MAX_FAIL_CNT 3
66 static uint8_t re_init_fail_cnt, probe_fail_cnt;
67 
68 /* An atomic flag to check if SSR cleanup has been done or not */
69 static qdf_atomic_t is_recovery_cleanup_done;
70 
71 /* firmware/host hang event data */
72 static uint8_t g_fw_host_hang_event[QDF_HANG_EVENT_DATA_SIZE];
73 
74 /*
75  * In the BMI phase we only send a small chunk (256 bytes) of the FW image
76  * at a time and wait for the completion interrupt before starting the next
77  * transfer. During this phase the KRAIT enters IDLE/StandAlone(SA) Power
78  * Save(PS), and resuming from IDLE/SA PS incurs a large delay during
79  * driver load. So prevent APPS IDLE/SA PS during driver load to reduce interrupt latency.
80  */
81 
82 static inline void hdd_request_pm_qos(struct device *dev, int val)
83 {
84 	pld_request_pm_qos(dev, val);
85 }
86 
87 static inline void hdd_remove_pm_qos(struct device *dev)
88 {
89 	pld_remove_pm_qos(dev);
90 }
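
/*
 * These wrappers are paired in hdd_soc_load_lock()/hdd_soc_load_unlock()
 * below: the load path votes DISABLE_KRAIT_IDLE_PS_VAL before firmware
 * download starts and drops the vote once SoC probe completes.
 */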
91 
92 /**
93  * hdd_get_bandwidth_level() - get current bandwidth level
94  * @data: Context
95  *
96  * Return: current bandwidth level
97  */
98 static int hdd_get_bandwidth_level(void *data)
99 {
100 	int ret = PLD_BUS_WIDTH_NONE;
101 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
102 
103 	if (hdd_ctx)
104 		ret = ucfg_dp_get_current_throughput_level(hdd_ctx->psoc);
105 
106 	return ret;
107 }
108 
109 #ifdef DP_MEM_PRE_ALLOC
110 
111 /**
112  * hdd_get_consistent_mem_unaligned() - API to get consistent unaligned mem
113  * @size: Size of memory required
114  * @paddr: Pointer to paddr to be filled in by API
115  * @ring_type: Pointer to ring type for which consistent memory is needed
116  *
117  * Return: Virtual address of consistent memory on success, else null
118  */
119 static
120 void *hdd_get_consistent_mem_unaligned(size_t size,
121 				       qdf_dma_addr_t *paddr,
122 				       uint32_t ring_type)
123 {
124 	return ucfg_dp_prealloc_get_consistent_mem_unaligned(size, paddr,
125 							     ring_type);
126 }
127 
128 /**
129  * hdd_put_consistent_mem_unaligned() - API to put consistent unaligned mem
130  * @vaddr: Virtual address of memory
131  *
132  * Return: None
133  */
134 static
135 void hdd_put_consistent_mem_unaligned(void *vaddr)
136 {
137 	ucfg_dp_prealloc_put_consistent_mem_unaligned(vaddr);
138 }
139 
140 /**
141  * hdd_dp_prealloc_get_multi_pages() - gets pre-alloc DP multi-pages memory
142  * @desc_type: descriptor type
143  * @elem_size: single element size
144  * @elem_num: total number of elements to be allocated
145  * @pages: multi page information storage
146  * @cacheable: coherent memory or cacheable memory
147  *
148  * Return: None
149  */
150 static
151 void hdd_dp_prealloc_get_multi_pages(uint32_t desc_type, qdf_size_t elem_size,
152 				     uint16_t elem_num,
153 				     struct qdf_mem_multi_page_t *pages,
154 				     bool cacheable)
155 {
156 	ucfg_dp_prealloc_get_multi_pages(desc_type, elem_size, elem_num, pages,
157 					 cacheable);
158 }
159 
160 /**
161  * hdd_dp_prealloc_put_multi_pages() - puts back pre-alloc DP multi-pages memory
162  * @desc_type: descriptor type
163  * @pages: multi page information storage
164  *
165  * Return: None
166  */
167 static
168 void hdd_dp_prealloc_put_multi_pages(uint32_t desc_type,
169 				     struct qdf_mem_multi_page_t *pages)
170 {
171 	ucfg_dp_prealloc_put_multi_pages(desc_type, pages);
172 }
173 #else
174 static
175 void *hdd_get_consistent_mem_unaligned(size_t size,
176 				       qdf_dma_addr_t *paddr,
177 				       uint32_t ring_type)
178 {
179 	hdd_err_rl("prealloc not support!");
180 
181 	return NULL;
182 }
183 
184 static
185 void hdd_put_consistent_mem_unaligned(void *vaddr)
186 {
187 	hdd_err_rl("prealloc not support!");
188 }
189 
190 static inline
191 void hdd_dp_prealloc_get_multi_pages(uint32_t desc_type, qdf_size_t elem_size,
192 				     uint16_t elem_num,
193 				     struct qdf_mem_multi_page_t *pages,
194 				     bool cacheable)
195 {
196 }
197 
198 static inline
199 void hdd_dp_prealloc_put_multi_pages(uint32_t desc_type,
200 				     struct qdf_mem_multi_page_t *pages)
201 {
202 }
203 #endif
204 
205 /**
206  * hdd_set_recovery_in_progress() - API to set recovery in progress
207  * @data: Context
208  * @val: Value to set
209  *
210  * Return: None
211  */
212 static void hdd_set_recovery_in_progress(void *data, uint8_t val)
213 {
214 	cds_set_recovery_in_progress(val);
215 }
216 
217 /**
218  * hdd_is_driver_unloading() - API to query if driver is unloading
219  * @data: Private Data
220  *
221  * Return: True/False
222  */
223 static bool hdd_is_driver_unloading(void *data)
224 {
225 	return cds_is_driver_unloading();
226 }
227 
228 /**
229  * hdd_is_load_or_unload_in_progress() - API to query if driver is
230  * loading/unloading
231  * @data: Private Data
232  *
233  * Return: bool
234  */
235 static bool hdd_is_load_or_unload_in_progress(void *data)
236 {
237 	return cds_is_load_or_unload_in_progress();
238 }
239 
240 /**
241  * hdd_is_recovery_in_progress() - API to query if recovery in progress
242  * @data: Private Data
243  *
244  * Return: bool
245  */
246 static bool hdd_is_recovery_in_progress(void *data)
247 {
248 	return cds_is_driver_recovering();
249 }
250 
251 /**
252  * hdd_is_target_ready() - API to query if target is in ready state
253  * @data: Private Data
254  *
255  * Return: bool
256  */
257 static bool hdd_is_target_ready(void *data)
258 {
259 	return cds_is_target_ready();
260 }
261 
262 /**
263  * hdd_send_driver_ready_to_user() - API to indicate driver ready
264  * to userspace.
265  */
266 static void hdd_send_driver_ready_to_user(void)
267 {
268 	struct sk_buff *nl_event;
269 	struct hdd_context *hdd_ctx;
270 	int flags = cds_get_gfp_flags();
271 
272 	hdd_enter();
273 
274 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
275 	if (!hdd_ctx) {
276 		hdd_err("HDD Context is NULL");
277 		return;
278 	}
279 
280 	nl_event = wlan_cfg80211_vendor_event_alloc(
281 			hdd_ctx->wiphy, NULL, 0,
282 			QCA_NL80211_VENDOR_SUBCMD_DRIVER_READY_INDEX,
283 			flags);
284 	if (!nl_event) {
285 		hdd_err("wlan_cfg80211_vendor_event_alloc failed");
286 		return;
287 	}
288 
289 	wlan_cfg80211_vendor_event(nl_event, flags);
290 }
291 
292 /**
293  * hdd_hif_init_driver_state_callbacks() - API to initialize HIF callbacks
294  * @data: Private Data
295  * @cbk: HIF Driver State callbacks
296  *
297  * HIF should be independent of CDS calls. Pass CDS Callbacks to HIF, HIF will
298  * call the callbacks.
299  *
300  * Return: void
301  */
302 static void hdd_hif_init_driver_state_callbacks(void *data,
303 			struct hif_driver_state_callbacks *cbk)
304 {
305 	cbk->context = data;
306 	cbk->set_recovery_in_progress = hdd_set_recovery_in_progress;
307 	cbk->is_recovery_in_progress = hdd_is_recovery_in_progress;
308 	cbk->is_load_unload_in_progress = hdd_is_load_or_unload_in_progress;
309 	cbk->is_driver_unloading = hdd_is_driver_unloading;
310 	cbk->is_target_ready = hdd_is_target_ready;
311 	cbk->get_bandwidth_level = hdd_get_bandwidth_level;
312 	cbk->prealloc_get_consistent_mem_unaligned =
313 		hdd_get_consistent_mem_unaligned;
314 	cbk->prealloc_put_consistent_mem_unaligned =
315 		hdd_put_consistent_mem_unaligned;
316 	cbk->prealloc_get_multi_pages =
317 		hdd_dp_prealloc_get_multi_pages;
318 	cbk->prealloc_put_multi_pages =
319 		hdd_dp_prealloc_put_multi_pages;
320 }
321 
322 #ifdef HIF_DETECTION_LATENCY_ENABLE
323 void hdd_hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
324 {
325 	hif_set_enable_detection(hif_ctx, value);
326 }
327 #endif
328 
329 #ifdef FORCE_WAKE
330 void hdd_set_hif_init_phase(struct hif_opaque_softc *hif_ctx,
331 			    bool hal_init_phase)
332 {
333 	hif_srng_init_phase(hif_ctx, hal_init_phase);
334 }
335 #endif /* FORCE_WAKE */
336 
337 /**
338  * hdd_hif_set_attribute() - API to set CE attribute if memory is limited
339  * @hif_ctx: hif context
340  *
341  * Return: None
342  */
343 #ifdef SLUB_MEM_OPTIMIZE
344 static void hdd_hif_set_attribute(struct hif_opaque_softc *hif_ctx)
345 {
346 	hif_set_attribute(hif_ctx, HIF_LOWDESC_CE_NO_PKTLOG_CFG);
347 }
348 #else
349 static void hdd_hif_set_attribute(struct hif_opaque_softc *hif_ctx)
350 {}
351 #endif
352 
353 /**
354  * hdd_hif_register_shutdown_notifier() - Register HIF shutdown notifier
355  * @hif_ctx: HIF Context
356  *
357  * Return: success/failure
358  */
359 static QDF_STATUS
360 hdd_hif_register_shutdown_notifier(struct hif_opaque_softc *hif_ctx)
361 {
362 	return cds_shutdown_notifier_register(
363 					hif_shutdown_notifier_cb,
364 					hif_ctx);
365 }
366 
367 /**
368  * hdd_hif_set_ce_max_yield_time() - Wrapper API to set CE max yield time
369  * @hif_ctx: hif context
370  * @bus_type: underlying bus type
371  * @ce_service_max_yield_time: max yield time to be set
372  *
373  * Return: None
374  */
375 #if defined(CONFIG_SLUB_DEBUG_ON)
376 
377 static void hdd_hif_set_ce_max_yield_time(struct hif_opaque_softc *hif_ctx,
378 					  enum qdf_bus_type bus_type,
379 					  uint32_t ce_service_max_yield_time)
380 {
381 #define CE_SNOC_MAX_YIELD_TIME_US 2000
382 
383 	if (bus_type == QDF_BUS_TYPE_SNOC &&
384 	    ce_service_max_yield_time < CE_SNOC_MAX_YIELD_TIME_US)
385 		ce_service_max_yield_time = CE_SNOC_MAX_YIELD_TIME_US;
386 
387 	hif_set_ce_service_max_yield_time(hif_ctx, ce_service_max_yield_time);
388 }
389 
390 #else
391 static void hdd_hif_set_ce_max_yield_time(struct hif_opaque_softc *hif_ctx,
392 					  enum qdf_bus_type bus_type,
393 					  uint32_t ce_service_max_yield_time)
394 {
395 	hif_set_ce_service_max_yield_time(hif_ctx, ce_service_max_yield_time);
396 }
397 #endif
398 
399 /**
400  * hdd_init_cds_hif_context() - API to set CDS HIF Context
401  * @hif: HIF Context
402  *
403  * Return: success/failure
404  */
405 static int hdd_init_cds_hif_context(void *hif)
406 {
407 	QDF_STATUS status;
408 
409 	status = cds_set_context(QDF_MODULE_ID_HIF, hif);
410 
411 	if (status)
412 		return -ENOENT;
413 
414 	return 0;
415 }
416 
417 /**
418  * hdd_deinit_cds_hif_context() - API to clear CDS HIF Context
419  *
420  * Return: None
421  */
422 static void hdd_deinit_cds_hif_context(void)
423 {
424 	QDF_STATUS status;
425 
426 	status = cds_set_context(QDF_MODULE_ID_HIF, NULL);
427 
428 	if (status)
429 		hdd_err("Failed to reset CDS HIF Context");
430 }
431 
432 /**
433  * to_bus_type() - Map PLD bus type to low level bus type
434  * @bus_type: PLD bus type
435  *
436  * Map PLD bus type to low level bus type.
437  *
438  * Return: low level bus type.
439  */
440 static enum qdf_bus_type to_bus_type(enum pld_bus_type bus_type)
441 {
442 	switch (bus_type) {
443 	case PLD_BUS_TYPE_PCIE_FW_SIM:
444 	case PLD_BUS_TYPE_PCIE:
445 		return QDF_BUS_TYPE_PCI;
446 	case PLD_BUS_TYPE_SNOC_FW_SIM:
447 	case PLD_BUS_TYPE_SNOC:
448 		return QDF_BUS_TYPE_SNOC;
449 	case PLD_BUS_TYPE_SDIO:
450 		return QDF_BUS_TYPE_SDIO;
451 	case PLD_BUS_TYPE_USB:
452 		return QDF_BUS_TYPE_USB;
453 	case PLD_BUS_TYPE_IPCI_FW_SIM:
454 	case PLD_BUS_TYPE_IPCI:
455 		return QDF_BUS_TYPE_IPCI;
456 	default:
457 		return QDF_BUS_TYPE_NONE;
458 	}
459 }
460 
461 int hdd_hif_open(struct device *dev, void *bdev, const struct hif_bus_id *bid,
462 			enum qdf_bus_type bus_type, bool reinit)
463 {
464 	QDF_STATUS status;
465 	int ret = 0;
466 	struct hif_opaque_softc *hif_ctx;
467 	qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
468 	struct hif_driver_state_callbacks cbk;
469 	uint32_t mode = cds_get_conparam();
470 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
471 
472 	if (!hdd_ctx)
473 		return -EFAULT;
474 
475 	hdd_hif_init_driver_state_callbacks(dev, &cbk);
476 
477 	hif_ctx = hif_open(qdf_ctx, mode, bus_type, &cbk, hdd_ctx->psoc);
478 	if (!hif_ctx) {
479 		hdd_err("hif_open error");
480 		return -ENOMEM;
481 	}
482 
483 	ret = hdd_init_cds_hif_context(hif_ctx);
484 	if (ret) {
485 		hdd_err("Failed to set global HIF CDS Context err: %d", ret);
486 		goto err_hif_close;
487 	}
488 
489 	status = hdd_hif_register_shutdown_notifier(hif_ctx);
490 	if (status != QDF_STATUS_SUCCESS) {
491 		hdd_err("Shutdown notifier register failed: %d", status);
492 		goto err_deinit_hif_context;
493 	}
494 
495 	hdd_hif_set_attribute(hif_ctx);
496 
497 	status = hif_enable(hif_ctx, dev, bdev, bid, bus_type,
498 			    (reinit == true) ?  HIF_ENABLE_TYPE_REINIT :
499 			    HIF_ENABLE_TYPE_PROBE);
500 	if (!QDF_IS_STATUS_SUCCESS(status)) {
501 		hdd_err("hif_enable failed status: %d, reinit: %d",
502 			status, reinit);
503 
504 		ret = qdf_status_to_os_return(status);
505 		goto err_deinit_hif_context;
506 	} else {
507 		cds_set_target_ready(true);
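		/*
		 * hdd_napi_create() returns the number of NAPI instances
		 * created; 0 means no instances were created and a negative
		 * value indicates an error.
		 */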
508 		ret = hdd_napi_create();
509 		hdd_debug("hdd_napi_create returned: %d", ret);
510 		if (ret == 0)
511 			hdd_debug("NAPI: no instances are created");
512 		else if (ret < 0) {
513 			hdd_err("NAPI creation error, rc: 0x%x, reinit: %d",
514 				ret, reinit);
515 			ret = -EFAULT;
516 			goto mark_target_not_ready;
517 		} else {
518 			hdd_napi_event(NAPI_EVT_INI_FILE,
519 				       (void *)ucfg_dp_get_napi_enabled(hdd_ctx->psoc));
520 		}
521 	}
522 
523 	hdd_hif_set_ce_max_yield_time(
524 				hif_ctx, bus_type,
525 				cfg_get(hdd_ctx->psoc,
526 					CFG_DP_CE_SERVICE_MAX_YIELD_TIME));
527 	ucfg_pmo_psoc_set_hif_handle(hdd_ctx->psoc, hif_ctx);
528 	ucfg_dp_set_hif_handle(hdd_ctx->psoc, hif_ctx);
529 	hif_set_ce_service_max_rx_ind_flush(hif_ctx,
530 				cfg_get(hdd_ctx->psoc,
531 					CFG_DP_CE_SERVICE_MAX_RX_IND_FLUSH));
532 	return 0;
533 
534 mark_target_not_ready:
535 	cds_set_target_ready(false);
536 
537 err_deinit_hif_context:
538 	hdd_deinit_cds_hif_context();
539 
540 err_hif_close:
541 	hif_close(hif_ctx);
542 	return ret;
543 }
544 
545 void hdd_hif_close(struct hdd_context *hdd_ctx, void *hif_ctx)
546 {
547 	if (!hdd_ctx) {
548 		hdd_err("hdd_ctx error");
549 		return;
550 	}
551 
552 	if (!hif_ctx)
553 		return;
554 
555 	cds_set_target_ready(false);
556 	hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
557 
558 	hdd_napi_destroy(true);
559 
560 	hdd_deinit_cds_hif_context();
561 	hif_close(hif_ctx);
562 
563 	ucfg_pmo_psoc_set_hif_handle(hdd_ctx->psoc, NULL);
564 }
565 
566 /**
567  * hdd_init_qdf_ctx() - API to initialize global QDF Device structure
568  * @dev: Device Pointer
569  * @bdev: Bus Device pointer
570  * @bus_type: Underlying bus type
571  * @bid: Bus id passed by platform driver
572  *
573  * Return: 0 - success, < 0 - failure
574  */
575 static int hdd_init_qdf_ctx(struct device *dev, void *bdev,
576 			    enum qdf_bus_type bus_type,
577 			    const struct hif_bus_id *bid)
578 {
579 	qdf_device_t qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
580 
581 	if (!qdf_dev)
582 		return -EINVAL;
583 
584 	qdf_dev->dev = dev;
585 	qdf_dev->drv_hdl = bdev;
586 	qdf_dev->bus_type = bus_type;
587 	qdf_dev->bid = bid;
588 
589 	qdf_dma_invalid_buf_list_init();
590 
591 	if (cds_smmu_mem_map_setup(qdf_dev, ucfg_ipa_is_ready()) !=
592 		QDF_STATUS_SUCCESS) {
593 		hdd_err("cds_smmu_mem_map_setup() failed");
594 	}
595 
596 	return 0;
597 }
598 
599 /**
600  * hdd_deinit_qdf_ctx() - API to Deinitialize global QDF Device structure
601  * @domain: Debug domain
602  *
603  * Return: 0 - success, < 0 - failure
604  */
605 int hdd_deinit_qdf_ctx(uint8_t domain)
606 {
607 	qdf_device_t qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
608 
609 	if (!qdf_dev)
610 		return -EINVAL;
611 
612 	qdf_dma_invalid_buf_free(qdf_dev->dev, domain);
613 
614 	return 0;
615 }
616 
617 /**
618  * check_for_probe_defer() - API to check return value
619  * @ret: Return Value
620  *
621  * Return: -EPROBE_DEFER to the platform driver if the return value is
622  * -ENOMEM, so that the platform driver will try to re-probe.
623  */
624 #ifdef MODULE
625 static int check_for_probe_defer(int ret)
626 {
627 	return ret;
628 }
629 #else
630 static int check_for_probe_defer(int ret)
631 {
632 	if (ret == -ENOMEM)
633 		return -EPROBE_DEFER;
634 	return ret;
635 }
636 #endif
637 
638 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
639 static void hdd_abort_system_suspend(struct device *dev)
640 {
641 	qdf_pm_system_wakeup();
642 }
643 #else
644 static void hdd_abort_system_suspend(struct device *dev)
645 {
646 }
647 #endif
648 
649 int hdd_soc_idle_restart_lock(struct device *dev)
650 {
651 	hdd_prevent_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_IDLE_RESTART);
652 
653 	hdd_abort_system_suspend(dev);
654 
655 	return 0;
656 }
657 
658 void hdd_soc_idle_restart_unlock(void)
659 {
660 	hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_IDLE_RESTART);
661 }
662 
663 static void hdd_soc_load_lock(struct device *dev)
664 {
665 	hdd_prevent_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_INIT);
666 	hdd_request_pm_qos(dev, DISABLE_KRAIT_IDLE_PS_VAL);
667 }
668 
669 static void hdd_soc_load_unlock(struct device *dev)
670 {
671 	hdd_remove_pm_qos(dev);
672 	hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_INIT);
673 }
674 
675 #ifdef DP_MEM_PRE_ALLOC
676 /**
677  * hdd_init_dma_mask() - Set the DMA mask for dma memory pre-allocation
678  * @dev: device handle
679  * @bus_type: Bus type for which init is being done
680  *
681  * Return: 0 - success, non-zero on failure
682  */
683 static int hdd_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
684 {
685 	return hif_init_dma_mask(dev, bus_type);
686 }
687 #else
688 static inline int
689 hdd_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
690 {
691 	return 0;
692 }
693 #endif
694 
695 static int __hdd_soc_probe(struct device *dev,
696 			   void *bdev,
697 			   const struct hif_bus_id *bid,
698 			   enum qdf_bus_type bus_type)
699 {
700 	struct hdd_context *hdd_ctx;
701 	QDF_STATUS status;
702 	int errno;
703 
704 	hdd_info("probing driver");
705 
706 	hdd_soc_load_lock(dev);
707 	cds_set_load_in_progress(true);
708 	cds_set_driver_in_bad_state(false);
709 	cds_set_recovery_in_progress(false);
710 
711 	errno = hdd_init_qdf_ctx(dev, bdev, bus_type, bid);
712 	if (errno)
713 		goto unlock;
714 
715 	errno = hdd_init_dma_mask(dev, bus_type);
716 	if (errno)
717 		goto unlock;
718 
719 	hdd_ctx = hdd_context_create(dev);
720 	if (IS_ERR(hdd_ctx)) {
721 		errno = PTR_ERR(hdd_ctx);
722 		goto assert_fail_count;
723 	}
724 
725 	status = ucfg_dp_prealloc_init((struct cdp_ctrl_objmgr_psoc *)
726 					hdd_ctx->psoc);
727 
728 	if (status != QDF_STATUS_SUCCESS) {
729 		errno = qdf_status_to_os_return(status);
730 		goto dp_prealloc_fail;
731 	}
732 
733 	errno = hdd_wlan_startup(hdd_ctx);
734 	if (errno)
735 		goto hdd_context_destroy;
736 
737 	status = hdd_psoc_create_vdevs(hdd_ctx);
738 	if (QDF_IS_STATUS_ERROR(status)) {
739 		errno = qdf_status_to_os_return(status);
740 		goto wlan_exit;
741 	}
742 
743 	probe_fail_cnt = 0;
744 	cds_set_driver_loaded(true);
745 	cds_set_load_in_progress(false);
746 	hdd_start_complete(0);
747 	hdd_thermal_mitigation_register(hdd_ctx, dev);
748 
749 	hdd_set_sar_init_index(hdd_ctx);
750 	hdd_soc_load_unlock(dev);
751 
752 	return 0;
753 
754 wlan_exit:
755 	hdd_wlan_exit(hdd_ctx);
756 
757 hdd_context_destroy:
758 	ucfg_dp_prealloc_deinit();
759 
760 dp_prealloc_fail:
761 	hdd_context_destroy(hdd_ctx);
762 
763 assert_fail_count:
764 	probe_fail_cnt++;
765 	hdd_err("consecutive probe failures:%u", probe_fail_cnt);
766 	QDF_BUG(probe_fail_cnt < SSR_MAX_FAIL_CNT);
767 
768 unlock:
769 	cds_set_load_in_progress(false);
770 	hdd_soc_load_unlock(dev);
771 
772 	return check_for_probe_defer(errno);
773 }
774 
775 /**
776  * hdd_soc_probe() - perform SoC probe
777  * @dev: kernel device being probed
778  * @bdev: bus device structure
779  * @bid: bus identifier for shared busses
780  * @bus_type: underlying bus type
781  *
782  * A SoC probe indicates new SoC hardware has become available and needs to be
783  * initialized.
784  *
785  * Return: Errno
786  */
787 static int hdd_soc_probe(struct device *dev,
788 			 void *bdev,
789 			 const struct hif_bus_id *bid,
790 			 enum qdf_bus_type bus_type)
791 {
792 	struct osif_psoc_sync *psoc_sync;
793 	int errno;
794 
795 	hdd_info("probing driver");
796 
797 	errno = osif_psoc_sync_create_and_trans(&psoc_sync);
798 	if (errno)
799 		return errno;
800 
801 	osif_psoc_sync_register(dev, psoc_sync);
802 	errno = __hdd_soc_probe(dev, bdev, bid, bus_type);
803 	if (errno)
804 		goto destroy_sync;
805 
806 	osif_psoc_sync_trans_stop(psoc_sync);
807 
808 	return 0;
809 
810 destroy_sync:
811 	osif_psoc_sync_unregister(dev);
812 	osif_psoc_sync_wait_for_ops(psoc_sync);
813 
814 	osif_psoc_sync_trans_stop(psoc_sync);
815 	osif_psoc_sync_destroy(psoc_sync);
816 
817 	return errno;
818 }
819 
820 static int __hdd_soc_recovery_reinit(struct device *dev,
821 				     void *bdev,
822 				     const struct hif_bus_id *bid,
823 				     enum qdf_bus_type bus_type)
824 {
825 	int errno;
826 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
827 
828 	hdd_info("re-probing driver");
829 
830 	if (!hdd_ctx) {
831 		hdd_err("hdd_ctx is null!");
832 		return qdf_status_to_os_return(QDF_STATUS_E_RESOURCES);
833 	}
834 
835 	hdd_soc_load_lock(dev);
836 	cds_set_driver_in_bad_state(false);
837 
838 	errno = hdd_init_qdf_ctx(dev, bdev, bus_type, bid);
839 	if (errno)
840 		goto unlock;
841 
842 	errno = hdd_wlan_re_init();
843 	if (errno) {
844 		re_init_fail_cnt++;
845 		goto assert_fail_count;
846 	}
847 
848 	re_init_fail_cnt = 0;
849 
850 	/*
851 	 * In case of SSR within SSR we have seen the race
852 	 * where the reinit is successful and fw down is received
853 	 * which sets the recovery in progress. Now as reinit is
854 	 * successful we reset the recovery in progress here.
855 	 * So check if FW is down then don't reset the recovery
856 	 * in progress
857 	 */
858 	if (!qdf_is_fw_down()) {
859 		cds_set_recovery_in_progress(false);
860 		hdd_handle_cached_commands();
861 	}
862 
863 	if (!hdd_is_any_interface_open(hdd_ctx)) {
864 		hdd_debug("restarting idle shutdown timer");
865 		hdd_psoc_idle_timer_start(hdd_ctx);
866 	}
867 
868 	hdd_soc_load_unlock(dev);
869 	hdd_send_driver_ready_to_user();
870 
871 	return 0;
872 
873 assert_fail_count:
874 	hdd_err("consecutive reinit failures:%u", re_init_fail_cnt);
875 	QDF_BUG(re_init_fail_cnt < SSR_MAX_FAIL_CNT);
876 
877 unlock:
878 	cds_set_driver_in_bad_state(true);
879 	hdd_soc_load_unlock(dev);
880 	hdd_start_complete(errno);
881 
882 	return check_for_probe_defer(errno);
883 }
884 
885 /**
886  * hdd_soc_recovery_reinit() - perform PDR/SSR SoC reinit
887  * @dev: the kernel device being re-initialized
888  * @bdev: bus device structure
889  * @bid: bus identifier for shared busses
890  * @bus_type: underlying bus type
891  *
892  * When communication with firmware breaks down, a SoC recovery process kicks in
893  * with two phases: shutdown and reinit.
894  *
895  * SSR reinit is similar to a 'probe' but happens in response to an SSR
896  * shutdown. The idea is to re-initialize the SoC to as close to its old,
897  * pre-communications-breakdown configuration as possible. This is completely
898  * transparent from a userspace point of view.
899  *
900  * Return: Errno
901  */
902 static int hdd_soc_recovery_reinit(struct device *dev,
903 				   void *bdev,
904 				   const struct hif_bus_id *bid,
905 				   enum qdf_bus_type bus_type)
906 {
907 	struct osif_psoc_sync *psoc_sync;
908 	int errno;
909 
910 	/* if driver is unloading, there is no need to do SSR */
911 	if (qdf_is_driver_unloading()) {
912 		hdd_info("driver is unloading, avoid SSR");
913 		return 0;
914 	}
915 
916 	/* SSR transition is initiated at the beginning of soc shutdown */
917 	errno = osif_psoc_sync_trans_resume(dev, &psoc_sync);
918 	QDF_BUG(!errno);
919 	if (errno)
920 		return errno;
921 
922 	errno = __hdd_soc_recovery_reinit(dev, bdev, bid, bus_type);
923 
924 
925 	osif_psoc_sync_trans_stop(psoc_sync);
926 	hdd_start_complete(0);
927 
928 	return errno;
929 }
930 
931 static void __hdd_soc_remove(struct device *dev)
932 {
933 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
934 
935 	QDF_BUG(hdd_ctx);
936 	if (!hdd_ctx)
937 		return;
938 
939 	pr_info("%s: Removing driver v%s\n", WLAN_MODULE_NAME,
940 		QWLAN_VERSIONSTR);
941 
942 	qdf_rtpm_sync_resume();
943 	cds_set_driver_loaded(false);
944 	cds_set_unload_in_progress(true);
945 	if (!hdd_wait_for_debugfs_threads_completion())
946 		hdd_warn("Debugfs threads are still active attempting driver unload anyway");
947 
948 	if (hdd_get_conparam() == QDF_GLOBAL_EPPING_MODE) {
949 		hdd_wlan_stop_modules(hdd_ctx, false);
950 		qdf_nbuf_deinit_replenish_timer();
951 	} else {
952 		hdd_thermal_mitigation_unregister(hdd_ctx, dev);
953 		hdd_wlan_exit(hdd_ctx);
954 	}
955 
956 	hdd_context_destroy(hdd_ctx);
957 
958 	cds_set_driver_in_bad_state(false);
959 	cds_set_unload_in_progress(false);
960 
961 	ucfg_dp_prealloc_deinit();
962 
963 	pr_info("%s: Driver De-initialized\n", WLAN_MODULE_NAME);
964 }
965 
966 /**
967  * hdd_soc_remove() - perform SoC remove
968  * @dev: the kernel device being removed
969  *
970  * A SoC remove indicates the attached SoC hardware is about to go away and
971  * needs to be cleaned up.
972  *
973  * Return: void
974  */
975 static void hdd_soc_remove(struct device *dev)
976 {
977 	__hdd_soc_remove(dev);
978 }
979 
980 #ifdef FEATURE_WLAN_DIAG_SUPPORT
981 /**
982  * hdd_wlan_ssr_shutdown_event() - send SSR shutdown state
983  *
984  * This function sends the SSR shutdown state diag event.
985  *
986  * Return: void.
987  */
988 static void hdd_wlan_ssr_shutdown_event(void)
989 {
990 	WLAN_HOST_DIAG_EVENT_DEF(ssr_shutdown,
991 					struct host_event_wlan_ssr_shutdown);
992 	qdf_mem_zero(&ssr_shutdown, sizeof(ssr_shutdown));
993 	ssr_shutdown.status = SSR_SUB_SYSTEM_SHUTDOWN;
994 	WLAN_HOST_DIAG_EVENT_REPORT(&ssr_shutdown,
995 					EVENT_WLAN_SSR_SHUTDOWN_SUBSYSTEM);
996 }
997 #else
998 static inline void hdd_wlan_ssr_shutdown_event(void) { }
999 #endif
1000 
1001 /**
1002  * hdd_send_hang_data() - Send hang data to userspace
1003  * @data: Hang data
1004  * @data_len: Length of @data
1005  *
1006  * Return: None
1007  */
1008 static void hdd_send_hang_data(uint8_t *data, size_t data_len)
1009 {
1010 	enum qdf_hang_reason reason = QDF_REASON_UNSPECIFIED;
1011 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1012 
1013 	if (!hdd_ctx)
1014 		return;
1015 
1016 	cds_get_recovery_reason(&reason);
1017 	cds_reset_recovery_reason();
1018 	wlan_hdd_send_hang_reason_event(hdd_ctx, reason, data, data_len);
1019 }
1020 
1021 /**
1022  * hdd_psoc_shutdown_notify() - notify the various interested parties that the
1023  *	soc is starting recovery shutdown
1024  * @hdd_ctx: the HDD context corresponding to the soc undergoing shutdown
1025  *
1026  * Return: None
1027  */
1028 static void hdd_psoc_shutdown_notify(struct hdd_context *hdd_ctx)
1029 {
1030 	hdd_enter();
1031 	wlan_cfg80211_cleanup_scan_queue(hdd_ctx->pdev, NULL);
1032 
1033 	if (ucfg_ipa_is_enabled()) {
1034 		ucfg_ipa_uc_force_pipe_shutdown(hdd_ctx->pdev);
1035 
1036 		if (pld_is_fw_rejuvenate(hdd_ctx->parent_dev) ||
1037 		    pld_is_pdr(hdd_ctx->parent_dev))
1038 			ucfg_ipa_fw_rejuvenate_send_msg(hdd_ctx->pdev);
1039 	}
1040 
1041 	cds_shutdown_notifier_call();
1042 	cds_shutdown_notifier_purge();
1043 
1044 	hdd_wlan_ssr_shutdown_event();
1045 	hdd_exit();
1046 }
1047 
1048 /**
1049  * hdd_soc_recovery_cleanup() - Perform SSR related cleanup activities.
1050  *
1051  * This function will perform cleanup activities related to when driver
1052  * undergoes SSR. Activities include stopping idle timer and invoking shutdown
1053  * notifier.
1054  *
1055  * Return: None
1056  */
1057 static void hdd_soc_recovery_cleanup(void)
1058 {
1059 	struct hdd_context *hdd_ctx;
1060 
1061 	hdd_enter();
1062 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1063 	if (!hdd_ctx)
1064 		return;
1065 
1066 	/* cancel/flush any pending/active idle shutdown work */
1067 	hdd_psoc_idle_timer_stop(hdd_ctx);
1068 	ucfg_dp_bus_bw_compute_timer_stop(hdd_ctx->psoc);
1069 
1070 	/* nothing to do if the soc is already unloaded */
1071 	if (hdd_ctx->driver_status == DRIVER_MODULES_CLOSED) {
1072 		hdd_info("Driver modules are already closed");
1073 		return;
1074 	}
1075 
1076 	if (cds_is_load_or_unload_in_progress()) {
1077 		hdd_info("Load/unload in progress, ignore SSR shutdown");
1078 		return;
1079 	}
1080 
1081 	hdd_psoc_shutdown_notify(hdd_ctx);
1082 	hdd_exit();
1083 }
1084 
1085 static void __hdd_soc_recovery_shutdown(void)
1086 {
1087 	struct hdd_context *hdd_ctx;
1088 	void *hif_ctx;
1089 
1090 	/* recovery starts via firmware down indication; ensure we got one */
1091 	QDF_BUG(cds_is_driver_recovering());
1092 
1093 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1094 	if (!hdd_ctx)
1095 		return;
1096 
1097 	/*
1098 	 * Perform SSR related cleanup if it has not already been done as a
1099 	 * part of receiving the uevent.
1100 	 */
1101 	if (!qdf_atomic_read(&is_recovery_cleanup_done))
1102 		hdd_soc_recovery_cleanup();
1103 	else
1104 		qdf_atomic_set(&is_recovery_cleanup_done, 0);
1105 
1106 	if (!hdd_wait_for_debugfs_threads_completion())
1107 		hdd_err("Debugfs threads are still pending, attempting SSR anyway");
1108 
1109 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1110 	if (!hif_ctx)
1111 		return;
1112 
1113 	/* mask the host controller interrupts */
1114 	hif_mask_interrupt_call(hif_ctx);
1115 
1116 	if (!QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
1117 		hif_disable_isr(hif_ctx);
1118 		hdd_wlan_shutdown();
1119 	}
1120 }
1121 
1122 /**
1123  * hdd_soc_recovery_shutdown() - perform PDR/SSR SoC shutdown
1124  * @dev: the device to shutdown
1125  *
1126  * When communication with firmware breaks down, a SoC recovery process kicks in
1127  * with two phases: shutdown and reinit.
1128  *
1129  * SSR shutdown is similar to a 'remove' but without communication with
1130  * firmware. The idea is to retain as much SoC configuration as possible, so it
1131  * can be re-initialized to the same state after a reset. This is completely
1132  * transparent from a userspace point of view.
1133  *
1134  * Return: void
1135  */
1136 static void hdd_soc_recovery_shutdown(struct device *dev)
1137 {
1138 	struct osif_psoc_sync *psoc_sync;
1139 	int errno;
1140 
1141 	/* if driver is unloading, there is no need to do SSR */
1142 	if (qdf_is_driver_unloading()) {
1143 		hdd_info("driver is unloading, avoid SSR");
1144 		return;
1145 	}
1146 
1147 	errno = osif_psoc_sync_trans_start_wait(dev, &psoc_sync);
1148 	if (errno)
1149 		return;
1150 
1151 	ucfg_dp_wait_complete_tasks();
1152 	osif_psoc_sync_wait_for_ops(psoc_sync);
1153 
1154 	__hdd_soc_recovery_shutdown();
1155 
1156 	/* SSR transition is concluded at the end of soc re-init */
1157 }
1158 
1159 /**
1160  * wlan_hdd_crash_shutdown() - wlan_hdd_crash_shutdown
1161  *
1162  * HDD crash shutdown function: This function is called by
1163  * platform driver's crash shutdown routine
1164  *
1165  * Return: void
1166  */
1167 static void wlan_hdd_crash_shutdown(void)
1168 {
1169 	QDF_STATUS ret;
1170 	WMA_HANDLE wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
1171 
1172 	if (!wma_handle)
1173 		return;
1174 
1175 	/*
1176 	 * When a kernel panic happens, the WiFi FW may still be active and
1177 	 * can cause NOC errors/memory corruption; to avoid this, inject a
1178 	 * FW crash first.
1179 	 * Send crash_inject to the FW directly, because we are now in an
1180 	 * atomic context and preemption has been disabled: MCThread won't
1181 	 * be scheduled at the moment, and for the same reason the
1182 	 * TargetFailure event won't be received after injecting the crash
1183 	 * either.
1184 	 */
1185 	ret = wma_crash_inject(wma_handle, RECOVERY_SIM_ASSERT, 0);
1186 	if (QDF_IS_STATUS_ERROR(ret)) {
1187 		hdd_err("Failed to send crash inject:%d", ret);
1188 		return;
1189 	}
1190 
1191 	hif_crash_shutdown(cds_get_context(QDF_MODULE_ID_HIF));
1192 }
1193 
1194 /**
1195  * wlan_hdd_notify_handler() - handle modem power state notification
1196  * @state: modem power state
1197  *
1198  * This function is called by the platform driver to notify the WLAN
1199  * driver of a modem power state change, which is used for COEX
1200  * handling.
1201  *
1202  * Return: void
1203  */
1204 static void wlan_hdd_notify_handler(int state)
1205 {
1206 	if (!QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
1207 		int ret;
1208 
1209 		ret = hdd_wlan_notify_modem_power_state(state);
1210 		if (ret < 0)
1211 			hdd_err("Fail to send notify");
1212 	}
1213 }
1214 
1215 static int hdd_to_pmo_interface_pause(enum wow_interface_pause hdd_pause,
1216 				      enum pmo_wow_interface_pause *pmo_pause)
1217 {
1218 	switch (hdd_pause) {
1219 	case WOW_INTERFACE_PAUSE_DEFAULT:
1220 		*pmo_pause = PMO_WOW_INTERFACE_PAUSE_DEFAULT;
1221 		break;
1222 	case WOW_INTERFACE_PAUSE_ENABLE:
1223 		*pmo_pause = PMO_WOW_INTERFACE_PAUSE_ENABLE;
1224 		break;
1225 	case WOW_INTERFACE_PAUSE_DISABLE:
1226 		*pmo_pause = PMO_WOW_INTERFACE_PAUSE_DISABLE;
1227 		break;
1228 	default:
1229 		hdd_err("Invalid interface pause: %d", hdd_pause);
1230 		return -EINVAL;
1231 	}
1232 
1233 	return 0;
1234 }
1235 
1236 static int hdd_to_pmo_resume_trigger(enum wow_resume_trigger hdd_trigger,
1237 				     enum pmo_wow_resume_trigger *pmo_trigger)
1238 {
1239 	switch (hdd_trigger) {
1240 	case WOW_RESUME_TRIGGER_DEFAULT:
1241 		*pmo_trigger = PMO_WOW_RESUME_TRIGGER_DEFAULT;
1242 		break;
1243 	case WOW_RESUME_TRIGGER_HTC_WAKEUP:
1244 		*pmo_trigger = PMO_WOW_RESUME_TRIGGER_HTC_WAKEUP;
1245 		break;
1246 	case WOW_RESUME_TRIGGER_GPIO:
1247 		*pmo_trigger = PMO_WOW_RESUME_TRIGGER_GPIO;
1248 		break;
1249 	default:
1250 		hdd_err("Invalid resume trigger: %d", hdd_trigger);
1251 		return -EINVAL;
1252 	}
1253 
1254 	return 0;
1255 }
1256 
1257 static int
1258 hdd_to_pmo_wow_enable_params(struct wow_enable_params *in_params,
1259 			     struct pmo_wow_enable_params *out_params)
1260 {
1261 	int err;
1262 
1263 	/* unit-test suspend */
1264 	out_params->is_unit_test = in_params->is_unit_test;
1265 
1266 	/* interface pause */
1267 	err = hdd_to_pmo_interface_pause(in_params->interface_pause,
1268 					 &out_params->interface_pause);
1269 	if (err)
1270 		return err;
1271 
1272 	/* resume trigger */
1273 	err = hdd_to_pmo_resume_trigger(in_params->resume_trigger,
1274 					&out_params->resume_trigger);
1275 	if (err)
1276 		return err;
1277 
1278 	return 0;
1279 }
1280 
1281 /**
1282  * __wlan_hdd_bus_suspend() - handles platform suspend
1283  * @wow_params: collection of wow enable override parameters
1284  * @type: WoW suspend type
1285  *
1286  * Does precondition validation. Ensures that a subsystem restart isn't in
1287  * progress. Ensures that no load or unload is in progress. Does:
1288  *	data path suspend
1289  *	component (pmo) suspend
1290  *	hif (bus) suspend
1291  *
1292  * Return: 0 for success, -EFAULT for null pointers,
1293  *     -EBUSY or -EAGAIN if another operation is in progress and
1294  *     wlan will not be ready to suspend in time.
1295  */
1296 static int __wlan_hdd_bus_suspend(struct wow_enable_params wow_params,
1297 				  enum qdf_suspend_type type)
1298 {
1299 	int err;
1300 	QDF_STATUS status;
1301 	struct hdd_context *hdd_ctx;
1302 	void *hif_ctx;
1303 	void *dp_soc;
1304 	struct pmo_wow_enable_params pmo_params;
1305 	int pending;
1306 	struct bbm_params param = {0};
1307 
1308 	hdd_info("starting bus suspend");
1309 
1310 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1311 	if (!hdd_ctx)
1312 		return -ENODEV;
1313 
1314 	err = wlan_hdd_validate_context(hdd_ctx);
1315 	if (0 != err) {
1316 		if (pld_is_low_power_mode(hdd_ctx->parent_dev))
1317 			hdd_debug("low power mode (Deep Sleep/Hibernate)");
1318 		else
1319 			return err;
1320 	}
1321 
1322 	/* If Wifi is off, return success for system suspend */
1323 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1324 		hdd_debug("Driver Module closed; skipping suspend");
1325 		return 0;
1326 	}
1327 
1328 
1329 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1330 	if (!hif_ctx)
1331 		return -EINVAL;
1332 
1333 	err = hdd_to_pmo_wow_enable_params(&wow_params, &pmo_params);
1334 	if (err) {
1335 		hdd_err("Invalid WoW enable parameters: %d", err);
1336 		return err;
1337 	}
1338 
1339 	dp_soc = cds_get_context(QDF_MODULE_ID_SOC);
1340 	err = qdf_status_to_os_return(ucfg_dp_bus_suspend(dp_soc,
1341 							  OL_TXRX_PDEV_ID));
1342 	if (err) {
1343 		hdd_err("Failed cdp bus suspend: %d", err);
1344 		return err;
1345 	}
1346 
1347 	if (ucfg_ipa_is_tx_pending(hdd_ctx->pdev)) {
1348 		hdd_err("failed due to pending IPA TX comps");
1349 		err = -EBUSY;
1350 		goto resume_dp;
1351 	}
1352 
1353 	err = hif_bus_early_suspend(hif_ctx);
1354 	if (err) {
1355 		hdd_err("Failed hif bus early suspend");
1356 		goto resume_dp;
1357 	}
1358 
1359 	status = ucfg_pmo_psoc_bus_suspend_req(hdd_ctx->psoc,
1360 					       type,
1361 					       &pmo_params);
1362 	err = qdf_status_to_os_return(status);
1363 	if (err) {
1364 		hdd_err("Failed pmo bus suspend: %d", status);
1365 		goto late_hif_resume;
1366 	}
1367 
1368 	hif_system_pm_set_state_suspended(hif_ctx);
1369 
1370 	err = hif_bus_suspend(hif_ctx);
1371 	if (err) {
1372 		hdd_err("Failed hif bus suspend: %d", err);
1373 		goto resume_pmo;
1374 	}
1375 
1376 	status = ucfg_pmo_core_txrx_suspend(hdd_ctx->psoc);
1377 	err = qdf_status_to_os_return(status);
1378 	if (err) {
1379 		hdd_err("Failed to suspend TXRX: %d", err);
1380 		goto resume_hif;
1381 	}
1382 
1383 	pending = cdp_rx_get_pending(cds_get_context(QDF_MODULE_ID_SOC));
1384 	if (pending) {
1385 		hdd_debug("Prevent suspend, RX frame pending %d", pending);
1386 		err = -EBUSY;
1387 		goto resume_txrx;
1388 	}
1389 
1390 	if (hif_try_prevent_ep_vote_access(hif_ctx)) {
1391 		hdd_debug("Prevent suspend, ep work pending");
1392 		err = -EBUSY;
1393 		goto resume_txrx;
1394 	}
1395 
1396 	/*
1397 	 * Remove bus votes at the very end, after making sure there are no
1398 	 * pending bus transactions from WLAN SOC for TX/RX.
1399 	 */
1400 	param.policy = BBM_NON_PERSISTENT_POLICY;
1401 	param.policy_info.flag = BBM_APPS_SUSPEND;
1402 	ucfg_dp_bbm_apply_independent_policy(hdd_ctx->psoc, &param);
1403 
1404 	hdd_info("bus suspend succeeded");
1405 	return 0;
1406 
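/*
 * Error unwind: undo the completed suspend steps in reverse order. These
 * resume calls are not expected to fail; QDF_BUG() asserts if they do.
 */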
1407 resume_txrx:
1408 	status = ucfg_pmo_core_txrx_resume(hdd_ctx->psoc);
1409 	QDF_BUG(QDF_IS_STATUS_SUCCESS(status));
1410 
1411 resume_hif:
1412 	status = hif_bus_resume(hif_ctx);
1413 	QDF_BUG(QDF_IS_STATUS_SUCCESS(status));
1414 
1415 resume_pmo:
1416 	status = ucfg_pmo_psoc_bus_resume_req(hdd_ctx->psoc,
1417 					      type);
1418 	QDF_BUG(QDF_IS_STATUS_SUCCESS(status));
1419 
1420 late_hif_resume:
1421 	status = hif_bus_late_resume(hif_ctx);
1422 	QDF_BUG(QDF_IS_STATUS_SUCCESS(status));
1423 
1424 resume_dp:
1425 	status = ucfg_dp_bus_resume(dp_soc, OL_TXRX_PDEV_ID);
1426 	QDF_BUG(QDF_IS_STATUS_SUCCESS(status));
1427 	hif_system_pm_set_state_on(hif_ctx);
1428 
1429 	return err;
1430 }
1431 
1432 int wlan_hdd_bus_suspend(void)
1433 {
1434 	struct wow_enable_params default_params = {0};
1435 
1436 	return __wlan_hdd_bus_suspend(default_params, QDF_SYSTEM_SUSPEND);
1437 }
1438 
1439 #ifdef WLAN_SUSPEND_RESUME_TEST
1440 int wlan_hdd_unit_test_bus_suspend(struct wow_enable_params wow_params)
1441 {
1442 	return __wlan_hdd_bus_suspend(wow_params, QDF_UNIT_TEST_WOW_SUSPEND);
1443 }
1444 #endif
1445 
1446 /**
1447  * wlan_hdd_bus_suspend_noirq() - handle .suspend_noirq callback
1448  *
1449  * This function is called by the platform driver to complete the
1450  * bus suspend callback when device interrupts are disabled by kernel.
1451  * Call HIF and WMA suspend_noirq callbacks to make sure there is no
1452  * wake up pending from FW before allowing suspend.
1453  *
1454  * Return: 0 for success and -EBUSY if FW is requesting wake up
1455  */
1456 int wlan_hdd_bus_suspend_noirq(void)
1457 {
1458 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1459 	void *hif_ctx;
1460 	int errno;
1461 	uint32_t pending_events;
1462 
1463 	hdd_debug("start bus_suspend_noirq");
1464 
1465 	if (!hdd_ctx)
1466 		return -ENODEV;
1467 
1468 	/* If Wifi is off, return success for system suspend */
1469 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1470 		hdd_debug("Driver module closed; skip bus-noirq suspend");
1471 		return 0;
1472 	}
1473 
1474 	errno = wlan_hdd_validate_context(hdd_ctx);
1475 	if (errno)
1476 		return errno;
1477 
1478 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1479 	if (!hif_ctx)
1480 		return -EINVAL;
1481 
1482 	errno = hif_bus_suspend_noirq(hif_ctx);
1483 	if (errno)
1484 		goto done;
1485 
1486 	errno = ucfg_pmo_psoc_is_target_wake_up_received(hdd_ctx->psoc);
1487 	if (errno == -EAGAIN) {
1488 		hdd_err("Firmware attempting wakeup, try again");
1489 		wlan_hdd_inc_suspend_stats(hdd_ctx,
1490 					   SUSPEND_FAIL_INITIAL_WAKEUP);
1491 	}
1492 	if (errno)
1493 		goto resume_hif_noirq;
1494 
1495 	pending_events = wma_critical_events_in_flight();
1496 	if (pending_events) {
1497 		hdd_err("%d critical event(s) in flight; try again",
1498 			pending_events);
1499 		errno = -EAGAIN;
1500 		goto resume_hif_noirq;
1501 	}
1502 
1503 	hdd_ctx->suspend_resume_stats.suspends++;
1504 
1505 	hdd_debug("bus_suspend_noirq done");
1506 	return 0;
1507 
1508 resume_hif_noirq:
1509 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
1510 
1511 done:
1512 	hdd_err("suspend_noirq failed, status: %d", errno);
1513 
1514 	return errno;
1515 }
1516 
1517 /**
1518  * wlan_hdd_bus_resume() - handles platform resume
1519  *
1520  * @type: WoW suspend type
1521  *
1522  * Does precondition validation. Ensures that a subsystem restart isn't in
1523  * progress.  Ensures that no load or unload is in progress.  Ensures that
1524  * it has valid pointers for the required contexts.
1525  * Calls into hif to resume the bus operation.
1526  * Calls into wma to handshake with firmware and notify it that the bus is up.
1527  * Calls into ol_txrx for symmetry.
1528  * Failures are treated as catastrophic.
1529  *
1530  * return: error code or 0 for success
1531  */
1532 int wlan_hdd_bus_resume(enum qdf_suspend_type type)
1533 {
1534 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1535 	void *hif_ctx;
1536 	int status;
1537 	QDF_STATUS qdf_status;
1538 	void *dp_soc;
1539 	struct bbm_params param = {0};
1540 
1541 	if (cds_is_driver_recovering())
1542 		return 0;
1543 
1544 	hdd_info("starting bus resume");
1545 
1546 	if (!hdd_ctx)
1547 		return -ENODEV;
1548 
1549 	/* If Wifi is off, return success for system resume */
1550 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1551 		hdd_debug("Driver Module closed; return success");
1552 		return 0;
1553 	}
1554 
1555 	status = wlan_hdd_validate_context(hdd_ctx);
1556 	if (status)
1557 		return status;
1558 
1559 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1560 	if (!hif_ctx)
1561 		return -EINVAL;
1562 
1563 	/*
1564 	 * Add bus votes at the beginning, before making sure there are any
1565 	 * bus transactions from WLAN SOC for TX/RX.
1566 	 */
1567 	param.policy = BBM_NON_PERSISTENT_POLICY;
1568 	param.policy_info.flag = BBM_APPS_RESUME;
1569 	ucfg_dp_bbm_apply_independent_policy(hdd_ctx->psoc, &param);
1570 
1571 	status = hif_bus_resume(hif_ctx);
1572 	if (status) {
1573 		hdd_err("Failed hif bus resume");
1574 		goto out;
1575 	}
1576 
1577 	hif_system_pm_set_state_resuming(hif_ctx);
1578 
1579 	qdf_status = ucfg_pmo_psoc_bus_resume_req(hdd_ctx->psoc,
1580 						  type);
1581 	status = qdf_status_to_os_return(qdf_status);
1582 	if (status) {
1583 		hdd_err("Failed pmo bus resume");
1584 		goto out;
1585 	}
1586 
1587 	qdf_status = ucfg_pmo_core_txrx_resume(hdd_ctx->psoc);
1588 	status = qdf_status_to_os_return(qdf_status);
1589 	if (status) {
1590 		hdd_err("Failed to resume TXRX");
1591 		goto out;
1592 	}
1593 
1594 	hif_system_pm_set_state_on(hif_ctx);
1595 
1596 	status = hif_bus_late_resume(hif_ctx);
1597 	if (status) {
1598 		hdd_err("Failed hif bus late resume");
1599 		goto out;
1600 	}
1601 
1602 	dp_soc = cds_get_context(QDF_MODULE_ID_SOC);
1603 	qdf_status = ucfg_dp_bus_resume(dp_soc, OL_TXRX_PDEV_ID);
1604 	status = qdf_status_to_os_return(qdf_status);
1605 	if (status) {
1606 		hdd_err("Failed cdp bus resume");
1607 		goto out;
1608 	}
1609 
1610 	hdd_info("bus resume succeeded");
1611 	return 0;
1612 
1613 out:
1614 	hif_system_pm_set_state_suspended(hif_ctx);
1615 	if (cds_is_driver_recovering() || cds_is_driver_in_bad_state() ||
1616 	    cds_is_fw_down())
1617 		return 0;
1618 
1619 	if (status != -ETIMEDOUT)
1620 		QDF_BUG(false);
1621 
1622 	return status;
1623 }
1624 
1625 /**
1626  * wlan_hdd_bus_resume_noirq() - handle bus resume no irq
1627  *
1628  * This function is called by the platform driver to do bus
1629  * resume no IRQ before calling resume callback. Call WMA and HIF
1630  * layers to complete the resume_noirq.
1631  *
1632  * Return: 0 for success and negative error code for failure
1633  */
1634 int wlan_hdd_bus_resume_noirq(void)
1635 {
1636 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1637 	void *hif_ctx;
1638 	int status;
1639 	QDF_STATUS qdf_status;
1640 
1641 	hdd_debug("starting bus_resume_noirq");
1642 	if (cds_is_driver_recovering())
1643 		return 0;
1644 
1645 	if (!hdd_ctx)
1646 		return -ENODEV;
1647 
1648 	/* If Wifi is off, return success for system resume */
1649 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1650 		hdd_debug("Driver Module closed return success");
1651 		return 0;
1652 	}
1653 
1654 	status = wlan_hdd_validate_context(hdd_ctx);
1655 	if (status)
1656 		return status;
1657 
1658 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1659 	if (!hif_ctx)
1660 		return -EINVAL;
1661 
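	/*
	 * As with wlan_hdd_bus_resume(), failures on the resume path are
	 * treated as catastrophic, hence the QDF_BUG() asserts below.
	 */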
1662 	qdf_status = ucfg_pmo_psoc_clear_target_wake_up(hdd_ctx->psoc);
1663 	QDF_BUG(!qdf_status);
1664 
1665 	status = hif_bus_resume_noirq(hif_ctx);
1666 	QDF_BUG(!status);
1667 
1668 	hdd_debug("bus_resume_noirq done");
1669 
1670 	return status;
1671 }
1672 
1673 /**
1674  * wlan_hdd_bus_reset_resume() - resume wlan bus after reset
1675  *
1676  * This function is called to tell the driver that the device has been resumed
1677  * and it has also been reset. The driver should redo any necessary
1678  * initialization. It is mainly used by the USB bus
1679  *
1680  * Return: int 0 for success, non zero for failure
1681  */
1682 static int wlan_hdd_bus_reset_resume(void)
1683 {
1684 	struct hif_opaque_softc *scn = cds_get_context(QDF_MODULE_ID_HIF);
1685 
1686 	if (!scn)
1687 		return -EFAULT;
1688 
1689 	return hif_bus_reset_resume(scn);
1690 }
1691 
1692 #ifdef FEATURE_RUNTIME_PM
1693 /**
1694  * hdd_pld_runtime_suspend_cb() - Runtime suspend callback from PMO
1695  *
1696  * Return: 0 on success or error value otherwise
1697  */
1698 static int hdd_pld_runtime_suspend_cb(void)
1699 {
1700 	qdf_device_t qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
1701 
1702 	if (!qdf_dev)
1703 		return -EINVAL;
1704 
1705 	return pld_auto_suspend(qdf_dev->dev);
1706 }
1707 
1708 /**
1709  * wlan_hdd_runtime_suspend() - suspend the wlan bus without apps suspend
1710  * @dev: Driver device instance
1711  *
1712  * Each layer is responsible for its own suspend actions.  wma_runtime_suspend
1713  * takes care of the parts of the 802.11 suspend that we want to do for runtime
1714  * suspend.
1715  *
1716  * Return: 0 or errno
1717  */
1718 static int wlan_hdd_runtime_suspend(struct device *dev)
1719 {
1720 	int err;
1721 	QDF_STATUS status;
1722 	struct hdd_context *hdd_ctx;
1723 	qdf_time_t delta;
1724 
1725 	hdd_debug("Starting runtime suspend");
1726 
1727 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1728 	err = wlan_hdd_validate_context(hdd_ctx);
1729 	if (err)
1730 		return err;
1731 
1732 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1733 		hdd_debug("Driver module closed skipping runtime suspend");
1734 		return 0;
1735 	}
1736 
1737 	if (!hdd_is_runtime_pm_enabled(hdd_ctx))
1738 		return 0;
1739 
1740 	if (ucfg_scan_get_pdev_status(hdd_ctx->pdev) !=
1741 	    SCAN_NOT_IN_PROGRESS) {
1742 		hdd_debug("Scan in progress, ignore runtime suspend");
1743 		return -EBUSY;
1744 	}
1745 
1746 	if (ucfg_ipa_is_tx_pending(hdd_ctx->pdev)) {
1747 		hdd_debug("IPA TX comps pending, ignore rtpm suspend");
1748 		return -EBUSY;
1749 	}
1750 
1751 	if (hdd_ctx->config->runtime_pm == hdd_runtime_pm_dynamic &&
1752 	    wlan_hdd_is_cpu_pm_qos_in_progress(hdd_ctx)) {
1753 		hdd_debug("PM QoS Latency constraint, ignore runtime suspend");
1754 		return -EBUSY;
1755 	}
1756 
1757 	status = ucfg_pmo_psoc_bus_runtime_suspend(hdd_ctx->psoc,
1758 						   hdd_pld_runtime_suspend_cb);
1759 	err = qdf_status_to_os_return(status);
1760 
1761 	hdd_ctx->runtime_suspend_done_time_stamp =
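	/*
	 * Record the suspend completion time; the delta from the last
	 * runtime-resume start gives the total time the SoC stayed out of
	 * runtime suspend (the "cxpc up time" reported below).
	 */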
1762 						qdf_get_log_timestamp_usecs();
1763 	delta = hdd_ctx->runtime_suspend_done_time_stamp -
1764 		hdd_ctx->runtime_resume_start_time_stamp;
1765 
1766 	if (hdd_ctx->runtime_suspend_done_time_stamp >
1767 	   hdd_ctx->runtime_resume_start_time_stamp)
1768 		hdd_debug("Runtime suspend done result: %d total cxpc up time %lu microseconds",
1769 			  err, delta);
1770 
1771 	if (status == QDF_STATUS_SUCCESS)
1772 		ucfg_dp_bus_bw_compute_timer_stop(hdd_ctx->psoc);
1773 
1774 	hdd_debug("Runtime suspend done result: %d", err);
1775 
1776 	return err;
1777 }
1778 
1779 /**
1780  * hdd_pld_runtime_resume_cb() - Runtime resume callback from PMO
1781  *
1782  * Return: 0 on success or error value otherwise
1783  */
1784 static int hdd_pld_runtime_resume_cb(void)
1785 {
1786 	qdf_device_t qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
1787 
1788 	if (!qdf_dev)
1789 		return -EINVAL;
1790 
1791 	return pld_auto_resume(qdf_dev->dev);
1792 }
1793 
1794 /**
1795  * wlan_hdd_runtime_resume() - resume the wlan bus from runtime suspend
1796  * @dev: Driver device instance
1797  *
1798  * Sets the runtime pm state and coordinates resume between hif wma and
1799  * ol_txrx.
1800  *
1801  * Return: success since failure is a bug
1802  */
1803 static int wlan_hdd_runtime_resume(struct device *dev)
1804 {
1805 	struct hdd_context *hdd_ctx;
1806 	QDF_STATUS status;
1807 	qdf_time_t delta;
1808 
1809 	hdd_debug("Starting runtime resume");
1810 
1811 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1812 
1813 	/*
1814 	 * In __hdd_soc_remove, runtime_sync_resume is called before the
1815 	 * unload_in_progress flag is set. wlan_hdd_validate_context would
1816 	 * fail the resume while a driver load/unload is in progress, so
1817 	 * skip it here and only check whether SSR is in progress.
1818 	 */
1819 	if (!hdd_ctx)
1820 		return 0;
1821 
1822 	if (cds_is_driver_recovering()) {
1823 		hdd_debug("Recovery in progress, state:0x%x",
1824 			  cds_get_driver_state());
1825 		return 0;
1826 	}
1827 
1828 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1829 		hdd_debug("Driver module closed skipping runtime resume");
1830 		return 0;
1831 	}
1832 
1833 	if (!hdd_is_runtime_pm_enabled(hdd_ctx))
1834 		return 0;
1835 
1836 	hdd_ctx->runtime_resume_start_time_stamp =
1837 						qdf_get_log_timestamp_usecs();
1838 	delta = hdd_ctx->runtime_resume_start_time_stamp -
1839 		hdd_ctx->runtime_suspend_done_time_stamp;
1840 	hdd_debug("Starting runtime resume total cxpc down time %lu microseconds",
1841 		  delta);
1842 
1843 	status = ucfg_pmo_psoc_bus_runtime_resume(hdd_ctx->psoc,
1844 						  hdd_pld_runtime_resume_cb);
1845 	if (status != QDF_STATUS_SUCCESS) {
1846 		hdd_err("PMO Runtime resume failed: %d", status);
1847 	} else {
1848 		if (policy_mgr_get_connection_count(hdd_ctx->psoc))
1849 			ucfg_dp_bus_bw_compute_timer_try_start(hdd_ctx->psoc);
1850 	}
1851 
1852 	hdd_debug("Runtime resume done");
1853 
1854 	return 0;
1855 }
1856 #endif
1857 
1858 /**
1859  * wlan_hdd_pld_probe() - probe function registered to PLD
1860  * @dev: device
1861  * @pld_bus_type: PLD bus type
1862  * @bdev: bus device structure
1863  * @id: bus identifier for shared busses
1864  *
1865  * Return: 0 on success
1866  */
1867 static int wlan_hdd_pld_probe(struct device *dev,
1868 			      enum pld_bus_type pld_bus_type,
1869 			      void *bdev,
1870 			      void *id)
1871 {
1872 	enum qdf_bus_type bus_type = to_bus_type(pld_bus_type);
1873 
1874 	if (bus_type == QDF_BUS_TYPE_NONE) {
1875 		hdd_err("Invalid bus type %d->%d", pld_bus_type, bus_type);
1876 		return -EINVAL;
1877 	}
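
	/*
	 * Register the firmware/host hang event buffer so it is included in
	 * the SSR driver dump collected on recovery.
	 */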
1878 	qdf_ssr_driver_dump_register_region("hang_event_data",
1879 					    g_fw_host_hang_event,
1880 					    sizeof(g_fw_host_hang_event));
1881 
1882 	return hdd_soc_probe(dev, bdev, id, bus_type);
1883 }
1884 
1885 /**
1886  * wlan_hdd_pld_remove() - remove function registered to PLD
1887  * @dev: device to remove
1888  * @bus_type: PLD bus type
1889  *
1890  * Return: void
1891  */
1892 static void wlan_hdd_pld_remove(struct device *dev, enum pld_bus_type bus_type)
1893 {
1894 	hdd_enter();
1895 
1896 	hdd_soc_remove(dev);
1897 	qdf_ssr_driver_dump_unregister_region("hang_event_data");
1898 
1899 	hdd_exit();
1900 }
1901 
1902 static void hdd_soc_idle_shutdown_lock(struct device *dev)
1903 {
1904 	hdd_prevent_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_IDLE_SHUTDOWN);
1905 
1906 	hdd_abort_system_suspend(dev);
1907 }
1908 
1909 static void hdd_soc_idle_shutdown_unlock(void)
1910 {
1911 	hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_IDLE_SHUTDOWN);
1912 }
1913 
1914 /**
1915  * wlan_hdd_pld_idle_shutdown() - wifi module idle shutdown, invoked after
1916  *                                the interface inactivity timeout expires
1917  * @dev: device to remove
1918  * @bus_type: PLD bus type
1919  *
1920  * Return: 0 for success and negative error code for failure
1921  */
1922 static int wlan_hdd_pld_idle_shutdown(struct device *dev,
1923 				       enum pld_bus_type bus_type)
1924 {
1925 	int ret;
1926 
1927 	hdd_soc_idle_shutdown_lock(dev);
1928 
1929 	ret = hdd_psoc_idle_shutdown(dev);
1930 
1931 	hdd_soc_idle_shutdown_unlock();
1932 
1933 	return ret;
1934 }
1935 
1936 /**
1937  * wlan_hdd_pld_idle_restart() - wifi module idle restart after idle shutdown
1938  * @dev: device to remove
1939  * @bus_type: PLD bus type
1940  *
1941  * Return: 0 for success and negative error code for failure
1942  */
1943 static int wlan_hdd_pld_idle_restart(struct device *dev,
1944 				      enum pld_bus_type bus_type)
1945 {
1946 	return hdd_psoc_idle_restart(dev);
1947 }
1948 
1949 /**
1950  * wlan_hdd_pld_shutdown() - shutdown function registered to PLD
1951  * @dev: device to shutdown
1952  * @bus_type: PLD bus type
1953  *
1954  * Return: void
1955  */
1956 static void wlan_hdd_pld_shutdown(struct device *dev,
1957 				  enum pld_bus_type bus_type)
1958 {
1959 	hdd_enter();
1960 
1961 	hdd_soc_recovery_shutdown(dev);
1962 
1963 	hdd_exit();
1964 }
1965 
1966 /**
1967  * wlan_hdd_pld_reinit() - reinit function registered to PLD
1968  * @dev: device
1969  * @pld_bus_type: PLD bus type
1970  * @bdev: bus device structure
1971  * @id: bus identifier for shared buses
1972  *
1973  * Return: 0 on success
1974  */
1975 static int wlan_hdd_pld_reinit(struct device *dev,
1976 			       enum pld_bus_type pld_bus_type,
1977 			       void *bdev,
1978 			       void *id)
1979 {
1980 	enum qdf_bus_type bus_type = to_bus_type(pld_bus_type);
1981 
1982 	if (bus_type == QDF_BUS_TYPE_NONE) {
1983 		hdd_err("Invalid bus type %d->%d", pld_bus_type, bus_type);
1984 		return -EINVAL;
1985 	}
1986 
1987 	return hdd_soc_recovery_reinit(dev, bdev, id, bus_type);
1988 }
1989 
1990 /**
1991  * wlan_hdd_pld_crash_shutdown() - crash_shutdown function registered to PLD
1992  * @dev: device
1993  * @bus_type: PLD bus type
1994  *
1995  * Return: void
1996  */
1997 static void wlan_hdd_pld_crash_shutdown(struct device *dev,
1998 			     enum pld_bus_type bus_type)
1999 {
2000 	wlan_hdd_crash_shutdown();
2001 }
2002 
2003 /**
2004  * wlan_hdd_pld_suspend() - suspend function registered to PLD
2005  * @dev: device
2006  * @bus_type: PLD bus type
2007  * @state: PM state
2008  *
2009  * Return: 0 on success
2010  */
2011 static int wlan_hdd_pld_suspend(struct device *dev,
2012 				enum pld_bus_type bus_type,
2013 				pm_message_t state)
2015 {
2016 	struct osif_psoc_sync *psoc_sync;
2017 	int errno;
2018 	struct hdd_context *hdd_ctx;
2019 
2020 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
2021 	if (!hdd_ctx)
2022 		return -ENODEV;
2023 
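	/* If the HDD context is not in a sane state, still allow the suspend
	 * to proceed when the platform is entering a low power mode (deep
	 * sleep/hibernate); otherwise fail the suspend request.
	 */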
2024 	errno = wlan_hdd_validate_context(hdd_ctx);
2025 	if (0 != errno) {
2026 		if (pld_is_low_power_mode(hdd_ctx->parent_dev))
2027 			hdd_debug("low power mode (Deep Sleep/Hibernate)");
2028 		else
2029 			return errno;
2030 	}
2031 
2032 	/*
2033 	 * Flush any pending idle shutdown before the ops start. This is done
2034 	 * here to avoid a deadlock, since idle shutdown waits for the DSC ops
2035 	 * to complete.
2036 	 */
2037 	hdd_psoc_idle_timer_stop(hdd_ctx);
2038 
2040 	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
2041 	if (errno)
2042 		return errno;
2043 
2044 	errno = wlan_hdd_bus_suspend();
2045 
2046 	osif_psoc_sync_op_stop(psoc_sync);
2047 
2048 	return errno;
2049 }
2050 
2051 /**
2052  * wlan_hdd_pld_resume() - resume function registered to PLD
2053  * @dev: device
2054  * @bus_type: PLD bus type
2055  *
2056  * Return: 0 on success
2057  */
2058 static int wlan_hdd_pld_resume(struct device *dev,
2059 		    enum pld_bus_type bus_type)
2060 {
2061 	struct osif_psoc_sync *psoc_sync;
2062 	int errno;
2063 
2064 	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
2065 	if (errno)
2066 		return errno;
2067 
2068 	errno = wlan_hdd_bus_resume(QDF_SYSTEM_SUSPEND);
2069 
2070 	osif_psoc_sync_op_stop(psoc_sync);
2071 
2072 	return errno;
2073 }
2074 
2075 /**
2076  * wlan_hdd_pld_suspend_noirq() - handle suspend no irq
2077  * @dev: device
2078  * @bus_type: PLD bus type
2079  *
2080  * Complete the actions started by suspend().  Carry out any
2081  * additional operations required for suspending the device that might be
2082  * racing with its driver's interrupt handler, which is guaranteed not to
2083  * run while suspend_noirq() is being executed. Make sure to resume the
2084  * device if FW sent an initial wake up message and expects APPS to wake up.
2085  *
2086  * Return: 0 on success
2087  */
2088 static int wlan_hdd_pld_suspend_noirq(struct device *dev,
2089 				      enum pld_bus_type bus_type)
2090 {
2091 	struct osif_psoc_sync *psoc_sync;
2092 	int errno;
2093 
2094 	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
2095 	if (errno)
2096 		return errno;
2097 
2098 	errno = wlan_hdd_bus_suspend_noirq();
2099 
2100 	osif_psoc_sync_op_stop(psoc_sync);
2101 
2102 	return errno;
2103 }
2104 
2105 /**
2106  * wlan_hdd_pld_resume_noirq() - handle resume no irq
2107  * @dev: device
2108  * @bus_type: PLD bus type
2109  *
2110  * Prepare for the execution of resume() by carrying out any
2111  * operations required for resuming the device that might be racing with
2112  * its driver's interrupt handler, which is guaranteed not to run while
2113  * resume_noirq() is being executed. Make sure to clear target initial
2114  * wake up request such that next suspend can happen cleanly.
2115  *
2116  * Return: 0 on success
2117  */
2118 static int wlan_hdd_pld_resume_noirq(struct device *dev,
2119 				     enum pld_bus_type bus_type)
2120 {
2121 	struct osif_psoc_sync *psoc_sync;
2122 	int errno;
2123 
2124 	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
2125 	if (errno)
2126 		return errno;
2127 
2128 	errno = wlan_hdd_bus_resume_noirq();
2129 
2130 	osif_psoc_sync_op_stop(psoc_sync);
2131 
2132 	return errno;
2133 }
2134 
2135 /**
2136  * wlan_hdd_pld_reset_resume() - reset resume function registered to PLD
2137  * @dev: device
2138  * @bus_type: PLD bus type
2139  *
2140  * Return: 0 on success
2141  */
2142 static int wlan_hdd_pld_reset_resume(struct device *dev,
2143 				     enum pld_bus_type bus_type)
2144 {
2145 	struct osif_psoc_sync *psoc_sync;
2146 	int errno;
2147 
2148 	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
2149 	if (errno)
2150 		return errno;
2151 
2152 	errno = wlan_hdd_bus_reset_resume();
2153 
2154 	osif_psoc_sync_op_stop(psoc_sync);
2155 
2156 	return errno;
2157 }
2158 
2159 /**
2160  * wlan_hdd_pld_notify_handler() - notify_handler function registered to PLD
2161  * @dev: device
2162  * @bus_type: PLD bus type
2163  * @state: Modem power state
2164  *
2165  * Return: void
2166  */
2167 static void wlan_hdd_pld_notify_handler(struct device *dev,
2168 			     enum pld_bus_type bus_type,
2169 			     int state)
2170 {
2171 	wlan_hdd_notify_handler(state);
2172 }
2173 
2174 /**
2175  * wlan_hdd_pld_uevent() - platform uevent handler
2176  * @dev: device on which the uevent occurred
2177  * @event_data: uevent parameters
2178  *
2179  * Return: None
2180  */
2181 static void
2182 wlan_hdd_pld_uevent(struct device *dev, struct pld_uevent_data *event_data)
2183 {
2184 	struct qdf_notifer_data hang_evt_data;
2185 	void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
2186 	enum qdf_hang_reason reason = QDF_REASON_UNSPECIFIED;
2187 	uint8_t bus_type;
2188 
2189 	bus_type = pld_get_bus_type(dev);
2190 
2191 	switch (event_data->uevent) {
2192 	case PLD_SMMU_FAULT:
2193 		qdf_set_smmu_fault_state(true);
2194 		hdd_debug("Received smmu fault indication");
2195 		break;
2196 	case PLD_FW_DOWN:
2197 		hdd_debug("Received firmware down indication");
2198 		hdd_dump_log_buffer(NULL, NULL);
2199 		cds_set_target_ready(false);
2200 		cds_set_recovery_in_progress(true);
2201 		hdd_init_start_completion();
2202 
2203 		/* Notify external threads currently waiting on firmware
2204 		 * by forcefully completing waiting events with a "reset"
2205 		 * status. This will cause the event to fail early instead
2206 		 * of timing out.
2207 		 */
2208 		qdf_complete_wait_events();
2209 
2210 		/*
2211 		 * On some platforms the uevent is delivered to the driver
2212 		 * in process context, where it is safe to complete the SSR
2213 		 * cleanup activities inline. On other platforms it arrives
2214 		 * in interrupt context, where performing the cleanup is not
2215 		 * ideal, so defer it to hdd_soc_recovery_shutdown().
2218 		 */
2219 		if (qdf_in_interrupt() || bus_type == PLD_BUS_TYPE_PCIE)
2220 			break;
2221 
2222 		hdd_soc_recovery_cleanup();
2223 		qdf_atomic_set(&is_recovery_cleanup_done, 1);
2224 
2225 		break;
2226 	case PLD_FW_HANG_EVENT:
2227 		hdd_info("Received firmware hang event");
2228 		cds_get_recovery_reason(&reason);
2229 
2230 		if ((reason == QDF_REASON_UNSPECIFIED) && hif_ctx) {
2231 			hif_display_ctrl_traffic_pipes_state(hif_ctx);
2232 			hif_display_latest_desc_hist(hif_ctx);
2233 		}
2234 
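		/* Assemble the hang event report: host data is filled in at
		 * the start of the buffer by the hang event notifier chain,
		 * and the firmware payload is then copied in at
		 * QDF_WLAN_HANG_FW_OFFSET, capped at half the buffer size.
		 */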
2235 		qdf_mem_zero(&g_fw_host_hang_event, QDF_HANG_EVENT_DATA_SIZE);
2236 		hang_evt_data.hang_data = g_fw_host_hang_event;
2237 		hang_evt_data.offset = 0;
2238 		qdf_hang_event_notifier_call(reason, &hang_evt_data);
2239 		hang_evt_data.offset = QDF_WLAN_HANG_FW_OFFSET;
2240 		if (event_data->hang_data.hang_event_data_len >=
2241 		    QDF_HANG_EVENT_DATA_SIZE / 2)
2242 			event_data->hang_data.hang_event_data_len =
2243 						QDF_HANG_EVENT_DATA_SIZE / 2;
2244 
2245 		if (event_data->hang_data.hang_event_data_len)
2246 			qdf_mem_copy((hang_evt_data.hang_data +
2247 				      hang_evt_data.offset),
2248 				     event_data->hang_data.hang_event_data,
2249 				     event_data->hang_data.hang_event_data_len);
2250 
2251 		hdd_send_hang_data(hang_evt_data.hang_data,
2252 				   QDF_HANG_EVENT_DATA_SIZE);
2253 		break;
2254 	case PLD_BUS_EVENT:
2255 		hdd_debug("Bus event received");
2256 
2257 		/* Currently only the link_down event is handled.
2258 		 * Extend the event buffer to carry more bus info
2259 		 * if needed later.
2260 		 */
2261 		if (event_data->bus_data.etype == PLD_BUS_EVENT_PCIE_LINK_DOWN)
2262 			host_log_device_status(WLAN_STATUS_BUS_EXCEPTION);
2263 		break;
2264 	case PLD_SYS_REBOOT:
2265 		hdd_info("Received system reboot");
2266 		cds_set_sys_rebooting();
2267 		break;
2268 	default:
2269 		/* other events intentionally not handled */
2270 		hdd_debug("Received uevent %d", event_data->uevent);
2271 		break;
2272 	}
2274 }
2275 
2276 #ifdef WLAN_FEATURE_SSR_DRIVER_DUMP
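/**
 * wlan_hdd_pld_collect_driver_dump() - collect registered driver dump regions
 * @dev: device
 * @bus_type: PLD bus type
 * @input_array: array to be populated with the registered dump regions
 * @num_entries_loaded: set to the number of entries populated in @input_array
 *
 * Return: 0 on success, negative error code on failure
 */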
2277 static int
2278 wlan_hdd_pld_collect_driver_dump(struct device *dev,
2279 				 enum pld_bus_type bus_type,
2280 				 struct cnss_ssr_driver_dump_entry *input_array,
2281 				 size_t *num_entries_loaded)
2282 {
2283 	QDF_STATUS status;
2284 
2285 	status =  qdf_ssr_driver_dump_retrieve_regions(input_array,
2286 						       num_entries_loaded);
2287 	return qdf_status_to_os_return(status);
2288 }
2289 #endif
2290 
2291 #ifdef FEATURE_RUNTIME_PM
2292 /**
2293  * wlan_hdd_pld_runtime_suspend() - runtime suspend function registered to PLD
2294  * @dev: device
2295  * @bus_type: PLD bus type
2296  *
2297  * Return: 0 on success
2298  */
2299 static int wlan_hdd_pld_runtime_suspend(struct device *dev,
2300 					enum pld_bus_type bus_type)
2301 {
2302 	struct osif_psoc_sync *psoc_sync;
2303 	int errno;
2304 
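	/* Take the PSOC op protection so that runtime suspend does not race
	 * with driver unload or idle shutdown (see the matching note in
	 * wlan_hdd_pld_runtime_resume()).
	 */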
2305 	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
2306 	if (errno)
2307 		goto out;
2308 
2309 	errno = wlan_hdd_runtime_suspend(dev);
2310 
2311 	osif_psoc_sync_op_stop(psoc_sync);
2312 
2313 out:
2314 	/* If any errno other than -EAGAIN or -EBUSY is returned to the
2315 	 * kernel, it is treated as a critical issue: all subsequent runtime
2316 	 * PM API calls will fail and runtime PM stops working. Such a case
2317 	 * has been seen during SSR, so remap other errors to -EAGAIN.
2318 	 */
2319 	if (errno && errno != -EAGAIN && errno != -EBUSY)
2320 		errno = -EAGAIN;
2321 	return errno;
2322 }
2323 
2324 /**
2325  * wlan_hdd_pld_runtime_resume() - runtime resume function registered to PLD
2326  * @dev: device
2327  * @bus_type: PLD bus type
2328  *
2329  * Return: 0 on success
2330  */
2331 static int wlan_hdd_pld_runtime_resume(struct device *dev,
2332 				       enum pld_bus_type bus_type)
2333 {
2334 	/* Unlike suspend, runtime PM resume can happen synchronously
2335 	 * during driver shutdown or idle shutdown, so the PSOC sync
2336 	 * protection is not taken here.
2337 	 */
2338 	return wlan_hdd_runtime_resume(dev);
2339 }
2340 #endif
2341 
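/* Callback table handed to the PLD layer via pld_register_driver() in
 * wlan_hdd_register_driver() below.
 */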
2342 struct pld_driver_ops wlan_drv_ops = {
2343 	.probe      = wlan_hdd_pld_probe,
2344 	.remove     = wlan_hdd_pld_remove,
2345 	.idle_shutdown = wlan_hdd_pld_idle_shutdown,
2346 	.idle_restart = wlan_hdd_pld_idle_restart,
2347 	.shutdown   = wlan_hdd_pld_shutdown,
2348 	.reinit     = wlan_hdd_pld_reinit,
2349 	.crash_shutdown = wlan_hdd_pld_crash_shutdown,
2350 	.suspend    = wlan_hdd_pld_suspend,
2351 	.resume     = wlan_hdd_pld_resume,
2352 	.suspend_noirq = wlan_hdd_pld_suspend_noirq,
2353 	.resume_noirq  = wlan_hdd_pld_resume_noirq,
2354 	.reset_resume = wlan_hdd_pld_reset_resume,
2355 	.modem_status = wlan_hdd_pld_notify_handler,
2356 	.uevent = wlan_hdd_pld_uevent,
2357 #ifdef WLAN_FEATURE_SSR_DRIVER_DUMP
2358 	.collect_driver_dump = wlan_hdd_pld_collect_driver_dump,
2359 #endif
2360 #ifdef FEATURE_RUNTIME_PM
2361 	.runtime_suspend = wlan_hdd_pld_runtime_suspend,
2362 	.runtime_resume = wlan_hdd_pld_runtime_resume,
2363 #endif
2364 	.set_curr_therm_cdev_state = wlan_hdd_pld_set_thermal_mitigation,
2365 };
2366 
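/* Hand the wlan_drv_ops callback table above to the PLD layer; the matching
 * wlan_hdd_unregister_driver() tears the registration down again.
 */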
2367 int wlan_hdd_register_driver(void)
2368 {
2369 	return pld_register_driver(&wlan_drv_ops);
2370 }
2371 
2372 void wlan_hdd_unregister_driver(void)
2373 {
2374 	pld_unregister_driver();
2375 }
2376