1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <linux/platform_device.h>
21 #include <linux/pci.h>
22 #include "cds_api.h"
23 #include "qdf_status.h"
24 #include "qdf_lock.h"
25 #include "cds_sched.h"
26 #include "osdep.h"
27 #include "hif.h"
28 #include "htc.h"
29 #include "epping_main.h"
30 #include "osif_sync.h"
31 #include "wlan_hdd_main.h"
32 #include "wlan_hdd_power.h"
33 #include "wlan_logging_sock_svc.h"
34 #include "wma_api.h"
35 #include "wlan_hdd_napi.h"
36 #include "wlan_policy_mgr_api.h"
37 #include "qwlan_version.h"
38 #include "bmi.h"
39 #include <ol_defines.h>
40 #include "cdp_txrx_bus.h"
41 #include "cdp_txrx_misc.h"
42 #include "pld_common.h"
43 #include "wlan_hdd_driver_ops.h"
44 #include "wlan_ipa_ucfg_api.h"
45 #include "wlan_hdd_debugfs.h"
46 #include "cfg_ucfg_api.h"
47 #include <linux/suspend.h>
48 #include <qdf_notifier.h>
49 #include <qdf_hang_event_notifier.h>
50 #include "wlan_hdd_thermal.h"
51 #include "wlan_dp_ucfg_api.h"
52 #include "qdf_ssr_driver_dump.h"
53 #include "wlan_hdd_ioctl.h"
54 
55 #ifdef MODULE
56 #ifdef WLAN_WEAR_CHIPSET
57 #define WLAN_MODULE_NAME  "wlan"
58 #else
59 #define WLAN_MODULE_NAME  module_name(THIS_MODULE)
60 #endif
61 #else
62 #define WLAN_MODULE_NAME  "wlan"
63 #endif
64 
65 #define SSR_MAX_FAIL_CNT 3
66 static uint8_t re_init_fail_cnt, probe_fail_cnt;
67 
68 /* An atomic flag to check if SSR cleanup has been done or not */
69 static qdf_atomic_t is_recovery_cleanup_done;
70 
71 /* firmware/host hang event data */
72 static uint8_t g_fw_host_hang_event[QDF_HANG_EVENT_DATA_SIZE];
73 
74 /*
75  * In the BMI phase we send only a small chunk (256 bytes) of the FW image at
76  * a time, and wait for the completion interrupt to start the next transfer.
77  * During this phase, the KRAIT enters IDLE/StandAlone(SA) Power Save(PS).
78  * The delay incurred in resuming from IDLE/SA PS is huge during driver load.
79  * So prevent APPS IDLE/SA PS during driver load to reduce interrupt latency.
80  */
81 
82 static inline void hdd_request_pm_qos(struct device *dev, int val)
83 {
84 	pld_request_pm_qos(dev, val);
85 }
86 
87 static inline void hdd_remove_pm_qos(struct device *dev)
88 {
89 	pld_remove_pm_qos(dev);
90 }
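/*
 * Illustrative sketch (not extra driver logic): these wrappers are used by
 * hdd_soc_load_lock()/hdd_soc_load_unlock() further below to bracket the
 * load path, roughly:
 *
 *	hdd_request_pm_qos(dev, DISABLE_KRAIT_IDLE_PS_VAL);
 *	... BMI download / probe work ...
 *	hdd_remove_pm_qos(dev);
 */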
91 
92 /**
93  * hdd_get_bandwidth_level() - get current bandwidth level
94  * @data: Context
95  *
96  * Return: current bandwidth level
97  */
98 static int hdd_get_bandwidth_level(void *data)
99 {
100 	int ret = PLD_BUS_WIDTH_NONE;
101 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
102 
103 	if (hdd_ctx)
104 		ret = ucfg_dp_get_current_throughput_level(hdd_ctx->psoc);
105 
106 	return ret;
107 }
108 
109 #ifdef DP_MEM_PRE_ALLOC
110 
111 /**
112  * hdd_get_consistent_mem_unaligned() - API to get consistent unaligned mem
113  * @size: Size of memory required
114  * @paddr: Pointer to paddr to be filled in by API
115  * @ring_type: Pointer to ring type for which consistent memory is needed
116  *
117  * Return: Virtual address of consistent memory on success, else null
118  */
119 static
120 void *hdd_get_consistent_mem_unaligned(size_t size,
121 				       qdf_dma_addr_t *paddr,
122 				       uint32_t ring_type)
123 {
124 	return ucfg_dp_prealloc_get_consistent_mem_unaligned(size, paddr,
125 							     ring_type);
126 }
127 
128 /**
129  * hdd_put_consistent_mem_unaligned() - API to put consistent unaligned mem
130  * @vaddr: Virtual address of memory
131  *
132  * Return: None
133  */
134 static
135 void hdd_put_consistent_mem_unaligned(void *vaddr)
136 {
137 	ucfg_dp_prealloc_put_consistent_mem_unaligned(vaddr);
138 }
139 
140 /**
141  * hdd_dp_prealloc_get_multi_pages() - gets pre-alloc DP multi-pages memory
142  * @desc_type: descriptor type
143  * @elem_size: single element size
144  * @elem_num: total number of elements to be allocated
145  * @pages: multi page information storage
146  * @cacheable: coherent memory or cacheable memory
147  *
148  * Return: None
149  */
150 static
151 void hdd_dp_prealloc_get_multi_pages(uint32_t desc_type, qdf_size_t elem_size,
152 				     uint16_t elem_num,
153 				     struct qdf_mem_multi_page_t *pages,
154 				     bool cacheable)
155 {
156 	ucfg_dp_prealloc_get_multi_pages(desc_type, elem_size, elem_num, pages,
157 					 cacheable);
158 }
159 
160 /**
161  * hdd_dp_prealloc_put_multi_pages() - puts back pre-alloc DP multi-pages memory
162  * @desc_type: descriptor type
163  * @pages: multi page information storage
164  *
165  * Return: None
166  */
167 static
168 void hdd_dp_prealloc_put_multi_pages(uint32_t desc_type,
169 				     struct qdf_mem_multi_page_t *pages)
170 {
171 	ucfg_dp_prealloc_put_multi_pages(desc_type, pages);
172 }
173 #else
174 static
175 void *hdd_get_consistent_mem_unaligned(size_t size,
176 				       qdf_dma_addr_t *paddr,
177 				       uint32_t ring_type)
178 {
179 	hdd_err_rl("prealloc not supported!");
180 
181 	return NULL;
182 }
183 
184 static
185 void hdd_put_consistent_mem_unaligned(void *vaddr)
186 {
187 	hdd_err_rl("prealloc not supported!");
188 }
189 
190 static inline
191 void hdd_dp_prealloc_get_multi_pages(uint32_t desc_type, qdf_size_t elem_size,
192 				     uint16_t elem_num,
193 				     struct qdf_mem_multi_page_t *pages,
194 				     bool cacheable)
195 {
196 }
197 
198 static inline
199 void hdd_dp_prealloc_put_multi_pages(uint32_t desc_type,
200 				     struct qdf_mem_multi_page_t *pages)
201 {
202 }
203 #endif
204 
205 /**
206  * hdd_is_driver_unloading() - API to query if driver is unloading
207  * @data: Private Data
208  *
209  * Return: True/False
210  */
211 static bool hdd_is_driver_unloading(void *data)
212 {
213 	return cds_is_driver_unloading();
214 }
215 
216 /**
217  * hdd_is_load_or_unload_in_progress() - API to query if driver is
218  * loading/unloading
219  * @data: Private Data
220  *
221  * Return: bool
222  */
223 static bool hdd_is_load_or_unload_in_progress(void *data)
224 {
225 	return cds_is_load_or_unload_in_progress();
226 }
227 
228 /**
229  * hdd_is_recovery_in_progress() - API to query if recovery in progress
230  * @data: Private Data
231  *
232  * Return: bool
233  */
234 static bool hdd_is_recovery_in_progress(void *data)
235 {
236 	return cds_is_driver_recovering();
237 }
238 
239 /**
240  * hdd_is_target_ready() - API to query if target is in ready state
241  * @data: Private Data
242  *
243  * Return: bool
244  */
245 static bool hdd_is_target_ready(void *data)
246 {
247 	return cds_is_target_ready();
248 }
249 
250 /**
251  * hdd_send_driver_ready_to_user() - API to indicate driver ready
252  * to userspace.
253  */
254 static void hdd_send_driver_ready_to_user(void)
255 {
256 	struct sk_buff *nl_event;
257 	struct hdd_context *hdd_ctx;
258 	int flags = cds_get_gfp_flags();
259 
260 	hdd_enter();
261 
262 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
263 	if (!hdd_ctx) {
264 		hdd_err("HDD Context is NULL");
265 		return;
266 	}
267 
268 	nl_event = wlan_cfg80211_vendor_event_alloc(
269 			hdd_ctx->wiphy, NULL, 0,
270 			QCA_NL80211_VENDOR_SUBCMD_DRIVER_READY_INDEX,
271 			flags);
272 	if (!nl_event) {
273 		hdd_err("wlan_cfg80211_vendor_event_alloc failed");
274 		return;
275 	}
276 
277 	wlan_cfg80211_vendor_event(nl_event, flags);
278 }
279 
280 #ifdef FEATURE_WLAN_DIAG_SUPPORT
281 /**
282  * hdd_wlan_ssr_shutdown_event()- send ssr shutdown state
283  *
284  * This function sends the SSR shutdown state diag event
285  *
286  * Return: void.
287  */
288 static void hdd_wlan_ssr_shutdown_event(void)
289 {
290 	WLAN_HOST_DIAG_EVENT_DEF(ssr_shutdown,
291 				 struct host_event_wlan_ssr_shutdown);
292 	qdf_mem_zero(&ssr_shutdown, sizeof(ssr_shutdown));
293 	ssr_shutdown.status = SSR_SUB_SYSTEM_SHUTDOWN;
294 	WLAN_HOST_DIAG_EVENT_REPORT(&ssr_shutdown,
295 					EVENT_WLAN_SSR_SHUTDOWN_SUBSYSTEM);
296 }
297 #else
298 static inline void hdd_wlan_ssr_shutdown_event(void) { }
299 #endif
300 
301 /**
302  * hdd_psoc_shutdown_notify() - notify the various interested parties that the
303  *	soc is starting recovery shutdown
304  * @hdd_ctx: the HDD context corresponding to the soc undergoing shutdown
305  *
306  * Return: None
307  */
308 static void hdd_psoc_shutdown_notify(struct hdd_context *hdd_ctx)
309 {
310 	hdd_enter();
311 	wlan_cfg80211_cleanup_scan_queue(hdd_ctx->pdev, NULL);
312 
313 	cds_shutdown_notifier_call();
314 	cds_shutdown_notifier_purge();
315 
316 	hdd_wlan_ssr_shutdown_event();
317 	hdd_exit();
318 }
319 
320 /**
321  * hdd_soc_recovery_cleanup() - Perform SSR related cleanup activities.
322  *
323  * This function will perform cleanup activities related to when driver
324  * undergoes SSR. Activities include stopping idle timer and invoking shutdown
325  * notifier.
326  *
327  * Return: None
328  */
329 static void hdd_soc_recovery_cleanup(void)
330 {
331 	struct hdd_context *hdd_ctx;
332 
333 	hdd_enter();
334 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
335 	if (!hdd_ctx)
336 		return;
337 
338 	/* cancel/flush any pending/active idle shutdown work */
339 	hdd_psoc_idle_timer_stop(hdd_ctx);
340 	ucfg_dp_bus_bw_compute_timer_stop(hdd_ctx->psoc);
341 
342 	/* nothing to do if the soc is already unloaded */
343 	if (hdd_ctx->driver_status == DRIVER_MODULES_CLOSED) {
344 		hdd_info("Driver modules are already closed");
345 		return;
346 	}
347 
348 	if (cds_is_load_or_unload_in_progress()) {
349 		hdd_info("Load/unload in progress, ignore SSR shutdown");
350 		return;
351 	}
352 
353 	hdd_psoc_shutdown_notify(hdd_ctx);
354 	hdd_exit();
355 }
356 
357 /**
358  * hdd_set_recovery_in_progress() - API to set recovery in progress
359  * @data: Context
360  * @val: Value to set
361  *
362  * Return: None
363  */
364 static void hdd_set_recovery_in_progress(void *data, uint8_t val)
365 {
366 	cds_set_recovery_in_progress(val);
367 	/* SSR can be triggered late; clean up existing queue for kernel handshake */
368 	if (!qdf_in_interrupt())
369 		hdd_soc_recovery_cleanup();
370 }
371 
372 /**
373  * hdd_hif_init_driver_state_callbacks() - API to initialize HIF callbacks
374  * @data: Private Data
375  * @cbk: HIF Driver State callbacks
376  *
377  * HIF should be independent of CDS calls. Pass CDS Callbacks to HIF, HIF will
378  * call the callbacks.
379  *
380  * Return: void
381  */
382 static void hdd_hif_init_driver_state_callbacks(void *data,
383 			struct hif_driver_state_callbacks *cbk)
384 {
385 	cbk->context = data;
386 	cbk->set_recovery_in_progress = hdd_set_recovery_in_progress;
387 	cbk->is_recovery_in_progress = hdd_is_recovery_in_progress;
388 	cbk->is_load_unload_in_progress = hdd_is_load_or_unload_in_progress;
389 	cbk->is_driver_unloading = hdd_is_driver_unloading;
390 	cbk->is_target_ready = hdd_is_target_ready;
391 	cbk->get_bandwidth_level = hdd_get_bandwidth_level;
392 	cbk->prealloc_get_consistent_mem_unaligned =
393 		hdd_get_consistent_mem_unaligned;
394 	cbk->prealloc_put_consistent_mem_unaligned =
395 		hdd_put_consistent_mem_unaligned;
396 	cbk->prealloc_get_multi_pages =
397 		hdd_dp_prealloc_get_multi_pages;
398 	cbk->prealloc_put_multi_pages =
399 		hdd_dp_prealloc_put_multi_pages;
400 }
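/*
 * Hedged example of the intended consumer side (assumed, not code from this
 * file): HIF is expected to invoke these callbacks with the registered
 * context, e.g.
 *
 *	if (cbk->is_recovery_in_progress(cbk->context))
 *		return;
 *
 * so that HIF itself never has to call CDS/HDD symbols directly.
 */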
401 
402 #ifdef HIF_DETECTION_LATENCY_ENABLE
403 void hdd_hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
404 {
405 	hif_set_enable_detection(hif_ctx, value);
406 }
407 #endif
408 
409 #ifdef FORCE_WAKE
410 void hdd_set_hif_init_phase(struct hif_opaque_softc *hif_ctx,
411 			    bool hal_init_phase)
412 {
413 	hif_srng_init_phase(hif_ctx, hal_init_phase);
414 }
415 #endif /* FORCE_WAKE */
416 
417 /**
418  * hdd_hif_set_attribute() - API to set CE attribute if memory is limited
419  * @hif_ctx: hif context
420  *
421  * Return: None
422  */
423 #ifdef SLUB_MEM_OPTIMIZE
424 static void hdd_hif_set_attribute(struct hif_opaque_softc *hif_ctx)
425 {
426 	hif_set_attribute(hif_ctx, HIF_LOWDESC_CE_NO_PKTLOG_CFG);
427 }
428 #else
429 static void hdd_hif_set_attribute(struct hif_opaque_softc *hif_ctx)
430 {}
431 #endif
432 
433 /**
434  * hdd_hif_register_shutdown_notifier() - Register HIF shutdown notifier
435  * @hif_ctx: HIF Context
436  *
437  * Return: success/failure
438  */
439 static QDF_STATUS
440 hdd_hif_register_shutdown_notifier(struct hif_opaque_softc *hif_ctx)
441 {
442 	return cds_shutdown_notifier_register(
443 					hif_shutdown_notifier_cb,
444 					hif_ctx);
445 }
446 
447 /**
448  * hdd_hif_set_ce_max_yield_time() - Wrapper API to set CE max yield time
449  * @hif_ctx: hif context
450  * @bus_type: underlying bus type
451  * @ce_service_max_yield_time: max yield time to be set
452  *
453  * Return: None
454  */
455 #if defined(CONFIG_SLUB_DEBUG_ON)
456 
457 static void hdd_hif_set_ce_max_yield_time(struct hif_opaque_softc *hif_ctx,
458 					  enum qdf_bus_type bus_type,
459 					  uint32_t ce_service_max_yield_time)
460 {
461 #define CE_SNOC_MAX_YIELD_TIME_US 2000
462 
463 	if (bus_type == QDF_BUS_TYPE_SNOC &&
464 	    ce_service_max_yield_time < CE_SNOC_MAX_YIELD_TIME_US)
465 		ce_service_max_yield_time = CE_SNOC_MAX_YIELD_TIME_US;
466 
467 	hif_set_ce_service_max_yield_time(hif_ctx, ce_service_max_yield_time);
468 }
469 
470 #else
471 static void hdd_hif_set_ce_max_yield_time(struct hif_opaque_softc *hif_ctx,
472 					  enum qdf_bus_type bus_type,
473 					  uint32_t ce_service_max_yield_time)
474 {
475 	hif_set_ce_service_max_yield_time(hif_ctx, ce_service_max_yield_time);
476 }
477 #endif
478 
479 /**
480  * hdd_init_cds_hif_context() - API to set CDS HIF Context
481  * @hif: HIF Context
482  *
483  * Return: success/failure
484  */
485 static int hdd_init_cds_hif_context(void *hif)
486 {
487 	QDF_STATUS status;
488 
489 	status = cds_set_context(QDF_MODULE_ID_HIF, hif);
490 
491 	if (status)
492 		return -ENOENT;
493 
494 	return 0;
495 }
496 
497 /**
498  * hdd_deinit_cds_hif_context() - API to clear CDS HIF Context
499  *
500  * Return: None
501  */
502 static void hdd_deinit_cds_hif_context(void)
503 {
504 	QDF_STATUS status;
505 
506 	status = cds_set_context(QDF_MODULE_ID_HIF, NULL);
507 
508 	if (status)
509 		hdd_err("Failed to reset CDS HIF Context");
510 }
511 
512 /**
513  * to_bus_type() - Map PLD bus type to low level bus type
514  * @bus_type: PLD bus type
515  *
516  * Map PLD bus type to low level bus type.
517  *
518  * Return: low level bus type.
519  */
520 static enum qdf_bus_type to_bus_type(enum pld_bus_type bus_type)
521 {
522 	switch (bus_type) {
523 	case PLD_BUS_TYPE_PCIE_FW_SIM:
524 	case PLD_BUS_TYPE_PCIE:
525 		return QDF_BUS_TYPE_PCI;
526 	case PLD_BUS_TYPE_SNOC_FW_SIM:
527 	case PLD_BUS_TYPE_SNOC:
528 		return QDF_BUS_TYPE_SNOC;
529 	case PLD_BUS_TYPE_SDIO:
530 		return QDF_BUS_TYPE_SDIO;
531 	case PLD_BUS_TYPE_USB:
532 		return QDF_BUS_TYPE_USB;
533 	case PLD_BUS_TYPE_IPCI_FW_SIM:
534 	case PLD_BUS_TYPE_IPCI:
535 		return QDF_BUS_TYPE_IPCI;
536 	default:
537 		return QDF_BUS_TYPE_NONE;
538 	}
539 }
540 
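/**
 * hdd_hif_open() - open and enable the HIF layer for a given bus
 * @dev: kernel device
 * @bdev: bus device structure
 * @bid: bus identifier for shared busses
 * @bus_type: underlying bus type
 * @reinit: true when called from SSR re-init, false for a fresh probe
 *
 * Opens HIF, registers the shutdown notifier, enables the bus, creates the
 * NAPI instances and publishes the HIF handle to the PMO and DP components.
 *
 * Return: Errno
 */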
541 int hdd_hif_open(struct device *dev, void *bdev, const struct hif_bus_id *bid,
542 			enum qdf_bus_type bus_type, bool reinit)
543 {
544 	QDF_STATUS status;
545 	int ret = 0;
546 	struct hif_opaque_softc *hif_ctx;
547 	qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
548 	struct hif_driver_state_callbacks cbk;
549 	uint32_t mode = cds_get_conparam();
550 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
551 
552 	if (!hdd_ctx)
553 		return -EFAULT;
554 
555 	hdd_hif_init_driver_state_callbacks(dev, &cbk);
556 
557 	hif_ctx = hif_open(qdf_ctx, mode, bus_type, &cbk, hdd_ctx->psoc);
558 	if (!hif_ctx) {
559 		hdd_err("hif_open error");
560 		return -ENOMEM;
561 	}
562 
563 	ret = hdd_init_cds_hif_context(hif_ctx);
564 	if (ret) {
565 		hdd_err("Failed to set global HIF CDS Context err: %d", ret);
566 		goto err_hif_close;
567 	}
568 
569 	status = hdd_hif_register_shutdown_notifier(hif_ctx);
570 	if (status != QDF_STATUS_SUCCESS) {
571 		hdd_err("Shutdown notifier register failed: %d", status);
572 		goto err_deinit_hif_context;
573 	}
574 
575 	hdd_hif_set_attribute(hif_ctx);
576 
577 	status = hif_enable(hif_ctx, dev, bdev, bid, bus_type,
578 			    (reinit == true) ?  HIF_ENABLE_TYPE_REINIT :
579 			    HIF_ENABLE_TYPE_PROBE);
580 	if (!QDF_IS_STATUS_SUCCESS(status)) {
581 		hdd_err("hif_enable failed status: %d, reinit: %d",
582 			status, reinit);
583 
584 		ret = qdf_status_to_os_return(status);
585 		goto err_deinit_hif_context;
586 	} else {
587 		cds_set_target_ready(true);
588 		ret = hdd_napi_create();
589 		hdd_debug("hdd_napi_create returned: %d", ret);
590 		if (ret == 0)
591 			hdd_debug("NAPI: no instances are created");
592 		else if (ret < 0) {
593 			hdd_err("NAPI creation error, rc: 0x%x, reinit: %d",
594 				ret, reinit);
595 			ret = -EFAULT;
596 			goto mark_target_not_ready;
597 		} else {
598 			hdd_napi_event(NAPI_EVT_INI_FILE,
599 				       (void *)ucfg_dp_get_napi_enabled(hdd_ctx->psoc));
600 		}
601 	}
602 
603 	hdd_hif_set_ce_max_yield_time(
604 				hif_ctx, bus_type,
605 				cfg_get(hdd_ctx->psoc,
606 					CFG_DP_CE_SERVICE_MAX_YIELD_TIME));
607 	ucfg_pmo_psoc_set_hif_handle(hdd_ctx->psoc, hif_ctx);
608 	ucfg_dp_set_hif_handle(hdd_ctx->psoc, hif_ctx);
609 	hif_set_ce_service_max_rx_ind_flush(hif_ctx,
610 				cfg_get(hdd_ctx->psoc,
611 					CFG_DP_CE_SERVICE_MAX_RX_IND_FLUSH));
612 	return 0;
613 
614 mark_target_not_ready:
615 	cds_set_target_ready(false);
616 
617 err_deinit_hif_context:
618 	hdd_deinit_cds_hif_context();
619 
620 err_hif_close:
621 	hif_close(hif_ctx);
622 	return ret;
623 }
624 
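/**
 * hdd_hif_close() - disable and close the HIF layer
 * @hdd_ctx: HDD context
 * @hif_ctx: opaque HIF context returned by hdd_hif_open()
 *
 * Reverses hdd_hif_open(): disables the bus, destroys the NAPI instances,
 * clears the CDS HIF context, closes HIF and clears the PMO HIF handle.
 *
 * Return: None
 */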
625 void hdd_hif_close(struct hdd_context *hdd_ctx, void *hif_ctx)
626 {
627 	if (!hdd_ctx) {
628 		hdd_err("hdd_ctx error");
629 		return;
630 	}
631 
632 	if (!hif_ctx)
633 		return;
634 
635 	cds_set_target_ready(false);
636 	hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
637 
638 	hdd_napi_destroy(true);
639 
640 	hdd_deinit_cds_hif_context();
641 	hif_close(hif_ctx);
642 
643 	ucfg_pmo_psoc_set_hif_handle(hdd_ctx->psoc, NULL);
644 }
645 
646 /**
647  * hdd_init_qdf_ctx() - API to initialize global QDF Device structure
648  * @dev: Device Pointer
649  * @bdev: Bus Device pointer
650  * @bus_type: Underlying bus type
651  * @bid: Bus id passed by platform driver
652  *
653  * Return: 0 - success, < 0 - failure
654  */
655 static int hdd_init_qdf_ctx(struct device *dev, void *bdev,
656 			    enum qdf_bus_type bus_type,
657 			    const struct hif_bus_id *bid)
658 {
659 	qdf_device_t qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
660 
661 	if (!qdf_dev)
662 		return -EINVAL;
663 
664 	qdf_dev->dev = dev;
665 	qdf_dev->drv_hdl = bdev;
666 	qdf_dev->bus_type = bus_type;
667 	qdf_dev->bid = bid;
668 
669 	qdf_dma_invalid_buf_list_init();
670 
671 	if (cds_smmu_mem_map_setup(qdf_dev, ucfg_ipa_is_ready()) !=
672 		QDF_STATUS_SUCCESS) {
673 		hdd_err("cds_smmu_mem_map_setup() failed");
674 	}
675 
676 	return 0;
677 }
678 
679 /**
680  * hdd_deinit_qdf_ctx() - API to Deinitialize global QDF Device structure
681  * @domain: Debug domain
682  *
683  * Return: 0 - success, < 0 - failure
684  */
685 int hdd_deinit_qdf_ctx(uint8_t domain)
686 {
687 	qdf_device_t qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
688 
689 	if (!qdf_dev)
690 		return -EINVAL;
691 
692 	qdf_dma_invalid_buf_free(qdf_dev->dev, domain);
693 
694 	return 0;
695 }
696 
697 /**
698  * check_for_probe_defer() - API to check return value
699  * @ret: Return Value
700  *
701  * Return: return -EPROBE_DEFER to platform driver if return value
702  * is -ENOMEM. Platform driver will try to re-probe.
703  */
704 #ifdef MODULE
705 static int check_for_probe_defer(int ret)
706 {
707 	return ret;
708 }
709 #else
710 static int check_for_probe_defer(int ret)
711 {
712 	if (ret == -ENOMEM)
713 		return -EPROBE_DEFER;
714 	return ret;
715 }
716 #endif
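/*
 * Usage note: the probe paths below return through check_for_probe_defer()
 * so that, for built-in (non-module) images, an -ENOMEM failure is reported
 * to the platform driver as -EPROBE_DEFER and the probe is retried later.
 */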
717 
718 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
719 static void hdd_abort_system_suspend(struct device *dev)
720 {
721 	qdf_pm_system_wakeup();
722 }
723 #else
724 static void hdd_abort_system_suspend(struct device *dev)
725 {
726 }
727 #endif
728 
729 int hdd_soc_idle_restart_lock(struct device *dev)
730 {
731 	hdd_prevent_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_IDLE_RESTART);
732 
733 	hdd_abort_system_suspend(dev);
734 
735 	return 0;
736 }
737 
738 void hdd_soc_idle_restart_unlock(void)
739 {
740 	hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_IDLE_RESTART);
741 }
742 
743 static void hdd_soc_load_lock(struct device *dev)
744 {
745 	hdd_prevent_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_INIT);
746 	hdd_request_pm_qos(dev, DISABLE_KRAIT_IDLE_PS_VAL);
747 }
748 
749 static void hdd_soc_load_unlock(struct device *dev)
750 {
751 	hdd_remove_pm_qos(dev);
752 	hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_INIT);
753 }
754 
755 #ifdef DP_MEM_PRE_ALLOC
756 /**
757  * hdd_init_dma_mask() - Set the DMA mask for dma memory pre-allocation
758  * @dev: device handle
759  * @bus_type: Bus type for which init is being done
760  *
761  * Return: 0 - success, non-zero on failure
762  */
763 static int hdd_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
764 {
765 	return hif_init_dma_mask(dev, bus_type);
766 }
767 #else
768 static inline int
769 hdd_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
770 {
771 	return 0;
772 }
773 #endif
774 
775 static int __hdd_soc_probe(struct device *dev,
776 			   void *bdev,
777 			   const struct hif_bus_id *bid,
778 			   enum qdf_bus_type bus_type)
779 {
780 	struct hdd_context *hdd_ctx;
781 	QDF_STATUS status;
782 	int errno;
783 
784 	hdd_info("probing driver");
785 
786 	hdd_soc_load_lock(dev);
787 	cds_set_load_in_progress(true);
788 	cds_set_driver_in_bad_state(false);
789 	cds_set_recovery_in_progress(false);
790 
791 	errno = hdd_init_qdf_ctx(dev, bdev, bus_type, bid);
792 	if (errno)
793 		goto unlock;
794 
795 	errno = hdd_init_dma_mask(dev, bus_type);
796 	if (errno)
797 		goto unlock;
798 
799 	hdd_ctx = hdd_context_create(dev);
800 	if (IS_ERR(hdd_ctx)) {
801 		errno = PTR_ERR(hdd_ctx);
802 		goto assert_fail_count;
803 	}
804 
805 	status = ucfg_dp_prealloc_init((struct cdp_ctrl_objmgr_psoc *)
806 					hdd_ctx->psoc);
807 
808 	if (status != QDF_STATUS_SUCCESS) {
809 		errno = qdf_status_to_os_return(status);
810 		goto dp_prealloc_fail;
811 	}
812 
813 	errno = hdd_wlan_startup(hdd_ctx);
814 	if (errno)
815 		goto hdd_context_destroy;
816 
817 	status = hdd_psoc_create_vdevs(hdd_ctx);
818 	if (QDF_IS_STATUS_ERROR(status)) {
819 		errno = qdf_status_to_os_return(status);
820 		goto wlan_exit;
821 	}
822 
823 	probe_fail_cnt = 0;
824 	cds_set_driver_loaded(true);
825 	cds_set_load_in_progress(false);
826 	hdd_start_complete(0);
827 	hdd_thermal_mitigation_register(hdd_ctx, dev);
828 
829 	hdd_set_sar_init_index(hdd_ctx);
830 	hdd_soc_load_unlock(dev);
831 
832 	return 0;
833 
834 wlan_exit:
835 	hdd_wlan_exit(hdd_ctx);
836 
837 hdd_context_destroy:
838 	ucfg_dp_prealloc_deinit();
839 
840 dp_prealloc_fail:
841 	hdd_context_destroy(hdd_ctx);
842 
843 assert_fail_count:
844 	probe_fail_cnt++;
845 	hdd_err("consecutive probe failures:%u", probe_fail_cnt);
846 	QDF_BUG(probe_fail_cnt < SSR_MAX_FAIL_CNT);
847 
848 unlock:
849 	cds_set_load_in_progress(false);
850 	hdd_soc_load_unlock(dev);
851 
852 	return check_for_probe_defer(errno);
853 }
854 
855 /**
856  * hdd_soc_probe() - perform SoC probe
857  * @dev: kernel device being probed
858  * @bdev: bus device structure
859  * @bid: bus identifier for shared busses
860  * @bus_type: underlying bus type
861  *
862  * A SoC probe indicates new SoC hardware has become available and needs to be
863  * initialized.
864  *
865  * Return: Errno
866  */
867 static int hdd_soc_probe(struct device *dev,
868 			 void *bdev,
869 			 const struct hif_bus_id *bid,
870 			 enum qdf_bus_type bus_type)
871 {
872 	struct osif_psoc_sync *psoc_sync;
873 	int errno;
874 
875 	hdd_info("probing driver");
876 
877 	errno = osif_psoc_sync_create_and_trans(&psoc_sync);
878 	if (errno)
879 		return errno;
880 
881 	osif_psoc_sync_register(dev, psoc_sync);
882 	errno = __hdd_soc_probe(dev, bdev, bid, bus_type);
883 	if (errno)
884 		goto destroy_sync;
885 
886 	osif_psoc_sync_trans_stop(psoc_sync);
887 
888 	return 0;
889 
890 destroy_sync:
891 	osif_psoc_sync_unregister(dev);
892 	osif_psoc_sync_wait_for_ops(psoc_sync);
893 
894 	osif_psoc_sync_trans_stop(psoc_sync);
895 	osif_psoc_sync_destroy(psoc_sync);
896 
897 	return errno;
898 }
899 
900 static int __hdd_soc_recovery_reinit(struct device *dev,
901 				     void *bdev,
902 				     const struct hif_bus_id *bid,
903 				     enum qdf_bus_type bus_type)
904 {
905 	int errno;
906 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
907 
908 	hdd_info("re-probing driver");
909 
910 	if (!hdd_ctx) {
911 		hdd_err("hdd_ctx is null!");
912 		return qdf_status_to_os_return(QDF_STATUS_E_RESOURCES);
913 	}
914 
915 	hdd_soc_load_lock(dev);
916 	cds_set_driver_in_bad_state(false);
917 
918 	errno = hdd_init_qdf_ctx(dev, bdev, bus_type, bid);
919 	if (errno)
920 		goto unlock;
921 
922 	errno = hdd_wlan_re_init();
923 	if (errno) {
924 		re_init_fail_cnt++;
925 		goto assert_fail_count;
926 	}
927 
928 	re_init_fail_cnt = 0;
929 
930 	/*
931 	 * In case of SSR within SSR we have seen a race where the reinit
932 	 * succeeds while an fw-down indication is received, which sets
933 	 * recovery-in-progress again. Since the reinit succeeded we would
934 	 * normally clear recovery-in-progress here, so first check
935 	 * whether the FW is down and, if so, leave recovery-in-progress
936 	 * set.
937 	 */
938 	if (!qdf_is_fw_down()) {
939 		cds_set_recovery_in_progress(false);
940 		hdd_handle_cached_commands();
941 	}
942 
943 	if (!hdd_is_any_interface_open(hdd_ctx)) {
944 		hdd_debug("restarting idle shutdown timer");
945 		hdd_psoc_idle_timer_start(hdd_ctx);
946 	}
947 
948 	hdd_soc_load_unlock(dev);
949 	hdd_send_driver_ready_to_user();
950 
951 	return 0;
952 
953 assert_fail_count:
954 	hdd_err("consecutive reinit failures:%u", re_init_fail_cnt);
955 	QDF_BUG(re_init_fail_cnt < SSR_MAX_FAIL_CNT);
956 
957 unlock:
958 	cds_set_driver_in_bad_state(true);
959 	hdd_soc_load_unlock(dev);
960 	hdd_start_complete(errno);
961 
962 	return check_for_probe_defer(errno);
963 }
964 
965 /**
966  * hdd_soc_recovery_reinit() - perform PDR/SSR SoC reinit
967  * @dev: the kernel device being re-initialized
968  * @bdev: bus device structure
969  * @bid: bus identifier for shared busses
970  * @bus_type: underlying bus type
971  *
972  * When communication with firmware breaks down, a SoC recovery process kicks in
973  * with two phases: shutdown and reinit.
974  *
975  * SSR reinit is similar to a 'probe' but happens in response to an SSR
976  * shutdown. The idea is to re-initialize the SoC to as close to its old,
977  * pre-communications-breakdown configuration as possible. This is completely
978  * transparent from a userspace point of view.
979  *
980  * Return: Errno
981  */
982 static int hdd_soc_recovery_reinit(struct device *dev,
983 				   void *bdev,
984 				   const struct hif_bus_id *bid,
985 				   enum qdf_bus_type bus_type)
986 {
987 	struct osif_psoc_sync *psoc_sync;
988 	int errno;
989 
990 	/* if driver is unloading, there is no need to do SSR */
991 	if (qdf_is_driver_unloading()) {
992 		hdd_info("driver is unloading, avoid SSR");
993 		return 0;
994 	}
995 
996 	/* SSR transition is initiated at the beginning of soc shutdown */
997 	errno = osif_psoc_sync_trans_resume(dev, &psoc_sync);
998 	QDF_BUG(!errno);
999 	if (errno)
1000 		return errno;
1001 
1002 	errno = __hdd_soc_recovery_reinit(dev, bdev, bid, bus_type);
1003 
1004 
1005 	osif_psoc_sync_trans_stop(psoc_sync);
1006 	hdd_start_complete(0);
1007 
1008 	return errno;
1009 }
1010 
1011 static void __hdd_soc_remove(struct device *dev)
1012 {
1013 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1014 
1015 	QDF_BUG(hdd_ctx);
1016 	if (!hdd_ctx)
1017 		return;
1018 
1019 	pr_info("%s: Removing driver v%s\n", WLAN_MODULE_NAME,
1020 		QWLAN_VERSIONSTR);
1021 
1022 	qdf_rtpm_sync_resume();
1023 	cds_set_driver_loaded(false);
1024 	cds_set_unload_in_progress(true);
1025 	if (!hdd_wait_for_debugfs_threads_completion())
1026 		hdd_warn("Debugfs threads are still active, attempting driver unload anyway");
1027 
1028 	if (hdd_get_conparam() == QDF_GLOBAL_EPPING_MODE) {
1029 		hdd_wlan_stop_modules(hdd_ctx, false);
1030 		qdf_nbuf_deinit_replenish_timer();
1031 	} else {
1032 		hdd_thermal_mitigation_unregister(hdd_ctx, dev);
1033 		hdd_wlan_exit(hdd_ctx);
1034 	}
1035 
1036 	hdd_context_destroy(hdd_ctx);
1037 
1038 	cds_set_driver_in_bad_state(false);
1039 	cds_set_unload_in_progress(false);
1040 
1041 	ucfg_dp_prealloc_deinit();
1042 
1043 	pr_info("%s: Driver De-initialized\n", WLAN_MODULE_NAME);
1044 }
1045 
1046 /**
1047  * hdd_soc_remove() - perform SoC remove
1048  * @dev: the kernel device being removed
1049  *
1050  * A SoC remove indicates the attached SoC hardware is about to go away and
1051  * needs to be cleaned up.
1052  *
1053  * Return: void
1054  */
1055 static void hdd_soc_remove(struct device *dev)
1056 {
1057 	__hdd_soc_remove(dev);
1058 }
1059 
1060 /**
1061  * hdd_send_hang_data() - Send hang data to userspace
1062  * @data: Hang data
1063  * @data_len: Length of @data
1064  *
1065  * Return: None
1066  */
1067 static void hdd_send_hang_data(uint8_t *data, size_t data_len)
1068 {
1069 	enum qdf_hang_reason reason = QDF_REASON_UNSPECIFIED;
1070 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1071 
1072 	if (!hdd_ctx)
1073 		return;
1074 
1075 	cds_get_recovery_reason(&reason);
1076 	cds_reset_recovery_reason();
1077 	wlan_hdd_send_hang_reason_event(hdd_ctx, reason, data, data_len);
1078 }
1079 
1080 static void __hdd_soc_recovery_shutdown(void)
1081 {
1082 	struct hdd_context *hdd_ctx;
1083 	void *hif_ctx;
1084 
1085 	/* recovery starts via firmware down indication; ensure we got one */
1086 	QDF_BUG(cds_is_driver_recovering());
1087 
1088 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1089 	if (!hdd_ctx)
1090 		return;
1091 
1092 	/*
1093 	 * Perform SSR related cleanup if it has not already been done as a
1094 	 * part of receiving the uevent.
1095 	 */
1096 	if (!qdf_atomic_read(&is_recovery_cleanup_done))
1097 		hdd_soc_recovery_cleanup();
1098 	else
1099 		qdf_atomic_set(&is_recovery_cleanup_done, 0);
1100 
1101 	if (!hdd_wait_for_debugfs_threads_completion())
1102 		hdd_err("Debugfs threads are still pending, attempting SSR anyway");
1103 
1104 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1105 	if (!hif_ctx)
1106 		return;
1107 
1108 	/* mask the host controller interrupts */
1109 	hif_mask_interrupt_call(hif_ctx);
1110 
1111 	if (!QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
1112 		hif_disable_isr(hif_ctx);
1113 		hdd_wlan_shutdown();
1114 	}
1115 }
1116 
1117 /**
1118  * hdd_soc_recovery_shutdown() - perform PDR/SSR SoC shutdown
1119  * @dev: the device to shutdown
1120  *
1121  * When communication with firmware breaks down, a SoC recovery process kicks in
1122  * with two phases: shutdown and reinit.
1123  *
1124  * SSR shutdown is similar to a 'remove' but without communication with
1125  * firmware. The idea is to retain as much SoC configuration as possible, so it
1126  * can be re-initialized to the same state after a reset. This is completely
1127  * transparent from a userspace point of view.
1128  *
1129  * Return: void
1130  */
1131 static void hdd_soc_recovery_shutdown(struct device *dev)
1132 {
1133 	struct osif_psoc_sync *psoc_sync;
1134 	int errno;
1135 
1136 	/* if driver is unloading, there is no need to do SSR */
1137 	if (qdf_is_driver_unloading()) {
1138 		hdd_info("driver is unloading, avoid SSR");
1139 		return;
1140 	}
1141 
1142 	errno = osif_psoc_sync_trans_start_wait(dev, &psoc_sync);
1143 	if (errno)
1144 		return;
1145 
1146 	ucfg_dp_wait_complete_tasks();
1147 	osif_psoc_sync_wait_for_ops(psoc_sync);
1148 
1149 	__hdd_soc_recovery_shutdown();
1150 
1151 	/* SSR transition is concluded at the end of soc re-init */
1152 }
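/*
 * Note on transaction pairing: the psoc trans started above with
 * osif_psoc_sync_trans_start_wait() is intentionally left open here; it is
 * resumed in hdd_soc_recovery_reinit() via osif_psoc_sync_trans_resume()
 * and stopped only once SSR re-init completes.
 */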
1153 
1154 /**
1155  * wlan_hdd_crash_shutdown() - wlan_hdd_crash_shutdown
1156  *
1157  * HDD crash shutdown function: This function is called by
1158  * platform driver's crash shutdown routine
1159  *
1160  * Return: void
1161  */
1162 static void wlan_hdd_crash_shutdown(void)
1163 {
1164 	QDF_STATUS ret;
1165 	WMA_HANDLE wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
1166 
1167 	if (!wma_handle)
1168 		return;
1169 
1170 	/*
1171 	 * When a kernel panic happens, if the WiFi FW is still active
1172 	 * it may cause NOC errors/memory corruption; to avoid this,
1173 	 * inject a FW crash first.
1174 	 * Send crash_inject to the FW directly, because we are now in
1175 	 * an atomic context and preemption has been disabled, so
1176 	 * MCThread won't be scheduled at the moment; for the same
1177 	 * reason, the TargetFailure event won't be received after the
1178 	 * crash is injected.
1179 	 */
1180 	ret = wma_crash_inject(wma_handle, RECOVERY_SIM_ASSERT, 0);
1181 	if (QDF_IS_STATUS_ERROR(ret)) {
1182 		hdd_err("Failed to send crash inject:%d", ret);
1183 		return;
1184 	}
1185 
1186 	hif_crash_shutdown(cds_get_context(QDF_MODULE_ID_HIF));
1187 }
1188 
1189 /**
1190  * wlan_hdd_notify_handler() - notify the driver of a modem power state change
1191  * @state: modem power state
1192  *
1193  * This function is called by the platform driver to notify the driver of a
1194  * modem power state change so that it can be forwarded to the firmware for
1195  * COEX handling.
1196  *
1197  * Return: void
1198  */
1199 static void wlan_hdd_notify_handler(int state)
1200 {
1201 	if (!QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
1202 		int ret;
1203 
1204 		ret = hdd_wlan_notify_modem_power_state(state);
1205 		if (ret < 0)
1206 			hdd_err("Fail to send notify");
1207 	}
1208 }
1209 
1210 static int hdd_to_pmo_interface_pause(enum wow_interface_pause hdd_pause,
1211 				      enum pmo_wow_interface_pause *pmo_pause)
1212 {
1213 	switch (hdd_pause) {
1214 	case WOW_INTERFACE_PAUSE_DEFAULT:
1215 		*pmo_pause = PMO_WOW_INTERFACE_PAUSE_DEFAULT;
1216 		break;
1217 	case WOW_INTERFACE_PAUSE_ENABLE:
1218 		*pmo_pause = PMO_WOW_INTERFACE_PAUSE_ENABLE;
1219 		break;
1220 	case WOW_INTERFACE_PAUSE_DISABLE:
1221 		*pmo_pause = PMO_WOW_INTERFACE_PAUSE_DISABLE;
1222 		break;
1223 	default:
1224 		hdd_err("Invalid interface pause: %d", hdd_pause);
1225 		return -EINVAL;
1226 	}
1227 
1228 	return 0;
1229 }
1230 
1231 static int hdd_to_pmo_resume_trigger(enum wow_resume_trigger hdd_trigger,
1232 				     enum pmo_wow_resume_trigger *pmo_trigger)
1233 {
1234 	switch (hdd_trigger) {
1235 	case WOW_RESUME_TRIGGER_DEFAULT:
1236 		*pmo_trigger = PMO_WOW_RESUME_TRIGGER_DEFAULT;
1237 		break;
1238 	case WOW_RESUME_TRIGGER_HTC_WAKEUP:
1239 		*pmo_trigger = PMO_WOW_RESUME_TRIGGER_HTC_WAKEUP;
1240 		break;
1241 	case WOW_RESUME_TRIGGER_GPIO:
1242 		*pmo_trigger = PMO_WOW_RESUME_TRIGGER_GPIO;
1243 		break;
1244 	default:
1245 		hdd_err("Invalid resume trigger: %d", hdd_trigger);
1246 		return -EINVAL;
1247 	}
1248 
1249 	return 0;
1250 }
1251 
1252 static int
1253 hdd_to_pmo_wow_enable_params(struct wow_enable_params *in_params,
1254 			     struct pmo_wow_enable_params *out_params)
1255 {
1256 	int err;
1257 
1258 	/* unit-test suspend */
1259 	out_params->is_unit_test = in_params->is_unit_test;
1260 
1261 	/* interface pause */
1262 	err = hdd_to_pmo_interface_pause(in_params->interface_pause,
1263 					 &out_params->interface_pause);
1264 	if (err)
1265 		return err;
1266 
1267 	/* resume trigger */
1268 	err = hdd_to_pmo_resume_trigger(in_params->resume_trigger,
1269 					&out_params->resume_trigger);
1270 	if (err)
1271 		return err;
1272 
1273 	return 0;
1274 }
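/*
 * Minimal usage sketch (mirrors the call in __wlan_hdd_bus_suspend() below;
 * the variable names here are illustrative only):
 *
 *	struct wow_enable_params wow = {0};
 *	struct pmo_wow_enable_params pmo;
 *
 *	if (!hdd_to_pmo_wow_enable_params(&wow, &pmo))
 *		status = ucfg_pmo_psoc_bus_suspend_req(psoc, type, &pmo);
 */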
1275 
1276 void __wlan_hdd_trigger_cds_recovery(enum qdf_hang_reason reason,
1277 				     const char *func, const uint32_t line)
1278 {
1279 	__cds_trigger_recovery(reason, func, line);
1280 }
1281 
1282 /**
1283  * __wlan_hdd_bus_suspend() - handles platform suspend
1284  * @wow_params: collection of wow enable override parameters
1285  * @type: WoW suspend type
1286  *
1287  * Does precondition validation. Ensures that a subsystem restart isn't in
1288  * progress. Ensures that no load or unload is in progress. Does:
1289  *	data path suspend
1290  *	component (pmo) suspend
1291  *	hif (bus) suspend
1292  *
1293  * Return: 0 for success, -EFAULT for null pointers,
1294  *     -EBUSY or -EAGAIN if another operation is in progress and
1295  *     wlan will not be ready to suspend in time.
1296  */
1297 static int __wlan_hdd_bus_suspend(struct wow_enable_params wow_params,
1298 				  enum qdf_suspend_type type)
1299 {
1300 	int err;
1301 	QDF_STATUS status;
1302 	struct hdd_context *hdd_ctx;
1303 	void *hif_ctx;
1304 	void *dp_soc;
1305 	struct pmo_wow_enable_params pmo_params;
1306 	int pending;
1307 	struct bbm_params param = {0};
1308 
1309 	hdd_info("starting bus suspend");
1310 
1311 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1312 	if (!hdd_ctx)
1313 		return -ENODEV;
1314 
1315 	err = wlan_hdd_validate_context(hdd_ctx);
1316 	if (0 != err) {
1317 		if (pld_is_low_power_mode(hdd_ctx->parent_dev))
1318 			hdd_debug("low power mode (Deep Sleep/Hibernate)");
1319 		else
1320 			return err;
1321 	}
1322 
1323 	/* If Wifi is off, return success for system suspend */
1324 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1325 		hdd_debug("Driver Module closed; skipping suspend");
1326 		return 0;
1327 	}
1328 
1329 
1330 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1331 	if (!hif_ctx)
1332 		return -EINVAL;
1333 
1334 	err = hdd_to_pmo_wow_enable_params(&wow_params, &pmo_params);
1335 	if (err) {
1336 		hdd_err("Invalid WoW enable parameters: %d", err);
1337 		return err;
1338 	}
1339 
1340 	dp_soc = cds_get_context(QDF_MODULE_ID_SOC);
1341 	err = qdf_status_to_os_return(ucfg_dp_bus_suspend(dp_soc,
1342 							  OL_TXRX_PDEV_ID));
1343 	if (err) {
1344 		hdd_err("Failed cdp bus suspend: %d", err);
1345 		return err;
1346 	}
1347 
1348 	if (ucfg_ipa_is_tx_pending(hdd_ctx->pdev)) {
1349 		hdd_err("failed due to pending IPA TX comps");
1350 		err = -EBUSY;
1351 		goto resume_dp;
1352 	}
1353 
1354 	err = hif_bus_early_suspend(hif_ctx);
1355 	if (err) {
1356 		hdd_err("Failed hif bus early suspend");
1357 		goto resume_dp;
1358 	}
1359 
1360 	status = ucfg_pmo_psoc_bus_suspend_req(hdd_ctx->psoc,
1361 					       type,
1362 					       &pmo_params);
1363 	err = qdf_status_to_os_return(status);
1364 	if (err) {
1365 		hdd_err("Failed pmo bus suspend: %d", status);
1366 		goto late_hif_resume;
1367 	}
1368 
1369 	hif_system_pm_set_state_suspended(hif_ctx);
1370 
1371 	err = hif_bus_suspend(hif_ctx);
1372 	if (err) {
1373 		hdd_err("Failed hif bus suspend: %d", err);
1374 		goto resume_pmo;
1375 	}
1376 
1377 	status = ucfg_pmo_core_txrx_suspend(hdd_ctx->psoc);
1378 	err = qdf_status_to_os_return(status);
1379 	if (err) {
1380 		hdd_err("Failed to suspend TXRX: %d", err);
1381 		goto resume_hif;
1382 	}
1383 
1384 	pending = cdp_rx_get_pending(cds_get_context(QDF_MODULE_ID_SOC));
1385 	if (pending) {
1386 		hdd_debug("Prevent suspend, RX frame pending %d", pending);
1387 		err = -EBUSY;
1388 		goto resume_txrx;
1389 	}
1390 
1391 	if (hif_try_prevent_ep_vote_access(hif_ctx)) {
1392 		hdd_debug("Prevent suspend, ep work pending");
1393 		err = -EBUSY;
1394 		goto resume_txrx;
1395 	}
1396 
1397 	/*
1398 	 * Remove bus votes at the very end, after making sure there are no
1399 	 * pending bus transactions from WLAN SOC for TX/RX.
1400 	 */
1401 	param.policy = BBM_NON_PERSISTENT_POLICY;
1402 	param.policy_info.flag = BBM_APPS_SUSPEND;
1403 	ucfg_dp_bbm_apply_independent_policy(hdd_ctx->psoc, &param);
1404 
1405 	hdd_info("bus suspend succeeded");
1406 	return 0;
1407 
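	/*
	 * Error unwind: the labels below fall through, undoing the suspend
	 * steps in reverse order (txrx, hif bus, pmo, hif late, data path).
	 * A failure while unwinding triggers recovery, since the system
	 * would otherwise be left partially suspended.
	 */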
1408 resume_txrx:
1409 	status = ucfg_pmo_core_txrx_resume(hdd_ctx->psoc);
1410 	if (QDF_IS_STATUS_ERROR(status)) {
1411 		wlan_hdd_trigger_cds_recovery(QDF_RESUME_TIMEOUT);
1412 		return qdf_status_to_os_return(status);
1413 	}
1414 
1415 resume_hif:
1416 	status = hif_bus_resume(hif_ctx);
1417 	if (QDF_IS_STATUS_ERROR(status)) {
1418 		wlan_hdd_trigger_cds_recovery(QDF_RESUME_TIMEOUT);
1419 		return qdf_status_to_os_return(status);
1420 	}
1421 
1422 resume_pmo:
1423 	status = ucfg_pmo_psoc_bus_resume_req(hdd_ctx->psoc,
1424 					      type);
1425 	if (QDF_IS_STATUS_ERROR(status)) {
1426 		wlan_hdd_trigger_cds_recovery(QDF_RESUME_TIMEOUT);
1427 		return qdf_status_to_os_return(status);
1428 	}
1429 
1430 late_hif_resume:
1431 	status = hif_bus_late_resume(hif_ctx);
1432 	if (QDF_IS_STATUS_ERROR(status)) {
1433 		wlan_hdd_trigger_cds_recovery(QDF_RESUME_TIMEOUT);
1434 		return qdf_status_to_os_return(status);
1435 	}
1436 
1437 resume_dp:
1438 	status = ucfg_dp_bus_resume(dp_soc, OL_TXRX_PDEV_ID);
1439 	if (QDF_IS_STATUS_ERROR(status)) {
1440 		wlan_hdd_trigger_cds_recovery(QDF_RESUME_TIMEOUT);
1441 		return qdf_status_to_os_return(status);
1442 	}
1443 	hif_system_pm_set_state_on(hif_ctx);
1444 	return err;
1445 }
1446 
1447 int wlan_hdd_bus_suspend(void)
1448 {
1449 	struct wow_enable_params default_params = {0};
1450 
1451 	return __wlan_hdd_bus_suspend(default_params, QDF_SYSTEM_SUSPEND);
1452 }
1453 
1454 #ifdef WLAN_SUSPEND_RESUME_TEST
1455 int wlan_hdd_unit_test_bus_suspend(struct wow_enable_params wow_params)
1456 {
1457 	return __wlan_hdd_bus_suspend(wow_params, QDF_UNIT_TEST_WOW_SUSPEND);
1458 }
1459 #endif
1460 
1461 /**
1462  * wlan_hdd_bus_suspend_noirq() - handle .suspend_noirq callback
1463  *
1464  * This function is called by the platform driver to complete the
1465  * bus suspend callback when device interrupts are disabled by kernel.
1466  * Call HIF and WMA suspend_noirq callbacks to make sure there is no
1467  * wake up pending from FW before allowing suspend.
1468  *
1469  * Return: 0 for success and -EBUSY if FW is requesting wake up
1470  */
1471 int wlan_hdd_bus_suspend_noirq(void)
1472 {
1473 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1474 	void *hif_ctx;
1475 	int errno;
1476 	uint32_t pending_events;
1477 
1478 	hdd_debug("start bus_suspend_noirq");
1479 
1480 	if (!hdd_ctx)
1481 		return -ENODEV;
1482 
1483 	/* If Wifi is off, return success for system suspend */
1484 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1485 		hdd_debug("Driver module closed; skip bus-noirq suspend");
1486 		return 0;
1487 	}
1488 
1489 	errno = wlan_hdd_validate_context(hdd_ctx);
1490 	if (errno)
1491 		return errno;
1492 
1493 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1494 	if (!hif_ctx)
1495 		return -EINVAL;
1496 
1497 	errno = hif_bus_suspend_noirq(hif_ctx);
1498 	if (errno)
1499 		goto done;
1500 
1501 	errno = ucfg_pmo_psoc_is_target_wake_up_received(hdd_ctx->psoc);
1502 	if (errno == -EAGAIN) {
1503 		hdd_err("Firmware attempting wakeup, try again");
1504 		wlan_hdd_inc_suspend_stats(hdd_ctx,
1505 					   SUSPEND_FAIL_INITIAL_WAKEUP);
1506 	}
1507 	if (errno)
1508 		goto resume_hif_noirq;
1509 
1510 	pending_events = wma_critical_events_in_flight();
1511 	if (pending_events) {
1512 		hdd_err("%d critical event(s) in flight; try again",
1513 			pending_events);
1514 		errno = -EAGAIN;
1515 		goto resume_hif_noirq;
1516 	}
1517 
1518 	hdd_ctx->suspend_resume_stats.suspends++;
1519 
1520 	hdd_debug("bus_suspend_noirq done");
1521 	return 0;
1522 
1523 resume_hif_noirq:
1524 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
1525 
1526 done:
1527 	hdd_err("suspend_noirq failed, status: %d", errno);
1528 
1529 	return errno;
1530 }
1531 
1532 /**
1533  * wlan_hdd_bus_resume() - handles platform resume
1534  *
1535  * @type: WoW suspend type
1536  *
1537  * Does precondition validation. Ensures that a subsystem restart isn't in
1538  * progress.  Ensures that no load or unload is in progress.  Ensures that
1539  * it has valid pointers for the required contexts.
1540  * Calls into hif to resume the bus operation.
1541  * Calls into wma to handshake with firmware and notify it that the bus is up.
1542  * Calls into ol_txrx for symmetry.
1543  * Failures are treated as catastrophic.
1544  *
1545  * return: error code or 0 for success
1546  */
1547 int wlan_hdd_bus_resume(enum qdf_suspend_type type)
1548 {
1549 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1550 	void *hif_ctx;
1551 	int status;
1552 	QDF_STATUS qdf_status;
1553 	void *dp_soc;
1554 	struct bbm_params param = {0};
1555 
1556 	if (cds_is_driver_recovering())
1557 		return 0;
1558 
1559 	hdd_info("starting bus resume");
1560 
1561 	if (!hdd_ctx)
1562 		return -ENODEV;
1563 
1564 	/* If Wifi is off, return success for system resume */
1565 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1566 		hdd_debug("Driver Module closed; return success");
1567 		return 0;
1568 	}
1569 
1570 	status = wlan_hdd_validate_context(hdd_ctx);
1571 	if (status)
1572 		return status;
1573 
1574 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1575 	if (!hif_ctx)
1576 		return -EINVAL;
1577 
1578 	/*
1579 	 * Add bus votes at the beginning, before any bus transactions from
1580 	 * the WLAN SOC for TX/RX are resumed.
1581 	 */
1582 	param.policy = BBM_NON_PERSISTENT_POLICY;
1583 	param.policy_info.flag = BBM_APPS_RESUME;
1584 	ucfg_dp_bbm_apply_independent_policy(hdd_ctx->psoc, &param);
1585 
1586 	status = hif_bus_resume(hif_ctx);
1587 	if (status) {
1588 		hdd_err("Failed hif bus resume");
1589 		goto out;
1590 	}
1591 
1592 	hif_system_pm_set_state_resuming(hif_ctx);
1593 
1594 	qdf_status = ucfg_pmo_psoc_bus_resume_req(hdd_ctx->psoc,
1595 						  type);
1596 	status = qdf_status_to_os_return(qdf_status);
1597 	if (status) {
1598 		hdd_err("Failed pmo bus resume");
1599 		goto out;
1600 	}
1601 
1602 	qdf_status = ucfg_pmo_core_txrx_resume(hdd_ctx->psoc);
1603 	status = qdf_status_to_os_return(qdf_status);
1604 	if (status) {
1605 		hdd_err("Failed to resume TXRX");
1606 		goto out;
1607 	}
1608 
1609 	hif_system_pm_set_state_on(hif_ctx);
1610 
1611 	status = hif_bus_late_resume(hif_ctx);
1612 	if (status) {
1613 		hdd_err("Failed hif bus late resume");
1614 		goto out;
1615 	}
1616 
1617 	dp_soc = cds_get_context(QDF_MODULE_ID_SOC);
1618 	qdf_status = ucfg_dp_bus_resume(dp_soc, OL_TXRX_PDEV_ID);
1619 	status = qdf_status_to_os_return(qdf_status);
1620 	if (status) {
1621 		hdd_err("Failed cdp bus resume");
1622 		goto out;
1623 	}
1624 
1625 	hdd_info("bus resume succeeded");
1626 	return 0;
1627 
1628 out:
1629 	hif_system_pm_set_state_suspended(hif_ctx);
1630 	if (cds_is_driver_recovering() || cds_is_driver_in_bad_state() ||
1631 	    cds_is_fw_down())
1632 		return 0;
1633 
1634 	if (status != -ETIMEDOUT)
1635 		QDF_BUG(false);
1636 
1637 	return status;
1638 }
1639 
1640 /**
1641  * wlan_hdd_bus_resume_noirq(): handle bus resume no irq
1642  *
1643  * This function is called by the platform driver to do bus
1644  * resume no IRQ before calling resume callback. Call WMA and HIF
1645  * layers to complete the resume_noirq.
1646  *
1647  * Return: 0 for success and negative error code for failure
1648  */
1649 int wlan_hdd_bus_resume_noirq(void)
1650 {
1651 	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1652 	void *hif_ctx;
1653 	int status;
1654 	QDF_STATUS qdf_status;
1655 
1656 	hdd_debug("starting bus_resume_noirq");
1657 	if (cds_is_driver_recovering())
1658 		return 0;
1659 
1660 	if (!hdd_ctx)
1661 		return -ENODEV;
1662 
1663 	/* If Wifi is off, return success for system resume */
1664 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1665 		hdd_debug("Driver Module closed return success");
1666 		return 0;
1667 	}
1668 
1669 	status = wlan_hdd_validate_context(hdd_ctx);
1670 	if (status)
1671 		return status;
1672 
1673 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1674 	if (!hif_ctx)
1675 		return -EINVAL;
1676 
1677 	qdf_status = ucfg_pmo_psoc_clear_target_wake_up(hdd_ctx->psoc);
1678 	QDF_BUG(!qdf_status);
1679 
1680 	status = hif_bus_resume_noirq(hif_ctx);
1681 	QDF_BUG(!status);
1682 
1683 	hdd_debug("bus_resume_noirq done");
1684 
1685 	return status;
1686 }
1687 
1688 /**
1689  * wlan_hdd_bus_reset_resume() - resume wlan bus after reset
1690  *
1691  * This function is called to tell the driver that the device has been resumed
1692  * and it has also been reset. The driver should redo any necessary
1693  * initialization. It is mainly used by the USB bus
1694  *
1695  * Return: int 0 for success, non zero for failure
1696  */
1697 static int wlan_hdd_bus_reset_resume(void)
1698 {
1699 	struct hif_opaque_softc *scn = cds_get_context(QDF_MODULE_ID_HIF);
1700 
1701 	if (!scn)
1702 		return -EFAULT;
1703 
1704 	return hif_bus_reset_resume(scn);
1705 }
1706 
1707 #ifdef FEATURE_RUNTIME_PM
1708 /**
1709  * hdd_pld_runtime_suspend_cb() - Runtime suspend callback from PMO
1710  *
1711  * Return: 0 on success or error value otherwise
1712  */
1713 static int hdd_pld_runtime_suspend_cb(void)
1714 {
1715 	qdf_device_t qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
1716 
1717 	if (!qdf_dev)
1718 		return -EINVAL;
1719 
1720 	return pld_auto_suspend(qdf_dev->dev);
1721 }
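/*
 * This callback (and hdd_pld_runtime_resume_cb() below) is handed to PMO in
 * wlan_hdd_runtime_suspend()/wlan_hdd_runtime_resume(), so the actual
 * pld_auto_suspend()/pld_auto_resume() call is driven from within the PMO
 * runtime suspend/resume sequence rather than directly from HDD.
 */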
1722 
1723 /**
1724  * wlan_hdd_runtime_suspend() - suspend the wlan bus without apps suspend
1725  * @dev: Driver device instance
1726  *
1727  * Each layer is responsible for its own suspend actions.  wma_runtime_suspend
1728  * takes care of the parts of the 802.11 suspend that we want to do for runtime
1729  * suspend.
1730  *
1731  * Return: 0 or errno
1732  */
1733 static int wlan_hdd_runtime_suspend(struct device *dev)
1734 {
1735 	int err;
1736 	QDF_STATUS status;
1737 	struct hdd_context *hdd_ctx;
1738 	qdf_time_t delta;
1739 
1740 	hdd_debug("Starting runtime suspend");
1741 
1742 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1743 	err = wlan_hdd_validate_context(hdd_ctx);
1744 	if (err)
1745 		return err;
1746 
1747 	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
1748 		hdd_debug("Driver module closed skipping runtime suspend");
1749 		return 0;
1750 	}
1751 
1752 	if (!hdd_is_runtime_pm_enabled(hdd_ctx))
1753 		return 0;
1754 
1755 	if (ucfg_scan_get_pdev_status(hdd_ctx->pdev) !=
1756 	    SCAN_NOT_IN_PROGRESS) {
1757 		hdd_debug("Scan in progress, ignore runtime suspend");
1758 		return -EBUSY;
1759 	}
1760 
1761 	if (ucfg_ipa_is_tx_pending(hdd_ctx->pdev)) {
1762 		hdd_debug("IPA TX comps pending, ignore rtpm suspend");
1763 		return -EBUSY;
1764 	}
1765 
1766 	if (hdd_ctx->config->runtime_pm == hdd_runtime_pm_dynamic &&
1767 	    wlan_hdd_is_cpu_pm_qos_in_progress(hdd_ctx)) {
1768 		hdd_debug("PM QoS Latency constraint, ignore runtime suspend");
1769 		return -EBUSY;
1770 	}
1771 
1772 	status = ucfg_pmo_psoc_bus_runtime_suspend(hdd_ctx->psoc,
1773 						   hdd_pld_runtime_suspend_cb);
1774 	err = qdf_status_to_os_return(status);
1775 
1776 	hdd_ctx->runtime_suspend_done_time_stamp =
1777 						qdf_get_log_timestamp_usecs();
1778 	delta = hdd_ctx->runtime_suspend_done_time_stamp -
1779 		hdd_ctx->runtime_resume_start_time_stamp;
1780 
1781 	if (hdd_ctx->runtime_suspend_done_time_stamp >
1782 	   hdd_ctx->runtime_resume_start_time_stamp)
1783 		hdd_debug("Runtime suspend done result: %d total cxpc up time %lu microseconds",
1784 			  err, delta);
1785 
1786 	if (status == QDF_STATUS_SUCCESS)
1787 		ucfg_dp_bus_bw_compute_timer_stop(hdd_ctx->psoc);
1788 
1789 	hdd_debug("Runtime suspend done result: %d", err);
1790 
1791 	return err;
1792 }
1793 
1794 /**
1795  * hdd_pld_runtime_resume_cb() - Runtime resume callback from PMO
1796  *
1797  * Return: 0 on success or error value otherwise
1798  */
1799 static int hdd_pld_runtime_resume_cb(void)
1800 {
1801 	qdf_device_t qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
1802 
1803 	if (!qdf_dev)
1804 		return -EINVAL;
1805 
1806 	return pld_auto_resume(qdf_dev->dev);
1807 }
1808 
/**
 * wlan_hdd_runtime_resume() - resume the wlan bus from runtime suspend
 * @dev: Driver device instance
 *
 * Sets the runtime PM state and coordinates resume across hif, wma and
 * ol_txrx.
 *
 * Return: success since failure is a bug
 */
static int wlan_hdd_runtime_resume(struct device *dev)
{
	struct hdd_context *hdd_ctx;
	QDF_STATUS status;
	qdf_time_t delta;

	hdd_debug("Starting runtime resume");

	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);

	/*
	 * In __hdd_soc_remove(), runtime_sync_resume is called before the
	 * unload_in_progress flag is set. wlan_hdd_validate_context() would
	 * fail the resume while driver load/unload is in progress, so skip
	 * it here and only check for SSR in progress.
	 */
	if (!hdd_ctx)
		return 0;

	if (cds_is_driver_recovering()) {
		hdd_debug("Recovery in progress, state:0x%x",
			  cds_get_driver_state());
		return 0;
	}

	if (hdd_ctx->driver_status != DRIVER_MODULES_ENABLED) {
		hdd_debug("Driver module closed skipping runtime resume");
		return 0;
	}

	if (!hdd_is_runtime_pm_enabled(hdd_ctx))
		return 0;

	hdd_ctx->runtime_resume_start_time_stamp =
						qdf_get_log_timestamp_usecs();
	delta = hdd_ctx->runtime_resume_start_time_stamp -
		hdd_ctx->runtime_suspend_done_time_stamp;
	hdd_debug("Starting runtime resume total cxpc down time %lu microseconds",
		  delta);

	status = ucfg_pmo_psoc_bus_runtime_resume(hdd_ctx->psoc,
						  hdd_pld_runtime_resume_cb);
	if (status != QDF_STATUS_SUCCESS) {
		hdd_err("PMO Runtime resume failed: %d", status);
	} else {
		if (policy_mgr_get_connection_count(hdd_ctx->psoc))
			ucfg_dp_bus_bw_compute_timer_try_start(hdd_ctx->psoc);
	}

	hdd_debug("Runtime resume done");

	return 0;
}
#endif

/**
 * wlan_hdd_pld_probe() - probe function registered to PLD
 * @dev: device
 * @pld_bus_type: PLD bus type
 * @bdev: bus device structure
 * @id: bus identifier for shared busses
 *
 * Return: 0 on success
 */
static int wlan_hdd_pld_probe(struct device *dev,
			      enum pld_bus_type pld_bus_type,
			      void *bdev,
			      void *id)
{
	enum qdf_bus_type bus_type = to_bus_type(pld_bus_type);

	if (bus_type == QDF_BUS_TYPE_NONE) {
		hdd_err("Invalid bus type %d->%d", pld_bus_type, bus_type);
		return -EINVAL;
	}
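
	/*
	 * Register the hang event buffer with the SSR driver-dump framework
	 * before probing, so it is included in any driver dump collected
	 * after a subsequent SSR.
	 */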
	qdf_ssr_driver_dump_register_region("hang_event_data",
					    g_fw_host_hang_event,
					    sizeof(g_fw_host_hang_event));

	return hdd_soc_probe(dev, bdev, id, bus_type);
}

/**
 * wlan_hdd_pld_remove() - remove function registered to PLD
 * @dev: device to remove
 * @bus_type: PLD bus type
 *
 * Return: void
 */
static void wlan_hdd_pld_remove(struct device *dev, enum pld_bus_type bus_type)
{
	hdd_enter();

	hdd_soc_remove(dev);
	qdf_ssr_driver_dump_unregister_region("hang_event_data");

	hdd_exit();
}

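/*
 * hdd_soc_idle_shutdown_lock() takes a wakelock and aborts any system
 * suspend in progress so that idle shutdown can proceed without racing
 * against APPS suspend; hdd_soc_idle_shutdown_unlock() releases it.
 */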
static void hdd_soc_idle_shutdown_lock(struct device *dev)
{
	hdd_prevent_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_IDLE_SHUTDOWN);

	hdd_abort_system_suspend(dev);
}

static void hdd_soc_idle_shutdown_unlock(void)
{
	hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_IDLE_SHUTDOWN);
}

/**
 * wlan_hdd_pld_idle_shutdown() - wifi module idle shutdown, called once the
 *                                interface inactivity timeout has expired
 * @dev: device to shut down
 * @bus_type: PLD bus type
 *
 * Return: 0 for success and negative error code for failure
 */
static int wlan_hdd_pld_idle_shutdown(struct device *dev,
				       enum pld_bus_type bus_type)
{
	int ret;

	hdd_soc_idle_shutdown_lock(dev);

	ret = hdd_psoc_idle_shutdown(dev);

	hdd_soc_idle_shutdown_unlock();

	return ret;
}

/**
 * wlan_hdd_pld_idle_restart() - wifi module idle restart after idle shutdown
 * @dev: device to restart
 * @bus_type: PLD bus type
 *
 * Return: 0 for success and negative error code for failure
 */
static int wlan_hdd_pld_idle_restart(struct device *dev,
				      enum pld_bus_type bus_type)
{
	return hdd_psoc_idle_restart(dev);
}

/**
 * wlan_hdd_pld_shutdown() - shutdown function registered to PLD
 * @dev: device to shutdown
 * @bus_type: PLD bus type
 *
 * Return: void
 */
static void wlan_hdd_pld_shutdown(struct device *dev,
				  enum pld_bus_type bus_type)
{
	hdd_enter();

	hdd_soc_recovery_shutdown(dev);

	hdd_exit();
}

/**
 * wlan_hdd_pld_reinit() - reinit function registered to PLD
 * @dev: device
 * @pld_bus_type: PLD bus type
 * @bdev: bus device structure
 * @id: bus identifier for shared busses
 *
 * Return: 0 on success
 */
static int wlan_hdd_pld_reinit(struct device *dev,
			       enum pld_bus_type pld_bus_type,
			       void *bdev,
			       void *id)
{
	enum qdf_bus_type bus_type = to_bus_type(pld_bus_type);

	if (bus_type == QDF_BUS_TYPE_NONE) {
		hdd_err("Invalid bus type %d->%d", pld_bus_type, bus_type);
		return -EINVAL;
	}

	return hdd_soc_recovery_reinit(dev, bdev, id, bus_type);
}

/**
 * wlan_hdd_pld_crash_shutdown() - crash_shutdown function registered to PLD
 * @dev: device
 * @bus_type: PLD bus type
 *
 * Return: void
 */
static void wlan_hdd_pld_crash_shutdown(struct device *dev,
			     enum pld_bus_type bus_type)
{
	wlan_hdd_crash_shutdown();
}

/**
 * wlan_hdd_pld_suspend() - suspend function registered to PLD
 * @dev: device
 * @bus_type: PLD bus type
 * @state: PM state
 *
 * Return: 0 on success
 */
static int wlan_hdd_pld_suspend(struct device *dev,
				enum pld_bus_type bus_type,
				pm_message_t state)
{
	struct osif_psoc_sync *psoc_sync;
	int errno;
	struct hdd_context *hdd_ctx;

	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
	if (!hdd_ctx)
		return -ENODEV;

	errno = wlan_hdd_validate_context(hdd_ctx);
	if (0 != errno) {
		if (pld_is_low_power_mode(hdd_ctx->parent_dev))
			hdd_debug("low power mode (Deep Sleep/Hibernate)");
		else
			return errno;
	}

	/*
	 * Stop the idle shutdown timer before the op starts. This is done
	 * here to avoid a deadlock, since idle shutdown waits for the DSC
	 * ops to complete.
	 */
	hdd_psoc_idle_timer_stop(hdd_ctx);

	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
	if (errno)
		return errno;

	errno = wlan_hdd_bus_suspend();

	osif_psoc_sync_op_stop(psoc_sync);

	return errno;
}

/**
 * wlan_hdd_pld_resume() - resume function registered to PLD
 * @dev: device
 * @bus_type: PLD bus type
 *
 * Return: 0 on success
 */
static int wlan_hdd_pld_resume(struct device *dev,
		    enum pld_bus_type bus_type)
{
	struct osif_psoc_sync *psoc_sync;
	int errno;

	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
	if (errno)
		return errno;

	errno = wlan_hdd_bus_resume(QDF_SYSTEM_SUSPEND);

	osif_psoc_sync_op_stop(psoc_sync);

	return errno;
}

/**
 * wlan_hdd_pld_suspend_noirq() - handle suspend no irq
 * @dev: device
 * @bus_type: PLD bus type
 *
 * Complete the actions started by suspend(). Carry out any additional
 * operations required for suspending the device that might be racing with
 * its driver's interrupt handler, which is guaranteed not to run while
 * suspend_noirq() is being executed. Make sure to resume the device if the
 * FW has sent the initial wake-up message and is expecting APPS to wake up.
 *
 * Return: 0 on success
 */
static int wlan_hdd_pld_suspend_noirq(struct device *dev,
				      enum pld_bus_type bus_type)
{
	struct osif_psoc_sync *psoc_sync;
	int errno;

	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
	if (errno)
		return errno;

	errno = wlan_hdd_bus_suspend_noirq();

	osif_psoc_sync_op_stop(psoc_sync);

	return errno;
}

/**
 * wlan_hdd_pld_resume_noirq() - handle resume no irq
 * @dev: device
 * @bus_type: PLD bus type
 *
 * Prepare for the execution of resume() by carrying out any operations
 * required for resuming the device that might be racing with its driver's
 * interrupt handler, which is guaranteed not to run while resume_noirq()
 * is being executed. Make sure to clear the target's initial wake-up
 * request so that the next suspend can happen cleanly.
 *
 * Return: 0 on success
 */
static int wlan_hdd_pld_resume_noirq(struct device *dev,
				     enum pld_bus_type bus_type)
{
	struct osif_psoc_sync *psoc_sync;
	int errno;

	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
	if (errno)
		return errno;

	errno = wlan_hdd_bus_resume_noirq();

	osif_psoc_sync_op_stop(psoc_sync);

	return errno;
}

/**
 * wlan_hdd_pld_reset_resume() - reset resume function registered to PLD
 * @dev: device
 * @bus_type: PLD bus type
 *
 * Return: 0 on success
 */
static int wlan_hdd_pld_reset_resume(struct device *dev,
				     enum pld_bus_type bus_type)
{
	struct osif_psoc_sync *psoc_sync;
	int errno;

	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
	if (errno)
		return errno;

	errno = wlan_hdd_bus_reset_resume();

	osif_psoc_sync_op_stop(psoc_sync);

	return errno;
}

/**
 * wlan_hdd_pld_notify_handler() - notify_handler function registered to PLD
 * @dev: device
 * @bus_type: PLD bus type
 * @state: Modem power state
 *
 * Return: void
 */
static void wlan_hdd_pld_notify_handler(struct device *dev,
			     enum pld_bus_type bus_type,
			     int state)
{
	wlan_hdd_notify_handler(state);
}

/**
 * wlan_hdd_pld_uevent() - platform uevent handler
 * @dev: device on which the uevent occurred
 * @event_data: uevent parameters
 *
 * Return: None
 */
static void
wlan_hdd_pld_uevent(struct device *dev, struct pld_uevent_data *event_data)
{
	struct qdf_notifer_data hang_evt_data;
	void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
	enum qdf_hang_reason reason = QDF_REASON_UNSPECIFIED;
	uint8_t bus_type;

	bus_type = pld_get_bus_type(dev);

	switch (event_data->uevent) {
	case PLD_SMMU_FAULT:
		qdf_set_smmu_fault_state(true);
		hdd_debug("Received smmu fault indication");
		break;
	case PLD_FW_DOWN:
		hdd_debug("Received firmware down indication");
		hdd_dump_log_buffer(NULL, NULL);
		cds_set_target_ready(false);
		cds_set_recovery_in_progress(true);
		hdd_init_start_completion();

		/* Notify external threads currently waiting on firmware
		 * by forcefully completing waiting events with a "reset"
		 * status. This will cause the events to fail early instead
		 * of timing out.
		 */
		qdf_complete_wait_events();

		/*
		 * On some platforms the uevent reaches the driver in
		 * process context, where it is safe to complete the SSR
		 * cleanup activities right here. On other platforms it is
		 * delivered in interrupt context, where performing the
		 * cleanup is not ideal, so defer the cleanup to
		 * hdd_soc_recovery_shutdown().
		 */
		if (qdf_in_interrupt() || bus_type == PLD_BUS_TYPE_PCIE)
			break;

		hdd_soc_recovery_cleanup();
		qdf_atomic_set(&is_recovery_cleanup_done, 1);

		break;
	case PLD_FW_HANG_EVENT:
		hdd_info("Received firmware hang event");
		cds_get_recovery_reason(&reason);

		if ((reason == QDF_REASON_UNSPECIFIED) && hif_ctx) {
			hif_display_ctrl_traffic_pipes_state(hif_ctx);
			hif_display_latest_desc_hist(hif_ctx);
		}

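		/*
		 * Collect host-side hang data through the hang event
		 * notifier chain, then copy the firmware-provided hang
		 * data at the FW offset, capping its length at half of
		 * the hang event buffer.
		 */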
		qdf_mem_zero(&g_fw_host_hang_event, QDF_HANG_EVENT_DATA_SIZE);
		hang_evt_data.hang_data = g_fw_host_hang_event;
		hang_evt_data.offset = 0;
		qdf_hang_event_notifier_call(reason, &hang_evt_data);
		hang_evt_data.offset = QDF_WLAN_HANG_FW_OFFSET;
		if (event_data->hang_data.hang_event_data_len >=
		    QDF_HANG_EVENT_DATA_SIZE / 2)
			event_data->hang_data.hang_event_data_len =
						QDF_HANG_EVENT_DATA_SIZE / 2;

		if (event_data->hang_data.hang_event_data_len)
			qdf_mem_copy((hang_evt_data.hang_data +
				      hang_evt_data.offset),
				     event_data->hang_data.hang_event_data,
				     event_data->hang_data.hang_event_data_len);

		hdd_send_hang_data(hang_evt_data.hang_data,
				   QDF_HANG_EVENT_DATA_SIZE);
		break;
	case PLD_BUS_EVENT:
		hdd_debug("Bus event received");

		/* Currently only link_down is handled. The event buffer
		 * can be extended to carry more bus info if needed later.
		 */
		if (event_data->bus_data.etype == PLD_BUS_EVENT_PCIE_LINK_DOWN)
			host_log_device_status(WLAN_STATUS_BUS_EXCEPTION);
		break;
	case PLD_SYS_REBOOT:
		hdd_info("Received system reboot");
		cds_set_sys_rebooting();
		break;
	default:
		/* other events intentionally not handled */
		hdd_debug("Received uevent %d", event_data->uevent);
		break;
	}
}

#ifdef WLAN_FEATURE_SSR_DRIVER_DUMP
static int
wlan_hdd_pld_collect_driver_dump(struct device *dev,
				 enum pld_bus_type bus_type,
				 struct cnss_ssr_driver_dump_entry *input_array,
				 size_t *num_entries_loaded)
{
	QDF_STATUS status;

	status = qdf_ssr_driver_dump_retrieve_regions(input_array,
						      num_entries_loaded);
	return qdf_status_to_os_return(status);
}
#endif

#ifdef FEATURE_RUNTIME_PM
/**
 * wlan_hdd_pld_runtime_suspend() - runtime suspend function registered to PLD
 * @dev: device
 * @bus_type: PLD bus type
 *
 * Return: 0 on success
 */
static int wlan_hdd_pld_runtime_suspend(struct device *dev,
					enum pld_bus_type bus_type)
{
	struct osif_psoc_sync *psoc_sync;
	int errno;

	errno = osif_psoc_sync_op_start(dev, &psoc_sync);
	if (errno)
		goto out;

	errno = wlan_hdd_runtime_suspend(dev);

	osif_psoc_sync_op_stop(psoc_sync);

out:
	/* If any errno other than -EAGAIN or -EBUSY is returned to the
	 * kernel, it is treated as a critical issue and all future runtime
	 * PM calls will fail, leaving runtime PM unusable. Such a case has
	 * been seen during SSR, so map those errors to -EAGAIN.
	 */
	if (errno && errno != -EAGAIN && errno != -EBUSY)
		errno = -EAGAIN;
	return errno;
}

/**
 * wlan_hdd_pld_runtime_resume() - runtime resume function registered to PLD
 * @dev: device
 * @bus_type: PLD bus type
 *
 * Return: 0 on success
 */
static int wlan_hdd_pld_runtime_resume(struct device *dev,
				       enum pld_bus_type bus_type)
{
	/* Unlike suspend, runtime PM resume can happen synchronously during
	 * driver shutdown or idle shutdown, so PSOC sync protection is
	 * omitted here.
	 */
	return wlan_hdd_runtime_resume(dev);
}
#endif

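/* PLD callbacks for this driver; the platform driver layer invokes these
 * for probe/remove, power management and recovery events on the
 * underlying bus.
 */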
struct pld_driver_ops wlan_drv_ops = {
	.probe      = wlan_hdd_pld_probe,
	.remove     = wlan_hdd_pld_remove,
	.idle_shutdown = wlan_hdd_pld_idle_shutdown,
	.idle_restart = wlan_hdd_pld_idle_restart,
	.shutdown   = wlan_hdd_pld_shutdown,
	.reinit     = wlan_hdd_pld_reinit,
	.crash_shutdown = wlan_hdd_pld_crash_shutdown,
	.suspend    = wlan_hdd_pld_suspend,
	.resume     = wlan_hdd_pld_resume,
	.suspend_noirq = wlan_hdd_pld_suspend_noirq,
	.resume_noirq  = wlan_hdd_pld_resume_noirq,
	.reset_resume = wlan_hdd_pld_reset_resume,
	.modem_status = wlan_hdd_pld_notify_handler,
	.uevent = wlan_hdd_pld_uevent,
#ifdef WLAN_FEATURE_SSR_DRIVER_DUMP
	.collect_driver_dump = wlan_hdd_pld_collect_driver_dump,
#endif
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = wlan_hdd_pld_runtime_suspend,
	.runtime_resume = wlan_hdd_pld_runtime_resume,
#endif
	.set_curr_therm_cdev_state = wlan_hdd_pld_set_thermal_mitigation,
};

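/**
 * wlan_hdd_register_driver() - register the HDD driver ops with PLD
 *
 * Return: result of pld_register_driver(); 0 on success, negative errno
 *         on failure
 */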
int wlan_hdd_register_driver(void)
{
	return pld_register_driver(&wlan_drv_ops);
}

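/**
 * wlan_hdd_unregister_driver() - unregister the HDD driver ops from PLD
 *
 * Return: void
 */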
void wlan_hdd_unregister_driver(void)
{
	pld_unregister_driver();
}