/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "hif_io32.h"
#include "hif_runtime_pm.h"
#include "hif.h"
#include "target_type.h"
#include "hif_main.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_bmi.h"
#include "regtable.h"
#include "hif_hw_version.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "qdf_status.h"
#include "qdf_atomic.h"
#include "pld_common.h"
#include "mp_dev.h"
#include "hif_debug.h"

#include "ce_tasklet.h"
#include "targaddrs.h"
#include "hif_exec.h"

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_pci_pm_runtime_enabled() - To check if Runtime PM is enabled
 * @scn: hif context
 *
 * This function will check if Runtime PM is enabled or not.
 *
 * Return: true if Runtime PM is enabled, false otherwise
 */
static bool hif_pci_pm_runtime_enabled(struct hif_softc *scn)
{
	if (scn->hif_config.enable_runtime_pm)
		return true;

	return pm_runtime_enabled(hif_bus_get_dev(scn));
}

/**
 * hif_pm_runtime_state_to_string() - Mapping state into string
 * @state: runtime pm state
 *
 * This function will map the runtime pm state into corresponding
 * string for debug purpose.
 *
 * Return: pointer to the string
 */
static const char *hif_pm_runtime_state_to_string(uint32_t state)
{
	switch (state) {
	case HIF_PM_RUNTIME_STATE_NONE:
		return "INIT_STATE";
	case HIF_PM_RUNTIME_STATE_ON:
		return "ON";
	case HIF_PM_RUNTIME_STATE_RESUMING:
		return "RESUMING";
	case HIF_PM_RUNTIME_STATE_SUSPENDING:
		return "SUSPENDING";
	case HIF_PM_RUNTIME_STATE_SUSPENDED:
		return "SUSPENDED";
	default:
		return "INVALID STATE";
	}
}

#define HIF_PCI_RUNTIME_PM_STATS(_s, _rpm_ctx, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, (_rpm_ctx)->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @scn: hif_softc context
 * @msg: log message
 *
 * Log runtime pm stats when something seems off.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_softc *scn,
				    const char *msg)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_pm_runtime_lock *ctx;
	int i;

	hif_nofl_debug("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
		       msg, atomic_read(&dev->power.usage_count),
		       hif_pm_runtime_state_to_string(
				atomic_read(&rpm_ctx->pm_state)),
		       rpm_ctx->prevent_suspend_cnt);

	hif_nofl_debug("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
		       dev->power.runtime_status,
		       dev->power.runtime_error,
		       dev->power.disable_depth,
		       dev->power.autosuspend_delay);

	hif_nofl_debug("runtime_get: %u, runtime_put: %u, request_resume: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get),
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put),
		       rpm_ctx->pm_stats.request_resume);

	hif_nofl_debug("get  put  get-timestamp put-timestamp :DBGID_NAME");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		hif_nofl_debug("%-10d %-10d  0x%-10llx  0x%-10llx :%-30s",
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_get_dbgid[i]),
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_put_dbgid[i]),
			       rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i],
			       rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i],
			       rtpm_string_from_dbgid(i));
	}

	hif_nofl_debug("allow_suspend: %u, prevent_suspend: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend),
		       qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));

	hif_nofl_debug("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
		       rpm_ctx->pm_stats.prevent_suspend_timeout,
		       rpm_ctx->pm_stats.allow_suspend_timeout);

	hif_nofl_debug("suspended: %u, resumed: %u",
		       rpm_ctx->pm_stats.suspended,
		       rpm_ctx->pm_stats.resumed);

	hif_nofl_debug("suspend_err: %u, runtime_get_err: %u",
		       rpm_ctx->pm_stats.suspend_err,
		       rpm_ctx->pm_stats.runtime_get_err);

	hif_nofl_debug("Active Wakeup Sources preventing Runtime Suspend: ");

	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		hif_nofl_debug("source %s; timeout %d ms",
			       ctx->name, ctx->timeout);
	}

	if (qdf_is_fw_down()) {
		hif_err("fw is down");
		return;
	}

	QDF_DEBUG_PANIC("hif_pci_runtime_pm_warn");
}

/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_softc *scn = s->private;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
		"SUSPENDING", "SUSPENDED"};
	unsigned int msecs_age;
	qdf_time_t usecs_age;
	int pm_state = atomic_read(&rpm_ctx->pm_state);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *ctx;
	int i;

	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
	seq_printf(s, "%30s: %ps\n", "Last Resume Caller",
		   rpm_ctx->pm_stats.last_resume_caller);
	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
		   rpm_ctx->pm_stats.last_busy_marker);

	usecs_age = qdf_get_log_timestamp_usecs() -
		rpm_ctx->pm_stats.last_busy_timestamp;
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
		   rpm_ctx->pm_stats.last_busy_timestamp / 1000000,
		   rpm_ctx->pm_stats.last_busy_timestamp % 1000000);
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
		   usecs_age / 1000000, usecs_age % 1000000);

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(jiffies -
					     rpm_ctx->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
			   msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
		   atomic_read(&dev->power.usage_count));

	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
		   rpm_ctx->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, resumed);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, request_resume);
	seq_printf(s, "%30s: %u\n", "prevent_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));
	seq_printf(s, "%30s: %u\n", "allow_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend));

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, runtime_get_err);

	seq_printf(s, "%30s: %u\n", "runtime_get",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get));
	seq_printf(s, "%30s: %u\n", "runtime_put",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put));
	seq_puts(s, "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				 &rpm_ctx->pm_stats.runtime_get_dbgid[i]));
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				 &rpm_ctx->pm_stats.runtime_put_dbgid[i]));
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i]);
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i]);
		seq_printf(s, ":%-30s\n", rtpm_string_from_dbgid(i));
	}

	timer_expires = rpm_ctx->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
			   msecs_age / 1000, msecs_age % 1000);
	}

	spin_lock_bh(&rpm_ctx->runtime_lock);
	if (list_empty(&rpm_ctx->prevent_suspend_list)) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}

#undef HIF_PCI_RUNTIME_PM_STATS

/**
 * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
 * @inode: inode of the debugfs entry
 * @file: file handle being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			inode->i_private);
}

static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_pci_runtime_pm_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};

/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @scn: hif context
 *
 * creates a debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	rpm_ctx->pm_dentry = debugfs_create_file("cnss_runtime_pm",
						 0400, NULL, scn,
						 &hif_pci_runtime_pm_fops);
}
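
/*
 * Usage note (assuming debugfs is mounted at its usual location): the
 * NULL parent above places the entry at the debugfs root, so the stats
 * can be read with "cat /sys/kernel/debug/cnss_runtime_pm".
 */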

/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @scn: hif context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	debugfs_remove(rpm_ctx->pm_dentry);
}

/**
 * hif_runtime_init() - Initialize Runtime PM
 * @dev: device structure
 * @delay: delay to be configured for auto suspend
 *
 * This function will init all the Runtime PM config.
 *
 * Return: void
 */
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}

/**
 * hif_runtime_exit() - Deinit/Exit Runtime PM
 * @dev: device structure
 *
 * This function will deinit all the Runtime PM config.
 *
 * Return: void
 */
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	/* Symmetric call to make sure default usage count == 2 */
	pm_runtime_forbid(dev);
}
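
/*
 * Illustrative sketch (an assumption about the stock Linux runtime PM
 * core, not driver code): with the PCI device probed at the default
 * usage_count of 2, the init/exit pair above stays symmetric:
 *
 *	hif_runtime_init(dev, delay);
 *		pm_runtime_allow(dev);        // lifts forbid: 2 -> 1
 *		pm_runtime_put_noidle(dev);   // 1 -> 0, autosuspend may run
 *	...
 *	hif_runtime_exit(dev);
 *		pm_runtime_get_noresume(dev); // 0 -> 1
 *		pm_runtime_forbid(dev);       // 1 -> 2, back to the default
 */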

static void hif_pm_runtime_lock_timeout_fn(void *data);

/**
 * hif_pm_runtime_start(): start the runtime pm
 * @scn: hif context
 *
 * After this call, runtime pm will be active.
 */
void hif_pm_runtime_start(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!scn->hif_config.enable_runtime_pm) {
		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
		return;
	}

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE) {
		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING/MONITOR mode\n",
			 __func__);
		return;
	}

	qdf_timer_init(NULL, &rpm_ctx->runtime_timer,
		       hif_pm_runtime_lock_timeout_fn,
		       scn, QDF_TIMER_TYPE_WAKE_APPS);

	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
		 scn->hif_config.runtime_pm_delay);

	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_ON);
	hif_runtime_init(dev, scn->hif_config.runtime_pm_delay);
	hif_runtime_pm_debugfs_create(scn);
}

/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @scn: hif context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_pm_runtime_start().
 */
void hif_pm_runtime_stop(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!scn->hif_config.enable_runtime_pm)
		return;

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE)
		return;

	hif_runtime_exit(dev);

	hif_pm_runtime_sync_resume(GET_HIF_OPAQUE_HDL(scn));

	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(scn);
	qdf_timer_free(&rpm_ctx->runtime_timer);
}

/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @scn: hif ctx
 *
 * Early initialization
 */
void hif_pm_runtime_open(struct hif_softc *scn)
{
	int i;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	spin_lock_init(&rpm_ctx->runtime_lock);
	qdf_atomic_init(&rpm_ctx->pm_state);
	hif_runtime_lock_init(&rpm_ctx->prevent_linkdown_lock,
			      "prevent_linkdown_lock");
	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);
	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get);
	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put);
	qdf_atomic_init(&rpm_ctx->pm_stats.allow_suspend);
	qdf_atomic_init(&rpm_ctx->pm_stats.prevent_suspend);
	for (i = 0; i < RTPM_ID_MAX; i++) {
		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
	}
	INIT_LIST_HEAD(&rpm_ctx->prevent_suspend_list);
}

/**
 * hif_check_for_get_put_out_of_sync() - Check if Get/Put is out of sync
 * @scn: hif context
 *
 * This function will check if get and put are out of sync or not.
 *
 * Return: void
 */
static void hif_check_for_get_put_out_of_sync(struct hif_softc *scn)
{
	int32_t i;
	int32_t get_count, put_count;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (qdf_is_fw_down())
		return;

	for (i = 0; i < RTPM_ID_MAX; i++) {
		get_count = qdf_atomic_read(
				&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
		put_count = qdf_atomic_read(
				&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
		if (get_count != put_count) {
			QDF_DEBUG_PANIC("%s get-put out of sync. get %d put %d",
					rtpm_string_from_dbgid(i),
					get_count, put_count);
		}
	}
}

/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize runtime PM gets/puts from driver
 * @scn: hif context
 *
 * Ensure all gets/puts are in sync before exiting runtime PM feature.
 * Also make sure all runtime PM locks are deinitialized properly.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_softc *scn)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	hif_check_for_get_put_out_of_sync(scn);

	spin_lock_bh(&rpm_ctx->runtime_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &rpm_ctx->prevent_suspend_list, list) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(scn), ctx);
		spin_lock_bh(&rpm_ctx->runtime_lock);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);
}

static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
					  struct hif_pm_runtime_lock *lock);

/**
 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
 * @scn: hif context
 *
 * API is used to empty the runtime pm prevent suspend list.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_softc *scn)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	spin_lock_bh(&rpm_ctx->runtime_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &rpm_ctx->prevent_suspend_list, list) {
		__hif_pm_runtime_allow_suspend(scn, ctx);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);
}

/**
 * hif_pm_runtime_close(): close runtime pm
 * @scn: hif ctx
 *
 * ensure runtime_pm is stopped before closing the driver
 */
void hif_pm_runtime_close(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);

	/*
	 * The cds hif context is already NULL at this point, so call
	 * hif_runtime_lock_deinit() instead of
	 * qdf_runtime_lock_deinit(&rpm_ctx->prevent_linkdown_lock);
	 */
	hif_runtime_lock_deinit(hif_ctx, rpm_ctx->prevent_linkdown_lock.lock);

	hif_is_recovery_in_progress(scn) ?
		hif_pm_runtime_sanitize_on_ssr_exit(scn) :
		hif_pm_runtime_sanitize_on_exit(scn);
}

/**
 * hif_pm_runtime_sync_resume() - Invoke synchronous runtime resume.
 * @hif_ctx: hif context
 *
 * This function will invoke synchronous runtime resume.
 *
 * Return: status
 */
int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	int pm_state;

	if (!scn)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		HIF_INFO("Runtime PM resume is requested by %ps",
			 (void *)_RET_IP_);

	rpm_ctx->pm_stats.request_resume++;
	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;

	return pm_runtime_resume(hif_bus_get_dev(scn));
}

/**
 * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend
 * @scn: hif context
 * @flag: prevent linkdown if true, otherwise allow
 *
 * This api should only be called as part of bus prevent linkdown
 */
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (flag)
		qdf_runtime_pm_prevent_suspend(&rpm_ctx->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&rpm_ctx->prevent_linkdown_lock);
}

/**
 * __hif_runtime_pm_set_state(): utility function
 * @scn: hif context
 * @state: state to set
 *
 * Sets the runtime pm state.
 */
static void __hif_runtime_pm_set_state(struct hif_softc *scn,
				       enum hif_pm_runtime_state state)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx) {
		HIF_ERROR("%s: HIF_CTX not initialized",
			  __func__);
		return;
	}

	qdf_atomic_set(&rpm_ctx->pm_state, state);
}

/**
 * hif_runtime_pm_set_state_on(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that the runtime pm state should be on
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}

/**
 * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm resume has started
 */
static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
}

/**
 * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm suspend has started
 */
static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
}

/**
 * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime suspend attempt has been completed successfully
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}

/**
 * hif_log_runtime_suspend_success() - log a successful runtime suspend
 * @hif_ctx: hif context
 */
static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspended++;
	rpm_ctx->pm_stats.suspend_jiffies = jiffies;
}

/**
 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
 * @hif_ctx: hif context
 *
 * log a failed runtime suspend
 */
static void hif_log_runtime_suspend_failure(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspend_err++;
}

/**
 * hif_log_runtime_resume_success() - log a successful runtime resume
 * @hif_ctx: hif context
 *
 * log a successful runtime resume
 */
static void hif_log_runtime_resume_success(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.resumed++;
}

/**
 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
 * @hif_ctx: hif context
 *
 * Record the failure.
 * Mark last busy to delay a retry.
 * Adjust the runtime_pm state.
 */
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_suspend_failure(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}

/**
 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
 * @hif_ctx: hif context
 *
 * Makes sure that the pci link will be taken down by the suspend operation.
 * If the hif layer is configured to leave the bus on, runtime suspend will
 * not save any power.
 *
 * Set the runtime suspend state to in progress.
 *
 * Return: -EINVAL if the bus won't go down, otherwise 0
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_can_suspend_link(hif_ctx)) {
		HIF_ERROR("Runtime PM not supported for link up suspend");
		return -EINVAL;
	}

	hif_runtime_pm_set_state_suspending(scn);
	return 0;
}

/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: hif context
 *
 * Record the success.
 * Adjust the runtime_pm state.
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(scn);
	hif_log_runtime_suspend_success(scn);
}

/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: hif context
 *
 * Update the runtime pm state.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_runtime_pm_set_state_resuming(scn);
}

/**
 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
 * @hif_ctx: hif context
 *
 * Record the success.
 * Mark last busy to prevent an immediate runtime suspend.
 * Adjust the runtime_pm state.
 */
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_resume_success(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}

/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 * @hif_ctx: hif context
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
		return errno;
	}

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		goto bus_resume;
	}

	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 0);

	return 0;

bus_resume:
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}

/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 * @hif_ctx: hif context
 *
 * ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
 *
 * fastpath only applicable to legacy copy engine
 */
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/* war_ce_src_ring_write_idx_set */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}

/**
 * hif_runtime_resume() - do the bus resume part of a runtime resume
 * @hif_ctx: hif context
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
	QDF_BUG(!hif_bus_resume(hif_ctx));
	return 0;
}

/**
 * hif_pm_stats_runtime_get_record() - record runtime get statistics
 * @scn: hif context
 * @rtpm_dbgid: debug id to trace who uses it
 *
 * Return: void
 */
static void hif_pm_stats_runtime_get_record(struct hif_softc *scn,
					    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (rtpm_dbgid >= RTPM_ID_MAX) {
		QDF_BUG(0);
		return;
	}
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get);
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get_dbgid[rtpm_dbgid]);
	rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[rtpm_dbgid] =
						qdf_get_log_timestamp();
}

/**
 * hif_pm_stats_runtime_put_record() - record runtime put statistics
 * @scn: hif context
 * @rtpm_dbgid: debug id to trace who uses it
 *
 * Return: void
 */
static void hif_pm_stats_runtime_put_record(struct hif_softc *scn,
					    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (rtpm_dbgid >= RTPM_ID_MAX) {
		QDF_BUG(0);
		return;
	}

	if (atomic_read(&dev->power.usage_count) <= 0) {
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put);
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put_dbgid[rtpm_dbgid]);
	rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[rtpm_dbgid] =
						qdf_get_log_timestamp();
}

/**
 * hif_pm_runtime_get_sync() - do a get operation with sync resume
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who uses it
 *
 * A get operation will prevent a runtime suspend until a corresponding
 * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
 * resume instead of requesting a resume if it is runtime PM suspended,
 * so it can only be called in non-atomic context.
 *
 * Return: 0 if it is runtime PM resumed, otherwise an error code.
 */
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
			    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int pm_state;
	int ret;

	if (!rpm_ctx)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);

	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
	ret = pm_runtime_get_sync(dev);

	/* Get can return 1 if the device is already active, just return
	 * success in that case.
	 */
	if (ret > 0)
		ret = 0;

	if (ret) {
		rpm_ctx->pm_stats.runtime_get_err++;
		hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
			qdf_atomic_read(&rpm_ctx->pm_state), ret);
		hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
	}

	return ret;
}
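
/*
 * Example (illustrative sketch, not called anywhere in this file): a
 * non-atomic caller pairs the sync get with a regular put around a
 * transaction that needs the bus up; RTPM_ID_WMI is only a sample
 * debug id here and issue_command() is a hypothetical helper.
 *
 *	if (hif_pm_runtime_get_sync(hif_ctx, RTPM_ID_WMI) == 0) {
 *		issue_command();
 *		hif_pm_runtime_put(hif_ctx, RTPM_ID_WMI);
 *	}
 */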

/**
 * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who uses it
 *
 * This API will do a runtime put operation followed by a sync suspend if usage
 * count is 0, so it can only be called in non-atomic context.
 *
 * Return: 0 for success otherwise an error code
 */
int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
				    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev;
	int usage_count;
	char *err = NULL;

	if (!scn)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	usage_count = atomic_read(&dev->power.usage_count);
	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
		err = "Unexpected PUT when runtime PM is disabled";
	else if (usage_count == 0)
		err = "PUT without a GET Operation";

	if (err) {
		hif_pci_runtime_pm_warn(scn, err);
		return -EINVAL;
	}

	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
	return pm_runtime_put_sync_suspend(dev);
}

/**
 * hif_pm_runtime_request_resume() - Invoke async runtime resume
 * @hif_ctx: hif context
 *
 * This function will invoke asynchronous runtime resume.
 *
 * Return: status
 */
int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	int pm_state;

	if (!scn)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		HIF_INFO("Runtime PM resume is requested by %ps",
			 (void *)_RET_IP_);

	rpm_ctx->pm_stats.request_resume++;
	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;

	return hif_pm_request_resume(hif_bus_get_dev(scn));
}

/**
 * hif_pm_runtime_mark_last_busy() - Mark last busy time
 * @hif_ctx: hif context
 *
 * This function will mark the last busy time, this will be used
 * to check if auto suspend delay expired or not.
 *
 * Return: void
 */
void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;

	if (!scn)
		return;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	rpm_ctx->pm_stats.last_busy_marker = (void *)_RET_IP_;
	rpm_ctx->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();

	pm_runtime_mark_last_busy(hif_bus_get_dev(scn));
}
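
/*
 * Example (illustrative sketch): a completion handler that holds no
 * get/put reference can still defer autosuspend by refreshing the
 * last-busy timestamp after each burst of work; process_completions()
 * is a hypothetical helper.
 *
 *	process_completions(ctx);
 *	hif_pm_runtime_mark_last_busy(hif_ctx);
 */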

/**
 * hif_pm_runtime_get_noresume() - Inc usage count without resume
 * @hif_ctx: hif context
 * @rtpm_dbgid: Id of the module calling get
 *
 * This function will increment device usage count to avoid runtime
 * suspend, but it would not do resume.
 *
 * Return: void
 */
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
				 wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	if (!hif_pci_pm_runtime_enabled(scn))
		return;

	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
	pm_runtime_get_noresume(hif_bus_get_dev(scn));
}

/**
 * hif_pm_runtime_get() - do a get operation on the device
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who uses it
 *
 * A get operation will prevent a runtime suspend until a
 * corresponding put is done.  This API should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND WILL NOT DO A GET!!!
 *
 * Return: success if the bus is up and a get has been issued,
 *   otherwise an error code.
 */
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
		       wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct device *dev;
	int ret;
	int pm_state;

	if (!scn) {
		hif_err("Could not do runtime get, scn is null");
		return -EFAULT;
	}

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);

	if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
	    pm_state == HIF_PM_RUNTIME_STATE_NONE) {
		hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
		ret = __hif_pm_runtime_get(dev);

		/* Get can return 1 if the device is already active, just return
		 * success in that case
		 */
		if (ret > 0)
			ret = 0;

		if (ret)
			hif_pm_runtime_put(hif_ctx, rtpm_dbgid);

		if (ret && ret != -EINPROGRESS) {
			rpm_ctx->pm_stats.runtime_get_err++;
			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
				qdf_atomic_read(&rpm_ctx->pm_state), ret);
		}

		return ret;
	}

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);
		ret = -EAGAIN;
	} else {
		ret = -EBUSY;
	}

	rpm_ctx->pm_stats.request_resume++;
	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
	hif_pm_request_resume(dev);

	return ret;
}
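
/*
 * Example (illustrative sketch): a TX path reacts to the
 * request-resume behaviour described above by requeuing the frame
 * until the bus is up; requeue_for_later() and send_to_target() are
 * hypothetical helpers and RTPM_ID_DATAPATH is only a sample debug id.
 *
 *	ret = hif_pm_runtime_get(hif_ctx, RTPM_ID_DATAPATH);
 *	if (ret) {
 *		// -EAGAIN/-EBUSY: a resume was requested, retry later
 *		requeue_for_later(pkt);
 *		return ret;
 *	}
 *	send_to_target(pkt);
 *	hif_pm_runtime_put(hif_ctx, RTPM_ID_DATAPATH);
 */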

/**
 * hif_pm_runtime_put() - do a put operation on the device
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who uses it
 *
 * A put operation will allow a runtime suspend after a corresponding
 * get was done.  This API should be used when sending data.
 *
 * This API will return a failure if runtime pm is stopped.
 * This API will return a failure if it would decrement the usage count
 * below 0.
 *
 * Return: 0 if the put is performed
 */
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
		       wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev;
	int usage_count;
	char *error = NULL;

	if (!scn) {
		HIF_ERROR("%s: Could not do runtime put, scn is null",
			  __func__);
		return -EFAULT;
	}

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	usage_count = atomic_read(&dev->power.usage_count);
	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
		error = "Unexpected PUT when runtime PM is disabled";
	else if (usage_count == 0)
		error = "PUT without a GET operation";

	if (error) {
		hif_pci_runtime_pm_warn(scn, error);
		return -EINVAL;
	}

	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);

	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_pm_runtime_put_auto(dev);

	return 0;
}

/**
 * hif_pm_runtime_put_noidle() - do a put operation with no idle
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who uses it
 *
 * This API will do a runtime put no idle operation
 *
 * Return: 0 for success otherwise an error code
 */
int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
			      wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev;
	int usage_count;
	char *err = NULL;

	if (!scn)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	usage_count = atomic_read(&dev->power.usage_count);
	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
		err = "Unexpected PUT when runtime PM is disabled";
	else if (usage_count == 0)
		err = "PUT without a GET operation";

	if (err) {
		hif_pci_runtime_pm_warn(scn, err);
		return -EINVAL;
	}

	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
	pm_runtime_put_noidle(dev);

	return 0;
}

/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 *                                      reason
 * @scn: hif context
 * @lock: runtime_pm lock being acquired
 *
 * Return: 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_softc *scn,
					    struct hif_pm_runtime_lock *lock)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;

	/*
	 * We shouldn't set context->timeout to zero here when the context
	 * is active, as we may get back-to-back calls of the timeout API
	 * for the same context.
	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
	 * Instead, context->timeout is set to zero in
	 * hif_pm_runtime_prevent_suspend() to ensure the timeout version
	 * is no longer active; the list entry of this context will be
	 * deleted during allow suspend.
	 */
	if (lock->active)
		return 0;

	ret = __hif_pm_runtime_get(dev);

	/*
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do a runtime_put here, as allow suspend is called
	 * later with the same context and the usage count is decremented
	 * there, so suspend will be prevented.
	 */

	if (ret < 0 && ret != -EINPROGRESS) {
		rpm_ctx->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(scn,
					"Prevent Suspend Runtime PM Error");
	}

	rpm_ctx->prevent_suspend_cnt++;

	lock->active = true;

	list_add_tail(&lock->list, &rpm_ctx->prevent_suspend_list);

	qdf_atomic_inc(&rpm_ctx->pm_stats.prevent_suspend);

	hif_debug("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  ret);

	return ret;
}

/**
 * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @scn: hif context
 * @lock: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count.
 *
 * Return: status
 */
static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
					  struct hif_pm_runtime_lock *lock)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;
	int usage_count;

	if (rpm_ctx->prevent_suspend_cnt == 0)
		return ret;

	if (!lock->active)
		return ret;

	usage_count = atomic_read(&dev->power.usage_count);

	/*
	 * For runtime PM enabled case, the usage count should never be 0
	 * at this point. For runtime PM disabled case, it should never be
	 * 2 at this point. Catch unexpected PUT without GET here.
	 */
	if ((usage_count == 2 && !scn->hif_config.enable_runtime_pm) ||
	    usage_count == 0) {
		hif_pci_runtime_pm_warn(scn, "PUT without a GET Operation");
		return -EINVAL;
	}

	list_del(&lock->list);

	rpm_ctx->prevent_suspend_cnt--;

	lock->active = false;
	lock->timeout = 0;

	hif_pm_runtime_mark_last_busy(GET_HIF_OPAQUE_HDL(scn));
	ret = hif_pm_runtime_put_auto(dev);

	hif_debug("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  ret);

	qdf_atomic_inc(&rpm_ctx->pm_stats.allow_suspend);
	return ret;
}

/**
 * hif_pm_runtime_lock_timeout_fn() - callback for the runtime lock timeout
 * @data: callback data that is the hif context
 *
 * If runtime locks are acquired with a timeout, this function releases
 * the locks when the last runtime lock expires.
 */
static void hif_pm_runtime_lock_timeout_fn(void *data)
{
	struct hif_softc *scn = data;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;

	spin_lock_bh(&rpm_ctx->runtime_lock);

	timer_expires = rpm_ctx->runtime_timer_expires;

	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                         CPU1 (timeout function)
	 * ----                         ----------------------
	 * spin_lock_irq
	 *                              timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                              spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		rpm_ctx->runtime_timer_expires = 0;
		list_for_each_entry_safe(context, temp,
					 &rpm_ctx->prevent_suspend_list, list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(scn, context);
				rpm_ctx->pm_stats.allow_suspend_timeout++;
			}
		}
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);
}

/**
 * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
 * @ol_sc: hif context
 * @data: runtime pm lock
 *
 * This function will prevent runtime suspend, by incrementing
 * device's usage count.
 *
 * Return: status
 */
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_pm_runtime_lock *context = data;

	if (!scn->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&rpm_ctx->runtime_lock);
	context->timeout = 0;
	__hif_pm_runtime_prevent_suspend(scn, context);
	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}

/**
 * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @ol_sc: hif context
 * @data: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count.
 *
 * Return: status
 */
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_pm_runtime_lock *context = data;

	if (!scn->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&rpm_ctx->runtime_lock);

	__hif_pm_runtime_allow_suspend(scn, context);

	/* The list can be empty as well in cases where
	 * we have one context in the list and the allow
	 * suspend came before the timer expires and we delete
	 * context above from the list.
	 * When list is empty prevent_suspend count will be zero.
	 */
	if (rpm_ctx->prevent_suspend_cnt == 0 &&
	    rpm_ctx->runtime_timer_expires > 0) {
		qdf_timer_free(&rpm_ctx->runtime_timer);
		rpm_ctx->runtime_timer_expires = 0;
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}

/**
 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
 * @ol_sc: HIF context
 * @lock: which lock is being acquired
 * @delay: Timeout in milliseconds
 *
 * Prevent runtime suspend with a timeout after which runtime suspend would be
 * allowed. This API uses a single timer to allow the suspend and the timer is
 * modified if the timeout is changed before the timer fires.
 * If the timeout is less than autosuspend_delay then use mark_last_busy
 * instead of starting the timer.
 *
 * It is wise to try not to use this API and correct the design if possible.
 *
 * Return: 0 on success and negative error code on failure
 */
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
					   struct hif_pm_runtime_lock *lock,
					   unsigned int delay)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;
	unsigned long expires;
	struct hif_pm_runtime_lock *context = lock;

	if (hif_is_load_or_unload_in_progress(scn)) {
		HIF_ERROR("%s: Load/unload in progress, ignore!",
			  __func__);
		return -EINVAL;
	}

	if (hif_is_recovery_in_progress(scn)) {
		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
		return -EINVAL;
	}

	if (!scn->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	/*
	 * Don't use internal timer if the timeout is less than auto suspend
	 * delay.
	 */
	if (delay <= dev->power.autosuspend_delay) {
		hif_pm_request_resume(dev);
		hif_pm_runtime_mark_last_busy(ol_sc);
		return ret;
	}

	expires = jiffies + msecs_to_jiffies(delay);
	expires += !expires;

	spin_lock_bh(&rpm_ctx->runtime_lock);

	context->timeout = delay;
	ret = __hif_pm_runtime_prevent_suspend(scn, context);
	rpm_ctx->pm_stats.prevent_suspend_timeout++;

	/* Modify the timer only if the new timeout is after the already
	 * configured timeout
	 */
	if (time_after(expires, rpm_ctx->runtime_timer_expires)) {
		qdf_timer_mod(&rpm_ctx->runtime_timer, delay);
		rpm_ctx->runtime_timer_expires = expires;
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);

	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  delay, ret);

	return ret;
}
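
/*
 * Example (illustrative sketch): hold off runtime suspend for up to
 * 500 ms around an asynchronous exchange; the allow call in the
 * completion path releases the lock early if the response beats the
 * timeout. fire_async_request() is a hypothetical helper.
 *
 *	hif_pm_runtime_prevent_suspend_timeout(hif_ctx, lock, 500);
 *	fire_async_request();
 *	// in the completion handler:
 *	hif_pm_runtime_allow_suspend(hif_ctx, lock);
 */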

/**
 * hif_runtime_lock_init() - API to initialize Runtime PM context
 * @lock: QDF runtime lock handle to initialize
 * @name: Context name
 *
 * This API initializes the Runtime PM context of the caller and
 * stores the pointer in the given lock handle.
 *
 * Return: 0 on success, -ENOMEM on allocation failure
 */
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{
	struct hif_pm_runtime_lock *context;

	HIF_INFO("Initializing Runtime PM wakelock %s", name);

	context = qdf_mem_malloc(sizeof(*context));
	if (!context)
		return -ENOMEM;

	context->name = name ? name : "Default";
	lock->lock = context;

	return 0;
}
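
/*
 * Example (illustrative sketch): the full lifecycle of a runtime PM
 * lock as a client would typically drive it, mirroring how this file
 * itself uses prevent_linkdown_lock.
 *
 *	qdf_runtime_lock_t wake_lock;
 *
 *	hif_runtime_lock_init(&wake_lock, "my_module");
 *	hif_pm_runtime_prevent_suspend(hif_ctx, wake_lock.lock);
 *	// ... section that must keep the bus awake ...
 *	hif_pm_runtime_allow_suspend(hif_ctx, wake_lock.lock);
 *	hif_runtime_lock_deinit(hif_ctx, wake_lock.lock);
 */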

/**
 * hif_runtime_lock_deinit() - This API frees the runtime pm context
 * @hif_ctx: HIF context
 * @data: Runtime PM context
 *
 * Return: void
 */
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct hif_pm_runtime_lock *context = data;

	if (!context) {
		HIF_ERROR("Runtime PM wakelock context is NULL");
		return;
	}

	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);

	/*
	 * Ensure to delete the context list entry and reduce the usage count
	 * before freeing the context if context is active.
	 */
	if (scn) {
		rpm_ctx = hif_bus_get_rpm_ctx(scn);
		spin_lock_bh(&rpm_ctx->runtime_lock);
		__hif_pm_runtime_allow_suspend(scn, context);
		spin_unlock_bh(&rpm_ctx->runtime_lock);
	}

	qdf_mem_free(context);
}

/**
 * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
 * @hif_ctx: HIF context
 *
 * Return: true for runtime suspended, otherwise false
 */
bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	return qdf_atomic_read(&rpm_ctx->pm_state) ==
					HIF_PM_RUNTIME_STATE_SUSPENDED;
}

/**
 * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
 * @hif_ctx: HIF context
 *
 * monitor_wake_intr variable can be used to indicate if driver expects wake
 * MSI for runtime PM
 *
 * Return: monitor_wake_intr variable
 */
int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	return qdf_atomic_read(&rpm_ctx->monitor_wake_intr);
}

/**
 * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
 * @hif_ctx: HIF context
 * @val: value to set
 *
 * monitor_wake_intr variable can be used to indicate if driver expects wake
 * MSI for runtime PM
 *
 * Return: void
 */
void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
					  int val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	qdf_atomic_set(&rpm_ctx->monitor_wake_intr, val);
}

/**
 * hif_pm_runtime_check_and_request_resume() - check if the device is runtime
 *					       suspended and request resume.
 * @hif_ctx: HIF context
 *
 * This function is to check if the device is runtime suspended and
 * request for runtime resume.
 *
 * Return: void
 */
void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
{
	if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) {
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		hif_pm_runtime_request_resume(hif_ctx);
	}
}
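
/*
 * Example (illustrative sketch): a wake-MSI handler would typically
 * funnel through the helper above, so a resume is only requested while
 * a runtime suspend is actually in flight; the handler itself is an
 * assumption, not part of this file.
 *
 *	static irqreturn_t hif_wake_irq_handler(int irq, void *context)
 *	{
 *		struct hif_opaque_softc *hif_ctx = context;
 *
 *		hif_pm_runtime_check_and_request_resume(hif_ctx);
 *		return IRQ_HANDLED;
 *	}
 */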

/**
 * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark by data path
 * @hif_ctx: HIF context
 *
 * Return: void
 */
void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;

	if (!scn)
		return;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 1);
	rpm_ctx->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();

	hif_pm_runtime_mark_last_busy(hif_ctx);
}

/**
 * hif_pm_runtime_is_dp_rx_busy() - Check if last busy was marked by dp rx
 * @hif_ctx: HIF context
 *
 * Return: dp rx busy set value
 */
int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;

	if (!scn)
		return 0;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	return qdf_atomic_read(&rpm_ctx->pm_dp_rx_busy);
}

/**
 * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
 * @hif_ctx: HIF context
 *
 * Return: timestamp of last mark busy by dp rx
 */
qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;

	if (!scn)
		return 0;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	return rpm_ctx->dp_last_busy_timestamp;
}
#endif /* FEATURE_RUNTIME_PM */