xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_runtime_pm.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7 
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/slab.h>
18 #include <linux/interrupt.h>
19 #include <linux/if_arp.h>
20 #include "hif_io32.h"
21 #include "hif_runtime_pm.h"
22 #include "hif.h"
23 #include "target_type.h"
24 #include "hif_main.h"
25 #include "ce_main.h"
26 #include "ce_api.h"
27 #include "ce_internal.h"
28 #include "ce_reg.h"
29 #include "ce_bmi.h"
30 #include "regtable.h"
31 #include "hif_hw_version.h"
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include "qdf_status.h"
35 #include "qdf_atomic.h"
36 #include "pld_common.h"
37 #include "mp_dev.h"
38 #include "hif_debug.h"
39 
40 #include "ce_tasklet.h"
41 #include "targaddrs.h"
42 #include "hif_exec.h"
43 
44 #define CNSS_RUNTIME_FILE "cnss_runtime_pm"
45 #define CNSS_RUNTIME_FILE_PERM QDF_FILE_USR_READ
46 
47 #ifdef FEATURE_RUNTIME_PM
48 #define PREVENT_LIST_STRING_LEN 200
49 
/**
 * hif_pci_pm_runtime_enabled() - To check if Runtime PM is enabled
 * @scn: hif context
 *
 * Runtime PM counts as enabled when either the ini knob
 * (enable_runtime_pm) is set, or the kernel PM core reports runtime PM
 * enabled for the underlying bus device.
 *
 * Return: true if runtime PM is enabled, false otherwise
 */
static bool hif_pci_pm_runtime_enabled(struct hif_softc *scn)
{
	if (scn->hif_config.enable_runtime_pm)
		return true;

	return pm_runtime_enabled(hif_bus_get_dev(scn));
}
65 
66 /**
67  * hif_pm_runtime_state_to_string() - Mapping state into string
68  * @state: runtime pm state
69  *
70  * This function will map the runtime pm state into corresponding
71  * string for debug purpose.
72  *
73  * Return: pointer to the string
74  */
75 static const char *hif_pm_runtime_state_to_string(uint32_t state)
76 {
77 	switch (state) {
78 	case HIF_PM_RUNTIME_STATE_NONE:
79 		return "INIT_STATE";
80 	case HIF_PM_RUNTIME_STATE_ON:
81 		return "ON";
82 	case HIF_PM_RUNTIME_STATE_RESUMING:
83 		return "RESUMING";
84 	case HIF_PM_RUNTIME_STATE_SUSPENDING:
85 		return "SUSPENDING";
86 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
87 		return "SUSPENDED";
88 	default:
89 		return "INVALID STATE";
90 	}
91 }
92 
/* Print one pm_stats integer counter as "<name>: <value>" to seq file _s */
#define HIF_PCI_RUNTIME_PM_STATS(_s, _rpm_ctx, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, (_rpm_ctx)->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @scn: hif_softc context
 * @msg: log message
 *
 * Log runtime pm stats when something seems off: the PM core usage
 * count and status fields, the driver's runtime PM state, every
 * per-debug-id get/put counter with its last-access timestamps, and
 * each wakeup source currently on the prevent-suspend list.  Ends in a
 * debug panic unless the firmware is already down, in which case an
 * inconsistency is expected and only an error is logged.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_softc *scn,
				    const char *msg)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_pm_runtime_lock *ctx;
	int i;

	hif_nofl_debug("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
		       msg, atomic_read(&dev->power.usage_count),
		       hif_pm_runtime_state_to_string(
				atomic_read(&rpm_ctx->pm_state)),
		       rpm_ctx->prevent_suspend_cnt);

	hif_nofl_debug("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
		       dev->power.runtime_status,
		       dev->power.runtime_error,
		       dev->power.disable_depth,
		       dev->power.autosuspend_delay);

	hif_nofl_debug("runtime_get: %u, runtime_put: %u, request_resume: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get),
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put),
		       rpm_ctx->pm_stats.request_resume);

	/* one line per debug id: get/put counts plus last timestamps */
	hif_nofl_debug("get  put  get-timestamp put-timestamp :DBGID_NAME");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		hif_nofl_debug("%-10d %-10d  0x%-10llx  0x%-10llx :%-30s",
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_get_dbgid[i]),
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_put_dbgid[i]),
			       rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i],
			       rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i],
			       rtpm_string_from_dbgid(i));
	}

	hif_nofl_debug("allow_suspend: %u, prevent_suspend: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend),
		       qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));

	hif_nofl_debug("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
		       rpm_ctx->pm_stats.prevent_suspend_timeout,
		       rpm_ctx->pm_stats.allow_suspend_timeout);

	hif_nofl_debug("Suspended: %u, resumed: %u count",
		       rpm_ctx->pm_stats.suspended,
		       rpm_ctx->pm_stats.resumed);

	hif_nofl_debug("suspend_err: %u, runtime_get_err: %u",
		       rpm_ctx->pm_stats.suspend_err,
		       rpm_ctx->pm_stats.runtime_get_err);

	hif_nofl_debug("Active Wakeup Sources preventing Runtime Suspend: ");

	/* NOTE(review): the prevent list is walked here without holding
	 * runtime_lock -- presumably tolerated on this debug/panic path;
	 * confirm against locking rules for prevent_suspend_list.
	 */
	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		hif_nofl_debug("source %s; timeout %d ms",
			       ctx->name, ctx->timeout);
	}

	/* fw-down makes get/put mismatch expected; don't panic */
	if (qdf_is_fw_down()) {
		hif_err("fw is down");
		return;
	}

	QDF_DEBUG_PANIC("hif_pci_runtime_pm_warn");
}
171 
/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * Debugging tool added to debugfs for displaying runtime PM stats:
 * current state, last-busy markers, suspend/resume counters, the
 * per-debug-id get/put table and the active prevent-suspend sources.
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_softc *scn = s->private;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	/* index order must match enum hif_pm_runtime_state values */
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
		"SUSPENDING", "SUSPENDED"};
	unsigned int msecs_age;
	qdf_time_t usecs_age;
	int pm_state = atomic_read(&rpm_ctx->pm_state);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *ctx;
	int i;

	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
	seq_printf(s, "%30s: %d(%s)\n", "last_resume_rtpm_dbgid",
		   rpm_ctx->pm_stats.last_resume_rtpm_dbgid,
		   rtpm_string_from_dbgid(
			   rpm_ctx->pm_stats.last_resume_rtpm_dbgid));
	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
		   rpm_ctx->pm_stats.last_busy_marker);

	/* timestamps are in microseconds; split into sec.usec for display */
	usecs_age = qdf_get_log_timestamp_usecs() -
		rpm_ctx->pm_stats.last_busy_timestamp;
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
		   rpm_ctx->pm_stats.last_busy_timestamp / 1000000,
		   rpm_ctx->pm_stats.last_busy_timestamp % 1000000);
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
		   usecs_age / 1000000, usecs_age % 1000000);

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(jiffies -
					     rpm_ctx->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
			   msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
		   atomic_read(&dev->power.usage_count));

	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
		   rpm_ctx->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, resumed);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, request_resume);
	seq_printf(s, "%30s: %u\n", "prevent_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));
	seq_printf(s, "%30s: %u\n", "allow_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend));

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, runtime_get_err);

	seq_printf(s, "%30s: %u\n", "runtime_get",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get));
	seq_printf(s, "%30s: %u\n", "runtime_put",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put));
	/* same per-debug-id table as hif_pci_runtime_pm_warn() prints */
	seq_puts(s, "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				 &rpm_ctx->pm_stats.runtime_get_dbgid[i]));
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				 &rpm_ctx->pm_stats.runtime_put_dbgid[i]));
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i]);
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i]);
		seq_printf(s, ":%-30s\n", rtpm_string_from_dbgid(i));
	}

	/* NOTE(review): if the timer already fired, timer_expires - jiffies
	 * underflows and prints a huge age; presumably runtime_timer_expires
	 * is cleared on expiry -- confirm against the timeout handler.
	 */
	timer_expires = rpm_ctx->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
			   msecs_age / 1000, msecs_age % 1000);
	}

	spin_lock_bh(&rpm_ctx->runtime_lock);
	if (list_empty(&rpm_ctx->prevent_suspend_list)) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}
282 
283 #undef HIF_PCI_RUNTIME_PM_STATS
284 
/**
 * hif_pci_runtime_pm_open() - open the runtime pm stats debugfs file
 * @inode: inode of the debugfs entry; i_private holds the hif context
 * @file: file being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			inode->i_private);
}
297 
/* File operations for the cnss_runtime_pm debugfs entry: read-only,
 * seq_file based, rendered by hif_pci_pm_runtime_debugfs_show().
 */
static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_pci_runtime_pm_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};
305 
306 /**
307  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
308  * @scn: hif context
309  *
310  * creates a debugfs entry to debug the runtime pm feature.
311  */
312 static void hif_runtime_pm_debugfs_create(struct hif_softc *scn)
313 {
314 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
315 
316 	rpm_ctx->pm_dentry = qdf_debugfs_create_entry(CNSS_RUNTIME_FILE,
317 						      CNSS_RUNTIME_FILE_PERM,
318 						      NULL,
319 						      scn,
320 						      &hif_pci_runtime_pm_fops);
321 }
322 
/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @scn: hif context
 *
 * Removes the debugfs entry created by hif_runtime_pm_debugfs_create().
 */
static void hif_runtime_pm_debugfs_remove(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	qdf_debugfs_remove_file(rpm_ctx->pm_dentry);
}
335 
/**
 * hif_runtime_init() - Initialize Runtime PM
 * @dev: device structure
 * @delay: delay to be configured for auto suspend
 *
 * This function will init all the Runtime PM config.
 *
 * Call order matters here: autosuspend is configured before runtime PM
 * is allowed, and pm_runtime_put_noidle() drops a usage-count
 * reference without triggering an immediate idle check (the matching
 * get is taken back in hif_runtime_exit()).
 *
 * Return: void
 */
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}
354 
/**
 * hif_runtime_exit() - Deinit/Exit Runtime PM
 * @dev: device structure
 *
 * This function will deinit all the Runtime PM config.
 * Mirrors hif_runtime_init(): takes back the usage-count reference
 * (get_noresume), marks the device active, and forbids runtime PM.
 *
 * Return: void
 */
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	/* Symmetric call to make sure default usage count == 2 */
	pm_runtime_forbid(dev);
}
370 
371 static void hif_pm_runtime_lock_timeout_fn(void *data);
372 
373 /**
374  * hif_pm_runtime_start(): start the runtime pm
375  * @scn: hif context
376  *
377  * After this call, runtime pm will be active.
378  */
379 void hif_pm_runtime_start(struct hif_softc *scn)
380 {
381 	uint32_t mode = hif_get_conparam(scn);
382 	struct device *dev = hif_bus_get_dev(scn);
383 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
384 
385 	if (!scn->hif_config.enable_runtime_pm) {
386 		hif_info("RUNTIME PM is disabled in ini");
387 		return;
388 	}
389 
390 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
391 	    mode == QDF_GLOBAL_MONITOR_MODE) {
392 		hif_info("RUNTIME PM is disabled for FTM/EPPING mode");
393 		return;
394 	}
395 
396 	qdf_timer_init(NULL, &rpm_ctx->runtime_timer,
397 		       hif_pm_runtime_lock_timeout_fn,
398 		       scn, QDF_TIMER_TYPE_WAKE_APPS);
399 
400 	hif_info("Enabling RUNTIME PM, Delay: %d ms",
401 		 scn->hif_config.runtime_pm_delay);
402 
403 	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_ON);
404 	hif_runtime_init(dev, scn->hif_config.runtime_pm_delay);
405 	hif_runtime_pm_debugfs_create(scn);
406 }
407 
/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @scn: hif context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_pm_runtime_start().
 */
void hif_pm_runtime_stop(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	/* mirror the early-outs of hif_pm_runtime_start(): nothing was
	 * started in these configurations, so nothing to stop
	 */
	if (!scn->hif_config.enable_runtime_pm)
		return;

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE)
		return;

	hif_runtime_exit(dev);

	/* bring the device fully up before tearing the feature down */
	hif_pm_runtime_sync_resume(GET_HIF_OPAQUE_HDL(scn), RTPM_ID_PM_STOP);

	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(scn);
	qdf_timer_free(&rpm_ctx->runtime_timer);
}
437 
438 /**
439  * hif_pm_runtime_open(): initialize runtime pm
440  * @scn: hif ctx
441  *
442  * Early initialization
443  */
444 void hif_pm_runtime_open(struct hif_softc *scn)
445 {
446 	int i;
447 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
448 
449 	spin_lock_init(&rpm_ctx->runtime_lock);
450 	qdf_spinlock_create(&rpm_ctx->runtime_suspend_lock);
451 	qdf_atomic_init(&rpm_ctx->pm_state);
452 	hif_runtime_lock_init(&rpm_ctx->prevent_linkdown_lock,
453 			      "prevent_linkdown_lock");
454 	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);
455 	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get);
456 	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put);
457 	qdf_atomic_init(&rpm_ctx->pm_stats.allow_suspend);
458 	qdf_atomic_init(&rpm_ctx->pm_stats.prevent_suspend);
459 	for (i = 0; i < RTPM_ID_MAX; i++) {
460 		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
461 		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
462 	}
463 	INIT_LIST_HEAD(&rpm_ctx->prevent_suspend_list);
464 }
465 
466 /**
467  * hif_check_for_get_put_out_of_sync() - Check if Get/Put is out of sync
468  * @scn: hif context
469  *
470  * This function will check if get and put are out of sync or not.
471  *
472  * Return: void
473  */
474 static void  hif_check_for_get_put_out_of_sync(struct hif_softc *scn)
475 {
476 	int32_t i;
477 	int32_t get_count, put_count;
478 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
479 
480 	if (qdf_is_fw_down())
481 		return;
482 
483 	for (i = 0; i < RTPM_ID_MAX; i++) {
484 		get_count = qdf_atomic_read(
485 				&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
486 		put_count = qdf_atomic_read(
487 				&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
488 		if (get_count != put_count) {
489 			QDF_DEBUG_PANIC("%s get-put out of sync. get %d put %d",
490 					rtpm_string_from_dbgid(i),
491 					get_count, put_count);
492 		}
493 	}
494 }
495 
/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize runtime PM gets/puts from driver
 * @scn: hif context
 *
 * Ensure all gets/puts are in sync before exiting runtime PM feature.
 * Also make sure all runtime PM locks are deinitialized properly.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_softc *scn)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	hif_check_for_get_put_out_of_sync(scn);

	/* NOTE(review): runtime_lock is dropped around each deinit call
	 * because hif_runtime_lock_deinit() takes it internally; the 'tmp'
	 * node cached by list_for_each_entry_safe could in principle go
	 * away while unlocked -- presumably no other context mutates the
	 * list this late in teardown; confirm.
	 */
	spin_lock_bh(&rpm_ctx->runtime_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &rpm_ctx->prevent_suspend_list, list) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(scn), ctx);
		spin_lock_bh(&rpm_ctx->runtime_lock);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);
}
521 
522 static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
523 					  struct hif_pm_runtime_lock *lock);
524 
/**
 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
 * @scn: hif context
 *
 * API is used to empty the runtime pm prevent suspend list. Unlike the
 * normal exit path, locks are only released (allowing suspend), not
 * deinitialized, since their owners may still hold references across
 * subsystem recovery.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_softc *scn)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	spin_lock_bh(&rpm_ctx->runtime_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &rpm_ctx->prevent_suspend_list, list) {
		__hif_pm_runtime_allow_suspend(scn, ctx);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);
}
545 
546 /**
547  * hif_pm_runtime_close(): close runtime pm
548  * @scn: hif ctx
549  *
550  * ensure runtime_pm is stopped before closing the driver
551  */
552 void hif_pm_runtime_close(struct hif_softc *scn)
553 {
554 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
555 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
556 
557 	/*
558 	 * Here cds hif context was already NULL,
559 	 * so calling hif_runtime_lock_deinit, instead of
560 	 * qdf_runtime_lock_deinit(&rpm_ctx->prevent_linkdown_lock);
561 	 */
562 	hif_runtime_lock_deinit(hif_ctx, rpm_ctx->prevent_linkdown_lock.lock);
563 
564 	hif_is_recovery_in_progress(scn) ?
565 		hif_pm_runtime_sanitize_on_ssr_exit(scn) :
566 		hif_pm_runtime_sanitize_on_exit(scn);
567 
568 	qdf_spinlock_destroy(&rpm_ctx->runtime_suspend_lock);
569 }
570 
571 /**
572  * hif_pm_runtime_sync_resume() - Invoke synchronous runtime resume.
573  * @hif_ctx: hif context
574  * @rtpm_dbgid: dbgid to trace who use it
575  *
576  * This function will invoke synchronous runtime resume.
577  *
578  * Return: status
579  */
580 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx,
581 			       wlan_rtpm_dbgid rtpm_dbgid)
582 {
583 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
584 	struct hif_runtime_pm_ctx *rpm_ctx;
585 	int pm_state;
586 
587 	if (!scn)
588 		return -EINVAL;
589 
590 	if (!hif_pci_pm_runtime_enabled(scn))
591 		return 0;
592 
593 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
594 	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
595 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
596 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
597 		hif_info("request runtime PM resume, rtpm_dbgid(%d,%s)",
598 			 rtpm_dbgid,
599 			 rtpm_string_from_dbgid(rtpm_dbgid));
600 
601 	rpm_ctx->pm_stats.request_resume++;
602 	rpm_ctx->pm_stats.last_resume_rtpm_dbgid = rtpm_dbgid;
603 
604 	return pm_runtime_resume(hif_bus_get_dev(scn));
605 }
606 
607 /**
608  * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
609  * @scn: hif context
610  * @flag: prevent linkdown if true otherwise allow
611  *
612  * this api should only be called as part of bus prevent linkdown
613  */
614 void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
615 {
616 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
617 
618 	if (flag)
619 		qdf_runtime_pm_prevent_suspend(&rpm_ctx->prevent_linkdown_lock);
620 	else
621 		qdf_runtime_pm_allow_suspend(&rpm_ctx->prevent_linkdown_lock);
622 }
623 
624 /**
625  * __hif_runtime_pm_set_state(): utility function
626  * @state: state to set
627  *
628  * indexes into the runtime pm state and sets it.
629  */
630 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
631 				       enum hif_pm_runtime_state state)
632 {
633 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
634 
635 	if (!rpm_ctx) {
636 		hif_err("HIF_CTX not initialized");
637 		return;
638 	}
639 
640 	qdf_atomic_set(&rpm_ctx->pm_state, state);
641 }
642 
/**
 * hif_runtime_pm_set_state_on():  adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that the runtime pm state should be on.
 *
 * Return: void
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}
652 
/**
 * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm resuming has started.
 *
 * Return: void
 */
static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
}
662 
/**
 * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm suspend has started.
 *
 * Return: void
 */
static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
}
672 
/**
 * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime suspend attempt has been completed
 * successfully.
 *
 * Return: void
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}
682 
/**
 * hif_log_runtime_suspend_success() - log a successful runtime suspend
 * @hif_ctx: hif context
 *
 * Bumps the suspended counter and records when the suspend happened
 * (in jiffies, for the "Suspended Since" debugfs line).
 *
 * Return: void
 */
static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
{
	/* NOTE(review): @hif_ctx is already a hif_softc; HIF_GET_SOFTC is
	 * redundant here but harmless -- presumably a cast-only macro.
	 */
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspended++;
	rpm_ctx->pm_stats.suspend_jiffies = jiffies;
}
697 
/**
 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
 * @hif_ctx: hif context
 *
 * Bumps the suspend error counter.  (Marking last-busy to delay the
 * retry is done by the caller, not here.)
 *
 * Return: void
 */
static void hif_log_runtime_suspend_failure(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspend_err++;
}
714 
/**
 * hif_log_runtime_resume_success() - log a successful runtime resume
 * @hif_ctx: hif context
 *
 * Bumps the resumed counter.  (Marking last-busy to prevent an
 * immediate re-suspend is done by the caller, not here.)
 *
 * Return: void
 */
static void hif_log_runtime_resume_success(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.resumed++;
}
731 
/**
 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
 * @hif_ctx: hif context
 *
 * Record the failure.
 * Mark last busy to delay a retry.
 * Adjust the runtime_pm state back to ON.
 *
 * Return: void
 */
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_suspend_failure(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}
747 
/**
 * hif_pm_runtime_is_suspend_allowed() - check prevent-suspend votes
 * @scn: hif context
 *
 * Suspend is allowed only when runtime PM is enabled in ini and no
 * client currently holds a prevent-suspend vote.  When suspend is not
 * allowed, the names of the voting clients are collected (bounded by
 * PREVENT_LIST_STRING_LEN) and logged.
 *
 * Return: true if runtime suspend is currently allowed
 */
static bool  hif_pm_runtime_is_suspend_allowed(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_pm_runtime_lock *ctx;
	uint32_t prevent_suspend_cnt;
	char *str_buf;
	bool is_suspend_allowed;
	int len = 0;

	if (!scn->hif_config.enable_runtime_pm)
		return false;

	str_buf = qdf_mem_malloc(PREVENT_LIST_STRING_LEN);
	if (!str_buf)
		return false;

	spin_lock_bh(&rpm_ctx->runtime_lock);
	prevent_suspend_cnt = rpm_ctx->prevent_suspend_cnt;
	is_suspend_allowed = (prevent_suspend_cnt == 0);
	if (!is_suspend_allowed) {
		list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list)
			len += qdf_scnprintf(str_buf + len,
				PREVENT_LIST_STRING_LEN - len,
				"%s ", ctx->name);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);

	/* NOTE(review): prevent_suspend_cnt is re-read here outside the
	 * lock, so the logged count may differ from the snapshot above.
	 */
	if (!is_suspend_allowed)
		hif_info("prevent_suspend_cnt %u, prevent_list: %s",
			 rpm_ctx->prevent_suspend_cnt, str_buf);

	qdf_mem_free(str_buf);

	return is_suspend_allowed;
}
783 
784 void hif_print_runtime_pm_prevent_list(struct hif_opaque_softc *hif_ctx)
785 {
786 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
787 
788 	hif_pm_runtime_is_suspend_allowed(scn);
789 
790 	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
791 }
792 
/**
 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
 * @hif_ctx: hif context
 *
 * Makes sure that the pci link will be taken down by the suspend operation.
 * If the hif layer is configured to leave the bus on, runtime suspend will
 * not save any power.
 *
 * Set the runtime suspend state to in progress.
 *
 * Return: -EINVAL if the bus won't go down or suspend is vetoed,
 *	   otherwise 0
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_can_suspend_link(hif_ctx)) {
		hif_err("Runtime PM not supported for link up suspend");
		return -EINVAL;
	}

	hif_runtime_pm_set_state_suspending(scn);

	/* keep this after set suspending */
	if (!hif_pm_runtime_is_suspend_allowed(scn)) {
		hif_info("Runtime PM not allowed now");
		return -EINVAL;
	}

	return 0;
}
823 
/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: hif context
 *
 * Record the success.
 * Adjust the runtime_pm state to SUSPENDED.
 *
 * Return: void
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(scn);
	hif_log_runtime_suspend_success(scn);
}
837 
/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: hif context
 *
 * Clears the wake-interrupt monitor flag, then marks the runtime PM
 * state as RESUMING.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	/* stop monitoring wake interrupts before flipping the state */
	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_runtime_pm_set_state_resuming(HIF_GET_SOFTC(hif_ctx));
}
850 
/**
 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
 * @hif_ctx: hif context
 *
 * Record the success.
 * Mark last busy so the PM core does not suspend again immediately.
 * Adjust the runtime_pm state back to ON.
 *
 * Return: void
 */
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_resume_success(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}
865 
/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 * @hif_ctx: hif context
 *
 * Suspends the bus, arms wake-interrupt monitoring, then runs the
 * noirq suspend stage.  If the noirq stage fails, the bus is resumed
 * again so the device is left in a consistent state.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend: %d", errno);
		return errno;
	}

	/* watch for wake interrupts between the two suspend stages */
	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend noirq: %d", errno);
		/* undo the monitor arm before rolling back the suspend */
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		goto bus_resume;
	}

	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 0);

	return 0;

bus_resume:
	/* rollback; a failed resume here would leave the bus dead */
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}
901 
/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 * @hif_ctx: hif context
 *
 * Ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
 *
 * Fastpath only applicable to legacy copy engine.
 *
 * Return: void
 */
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		/* target access must be up before touching CE registers */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/*war_ce_src_ring_write_idx_set */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}
933 
/**
 * hif_runtime_resume() - do the bus resume part of a runtime resume
 * @hif_ctx: hif context
 *
 * Runs the noirq resume stage (which must not fail), then the regular
 * bus resume.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int rc;

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));

	rc = hif_bus_resume(hif_ctx);
	if (rc)
		hif_err("Failed runtime resume: %d", rc);

	return rc;
}
950 
/**
 * hif_pm_stats_runtime_get_record() - record runtime get statistics
 * @scn: hif context
 * @rtpm_dbgid: debug id to trace who use it
 *
 * Bumps the aggregate and per-debug-id get counters and stamps the
 * per-debug-id last-get timestamp.  Out-of-range ids trip a debug
 * assertion and are ignored.
 *
 * Return: void
 */
static void hif_pm_stats_runtime_get_record(struct hif_softc *scn,
					    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (rtpm_dbgid >= RTPM_ID_MAX) {
		QDF_BUG(0);
		return;
	}
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get);
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get_dbgid[rtpm_dbgid]);
	rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[rtpm_dbgid] =
						qdf_get_log_timestamp();
}
973 
/**
 * hif_pm_stats_runtime_put_record() - record runtime put statistics
 * @scn: hif context
 * @rtpm_dbgid: dbg_id to trace who use it
 *
 * Bumps the aggregate and per-debug-id put counters and stamps the
 * per-debug-id last-put timestamp.  An out-of-range id, or a put when
 * the PM core usage count is already zero (i.e. an unbalanced put),
 * trips a debug assertion and is ignored.
 *
 * Return: void
 */
static void hif_pm_stats_runtime_put_record(struct hif_softc *scn,
					    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (rtpm_dbgid >= RTPM_ID_MAX) {
		QDF_BUG(0);
		return;
	}

	/* a put with no outstanding get indicates a caller bug */
	if (atomic_read(&dev->power.usage_count) <= 0) {
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put);
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put_dbgid[rtpm_dbgid]);
	rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[rtpm_dbgid] =
						qdf_get_log_timestamp();
}
1003 
1004 /**
1005  * hif_pm_runtime_get_sync() - do a get operation with sync resume
1006  * @hif_ctx: pointer of HIF context
1007  * @rtpm_dbgid: dbgid to trace who use it
1008  *
1009  * A get operation will prevent a runtime suspend until a corresponding
1010  * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
1011  * resume instead of requesting a resume if it is runtime PM suspended
1012  * so it can only be called in non-atomic context.
1013  *
1014  * Return: 0 if it is runtime PM resumed otherwise an error code.
1015  */
1016 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
1017 			    wlan_rtpm_dbgid rtpm_dbgid)
1018 {
1019 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1020 	struct device *dev = hif_bus_get_dev(scn);
1021 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1022 	int pm_state;
1023 	int ret;
1024 
1025 	if (!rpm_ctx)
1026 		return -EINVAL;
1027 
1028 	if (!hif_pci_pm_runtime_enabled(scn))
1029 		return 0;
1030 
1031 	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
1032 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
1033 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
1034 		hif_info_high("Runtime PM resume is requested by %ps",
1035 			      (void *)_RET_IP_);
1036 
1037 	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
1038 	ret = pm_runtime_get_sync(dev);
1039 
1040 	/* Get can return 1 if the device is already active, just return
1041 	 * success in that case.
1042 	 */
1043 	if (ret > 0)
1044 		ret = 0;
1045 
1046 	if (ret) {
1047 		rpm_ctx->pm_stats.runtime_get_err++;
1048 		hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
1049 			qdf_atomic_read(&rpm_ctx->pm_state), ret);
1050 		hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
1051 	}
1052 
1053 	return ret;
1054 }
1055 
1056 /**
1057  * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
1058  * @hif_ctx: pointer of HIF context
1059  * @rtpm_dbgid: dbgid to trace who use it
1060  *
1061  * This API will do a runtime put operation followed by a sync suspend if usage
1062  * count is 0 so it can only be called in non-atomic context.
1063  *
1064  * Return: 0 for success otherwise an error code
1065  */
1066 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1067 				    wlan_rtpm_dbgid rtpm_dbgid)
1068 {
1069 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1070 	struct device *dev;
1071 	int usage_count;
1072 	char *err = NULL;
1073 
1074 	if (!scn)
1075 		return -EINVAL;
1076 
1077 	if (!hif_pci_pm_runtime_enabled(scn))
1078 		return 0;
1079 
1080 	dev = hif_bus_get_dev(scn);
1081 	usage_count = atomic_read(&dev->power.usage_count);
1082 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
1083 		err = "Uexpected PUT when runtime PM is disabled";
1084 	else if (usage_count == 0)
1085 		err = "PUT without a GET Operation";
1086 
1087 	if (err) {
1088 		hif_pci_runtime_pm_warn(scn, err);
1089 		return -EINVAL;
1090 	}
1091 
1092 	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
1093 	return pm_runtime_put_sync_suspend(dev);
1094 }
1095 
1096 /**
1097  * hif_pm_runtime_request_resume() - Invoke async runtime resume
1098  * @hif_ctx: hif context
1099  * @rtpm_dbgid: dbgid to trace who use it
1100  *
1101  * This function will invoke asynchronous runtime resume.
1102  *
1103  * Return: status
1104  */
1105 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx,
1106 				  wlan_rtpm_dbgid rtpm_dbgid)
1107 {
1108 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1109 	struct hif_runtime_pm_ctx *rpm_ctx;
1110 	int pm_state;
1111 
1112 	if (!scn)
1113 		return -EINVAL;
1114 
1115 	if (!hif_pci_pm_runtime_enabled(scn))
1116 		return 0;
1117 
1118 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1119 	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
1120 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
1121 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
1122 		hif_info("request runtime PM resume, rtpm_dbgid(%d,%s)",
1123 			 rtpm_dbgid,
1124 			 rtpm_string_from_dbgid(rtpm_dbgid));
1125 
1126 	rpm_ctx->pm_stats.request_resume++;
1127 	rpm_ctx->pm_stats.last_resume_rtpm_dbgid = rtpm_dbgid;
1128 
1129 	return hif_pm_request_resume(hif_bus_get_dev(scn));
1130 }
1131 
1132 /**
1133  * hif_pm_runtime_mark_last_busy() - Mark last busy time
1134  * @hif_ctx: hif context
1135  *
1136  * This function will mark the last busy time, this will be used
1137  * to check if auto suspend delay expired or not.
1138  *
1139  * Return: void
1140  */
1141 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
1142 {
1143 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1144 	struct hif_runtime_pm_ctx *rpm_ctx;
1145 
1146 	if (!scn)
1147 		return;
1148 
1149 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1150 	rpm_ctx->pm_stats.last_busy_marker = (void *)_RET_IP_;
1151 	rpm_ctx->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();
1152 
1153 	pm_runtime_mark_last_busy(hif_bus_get_dev(scn));
1154 
1155 	return;
1156 }
1157 
1158 /**
1159  * hif_pm_runtime_get_noresume() - Inc usage count without resume
1160  * @hif_ctx: hif context
1161  * rtpm_dbgid: Id of the module calling get
1162  *
1163  * This function will increment device usage count to avoid runtime
1164  * suspend, but it would not do resume.
1165  *
1166  * Return: void
1167  */
1168 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1169 				 wlan_rtpm_dbgid rtpm_dbgid)
1170 {
1171 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1172 
1173 	if (!scn)
1174 		return;
1175 
1176 	if (!hif_pci_pm_runtime_enabled(scn))
1177 		return;
1178 
1179 	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
1180 	pm_runtime_get_noresume(hif_bus_get_dev(scn));
1181 }
1182 
1183 /**
1184  * hif_pm_runtime_get() - do a get opperation on the device
1185  * @hif_ctx: pointer of HIF context
1186  * @rtpm_dbgid: dbgid to trace who use it
1187  * @is_critical_ctx: Indication if this function called via a
1188  *		     critical context
1189  *
1190  * A get opperation will prevent a runtime suspend until a
1191  * corresponding put is done.  This api should be used when sending
1192  * data.
1193  *
1194  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1195  * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
1196  *
1197  * return: success if the bus is up and a get has been issued
1198  *   otherwise an error code.
1199  */
1200 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
1201 		       wlan_rtpm_dbgid rtpm_dbgid,
1202 		       bool is_critical_ctx)
1203 {
1204 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1205 	struct hif_runtime_pm_ctx *rpm_ctx;
1206 	struct device *dev;
1207 	int ret;
1208 	int pm_state;
1209 
1210 	if (!scn) {
1211 		hif_err("Could not do runtime get, scn is null");
1212 		return -EFAULT;
1213 	}
1214 
1215 	if (!hif_pci_pm_runtime_enabled(scn))
1216 		return 0;
1217 
1218 	dev = hif_bus_get_dev(scn);
1219 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1220 	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
1221 
1222 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
1223 	    pm_state == HIF_PM_RUNTIME_STATE_NONE) {
1224 		hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
1225 		ret = __hif_pm_runtime_get(dev);
1226 
1227 		/* Get can return 1 if the device is already active, just return
1228 		 * success in that case
1229 		 */
1230 		if (ret > 0)
1231 			ret = 0;
1232 
1233 		if (ret < 0)
1234 			hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
1235 
1236 		if (ret && ret != -EINPROGRESS) {
1237 			rpm_ctx->pm_stats.runtime_get_err++;
1238 			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
1239 				qdf_atomic_read(&rpm_ctx->pm_state), ret);
1240 		}
1241 
1242 		return ret;
1243 	}
1244 
1245 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
1246 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
1247 		/* Do not log in performance path */
1248 		if (!is_critical_ctx) {
1249 			hif_info_high("request runtime PM resume, rtpm_dbgid(%d-%s)",
1250 				      rtpm_dbgid,
1251 				      rtpm_string_from_dbgid(rtpm_dbgid));
1252 		}
1253 		ret = -EAGAIN;
1254 	} else {
1255 		ret = -EBUSY;
1256 	}
1257 
1258 	rpm_ctx->pm_stats.request_resume++;
1259 	rpm_ctx->pm_stats.last_resume_rtpm_dbgid = rtpm_dbgid;
1260 	hif_pm_request_resume(dev);
1261 
1262 	return ret;
1263 }
1264 
1265 /**
1266  * hif_pm_runtime_put() - do a put operation on the device
1267  * @hif_ctx: pointer of HIF context
1268  * @rtpm_dbgid: dbgid to trace who use it
1269  *
1270  * A put operation will allow a runtime suspend after a corresponding
1271  * get was done.  This api should be used when sending data.
1272  *
1273  * This api will return a failure if runtime pm is stopped
1274  * This api will return failure if it would decrement the usage count below 0.
1275  *
1276  * return: QDF_STATUS_SUCCESS if the put is performed
1277  */
1278 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
1279 		       wlan_rtpm_dbgid rtpm_dbgid)
1280 {
1281 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1282 	struct device *dev;
1283 	int usage_count;
1284 	char *error = NULL;
1285 
1286 	if (!scn) {
1287 		hif_err("Could not do runtime put, scn is null");
1288 		return -EFAULT;
1289 	}
1290 
1291 	if (!hif_pci_pm_runtime_enabled(scn))
1292 		return 0;
1293 
1294 	dev = hif_bus_get_dev(scn);
1295 	usage_count = atomic_read(&dev->power.usage_count);
1296 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
1297 		error = "Unexpected PUT when runtime PM is disabled";
1298 	else if (usage_count == 0)
1299 		error = "PUT without a GET operation";
1300 
1301 	if (error) {
1302 		hif_pci_runtime_pm_warn(scn, error);
1303 		return -EINVAL;
1304 	}
1305 
1306 	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
1307 
1308 	hif_pm_runtime_mark_last_busy(hif_ctx);
1309 	hif_pm_runtime_put_auto(dev);
1310 
1311 	return 0;
1312 }
1313 
1314 /**
1315  * hif_pm_runtime_put_noidle() - do a put operation with no idle
1316  * @hif_ctx: pointer of HIF context
1317  * @rtpm_dbgid: dbgid to trace who use it
1318  *
1319  * This API will do a runtime put no idle operation
1320  *
1321  * Return: 0 for success otherwise an error code
1322  */
1323 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1324 			      wlan_rtpm_dbgid rtpm_dbgid)
1325 {
1326 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1327 	struct device *dev;
1328 	int usage_count;
1329 	char *err = NULL;
1330 
1331 	if (!scn)
1332 		return -EINVAL;
1333 
1334 	if (!hif_pci_pm_runtime_enabled(scn))
1335 		return 0;
1336 
1337 	dev = hif_bus_get_dev(scn);
1338 	usage_count = atomic_read(&dev->power.usage_count);
1339 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
1340 		err = "Unexpected PUT when runtime PM is disabled";
1341 	else if (usage_count == 0)
1342 		err = "PUT without a GET operation";
1343 
1344 	if (err) {
1345 		hif_pci_runtime_pm_warn(scn, err);
1346 		return -EINVAL;
1347 	}
1348 
1349 	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
1350 	pm_runtime_put_noidle(dev);
1351 
1352 	return 0;
1353 }
1354 
1355 /**
1356  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
1357  *                                      reason
1358  * @scn: hif context
1359  * @lock: runtime_pm lock being acquired
1360  *
1361  * Return 0 if successful.
1362  */
1363 static int __hif_pm_runtime_prevent_suspend(struct hif_softc *scn,
1364 					    struct hif_pm_runtime_lock *lock)
1365 {
1366 	struct device *dev = hif_bus_get_dev(scn);
1367 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1368 	int ret = 0;
1369 
1370 	/*
1371 	 * We shouldn't be setting context->timeout to zero here when
1372 	 * context is active as we will have a case where Timeout API's
1373 	 * for the same context called back to back.
1374 	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
1375 	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
1376 	 * API to ensure the timeout version is no more active and
1377 	 * list entry of this context will be deleted during allow suspend.
1378 	 */
1379 	if (lock->active)
1380 		return 0;
1381 
1382 	ret = __hif_pm_runtime_get(dev);
1383 
1384 	/**
1385 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
1386 	 * RPM_SUSPENDING. Any other negative value is an error.
1387 	 * We shouldn't be do runtime_put here as in later point allow
1388 	 * suspend gets called with the the context and there the usage count
1389 	 * is decremented, so suspend will be prevented.
1390 	 */
1391 
1392 	if (ret < 0 && ret != -EINPROGRESS) {
1393 		rpm_ctx->pm_stats.runtime_get_err++;
1394 		hif_pci_runtime_pm_warn(scn,
1395 					"Prevent Suspend Runtime PM Error");
1396 	}
1397 
1398 	rpm_ctx->prevent_suspend_cnt++;
1399 
1400 	lock->active = true;
1401 
1402 	list_add_tail(&lock->list, &rpm_ctx->prevent_suspend_list);
1403 
1404 	qdf_atomic_inc(&rpm_ctx->pm_stats.prevent_suspend);
1405 
1406 	hif_debug("%s: in pm_state:%s ret: %d", __func__,
1407 		  hif_pm_runtime_state_to_string(
1408 			  qdf_atomic_read(&rpm_ctx->pm_state)),
1409 		  ret);
1410 
1411 	return ret;
1412 }
1413 
1414 /**
1415  * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1416  * @scn: hif context
1417  * @lock: runtime pm lock
1418  *
1419  * This function will allow runtime suspend, by decrementing
1420  * device's usage count.
1421  *
1422  * Return: status
1423  */
1424 static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
1425 					  struct hif_pm_runtime_lock *lock)
1426 {
1427 	struct device *dev = hif_bus_get_dev(scn);
1428 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1429 	int ret = 0;
1430 	int usage_count;
1431 
1432 	if (rpm_ctx->prevent_suspend_cnt == 0)
1433 		return ret;
1434 
1435 	if (!lock->active)
1436 		return ret;
1437 
1438 	usage_count = atomic_read(&dev->power.usage_count);
1439 
1440 	/*
1441 	 * For runtime PM enabled case, the usage count should never be 0
1442 	 * at this point. For runtime PM disabled case, it should never be
1443 	 * 2 at this point. Catch unexpected PUT without GET here.
1444 	 */
1445 	if ((usage_count == 2 && !scn->hif_config.enable_runtime_pm) ||
1446 	    usage_count == 0) {
1447 		hif_pci_runtime_pm_warn(scn, "PUT without a GET Operation");
1448 		return -EINVAL;
1449 	}
1450 
1451 	list_del(&lock->list);
1452 
1453 	rpm_ctx->prevent_suspend_cnt--;
1454 
1455 	lock->active = false;
1456 	lock->timeout = 0;
1457 
1458 	hif_pm_runtime_mark_last_busy(GET_HIF_OPAQUE_HDL(scn));
1459 	ret = hif_pm_runtime_put_auto(dev);
1460 
1461 	hif_debug("%s: in pm_state:%s ret: %d", __func__,
1462 		  hif_pm_runtime_state_to_string(
1463 			  qdf_atomic_read(&rpm_ctx->pm_state)),
1464 		  ret);
1465 
1466 	qdf_atomic_inc(&rpm_ctx->pm_stats.allow_suspend);
1467 	return ret;
1468 }
1469 
1470 /**
1471  * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout
1472  * @data: calback data that is the pci context
1473  *
1474  * if runtime locks are acquired with a timeout, this function releases
1475  * the locks when the last runtime lock expires.
1476  *
1477  * dummy implementation until lock acquisition is implemented.
1478  */
1479 static void hif_pm_runtime_lock_timeout_fn(void *data)
1480 {
1481 	struct hif_softc *scn = data;
1482 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1483 	unsigned long timer_expires;
1484 	struct hif_pm_runtime_lock *context, *temp;
1485 
1486 	spin_lock_bh(&rpm_ctx->runtime_lock);
1487 
1488 	timer_expires = rpm_ctx->runtime_timer_expires;
1489 
1490 	/* Make sure we are not called too early, this should take care of
1491 	 * following case
1492 	 *
1493 	 * CPU0                         CPU1 (timeout function)
1494 	 * ----                         ----------------------
1495 	 * spin_lock_irq
1496 	 *                              timeout function called
1497 	 *
1498 	 * mod_timer()
1499 	 *
1500 	 * spin_unlock_irq
1501 	 *                              spin_lock_irq
1502 	 */
1503 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
1504 		rpm_ctx->runtime_timer_expires = 0;
1505 		list_for_each_entry_safe(context, temp,
1506 					 &rpm_ctx->prevent_suspend_list, list) {
1507 			if (context->timeout) {
1508 				__hif_pm_runtime_allow_suspend(scn, context);
1509 				rpm_ctx->pm_stats.allow_suspend_timeout++;
1510 			}
1511 		}
1512 	}
1513 
1514 	spin_unlock_bh(&rpm_ctx->runtime_lock);
1515 }
1516 
1517 /**
1518  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1519  * @scn: hif context
1520  * @data: runtime pm lock
1521  *
1522  * This function will prevent runtime suspend, by incrementing
1523  * device's usage count.
1524  *
1525  * Return: status
1526  */
1527 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1528 				   struct hif_pm_runtime_lock *data)
1529 {
1530 	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
1531 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1532 	struct hif_pm_runtime_lock *context = data;
1533 
1534 	if (!scn->hif_config.enable_runtime_pm)
1535 		return 0;
1536 
1537 	if (!context)
1538 		return -EINVAL;
1539 
1540 	if (in_irq())
1541 		WARN_ON(1);
1542 
1543 	spin_lock_bh(&rpm_ctx->runtime_lock);
1544 	context->timeout = 0;
1545 	__hif_pm_runtime_prevent_suspend(scn, context);
1546 	spin_unlock_bh(&rpm_ctx->runtime_lock);
1547 
1548 	return 0;
1549 }
1550 
1551 /**
1552  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1553  * @scn: hif context
1554  * @data: runtime pm lock
1555  *
1556  * This function will allow runtime suspend, by decrementing
1557  * device's usage count.
1558  *
1559  * Return: status
1560  */
1561 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1562 				 struct hif_pm_runtime_lock *data)
1563 {
1564 	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
1565 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1566 	struct hif_pm_runtime_lock *context = data;
1567 
1568 	if (!scn->hif_config.enable_runtime_pm)
1569 		return 0;
1570 
1571 	if (!context)
1572 		return -EINVAL;
1573 
1574 	if (in_irq())
1575 		WARN_ON(1);
1576 
1577 	spin_lock_bh(&rpm_ctx->runtime_lock);
1578 
1579 	__hif_pm_runtime_allow_suspend(scn, context);
1580 
1581 	/* The list can be empty as well in cases where
1582 	 * we have one context in the list and the allow
1583 	 * suspend came before the timer expires and we delete
1584 	 * context above from the list.
1585 	 * When list is empty prevent_suspend count will be zero.
1586 	 */
1587 	if (rpm_ctx->prevent_suspend_cnt == 0 &&
1588 	    rpm_ctx->runtime_timer_expires > 0) {
1589 		qdf_timer_free(&rpm_ctx->runtime_timer);
1590 		rpm_ctx->runtime_timer_expires = 0;
1591 	}
1592 
1593 	spin_unlock_bh(&rpm_ctx->runtime_lock);
1594 
1595 	return 0;
1596 }
1597 
1598 /**
1599  * hif_runtime_lock_init() - API to initialize Runtime PM context
1600  * @name: Context name
1601  *
1602  * This API initializes the Runtime PM context of the caller and
1603  * return the pointer.
1604  *
1605  * Return: None
1606  */
1607 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
1608 {
1609 	struct hif_pm_runtime_lock *context;
1610 
1611 	hif_debug("Initializing Runtime PM wakelock %s", name);
1612 
1613 	context = qdf_mem_malloc(sizeof(*context));
1614 	if (!context)
1615 		return -ENOMEM;
1616 
1617 	context->name = name ? name : "Default";
1618 	lock->lock = context;
1619 
1620 	return 0;
1621 }
1622 
1623 /**
1624  * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
1625  * @data: Runtime PM context
1626  *
1627  * Return: void
1628  */
1629 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1630 			     struct hif_pm_runtime_lock *data)
1631 {
1632 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1633 	struct hif_runtime_pm_ctx *rpm_ctx;
1634 	struct hif_pm_runtime_lock *context = data;
1635 
1636 	if (!context) {
1637 		hif_err("Runtime PM wakelock context is NULL");
1638 		return;
1639 	}
1640 
1641 	hif_debug("Deinitializing Runtime PM wakelock %s", context->name);
1642 
1643 	/*
1644 	 * Ensure to delete the context list entry and reduce the usage count
1645 	 * before freeing the context if context is active.
1646 	 */
1647 	if (scn) {
1648 		rpm_ctx = hif_bus_get_rpm_ctx(scn);
1649 		spin_lock_bh(&rpm_ctx->runtime_lock);
1650 		__hif_pm_runtime_allow_suspend(scn, context);
1651 		spin_unlock_bh(&rpm_ctx->runtime_lock);
1652 	}
1653 
1654 	qdf_mem_free(context);
1655 }
1656 
1657 /**
1658  * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
1659  * @hif_ctx: HIF context
1660  *
1661  * Return: true for runtime suspended, otherwise false
1662  */
1663 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
1664 {
1665 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1666 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1667 
1668 	return qdf_atomic_read(&rpm_ctx->pm_state) ==
1669 					HIF_PM_RUNTIME_STATE_SUSPENDED;
1670 }
1671 
1672 /*
1673  * hif_pm_runtime_suspend_lock() - spin_lock on marking runtime suspend
1674  * @hif_ctx: HIF context
1675  *
1676  * Return: void
1677  */
1678 void hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx)
1679 {
1680 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1681 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1682 
1683 	qdf_spin_lock_irqsave(&rpm_ctx->runtime_suspend_lock);
1684 }
1685 
1686 /*
1687  * hif_pm_runtime_suspend_unlock() - spin_unlock on marking runtime suspend
1688  * @hif_ctx: HIF context
1689  *
1690  * Return: void
1691  */
1692 void hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx)
1693 {
1694 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1695 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1696 
1697 	qdf_spin_unlock_irqrestore(&rpm_ctx->runtime_suspend_lock);
1698 }
1699 
1700 /**
1701  * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
1702  * @hif_ctx: HIF context
1703  *
1704  * monitor_wake_intr variable can be used to indicate if driver expects wake
1705  * MSI for runtime PM
1706  *
1707  * Return: monitor_wake_intr variable
1708  */
1709 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
1710 {
1711 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1712 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1713 
1714 	return qdf_atomic_read(&rpm_ctx->monitor_wake_intr);
1715 }
1716 
1717 /**
1718  * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
1719  * @hif_ctx: HIF context
1720  * @val: value to set
1721  *
1722  * monitor_wake_intr variable can be used to indicate if driver expects wake
1723  * MSI for runtime PM
1724  *
1725  * Return: void
1726  */
1727 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
1728 					  int val)
1729 {
1730 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1731 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1732 
1733 	qdf_atomic_set(&rpm_ctx->monitor_wake_intr, val);
1734 }
1735 
1736 /**
1737  * hif_pm_runtime_check_and_request_resume() - check if the device is runtime
1738  *					       suspended and request resume.
1739  * @hif_ctx: HIF context
1740  *
1741  * This function is to check if the device is runtime suspended and
1742  * request for runtime resume.
1743  *
1744  * Return: void
1745  */
1746 void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
1747 {
1748 	hif_pm_runtime_suspend_lock(hif_ctx);
1749 	if (hif_pm_runtime_is_suspended(hif_ctx)) {
1750 		hif_pm_runtime_suspend_unlock(hif_ctx);
1751 		hif_pm_runtime_request_resume(hif_ctx, RTPM_ID_CE_INTR_HANDLER);
1752 	} else {
1753 		hif_pm_runtime_suspend_unlock(hif_ctx);
1754 	}
1755 }
1756 
1757 /**
1758  * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark my data path
1759  * @hif_ctx: HIF context
1760  *
1761  * Return: void
1762  */
1763 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1764 {
1765 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1766 	struct hif_runtime_pm_ctx *rpm_ctx;
1767 
1768 	if (!scn)
1769 		return;
1770 
1771 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1772 	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 1);
1773 	rpm_ctx->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();
1774 
1775 	hif_pm_runtime_mark_last_busy(hif_ctx);
1776 }
1777 
1778 /**
1779  * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy by dp rx
1780  * @hif_ctx: HIF context
1781  *
1782  * Return: dp rx busy set value
1783  */
1784 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1785 {
1786 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1787 	struct hif_runtime_pm_ctx *rpm_ctx;
1788 
1789 	if (!scn)
1790 		return 0;
1791 
1792 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1793 	return qdf_atomic_read(&rpm_ctx->pm_dp_rx_busy);
1794 }
1795 
1796 /**
1797  * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
1798  * @hif_ctx: HIF context
1799  *
1800  * Return: timestamp of last mark busy by dp rx
1801  */
1802 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
1803 {
1804 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1805 	struct hif_runtime_pm_ctx *rpm_ctx;
1806 
1807 	if (!scn)
1808 		return 0;
1809 
1810 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1811 	return rpm_ctx->dp_last_busy_timestamp;
1812 }
1813 
1814 void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val)
1815 {
1816 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1817 
1818 	qdf_atomic_set(&scn->pm_link_state, val);
1819 }
1820 
1821 uint8_t hif_pm_get_link_state(struct hif_opaque_softc *hif_handle)
1822 {
1823 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1824 
1825 	return qdf_atomic_read(&scn->pm_link_state);
1826 }
1827 
1828 /**
1829  * hif_pm_runtime_update_stats() - API to update RTPM stats for HTC layer
1830  * @scn: hif context
1831  * @rtpm_dbgid: RTPM dbg_id
1832  * @hif_pm_htc_stats: Stats category
1833  *
1834  * Return: void
1835  */
1836 void hif_pm_runtime_update_stats(struct hif_opaque_softc *hif_ctx,
1837 				 wlan_rtpm_dbgid rtpm_dbgid,
1838 				 enum hif_pm_htc_stats stats)
1839 {
1840 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1841 	struct hif_runtime_pm_ctx *rpm_ctx;
1842 
1843 	if (rtpm_dbgid != RTPM_ID_HTC)
1844 		return;
1845 
1846 	if (!scn)
1847 		return;
1848 
1849 	if (!hif_pci_pm_runtime_enabled(scn))
1850 		return;
1851 
1852 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1853 	if (!rpm_ctx)
1854 		return;
1855 
1856 	switch (stats) {
1857 	case HIF_PM_HTC_STATS_GET_HTT_RESPONSE:
1858 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_get_htt_resp++;
1859 		break;
1860 	case HIF_PM_HTC_STATS_GET_HTT_NO_RESPONSE:
1861 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_get_htt_no_resp++;
1862 		break;
1863 	case HIF_PM_HTC_STATS_PUT_HTT_RESPONSE:
1864 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_put_htt_resp++;
1865 		break;
1866 	case HIF_PM_HTC_STATS_PUT_HTT_NO_RESPONSE:
1867 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_put_htt_no_resp++;
1868 		break;
1869 	case HIF_PM_HTC_STATS_PUT_HTT_ERROR:
1870 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_put_htt_error++;
1871 		break;
1872 	case HIF_PM_HTC_STATS_PUT_HTC_CLEANUP:
1873 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_put_htc_cleanup++;
1874 		break;
1875 	case HIF_PM_HTC_STATS_GET_HTC_KICK_QUEUES:
1876 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_get_htc_kick_queues++;
1877 		break;
1878 	case HIF_PM_HTC_STATS_PUT_HTC_KICK_QUEUES:
1879 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_put_htc_kick_queues++;
1880 		break;
1881 	case HIF_PM_HTC_STATS_GET_HTT_FETCH_PKTS:
1882 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_get_htt_fetch_pkts++;
1883 		break;
1884 	case HIF_PM_HTC_STATS_PUT_HTT_FETCH_PKTS:
1885 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_put_htt_fetch_pkts++;
1886 		break;
1887 	default:
1888 		break;
1889 	}
1890 }
1891 
1892 #endif /* FEATURE_RUNTIME_PM */
1893