xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_runtime_pm.c (revision 2ea97ac98512848a8d721c76dddf82576e7c417e)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7 
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/slab.h>
18 #include <linux/interrupt.h>
19 #include <linux/if_arp.h>
20 #include "hif_io32.h"
21 #include "hif_runtime_pm.h"
22 #include "hif.h"
23 #include "target_type.h"
24 #include "hif_main.h"
25 #include "ce_main.h"
26 #include "ce_api.h"
27 #include "ce_internal.h"
28 #include "ce_reg.h"
29 #include "ce_bmi.h"
30 #include "regtable.h"
31 #include "hif_hw_version.h"
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include "qdf_status.h"
35 #include "qdf_atomic.h"
36 #include "pld_common.h"
37 #include "mp_dev.h"
38 #include "hif_debug.h"
39 
40 #include "ce_tasklet.h"
41 #include "targaddrs.h"
42 #include "hif_exec.h"
43 
44 #define CNSS_RUNTIME_FILE "cnss_runtime_pm"
45 #define CNSS_RUNTIME_FILE_PERM QDF_FILE_USR_READ
46 
47 #ifdef FEATURE_RUNTIME_PM
48 #define PREVENT_LIST_STRING_LEN 200
49 
/**
 * hif_pci_pm_runtime_enabled() - To check if Runtime PM is enabled
 * @scn: hif context
 *
 * This function will check if Runtime PM is enabled or not.
 *
 * Return: true if Runtime PM is enabled in the ini config, or if the
 *         kernel has runtime PM enabled for the underlying bus device;
 *         false otherwise
 */
static bool hif_pci_pm_runtime_enabled(struct hif_softc *scn)
{
	if (scn->hif_config.enable_runtime_pm)
		return true;

	return pm_runtime_enabled(hif_bus_get_dev(scn));
}
65 
66 /**
67  * hif_pm_runtime_state_to_string() - Mapping state into string
68  * @state: runtime pm state
69  *
70  * This function will map the runtime pm state into corresponding
71  * string for debug purpose.
72  *
73  * Return: pointer to the string
74  */
75 static const char *hif_pm_runtime_state_to_string(uint32_t state)
76 {
77 	switch (state) {
78 	case HIF_PM_RUNTIME_STATE_NONE:
79 		return "INIT_STATE";
80 	case HIF_PM_RUNTIME_STATE_ON:
81 		return "ON";
82 	case HIF_PM_RUNTIME_STATE_RESUMING:
83 		return "RESUMING";
84 	case HIF_PM_RUNTIME_STATE_SUSPENDING:
85 		return "SUSPENDING";
86 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
87 		return "SUSPENDED";
88 	default:
89 		return "INVALID STATE";
90 	}
91 }
92 
93 #define HIF_PCI_RUNTIME_PM_STATS(_s, _rpm_ctx, _name) \
94 	seq_printf(_s, "%30s: %u\n", #_name, (_rpm_ctx)->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @scn: hif_softc context
 * @msg: log message
 *
 * Log a snapshot of the runtime PM bookkeeping (usage count, pm state,
 * per-dbgid get/put counters and timestamps, and the list of active
 * prevent-suspend sources) when something seems off, then trigger a
 * debug panic unless the FW is already down.
 *
 * NOTE(review): prevent_suspend_list is walked here without taking
 * rpm_ctx->runtime_lock (the debugfs path does take it) - presumably
 * callers hold the lock or tolerate the race on this debug-only path;
 * confirm before relying on it.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_softc *scn,
				    const char *msg)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_pm_runtime_lock *ctx;
	int i;

	hif_nofl_debug("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
		       msg, atomic_read(&dev->power.usage_count),
		       hif_pm_runtime_state_to_string(
				atomic_read(&rpm_ctx->pm_state)),
		       rpm_ctx->prevent_suspend_cnt);

	hif_nofl_debug("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
		       dev->power.runtime_status,
		       dev->power.runtime_error,
		       dev->power.disable_depth,
		       dev->power.autosuspend_delay);

	hif_nofl_debug("runtime_get: %u, runtime_put: %u, request_resume: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get),
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put),
		       rpm_ctx->pm_stats.request_resume);

	/* Per-client get/put breakdown to identify an unbalanced caller */
	hif_nofl_debug("get  put  get-timestamp put-timestamp :DBGID_NAME");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		hif_nofl_debug("%-10d %-10d  0x%-10llx  0x%-10llx :%-30s",
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_get_dbgid[i]),
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_put_dbgid[i]),
			       rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i],
			       rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i],
			       rtpm_string_from_dbgid(i));
	}

	hif_nofl_debug("allow_suspend: %u, prevent_suspend: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend),
		       qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));

	hif_nofl_debug("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
		       rpm_ctx->pm_stats.prevent_suspend_timeout,
		       rpm_ctx->pm_stats.allow_suspend_timeout);

	hif_nofl_debug("Suspended: %u, resumed: %u count",
		       rpm_ctx->pm_stats.suspended,
		       rpm_ctx->pm_stats.resumed);

	hif_nofl_debug("suspend_err: %u, runtime_get_err: %u",
		       rpm_ctx->pm_stats.suspend_err,
		       rpm_ctx->pm_stats.runtime_get_err);

	hif_nofl_debug("Active Wakeup Sources preventing Runtime Suspend: ");

	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		hif_nofl_debug("source %s; timeout %d ms",
			       ctx->name, ctx->timeout);
	}

	/* Don't panic during FW recovery; the stats above still get logged */
	if (qdf_is_fw_down()) {
		hif_err("fw is down");
		return;
	}

	QDF_DEBUG_PANIC("hif_pci_runtime_pm_warn");
}
171 
/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_softc *scn = s->private;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	/* Order must match enum hif_pm_runtime_state (indexed by pm_state) */
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
		"SUSPENDING", "SUSPENDED"};
	unsigned int msecs_age;
	qdf_time_t usecs_age;
	int pm_state = atomic_read(&rpm_ctx->pm_state);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *ctx;
	int i;

	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
	seq_printf(s, "%30s: %ps\n", "Last Resume Caller",
		   rpm_ctx->pm_stats.last_resume_caller);
	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
		   rpm_ctx->pm_stats.last_busy_marker);

	/* Elapsed time since the last mark-busy, in log-timestamp usecs */
	usecs_age = qdf_get_log_timestamp_usecs() -
		rpm_ctx->pm_stats.last_busy_timestamp;
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
		   rpm_ctx->pm_stats.last_busy_timestamp / 1000000,
		   rpm_ctx->pm_stats.last_busy_timestamp % 1000000);
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
		   usecs_age / 1000000, usecs_age % 1000000);

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(jiffies -
					     rpm_ctx->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
			   msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
		   atomic_read(&dev->power.usage_count));

	/* NOTE(review): read without runtime_lock - debug-only snapshot */
	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
		   rpm_ctx->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, resumed);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, request_resume);
	seq_printf(s, "%30s: %u\n", "prevent_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));
	seq_printf(s, "%30s: %u\n", "allow_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend));

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, runtime_get_err);

	seq_printf(s, "%30s: %u\n", "runtime_get",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get));
	seq_printf(s, "%30s: %u\n", "runtime_put",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put));
	/* Per-client get/put breakdown to spot an unbalanced caller */
	seq_puts(s, "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				 &rpm_ctx->pm_stats.runtime_get_dbgid[i]));
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				 &rpm_ctx->pm_stats.runtime_put_dbgid[i]));
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i]);
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i]);
		seq_printf(s, ":%-30s\n", rtpm_string_from_dbgid(i));
	}

	/* Remaining time on a prevent-suspend timeout lock, if one is armed */
	timer_expires = rpm_ctx->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
			   msecs_age / 1000, msecs_age % 1000);
	}

	spin_lock_bh(&rpm_ctx->runtime_lock);
	if (list_empty(&rpm_ctx->prevent_suspend_list)) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}
280 
281 #undef HIF_PCI_RUNTIME_PM_STATS
282 
/**
 * hif_pci_runtime_pm_open() - open debugfs file to access runtime pm stats
 * @inode: inode of the debugfs entry; i_private carries the hif context
 * @file: file being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			inode->i_private);
}
295 
/* debugfs hooks for the runtime PM stats entry (seq_file based) */
static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_pci_runtime_pm_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};
303 
304 /**
305  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
306  * @scn: hif context
307  *
308  * creates a debugfs entry to debug the runtime pm feature.
309  */
310 static void hif_runtime_pm_debugfs_create(struct hif_softc *scn)
311 {
312 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
313 
314 	rpm_ctx->pm_dentry = qdf_debugfs_create_entry(CNSS_RUNTIME_FILE,
315 						      CNSS_RUNTIME_FILE_PERM,
316 						      NULL,
317 						      scn,
318 						      &hif_pci_runtime_pm_fops);
319 }
320 
321 /**
322  * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
323  * @sc: pci context
324  *
325  * removes the debugfs entry to debug the runtime pm feature.
326  */
327 static void hif_runtime_pm_debugfs_remove(struct hif_softc *scn)
328 {
329 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
330 
331 	qdf_debugfs_remove_file(rpm_ctx->pm_dentry);
332 }
333 
/**
 * hif_runtime_init() - Initialize Runtime PM
 * @dev: device structure
 * @delay: delay to be configured for auto suspend
 *
 * This function will init all the Runtime PM config.
 *
 * Return: void
 */
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	/* allow + put_noidle drop usage/forbid references so the PM core
	 * can actually autosuspend the device once it goes idle
	 */
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);
	/* children (if any) must not block this device's runtime suspend */
	pm_suspend_ignore_children(dev, true);
}
352 
/**
 * hif_runtime_exit() - Deinit/Exit Runtime PM
 * @dev: device structure
 *
 * This function will deinit all the Runtime PM config, undoing the
 * reference-count changes made by hif_runtime_init() so the device is
 * left active with runtime PM effectively disabled.
 *
 * Return: void
 */
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	/* Symmetric call to make sure default usage count == 2 */
	pm_runtime_forbid(dev);
}
368 
369 static void hif_pm_runtime_lock_timeout_fn(void *data);
370 
371 /**
372  * hif_pm_runtime_start(): start the runtime pm
373  * @scn: hif context
374  *
375  * After this call, runtime pm will be active.
376  */
377 void hif_pm_runtime_start(struct hif_softc *scn)
378 {
379 	uint32_t mode = hif_get_conparam(scn);
380 	struct device *dev = hif_bus_get_dev(scn);
381 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
382 
383 	if (!scn->hif_config.enable_runtime_pm) {
384 		hif_info("RUNTIME PM is disabled in ini");
385 		return;
386 	}
387 
388 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
389 	    mode == QDF_GLOBAL_MONITOR_MODE) {
390 		hif_info("RUNTIME PM is disabled for FTM/EPPING mode");
391 		return;
392 	}
393 
394 	qdf_timer_init(NULL, &rpm_ctx->runtime_timer,
395 		       hif_pm_runtime_lock_timeout_fn,
396 		       scn, QDF_TIMER_TYPE_WAKE_APPS);
397 
398 	hif_info("Enabling RUNTIME PM, Delay: %d ms",
399 		 scn->hif_config.runtime_pm_delay);
400 
401 	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_ON);
402 	hif_runtime_init(dev, scn->hif_config.runtime_pm_delay);
403 	hif_runtime_pm_debugfs_create(scn);
404 }
405 
/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @scn: hif context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_pm_runtime_start().
 */
void hif_pm_runtime_stop(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	/* Mirror the gating in hif_pm_runtime_start(): nothing to undo */
	if (!scn->hif_config.enable_runtime_pm)
		return;

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE)
		return;

	hif_runtime_exit(dev);

	/* Bring the device fully back up before tearing state down */
	hif_pm_runtime_sync_resume(GET_HIF_OPAQUE_HDL(scn));

	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(scn);
	qdf_timer_free(&rpm_ctx->runtime_timer);
}
435 
436 /**
437  * hif_pm_runtime_open(): initialize runtime pm
438  * @scn: hif ctx
439  *
440  * Early initialization
441  */
442 void hif_pm_runtime_open(struct hif_softc *scn)
443 {
444 	int i;
445 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
446 
447 	spin_lock_init(&rpm_ctx->runtime_lock);
448 	qdf_spinlock_create(&rpm_ctx->runtime_suspend_lock);
449 	qdf_atomic_init(&rpm_ctx->pm_state);
450 	hif_runtime_lock_init(&rpm_ctx->prevent_linkdown_lock,
451 			      "prevent_linkdown_lock");
452 	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);
453 	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get);
454 	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put);
455 	qdf_atomic_init(&rpm_ctx->pm_stats.allow_suspend);
456 	qdf_atomic_init(&rpm_ctx->pm_stats.prevent_suspend);
457 	for (i = 0; i < RTPM_ID_MAX; i++) {
458 		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
459 		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
460 	}
461 	INIT_LIST_HEAD(&rpm_ctx->prevent_suspend_list);
462 }
463 
464 /**
465  * hif_check_for_get_put_out_of_sync() - Check if Get/Put is out of sync
466  * @scn: hif context
467  *
468  * This function will check if get and put are out of sync or not.
469  *
470  * Return: void
471  */
472 static void  hif_check_for_get_put_out_of_sync(struct hif_softc *scn)
473 {
474 	int32_t i;
475 	int32_t get_count, put_count;
476 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
477 
478 	if (qdf_is_fw_down())
479 		return;
480 
481 	for (i = 0; i < RTPM_ID_MAX; i++) {
482 		get_count = qdf_atomic_read(
483 				&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
484 		put_count = qdf_atomic_read(
485 				&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
486 		if (get_count != put_count) {
487 			QDF_DEBUG_PANIC("%s get-put out of sync. get %d put %d",
488 					rtpm_string_from_dbgid(i),
489 					get_count, put_count);
490 		}
491 	}
492 }
493 
494 /**
495  * hif_pm_runtime_sanitize_on_exit(): sanitize runtime PM gets/puts from driver
496  * @scn: hif context
497  *
498  * Ensure all gets/puts are in sync before exiting runtime PM feature.
499  * Also make sure all runtime PM locks are deinitialized properly.
500  *
501  * Return: void
502  */
503 static void hif_pm_runtime_sanitize_on_exit(struct hif_softc *scn)
504 {
505 	struct hif_pm_runtime_lock *ctx, *tmp;
506 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
507 
508 	hif_check_for_get_put_out_of_sync(scn);
509 
510 	spin_lock_bh(&rpm_ctx->runtime_lock);
511 	list_for_each_entry_safe(ctx, tmp,
512 				 &rpm_ctx->prevent_suspend_list, list) {
513 		spin_unlock_bh(&rpm_ctx->runtime_lock);
514 		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(scn), ctx);
515 		spin_lock_bh(&rpm_ctx->runtime_lock);
516 	}
517 	spin_unlock_bh(&rpm_ctx->runtime_lock);
518 }
519 
520 static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
521 					  struct hif_pm_runtime_lock *lock);
522 
/**
 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
 * @scn: hif context
 *
 * API is used to empty the runtime pm prevent suspend list.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_softc *scn)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	/* _safe variant: the callee may unlink ctx from the list */
	spin_lock_bh(&rpm_ctx->runtime_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &rpm_ctx->prevent_suspend_list, list) {
		__hif_pm_runtime_allow_suspend(scn, ctx);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);
}
543 
/**
 * hif_pm_runtime_close(): close runtime pm
 * @scn: hif ctx
 *
 * Ensure runtime_pm is stopped before closing the driver: release the
 * linkdown lock, drain the prevent-suspend list (via the SSR path when
 * recovery is in progress) and destroy the suspend lock.
 */
void hif_pm_runtime_close(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);

	/*
	 * Here cds hif context was already NULL,
	 * so calling hif_runtime_lock_deinit, instead of
	 * qdf_runtime_lock_deinit(&rpm_ctx->prevent_linkdown_lock);
	 */
	hif_runtime_lock_deinit(hif_ctx, rpm_ctx->prevent_linkdown_lock.lock);

	/* SSR path only empties the list; normal path also panics on
	 * unbalanced get/put and deinits remaining locks
	 */
	hif_is_recovery_in_progress(scn) ?
		hif_pm_runtime_sanitize_on_ssr_exit(scn) :
		hif_pm_runtime_sanitize_on_exit(scn);

	qdf_spinlock_destroy(&rpm_ctx->runtime_suspend_lock);
}
568 
569 /**
570  * hif_pm_runtime_sync_resume() - Invoke synchronous runtime resume.
571  * @hif_ctx: hif context
572  *
573  * This function will invoke synchronous runtime resume.
574  *
575  * Return: status
576  */
577 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
578 {
579 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
580 	struct hif_runtime_pm_ctx *rpm_ctx;
581 	int pm_state;
582 
583 	if (!scn)
584 		return -EINVAL;
585 
586 	if (!hif_pci_pm_runtime_enabled(scn))
587 		return 0;
588 
589 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
590 	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
591 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
592 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
593 		hif_info("Runtime PM resume is requested by %ps",
594 			 (void *)_RET_IP_);
595 
596 	rpm_ctx->pm_stats.request_resume++;
597 	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
598 
599 	return pm_runtime_resume(hif_bus_get_dev(scn));
600 }
601 
602 /**
603  * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
604  * @scn: hif context
605  * @flag: prevent linkdown if true otherwise allow
606  *
607  * this api should only be called as part of bus prevent linkdown
608  */
609 void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
610 {
611 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
612 
613 	if (flag)
614 		qdf_runtime_pm_prevent_suspend(&rpm_ctx->prevent_linkdown_lock);
615 	else
616 		qdf_runtime_pm_allow_suspend(&rpm_ctx->prevent_linkdown_lock);
617 }
618 
619 /**
620  * __hif_runtime_pm_set_state(): utility function
621  * @state: state to set
622  *
623  * indexes into the runtime pm state and sets it.
624  */
625 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
626 				       enum hif_pm_runtime_state state)
627 {
628 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
629 
630 	if (!rpm_ctx) {
631 		hif_err("HIF_CTX not initialized");
632 		return;
633 	}
634 
635 	qdf_atomic_set(&rpm_ctx->pm_state, state);
636 }
637 
/**
 * hif_runtime_pm_set_state_on():  adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a the runtime pm state should be on
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}
647 
/**
 * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm resuming has started
 */
static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
}
657 
/**
 * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm suspend has started
 */
static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
}
667 
/**
 * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime suspend attempt has been completed successfully
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}
677 
/**
 * hif_log_runtime_suspend_success() - log a successful runtime suspend
 * @hif_ctx: hif context
 *
 * Bumps the suspended counter and records the suspend time in jiffies
 * (used by the debugfs "Suspended Since" output).
 */
static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspended++;
	rpm_ctx->pm_stats.suspend_jiffies = jiffies;
}
692 
/**
 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
 * @hif_ctx: opaque hif context
 *
 * Bumps the suspend error counter. (Marking last-busy to delay the
 * retry is done by the caller, hif_process_runtime_suspend_failure().)
 */
static void hif_log_runtime_suspend_failure(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspend_err++;
}
709 
/**
 * hif_log_runtime_resume_success() - log a successful runtime resume
 * @hif_ctx: opaque hif context
 *
 * Bumps the resumed counter. (Marking last-busy to prevent an
 * immediate re-suspend is done by the caller,
 * hif_process_runtime_resume_success().)
 */
static void hif_log_runtime_resume_success(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.resumed++;
}
726 
/**
 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
 * @hif_ctx: hif context
 *
 * Record the failure.
 * mark last busy to delay a retry.
 * adjust the runtime_pm state back to ON.
 */
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_suspend_failure(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}
742 
743 static bool hif_pm_runtime_is_suspend_allowed(struct hif_softc *scn)
744 {
745 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
746 	struct hif_pm_runtime_lock *ctx;
747 	uint32_t prevent_suspend_cnt;
748 	char *str_buf;
749 	bool is_suspend_allowed;
750 	int len = 0;
751 
752 	if (!scn->hif_config.enable_runtime_pm)
753 		return false;
754 
755 	str_buf = qdf_mem_malloc(PREVENT_LIST_STRING_LEN);
756 	if (!str_buf)
757 		return false;
758 
759 	spin_lock_bh(&rpm_ctx->runtime_lock);
760 	prevent_suspend_cnt = rpm_ctx->prevent_suspend_cnt;
761 	is_suspend_allowed = (prevent_suspend_cnt == 0);
762 	if (!is_suspend_allowed) {
763 		list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list)
764 			len += qdf_scnprintf(str_buf + len,
765 				PREVENT_LIST_STRING_LEN - len,
766 				"%s ", ctx->name);
767 	}
768 	spin_unlock_bh(&rpm_ctx->runtime_lock);
769 
770 	if (!is_suspend_allowed)
771 		hif_info("prevent_suspend_cnt %u, prevent_list: %s",
772 			 rpm_ctx->prevent_suspend_cnt, str_buf);
773 
774 	qdf_mem_free(str_buf);
775 
776 	return is_suspend_allowed;
777 }
778 
/**
 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
 * @hif_ctx: hif context
 *
 * Makes sure that the pci link will be taken down by the suspend operation.
 * If the hif layer is configured to leave the bus on, runtime suspend will
 * not save any power.
 *
 * Set the runtime suspend state to in progress.
 *
 * Return: -EINVAL if the bus won't go down or a prevent-suspend lock is
 *         held, otherwise 0
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_can_suspend_link(hif_ctx)) {
		hif_err("Runtime PM not supported for link up suspend");
		return -EINVAL;
	}

	hif_runtime_pm_set_state_suspending(scn);

	/* keep this after set suspending */
	if (!hif_pm_runtime_is_suspend_allowed(scn)) {
		hif_info("Runtime PM not allowed now");
		return -EINVAL;
	}

	return 0;
}
809 
/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: hif context
 *
 * Record the success.
 * adjust the runtime_pm state
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(scn);
	hif_log_runtime_suspend_success(scn);
}
823 
/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: hif context
 *
 * Clears the monitor-wake-interrupt flag and moves the runtime pm
 * state to RESUMING.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_runtime_pm_set_state_resuming(scn);
}
836 
/**
 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
 * @hif_ctx: hif context
 *
 * record the success.
 * mark last busy to prevent an immediate re-suspend.
 * adjust the runtime_pm state back to ON.
 */
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_resume_success(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}
851 
/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 * @hif_ctx: hif context
 *
 * Suspends the bus, then arms the monitor wake interrupt before the
 * noirq suspend phase. On a noirq failure, the wake interrupt is
 * disarmed and the bus is resumed to leave the device usable.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend: %d", errno);
		return errno;
	}

	/* Must be armed before the noirq phase so a wake irq is noticed */
	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend noirq: %d", errno);
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		goto bus_resume;
	}

	/* DP RX is quiesced once suspend has fully succeeded */
	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 0);

	return 0;

bus_resume:
	/* Undo the earlier hif_bus_suspend(); failure here is fatal */
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}
887 
/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 * @hif_ctx: hif context
 *
 * ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
 *
 * fastpath only applicable to legacy copy engine
 */
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		/* Target must be awake before touching CE registers */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/*war_ce_src_ring_write_idx_set */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}
919 
/**
 * hif_runtime_resume() - do the bus resume part of a runtime resume
 * @hif_ctx: hif context
 *
 * The noirq resume must succeed (bugs out otherwise); the regular bus
 * resume result is returned to the caller.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));

	ret = hif_bus_resume(hif_ctx);
	if (ret)
		hif_err("Failed runtime resume: %d", ret);

	return ret;
}
936 
937 /**
938  * hif_pm_stats_runtime_get_record() - record runtime get statistics
939  * @scn: hif context
940  * @rtpm_dbgid: debug id to trace who use it
941  *
942  *
943  * Return: void
944  */
945 static void hif_pm_stats_runtime_get_record(struct hif_softc *scn,
946 					    wlan_rtpm_dbgid rtpm_dbgid)
947 {
948 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
949 
950 	if (rtpm_dbgid >= RTPM_ID_MAX) {
951 		QDF_BUG(0);
952 		return;
953 	}
954 	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get);
955 	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get_dbgid[rtpm_dbgid]);
956 	rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[rtpm_dbgid] =
957 						qdf_get_log_timestamp();
958 }
959 
960 /**
961  * hif_pm_stats_runtime_put_record() - record runtime put statistics
962  * @scn: hif context
963  * @rtpm_dbgid: dbg_id to trace who use it
964  *
965  *
966  * Return: void
967  */
968 static void hif_pm_stats_runtime_put_record(struct hif_softc *scn,
969 					    wlan_rtpm_dbgid rtpm_dbgid)
970 {
971 	struct device *dev = hif_bus_get_dev(scn);
972 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
973 
974 	if (rtpm_dbgid >= RTPM_ID_MAX) {
975 		QDF_BUG(0);
976 		return;
977 	}
978 
979 	if (atomic_read(&dev->power.usage_count) <= 0) {
980 		QDF_BUG(0);
981 		return;
982 	}
983 
984 	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put);
985 	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put_dbgid[rtpm_dbgid]);
986 	rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[rtpm_dbgid] =
987 						qdf_get_log_timestamp();
988 }
989 
/**
 * hif_pm_runtime_get_sync() - do a get operation with sync resume
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * A get operation will prevent a runtime suspend until a corresponding
 * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
 * resume instead of requesting a resume if it is runtime PM suspended
 * so it can only be called in non-atomic context.
 *
 * Return: 0 if it is runtime PM resumed otherwise an error code.
 */
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
			    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int pm_state;
	int ret;

	if (!rpm_ctx)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);

	/* Record before the get so stats always reflect the attempt */
	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
	ret = pm_runtime_get_sync(dev);

	/* Get can return 1 if the device is already active, just return
	 * success in that case.
	 */
	if (ret > 0)
		ret = 0;

	if (ret) {
		/* pm_runtime_get_sync bumps the usage count even on
		 * failure, so the matching put keeps counts balanced
		 */
		rpm_ctx->pm_stats.runtime_get_err++;
		hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
			qdf_atomic_read(&rpm_ctx->pm_state), ret);
		hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
	}

	return ret;
}
1041 
1042 /**
1043  * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
1044  * @hif_ctx: pointer of HIF context
1045  * @rtpm_dbgid: dbgid to trace who use it
1046  *
1047  * This API will do a runtime put operation followed by a sync suspend if usage
1048  * count is 0 so it can only be called in non-atomic context.
1049  *
1050  * Return: 0 for success otherwise an error code
1051  */
1052 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1053 				    wlan_rtpm_dbgid rtpm_dbgid)
1054 {
1055 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1056 	struct device *dev;
1057 	int usage_count;
1058 	char *err = NULL;
1059 
1060 	if (!scn)
1061 		return -EINVAL;
1062 
1063 	if (!hif_pci_pm_runtime_enabled(scn))
1064 		return 0;
1065 
1066 	dev = hif_bus_get_dev(scn);
1067 	usage_count = atomic_read(&dev->power.usage_count);
1068 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
1069 		err = "Uexpected PUT when runtime PM is disabled";
1070 	else if (usage_count == 0)
1071 		err = "PUT without a GET Operation";
1072 
1073 	if (err) {
1074 		hif_pci_runtime_pm_warn(scn, err);
1075 		return -EINVAL;
1076 	}
1077 
1078 	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
1079 	return pm_runtime_put_sync_suspend(dev);
1080 }
1081 
1082 /**
1083  * hif_pm_runtime_request_resume() - Invoke async runtime resume
1084  * @hif_ctx: hif context
1085  *
1086  * This function will invoke asynchronous runtime resume.
1087  *
1088  * Return: status
1089  */
1090 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
1091 {
1092 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1093 	struct hif_runtime_pm_ctx *rpm_ctx;
1094 	int pm_state;
1095 
1096 	if (!scn)
1097 		return -EINVAL;
1098 
1099 	if (!hif_pci_pm_runtime_enabled(scn))
1100 		return 0;
1101 
1102 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1103 	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
1104 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
1105 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
1106 		hif_info("Runtime PM resume is requested by %ps",
1107 			 (void *)_RET_IP_);
1108 
1109 	rpm_ctx->pm_stats.request_resume++;
1110 	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
1111 
1112 	return hif_pm_request_resume(hif_bus_get_dev(scn));
1113 }
1114 
1115 /**
1116  * hif_pm_runtime_mark_last_busy() - Mark last busy time
1117  * @hif_ctx: hif context
1118  *
1119  * This function will mark the last busy time, this will be used
1120  * to check if auto suspend delay expired or not.
1121  *
1122  * Return: void
1123  */
1124 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
1125 {
1126 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1127 	struct hif_runtime_pm_ctx *rpm_ctx;
1128 
1129 	if (!scn)
1130 		return;
1131 
1132 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1133 	rpm_ctx->pm_stats.last_busy_marker = (void *)_RET_IP_;
1134 	rpm_ctx->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();
1135 
1136 	pm_runtime_mark_last_busy(hif_bus_get_dev(scn));
1137 
1138 	return;
1139 }
1140 
1141 /**
1142  * hif_pm_runtime_get_noresume() - Inc usage count without resume
1143  * @hif_ctx: hif context
1144  * rtpm_dbgid: Id of the module calling get
1145  *
1146  * This function will increment device usage count to avoid runtime
1147  * suspend, but it would not do resume.
1148  *
1149  * Return: void
1150  */
1151 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
1152 				 wlan_rtpm_dbgid rtpm_dbgid)
1153 {
1154 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1155 
1156 	if (!scn)
1157 		return;
1158 
1159 	if (!hif_pci_pm_runtime_enabled(scn))
1160 		return;
1161 
1162 	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
1163 	pm_runtime_get_noresume(hif_bus_get_dev(scn));
1164 }
1165 
1166 /**
1167  * hif_pm_runtime_get() - do a get opperation on the device
1168  * @hif_ctx: pointer of HIF context
1169  * @rtpm_dbgid: dbgid to trace who use it
1170  * @is_critical_ctx: Indication if this function called via a
1171  *		     critical context
1172  *
1173  * A get opperation will prevent a runtime suspend until a
1174  * corresponding put is done.  This api should be used when sending
1175  * data.
1176  *
1177  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1178  * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
1179  *
1180  * return: success if the bus is up and a get has been issued
1181  *   otherwise an error code.
1182  */
1183 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
1184 		       wlan_rtpm_dbgid rtpm_dbgid,
1185 		       bool is_critical_ctx)
1186 {
1187 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1188 	struct hif_runtime_pm_ctx *rpm_ctx;
1189 	struct device *dev;
1190 	int ret;
1191 	int pm_state;
1192 
1193 	if (!scn) {
1194 		hif_err("Could not do runtime get, scn is null");
1195 		return -EFAULT;
1196 	}
1197 
1198 	if (!hif_pci_pm_runtime_enabled(scn))
1199 		return 0;
1200 
1201 	dev = hif_bus_get_dev(scn);
1202 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1203 	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
1204 
1205 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
1206 	    pm_state == HIF_PM_RUNTIME_STATE_NONE) {
1207 		hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
1208 		ret = __hif_pm_runtime_get(dev);
1209 
1210 		/* Get can return 1 if the device is already active, just return
1211 		 * success in that case
1212 		 */
1213 		if (ret > 0)
1214 			ret = 0;
1215 
1216 		if (ret)
1217 			hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
1218 
1219 		if (ret && ret != -EINPROGRESS) {
1220 			rpm_ctx->pm_stats.runtime_get_err++;
1221 			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
1222 				qdf_atomic_read(&rpm_ctx->pm_state), ret);
1223 		}
1224 
1225 		return ret;
1226 	}
1227 
1228 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
1229 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
1230 		/* Do not log in performance path */
1231 		if (!is_critical_ctx) {
1232 			hif_info_high("Runtime PM resume is requested by %ps",
1233 				      (void *)_RET_IP_);
1234 		}
1235 		ret = -EAGAIN;
1236 	} else {
1237 		ret = -EBUSY;
1238 	}
1239 
1240 	rpm_ctx->pm_stats.request_resume++;
1241 	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
1242 	hif_pm_request_resume(dev);
1243 
1244 	return ret;
1245 }
1246 
1247 /**
1248  * hif_pm_runtime_put() - do a put operation on the device
1249  * @hif_ctx: pointer of HIF context
1250  * @rtpm_dbgid: dbgid to trace who use it
1251  *
1252  * A put operation will allow a runtime suspend after a corresponding
1253  * get was done.  This api should be used when sending data.
1254  *
1255  * This api will return a failure if runtime pm is stopped
1256  * This api will return failure if it would decrement the usage count below 0.
1257  *
1258  * return: QDF_STATUS_SUCCESS if the put is performed
1259  */
1260 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
1261 		       wlan_rtpm_dbgid rtpm_dbgid)
1262 {
1263 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1264 	struct device *dev;
1265 	int usage_count;
1266 	char *error = NULL;
1267 
1268 	if (!scn) {
1269 		hif_err("Could not do runtime put, scn is null");
1270 		return -EFAULT;
1271 	}
1272 
1273 	if (!hif_pci_pm_runtime_enabled(scn))
1274 		return 0;
1275 
1276 	dev = hif_bus_get_dev(scn);
1277 	usage_count = atomic_read(&dev->power.usage_count);
1278 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
1279 		error = "Unexpected PUT when runtime PM is disabled";
1280 	else if (usage_count == 0)
1281 		error = "PUT without a GET operation";
1282 
1283 	if (error) {
1284 		hif_pci_runtime_pm_warn(scn, error);
1285 		return -EINVAL;
1286 	}
1287 
1288 	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
1289 
1290 	hif_pm_runtime_mark_last_busy(hif_ctx);
1291 	hif_pm_runtime_put_auto(dev);
1292 
1293 	return 0;
1294 }
1295 
1296 /**
1297  * hif_pm_runtime_put_noidle() - do a put operation with no idle
1298  * @hif_ctx: pointer of HIF context
1299  * @rtpm_dbgid: dbgid to trace who use it
1300  *
1301  * This API will do a runtime put no idle operation
1302  *
1303  * Return: 0 for success otherwise an error code
1304  */
1305 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1306 			      wlan_rtpm_dbgid rtpm_dbgid)
1307 {
1308 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1309 	struct device *dev;
1310 	int usage_count;
1311 	char *err = NULL;
1312 
1313 	if (!scn)
1314 		return -EINVAL;
1315 
1316 	if (!hif_pci_pm_runtime_enabled(scn))
1317 		return 0;
1318 
1319 	dev = hif_bus_get_dev(scn);
1320 	usage_count = atomic_read(&dev->power.usage_count);
1321 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
1322 		err = "Unexpected PUT when runtime PM is disabled";
1323 	else if (usage_count == 0)
1324 		err = "PUT without a GET operation";
1325 
1326 	if (err) {
1327 		hif_pci_runtime_pm_warn(scn, err);
1328 		return -EINVAL;
1329 	}
1330 
1331 	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
1332 	pm_runtime_put_noidle(dev);
1333 
1334 	return 0;
1335 }
1336 
1337 /**
1338  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
1339  *                                      reason
1340  * @scn: hif context
1341  * @lock: runtime_pm lock being acquired
1342  *
1343  * Return 0 if successful.
1344  */
1345 static int __hif_pm_runtime_prevent_suspend(struct hif_softc *scn,
1346 					    struct hif_pm_runtime_lock *lock)
1347 {
1348 	struct device *dev = hif_bus_get_dev(scn);
1349 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1350 	int ret = 0;
1351 
1352 	/*
1353 	 * We shouldn't be setting context->timeout to zero here when
1354 	 * context is active as we will have a case where Timeout API's
1355 	 * for the same context called back to back.
1356 	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
1357 	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
1358 	 * API to ensure the timeout version is no more active and
1359 	 * list entry of this context will be deleted during allow suspend.
1360 	 */
1361 	if (lock->active)
1362 		return 0;
1363 
1364 	ret = __hif_pm_runtime_get(dev);
1365 
1366 	/**
1367 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
1368 	 * RPM_SUSPENDING. Any other negative value is an error.
1369 	 * We shouldn't be do runtime_put here as in later point allow
1370 	 * suspend gets called with the the context and there the usage count
1371 	 * is decremented, so suspend will be prevented.
1372 	 */
1373 
1374 	if (ret < 0 && ret != -EINPROGRESS) {
1375 		rpm_ctx->pm_stats.runtime_get_err++;
1376 		hif_pci_runtime_pm_warn(scn,
1377 					"Prevent Suspend Runtime PM Error");
1378 	}
1379 
1380 	rpm_ctx->prevent_suspend_cnt++;
1381 
1382 	lock->active = true;
1383 
1384 	list_add_tail(&lock->list, &rpm_ctx->prevent_suspend_list);
1385 
1386 	qdf_atomic_inc(&rpm_ctx->pm_stats.prevent_suspend);
1387 
1388 	hif_debug("%s: in pm_state:%s ret: %d", __func__,
1389 		  hif_pm_runtime_state_to_string(
1390 			  qdf_atomic_read(&rpm_ctx->pm_state)),
1391 		  ret);
1392 
1393 	return ret;
1394 }
1395 
1396 /**
1397  * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1398  * @scn: hif context
1399  * @lock: runtime pm lock
1400  *
1401  * This function will allow runtime suspend, by decrementing
1402  * device's usage count.
1403  *
1404  * Return: status
1405  */
1406 static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
1407 					  struct hif_pm_runtime_lock *lock)
1408 {
1409 	struct device *dev = hif_bus_get_dev(scn);
1410 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1411 	int ret = 0;
1412 	int usage_count;
1413 
1414 	if (rpm_ctx->prevent_suspend_cnt == 0)
1415 		return ret;
1416 
1417 	if (!lock->active)
1418 		return ret;
1419 
1420 	usage_count = atomic_read(&dev->power.usage_count);
1421 
1422 	/*
1423 	 * For runtime PM enabled case, the usage count should never be 0
1424 	 * at this point. For runtime PM disabled case, it should never be
1425 	 * 2 at this point. Catch unexpected PUT without GET here.
1426 	 */
1427 	if ((usage_count == 2 && !scn->hif_config.enable_runtime_pm) ||
1428 	    usage_count == 0) {
1429 		hif_pci_runtime_pm_warn(scn, "PUT without a GET Operation");
1430 		return -EINVAL;
1431 	}
1432 
1433 	list_del(&lock->list);
1434 
1435 	rpm_ctx->prevent_suspend_cnt--;
1436 
1437 	lock->active = false;
1438 	lock->timeout = 0;
1439 
1440 	hif_pm_runtime_mark_last_busy(GET_HIF_OPAQUE_HDL(scn));
1441 	ret = hif_pm_runtime_put_auto(dev);
1442 
1443 	hif_debug("%s: in pm_state:%s ret: %d", __func__,
1444 		  hif_pm_runtime_state_to_string(
1445 			  qdf_atomic_read(&rpm_ctx->pm_state)),
1446 		  ret);
1447 
1448 	qdf_atomic_inc(&rpm_ctx->pm_stats.allow_suspend);
1449 	return ret;
1450 }
1451 
1452 /**
1453  * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout
1454  * @data: calback data that is the pci context
1455  *
1456  * if runtime locks are acquired with a timeout, this function releases
1457  * the locks when the last runtime lock expires.
1458  *
1459  * dummy implementation until lock acquisition is implemented.
1460  */
1461 static void hif_pm_runtime_lock_timeout_fn(void *data)
1462 {
1463 	struct hif_softc *scn = data;
1464 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1465 	unsigned long timer_expires;
1466 	struct hif_pm_runtime_lock *context, *temp;
1467 
1468 	spin_lock_bh(&rpm_ctx->runtime_lock);
1469 
1470 	timer_expires = rpm_ctx->runtime_timer_expires;
1471 
1472 	/* Make sure we are not called too early, this should take care of
1473 	 * following case
1474 	 *
1475 	 * CPU0                         CPU1 (timeout function)
1476 	 * ----                         ----------------------
1477 	 * spin_lock_irq
1478 	 *                              timeout function called
1479 	 *
1480 	 * mod_timer()
1481 	 *
1482 	 * spin_unlock_irq
1483 	 *                              spin_lock_irq
1484 	 */
1485 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
1486 		rpm_ctx->runtime_timer_expires = 0;
1487 		list_for_each_entry_safe(context, temp,
1488 					 &rpm_ctx->prevent_suspend_list, list) {
1489 			if (context->timeout) {
1490 				__hif_pm_runtime_allow_suspend(scn, context);
1491 				rpm_ctx->pm_stats.allow_suspend_timeout++;
1492 			}
1493 		}
1494 	}
1495 
1496 	spin_unlock_bh(&rpm_ctx->runtime_lock);
1497 }
1498 
1499 /**
1500  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1501  * @scn: hif context
1502  * @data: runtime pm lock
1503  *
1504  * This function will prevent runtime suspend, by incrementing
1505  * device's usage count.
1506  *
1507  * Return: status
1508  */
1509 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1510 				   struct hif_pm_runtime_lock *data)
1511 {
1512 	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
1513 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1514 	struct hif_pm_runtime_lock *context = data;
1515 
1516 	if (!scn->hif_config.enable_runtime_pm)
1517 		return 0;
1518 
1519 	if (!context)
1520 		return -EINVAL;
1521 
1522 	if (in_irq())
1523 		WARN_ON(1);
1524 
1525 	spin_lock_bh(&rpm_ctx->runtime_lock);
1526 	context->timeout = 0;
1527 	__hif_pm_runtime_prevent_suspend(scn, context);
1528 	spin_unlock_bh(&rpm_ctx->runtime_lock);
1529 
1530 	return 0;
1531 }
1532 
1533 /**
1534  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1535  * @scn: hif context
1536  * @data: runtime pm lock
1537  *
1538  * This function will allow runtime suspend, by decrementing
1539  * device's usage count.
1540  *
1541  * Return: status
1542  */
1543 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1544 				 struct hif_pm_runtime_lock *data)
1545 {
1546 	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
1547 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1548 	struct hif_pm_runtime_lock *context = data;
1549 
1550 	if (!scn->hif_config.enable_runtime_pm)
1551 		return 0;
1552 
1553 	if (!context)
1554 		return -EINVAL;
1555 
1556 	if (in_irq())
1557 		WARN_ON(1);
1558 
1559 	spin_lock_bh(&rpm_ctx->runtime_lock);
1560 
1561 	__hif_pm_runtime_allow_suspend(scn, context);
1562 
1563 	/* The list can be empty as well in cases where
1564 	 * we have one context in the list and the allow
1565 	 * suspend came before the timer expires and we delete
1566 	 * context above from the list.
1567 	 * When list is empty prevent_suspend count will be zero.
1568 	 */
1569 	if (rpm_ctx->prevent_suspend_cnt == 0 &&
1570 	    rpm_ctx->runtime_timer_expires > 0) {
1571 		qdf_timer_free(&rpm_ctx->runtime_timer);
1572 		rpm_ctx->runtime_timer_expires = 0;
1573 	}
1574 
1575 	spin_unlock_bh(&rpm_ctx->runtime_lock);
1576 
1577 	return 0;
1578 }
1579 
1580 /**
1581  * hif_runtime_lock_init() - API to initialize Runtime PM context
1582  * @name: Context name
1583  *
1584  * This API initializes the Runtime PM context of the caller and
1585  * return the pointer.
1586  *
1587  * Return: None
1588  */
1589 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
1590 {
1591 	struct hif_pm_runtime_lock *context;
1592 
1593 	hif_debug("Initializing Runtime PM wakelock %s", name);
1594 
1595 	context = qdf_mem_malloc(sizeof(*context));
1596 	if (!context)
1597 		return -ENOMEM;
1598 
1599 	context->name = name ? name : "Default";
1600 	lock->lock = context;
1601 
1602 	return 0;
1603 }
1604 
1605 /**
1606  * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
1607  * @data: Runtime PM context
1608  *
1609  * Return: void
1610  */
1611 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
1612 			     struct hif_pm_runtime_lock *data)
1613 {
1614 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1615 	struct hif_runtime_pm_ctx *rpm_ctx;
1616 	struct hif_pm_runtime_lock *context = data;
1617 
1618 	if (!context) {
1619 		hif_err("Runtime PM wakelock context is NULL");
1620 		return;
1621 	}
1622 
1623 	hif_debug("Deinitializing Runtime PM wakelock %s", context->name);
1624 
1625 	/*
1626 	 * Ensure to delete the context list entry and reduce the usage count
1627 	 * before freeing the context if context is active.
1628 	 */
1629 	if (scn) {
1630 		rpm_ctx = hif_bus_get_rpm_ctx(scn);
1631 		spin_lock_bh(&rpm_ctx->runtime_lock);
1632 		__hif_pm_runtime_allow_suspend(scn, context);
1633 		spin_unlock_bh(&rpm_ctx->runtime_lock);
1634 	}
1635 
1636 	qdf_mem_free(context);
1637 }
1638 
1639 /**
1640  * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
1641  * @hif_ctx: HIF context
1642  *
1643  * Return: true for runtime suspended, otherwise false
1644  */
1645 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
1646 {
1647 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1648 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1649 
1650 	return qdf_atomic_read(&rpm_ctx->pm_state) ==
1651 					HIF_PM_RUNTIME_STATE_SUSPENDED;
1652 }
1653 
1654 /*
1655  * hif_pm_runtime_suspend_lock() - spin_lock on marking runtime suspend
1656  * @hif_ctx: HIF context
1657  *
1658  * Return: void
1659  */
1660 void hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx)
1661 {
1662 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1663 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1664 
1665 	qdf_spin_lock_irqsave(&rpm_ctx->runtime_suspend_lock);
1666 }
1667 
1668 /*
1669  * hif_pm_runtime_suspend_unlock() - spin_unlock on marking runtime suspend
1670  * @hif_ctx: HIF context
1671  *
1672  * Return: void
1673  */
1674 void hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx)
1675 {
1676 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1677 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1678 
1679 	qdf_spin_unlock_irqrestore(&rpm_ctx->runtime_suspend_lock);
1680 }
1681 
1682 /**
1683  * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
1684  * @hif_ctx: HIF context
1685  *
1686  * monitor_wake_intr variable can be used to indicate if driver expects wake
1687  * MSI for runtime PM
1688  *
1689  * Return: monitor_wake_intr variable
1690  */
1691 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
1692 {
1693 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1694 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1695 
1696 	return qdf_atomic_read(&rpm_ctx->monitor_wake_intr);
1697 }
1698 
1699 /**
1700  * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
1701  * @hif_ctx: HIF context
1702  * @val: value to set
1703  *
1704  * monitor_wake_intr variable can be used to indicate if driver expects wake
1705  * MSI for runtime PM
1706  *
1707  * Return: void
1708  */
1709 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
1710 					  int val)
1711 {
1712 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1713 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1714 
1715 	qdf_atomic_set(&rpm_ctx->monitor_wake_intr, val);
1716 }
1717 
1718 /**
1719  * hif_pm_runtime_check_and_request_resume() - check if the device is runtime
1720  *					       suspended and request resume.
1721  * @hif_ctx: HIF context
1722  *
1723  * This function is to check if the device is runtime suspended and
1724  * request for runtime resume.
1725  *
1726  * Return: void
1727  */
1728 void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
1729 {
1730 	hif_pm_runtime_suspend_lock(hif_ctx);
1731 	if (hif_pm_runtime_is_suspended(hif_ctx)) {
1732 		hif_pm_runtime_suspend_unlock(hif_ctx);
1733 		hif_pm_runtime_request_resume(hif_ctx);
1734 	} else {
1735 		hif_pm_runtime_suspend_unlock(hif_ctx);
1736 	}
1737 }
1738 
1739 /**
1740  * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark my data path
1741  * @hif_ctx: HIF context
1742  *
1743  * Return: void
1744  */
1745 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1746 {
1747 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1748 	struct hif_runtime_pm_ctx *rpm_ctx;
1749 
1750 	if (!scn)
1751 		return;
1752 
1753 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1754 	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 1);
1755 	rpm_ctx->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();
1756 
1757 	hif_pm_runtime_mark_last_busy(hif_ctx);
1758 }
1759 
1760 /**
1761  * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy by dp rx
1762  * @hif_ctx: HIF context
1763  *
1764  * Return: dp rx busy set value
1765  */
1766 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1767 {
1768 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1769 	struct hif_runtime_pm_ctx *rpm_ctx;
1770 
1771 	if (!scn)
1772 		return 0;
1773 
1774 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1775 	return qdf_atomic_read(&rpm_ctx->pm_dp_rx_busy);
1776 }
1777 
1778 /**
1779  * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
1780  * @hif_ctx: HIF context
1781  *
1782  * Return: timestamp of last mark busy by dp rx
1783  */
1784 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
1785 {
1786 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1787 	struct hif_runtime_pm_ctx *rpm_ctx;
1788 
1789 	if (!scn)
1790 		return 0;
1791 
1792 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1793 	return rpm_ctx->dp_last_busy_timestamp;
1794 }
1795 
1796 void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val)
1797 {
1798 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1799 
1800 	qdf_atomic_set(&scn->pm_link_state, val);
1801 }
1802 
1803 uint8_t hif_pm_get_link_state(struct hif_opaque_softc *hif_handle)
1804 {
1805 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
1806 
1807 	return qdf_atomic_read(&scn->pm_link_state);
1808 }
1809 
1810 /**
1811  * hif_pm_runtime_update_stats() - API to update RTPM stats for HTC layer
1812  * @scn: hif context
1813  * @rtpm_dbgid: RTPM dbg_id
1814  * @hif_pm_htc_stats: Stats category
1815  *
1816  * Return: void
1817  */
1818 void hif_pm_runtime_update_stats(struct hif_opaque_softc *hif_ctx,
1819 				 wlan_rtpm_dbgid rtpm_dbgid,
1820 				 enum hif_pm_htc_stats stats)
1821 {
1822 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1823 	struct hif_runtime_pm_ctx *rpm_ctx;
1824 
1825 	if (rtpm_dbgid != RTPM_ID_HTC)
1826 		return;
1827 
1828 	if (!scn)
1829 		return;
1830 
1831 	if (!hif_pci_pm_runtime_enabled(scn))
1832 		return;
1833 
1834 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1835 	if (!rpm_ctx)
1836 		return;
1837 
1838 	switch (stats) {
1839 	case HIF_PM_HTC_STATS_GET_HTT_RESPONSE:
1840 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_get_htt_resp++;
1841 		break;
1842 	case HIF_PM_HTC_STATS_GET_HTT_NO_RESPONSE:
1843 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_get_htt_no_resp++;
1844 		break;
1845 	case HIF_PM_HTC_STATS_PUT_HTT_RESPONSE:
1846 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_put_htt_resp++;
1847 		break;
1848 	case HIF_PM_HTC_STATS_PUT_HTT_NO_RESPONSE:
1849 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_put_htt_no_resp++;
1850 		break;
1851 	case HIF_PM_HTC_STATS_PUT_HTT_ERROR:
1852 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_put_htt_error++;
1853 		break;
1854 	case HIF_PM_HTC_STATS_PUT_HTC_CLEANUP:
1855 		rpm_ctx->pm_stats.pm_stats_htc.rtpm_put_htc_cleanup++;
1856 		break;
1857 	default:
1858 		break;
1859 	}
1860 }
1861 
1862 #endif /* FEATURE_RUNTIME_PM */
1863