xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_runtime_pm.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7 
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/slab.h>
18 #include <linux/interrupt.h>
19 #include <linux/if_arp.h>
20 #include "hif_io32.h"
21 #include "hif_runtime_pm.h"
22 #include "hif.h"
23 #include "target_type.h"
24 #include "hif_main.h"
25 #include "ce_main.h"
26 #include "ce_api.h"
27 #include "ce_internal.h"
28 #include "ce_reg.h"
29 #include "ce_bmi.h"
30 #include "regtable.h"
31 #include "hif_hw_version.h"
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include "qdf_status.h"
35 #include "qdf_atomic.h"
36 #include "pld_common.h"
37 #include "mp_dev.h"
38 #include "hif_debug.h"
39 
40 #include "ce_tasklet.h"
41 #include "targaddrs.h"
42 #include "hif_exec.h"
43 
44 #ifdef FEATURE_RUNTIME_PM
45 /**
46  * hif_pci_pm_runtime_enabled() - To check if Runtime PM is enabled
47  * @scn: hif context
48  *
49  * This function will check if Runtime PM is enabled or not.
50  *
51  * Return: void
52  */
53 static bool hif_pci_pm_runtime_enabled(struct hif_softc *scn)
54 {
55 	if (scn->hif_config.enable_runtime_pm)
56 		return true;
57 
58 	return pm_runtime_enabled(hif_bus_get_dev(scn));
59 }
60 
61 /**
62  * hif_pm_runtime_state_to_string() - Mapping state into string
63  * @state: runtime pm state
64  *
65  * This function will map the runtime pm state into corresponding
66  * string for debug purpose.
67  *
68  * Return: pointer to the string
69  */
70 static const char *hif_pm_runtime_state_to_string(uint32_t state)
71 {
72 	switch (state) {
73 	case HIF_PM_RUNTIME_STATE_NONE:
74 		return "INIT_STATE";
75 	case HIF_PM_RUNTIME_STATE_ON:
76 		return "ON";
77 	case HIF_PM_RUNTIME_STATE_RESUMING:
78 		return "RESUMING";
79 	case HIF_PM_RUNTIME_STATE_SUSPENDING:
80 		return "SUSPENDING";
81 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
82 		return "SUSPENDED";
83 	default:
84 		return "INVALID STATE";
85 	}
86 }
87 
/* Emit one pm_stats counter as a right-aligned "name: value" seq_file line */
#define HIF_PCI_RUNTIME_PM_STATS(_s, _rpm_ctx, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, (_rpm_ctx)->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @scn: hif_softc context
 * @msg: log message
 *
 * Log runtime pm stats when something seems off, then trigger a debug
 * panic unless firmware is already down (in which case recovery is in
 * progress and a panic would be redundant).
 *
 * NOTE(review): prevent_suspend_list is walked here without holding
 * rpm_ctx->runtime_lock — presumably tolerable on this fatal/diagnostic
 * path, but confirm concurrent list mutation cannot occur.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_softc *scn,
				    const char *msg)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_pm_runtime_lock *ctx;
	int i;

	/* Overall state as seen by HIF and by the kernel PM core */
	hif_nofl_debug("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
		       msg, atomic_read(&dev->power.usage_count),
		       hif_pm_runtime_state_to_string(
				atomic_read(&rpm_ctx->pm_state)),
		       rpm_ctx->prevent_suspend_cnt);

	hif_nofl_debug("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
		       dev->power.runtime_status,
		       dev->power.runtime_error,
		       dev->power.disable_depth,
		       dev->power.autosuspend_delay);

	hif_nofl_debug("runtime_get: %u, runtime_put: %u, request_resume: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get),
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put),
		       rpm_ctx->pm_stats.request_resume);

	/* Per-debug-id get/put counters with last-access timestamps */
	hif_nofl_debug("get  put  get-timestamp put-timestamp :DBGID_NAME");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		hif_nofl_debug("%-10d %-10d  0x%-10llx  0x%-10llx :%-30s",
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_get_dbgid[i]),
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_put_dbgid[i]),
			       rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i],
			       rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i],
			       rtpm_string_from_dbgid(i));
	}

	hif_nofl_debug("allow_suspend: %u, prevent_suspend: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend),
		       qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));

	hif_nofl_debug("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
		       rpm_ctx->pm_stats.prevent_suspend_timeout,
		       rpm_ctx->pm_stats.allow_suspend_timeout);

	hif_nofl_debug("Suspended: %u, resumed: %u count",
		       rpm_ctx->pm_stats.suspended,
		       rpm_ctx->pm_stats.resumed);

	hif_nofl_debug("suspend_err: %u, runtime_get_err: %u",
		       rpm_ctx->pm_stats.suspend_err,
		       rpm_ctx->pm_stats.runtime_get_err);

	hif_nofl_debug("Active Wakeup Sources preventing Runtime Suspend: ");

	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		hif_nofl_debug("source %s; timeout %d ms",
			       ctx->name, ctx->timeout);
	}

	/* FW already dead: recovery will handle it, skip the panic */
	if (qdf_is_fw_down()) {
		hif_err("fw is down");
		return;
	}

	QDF_DEBUG_PANIC("hif_pci_runtime_pm_warn");
}
166 
/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_softc *scn = s->private;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	/* Indexed by enum hif_pm_runtime_state; must stay in sync with it */
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
		"SUSPENDING", "SUSPENDED"};
	unsigned int msecs_age;
	qdf_time_t usecs_age;
	int pm_state = atomic_read(&rpm_ctx->pm_state);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *ctx;
	int i;

	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
	seq_printf(s, "%30s: %ps\n", "Last Resume Caller",
		   rpm_ctx->pm_stats.last_resume_caller);
	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
		   rpm_ctx->pm_stats.last_busy_marker);

	/* Age of the last busy mark, in microseconds */
	usecs_age = qdf_get_log_timestamp_usecs() -
		rpm_ctx->pm_stats.last_busy_timestamp;
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
		   rpm_ctx->pm_stats.last_busy_timestamp / 1000000,
		   rpm_ctx->pm_stats.last_busy_timestamp % 1000000);
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
		   usecs_age / 1000000, usecs_age % 1000000);

	/* suspend_jiffies is only meaningful while actually suspended */
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(jiffies -
					     rpm_ctx->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
			   msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
		   atomic_read(&dev->power.usage_count));

	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
		   rpm_ctx->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, resumed);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, request_resume);
	seq_printf(s, "%30s: %u\n", "prevent_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));
	seq_printf(s, "%30s: %u\n", "allow_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend));

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, runtime_get_err);

	seq_printf(s, "%30s: %u\n", "runtime_get",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get));
	seq_printf(s, "%30s: %u\n", "runtime_put",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put));
	/* Per-debug-id get/put counters and timestamps, one row per id */
	seq_puts(s, "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				 &rpm_ctx->pm_stats.runtime_get_dbgid[i]));
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				 &rpm_ctx->pm_stats.runtime_put_dbgid[i]));
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i]);
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i]);
		seq_printf(s, ":%-30s\n", rtpm_string_from_dbgid(i));
	}

	/* Remaining time on the prevent-suspend timeout timer, if armed */
	timer_expires = rpm_ctx->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
			   msecs_age / 1000, msecs_age % 1000);
	}

	/* prevent_suspend_list is mutated under runtime_lock */
	spin_lock_bh(&rpm_ctx->runtime_lock);
	if (list_empty(&rpm_ctx->prevent_suspend_list)) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}
275 
276 #undef HIF_PCI_RUNTIME_PM_STATS
277 
/**
 * hif_pci_runtime_pm_open() - open a debug fs file to access the runtime pm stats
 * @inode: debugfs inode; i_private carries the hif context
 * @file: file instance being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			inode->i_private);
}
290 
/* debugfs file operations for the read-only cnss_runtime_pm stats entry */
static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_pci_runtime_pm_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};
298 
299 /**
300  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
301  * @scn: hif context
302  *
303  * creates a debugfs entry to debug the runtime pm feature.
304  */
305 static void hif_runtime_pm_debugfs_create(struct hif_softc *scn)
306 {
307 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
308 
309 	rpm_ctx->pm_dentry = debugfs_create_file("cnss_runtime_pm",
310 						 0400, NULL, scn,
311 						 &hif_pci_runtime_pm_fops);
312 }
313 
/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @scn: hif context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	debugfs_remove(rpm_ctx->pm_dentry);
}
326 
/**
 * hif_runtime_init() - Initialize Runtime PM
 * @dev: device structure
 * @delay: delay to be configured for auto suspend
 *
 * This function will init all the Runtime PM config.
 *
 * Return: void
 */
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	/* Drop one usage count reference without triggering idle; pairs
	 * with pm_runtime_get_noresume() in hif_runtime_exit().
	 */
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}
345 
/**
 * hif_runtime_exit() - Deinit/Exit Runtime PM
 * @dev: device structure
 *
 * This function will deinit all the Runtime PM config.
 *
 * Return: void
 */
static void hif_runtime_exit(struct device *dev)
{
	/* Pairs with pm_runtime_put_noidle() in hif_runtime_init() */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	/* Symmetric call to make sure default usage count == 2 */
	pm_runtime_forbid(dev);
}
361 
362 static void hif_pm_runtime_lock_timeout_fn(void *data);
363 
364 /**
365  * hif_pm_runtime_start(): start the runtime pm
366  * @scn: hif context
367  *
368  * After this call, runtime pm will be active.
369  */
370 void hif_pm_runtime_start(struct hif_softc *scn)
371 {
372 	uint32_t mode = hif_get_conparam(scn);
373 	struct device *dev = hif_bus_get_dev(scn);
374 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
375 
376 	if (!scn->hif_config.enable_runtime_pm) {
377 		hif_info("RUNTIME PM is disabled in ini");
378 		return;
379 	}
380 
381 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
382 	    mode == QDF_GLOBAL_MONITOR_MODE) {
383 		hif_info("RUNTIME PM is disabled for FTM/EPPING mode");
384 		return;
385 	}
386 
387 	qdf_timer_init(NULL, &rpm_ctx->runtime_timer,
388 		       hif_pm_runtime_lock_timeout_fn,
389 		       scn, QDF_TIMER_TYPE_WAKE_APPS);
390 
391 	hif_info("Enabling RUNTIME PM, Delay: %d ms",
392 		 scn->hif_config.runtime_pm_delay);
393 
394 	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_ON);
395 	hif_runtime_init(dev, scn->hif_config.runtime_pm_delay);
396 	hif_runtime_pm_debugfs_create(scn);
397 }
398 
/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @scn: hif context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_pm_runtime_start().
 */
void hif_pm_runtime_stop(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!scn->hif_config.enable_runtime_pm)
		return;

	/* Mirrors the mode checks in hif_pm_runtime_start(): runtime PM
	 * was never enabled for these modes, so there is nothing to undo.
	 */
	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE)
		return;

	hif_runtime_exit(dev);

	/* Make sure the device is fully resumed before tearing down */
	hif_pm_runtime_sync_resume(GET_HIF_OPAQUE_HDL(scn));

	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(scn);
	qdf_timer_free(&rpm_ctx->runtime_timer);
}
428 
429 /**
430  * hif_pm_runtime_open(): initialize runtime pm
431  * @scn: hif ctx
432  *
433  * Early initialization
434  */
435 void hif_pm_runtime_open(struct hif_softc *scn)
436 {
437 	int i;
438 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
439 
440 	spin_lock_init(&rpm_ctx->runtime_lock);
441 	qdf_atomic_init(&rpm_ctx->pm_state);
442 	hif_runtime_lock_init(&rpm_ctx->prevent_linkdown_lock,
443 			      "prevent_linkdown_lock");
444 	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);
445 	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get);
446 	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put);
447 	qdf_atomic_init(&rpm_ctx->pm_stats.allow_suspend);
448 	qdf_atomic_init(&rpm_ctx->pm_stats.prevent_suspend);
449 	for (i = 0; i < RTPM_ID_MAX; i++) {
450 		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
451 		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
452 	}
453 	INIT_LIST_HEAD(&rpm_ctx->prevent_suspend_list);
454 }
455 
456 /**
457  * hif_check_for_get_put_out_of_sync() - Check if Get/Put is out of sync
458  * @scn: hif context
459  *
460  * This function will check if get and put are out of sync or not.
461  *
462  * Return: void
463  */
464 static void  hif_check_for_get_put_out_of_sync(struct hif_softc *scn)
465 {
466 	int32_t i;
467 	int32_t get_count, put_count;
468 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
469 
470 	if (qdf_is_fw_down())
471 		return;
472 
473 	for (i = 0; i < RTPM_ID_MAX; i++) {
474 		get_count = qdf_atomic_read(
475 				&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
476 		put_count = qdf_atomic_read(
477 				&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
478 		if (get_count != put_count) {
479 			QDF_DEBUG_PANIC("%s get-put out of sync. get %d put %d",
480 					rtpm_string_from_dbgid(i),
481 					get_count, put_count);
482 		}
483 	}
484 }
485 
/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize runtime PM gets/puts from driver
 * @scn: hif context
 *
 * Ensure all gets/puts are in sync before exiting runtime PM feature.
 * Also make sure all runtime PM locks are deinitialized properly.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_softc *scn)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	/* Panics (unless FW is down) if any dbgid has get != put */
	hif_check_for_get_put_out_of_sync(scn);

	/* runtime_lock is dropped around each deinit, presumably because
	 * hif_runtime_lock_deinit() takes the lock itself or may sleep.
	 * NOTE(review): after the unlock, 'tmp' (the saved next entry)
	 * could in principle be freed concurrently — confirm callers
	 * guarantee no list mutation at this point in teardown.
	 */
	spin_lock_bh(&rpm_ctx->runtime_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &rpm_ctx->prevent_suspend_list, list) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(scn), ctx);
		spin_lock_bh(&rpm_ctx->runtime_lock);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);
}
511 
512 static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
513 					  struct hif_pm_runtime_lock *lock);
514 
515 /**
516  * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
517  * @scn: hif context
518  *
519  * API is used to empty the runtime pm prevent suspend list.
520  *
521  * Return: void
522  */
523 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_softc *scn)
524 {
525 	struct hif_pm_runtime_lock *ctx, *tmp;
526 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
527 
528 	spin_lock_bh(&rpm_ctx->runtime_lock);
529 	list_for_each_entry_safe(ctx, tmp,
530 				 &rpm_ctx->prevent_suspend_list, list) {
531 		__hif_pm_runtime_allow_suspend(scn, ctx);
532 	}
533 	spin_unlock_bh(&rpm_ctx->runtime_lock);
534 }
535 
536 /**
537  * hif_pm_runtime_close(): close runtime pm
538  * @scn: hif ctx
539  *
540  * ensure runtime_pm is stopped before closing the driver
541  */
542 void hif_pm_runtime_close(struct hif_softc *scn)
543 {
544 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
545 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
546 
547 	/*
548 	 * Here cds hif context was already NULL,
549 	 * so calling hif_runtime_lock_deinit, instead of
550 	 * qdf_runtime_lock_deinit(&rpm_ctx->prevent_linkdown_lock);
551 	 */
552 	hif_runtime_lock_deinit(hif_ctx, rpm_ctx->prevent_linkdown_lock.lock);
553 
554 	hif_is_recovery_in_progress(scn) ?
555 		hif_pm_runtime_sanitize_on_ssr_exit(scn) :
556 		hif_pm_runtime_sanitize_on_exit(scn);
557 }
558 
559 /**
560  * hif_pm_runtime_sync_resume() - Invoke synchronous runtime resume.
561  * @hif_ctx: hif context
562  *
563  * This function will invoke synchronous runtime resume.
564  *
565  * Return: status
566  */
567 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
568 {
569 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
570 	struct hif_runtime_pm_ctx *rpm_ctx;
571 	int pm_state;
572 
573 	if (!scn)
574 		return -EINVAL;
575 
576 	if (!hif_pci_pm_runtime_enabled(scn))
577 		return 0;
578 
579 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
580 	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
581 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
582 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
583 		hif_info("Runtime PM resume is requested by %ps",
584 			 (void *)_RET_IP_);
585 
586 	rpm_ctx->pm_stats.request_resume++;
587 	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
588 
589 	return pm_runtime_resume(hif_bus_get_dev(scn));
590 }
591 
592 /**
593  * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
594  * @scn: hif context
595  * @flag: prevent linkdown if true otherwise allow
596  *
597  * this api should only be called as part of bus prevent linkdown
598  */
599 void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
600 {
601 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
602 
603 	if (flag)
604 		qdf_runtime_pm_prevent_suspend(&rpm_ctx->prevent_linkdown_lock);
605 	else
606 		qdf_runtime_pm_allow_suspend(&rpm_ctx->prevent_linkdown_lock);
607 }
608 
609 /**
610  * __hif_runtime_pm_set_state(): utility function
611  * @state: state to set
612  *
613  * indexes into the runtime pm state and sets it.
614  */
615 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
616 				       enum hif_pm_runtime_state state)
617 {
618 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
619 
620 	if (!rpm_ctx) {
621 		hif_err("HIF_CTX not initialized");
622 		return;
623 	}
624 
625 	qdf_atomic_set(&rpm_ctx->pm_state, state);
626 }
627 
/**
 * hif_runtime_pm_set_state_on():  adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that the runtime pm state should be on
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}
637 
/**
 * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm resuming has started
 */
static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
}
647 
/**
 * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm suspend has started
 */
static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
}
657 
/**
 * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime suspend attempt has been completed successfully
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}
667 
/**
 * hif_log_runtime_suspend_success() - log a successful runtime suspend
 * @hif_ctx: hif context
 *
 * Bump the suspended counter and record the suspend time for the
 * "Suspended Since" debugfs output.
 */
static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
{
	/* NOTE(review): hif_ctx is already a struct hif_softc *, so
	 * HIF_GET_SOFTC here is presumably a benign cast — confirm.
	 */
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspended++;
	rpm_ctx->pm_stats.suspend_jiffies = jiffies;
}
682 
/**
 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
 * @hif_ctx: opaque hif context
 *
 * Bump the suspend error counter. (Marking last busy to delay a retry
 * is done by the caller, hif_process_runtime_suspend_failure(), not
 * here.)
 */
static void hif_log_runtime_suspend_failure(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspend_err++;
}
699 
/**
 * hif_log_runtime_resume_success() - log a successful runtime resume
 * @hif_ctx: opaque hif context
 *
 * Bump the resumed counter. (Marking last busy is done by the caller,
 * hif_process_runtime_resume_success(), not here.)
 */
static void hif_log_runtime_resume_success(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.resumed++;
}
716 
/**
 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
 * @hif_ctx: opaque hif context
 *
 * Record the failure.
 * mark last busy to delay a retry.
 * adjust the runtime_pm state back to ON.
 */
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_suspend_failure(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}
732 
733 /**
734  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
735  *
736  * Makes sure that the pci link will be taken down by the suspend opperation.
737  * If the hif layer is configured to leave the bus on, runtime suspend will
738  * not save any power.
739  *
740  * Set the runtime suspend state to in progress.
741  *
742  * return -EINVAL if the bus won't go down.  otherwise return 0
743  */
744 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
745 {
746 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
747 
748 	if (!hif_can_suspend_link(hif_ctx)) {
749 		hif_err("Runtime PM not supported for link up suspend");
750 		return -EINVAL;
751 	}
752 
753 	hif_runtime_pm_set_state_suspending(scn);
754 	return 0;
755 }
756 
/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: opaque hif context
 *
 * Move the runtime PM state machine to SUSPENDED and record the
 * successful suspend in the stats.
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(hif_sc);
	hif_log_runtime_suspend_success(hif_sc);
}
770 
/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: opaque hif context
 *
 * Stop monitoring wake interrupts and move the runtime PM state
 * machine to RESUMING.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_runtime_pm_set_state_resuming(hif_sc);
}
783 
/**
 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
 * @hif_ctx: opaque hif context
 *
 * Record the successful resume, mark the device busy to avoid an
 * immediate re-suspend, and move the state machine back to ON.
 */
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_resume_success(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(hif_sc);
}
798 
/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 * @hif_ctx: opaque hif context
 *
 * Suspend the bus, arm wake-interrupt monitoring, then run the noirq
 * suspend phase. On a noirq failure the bus is resumed again so the
 * device is left active and consistent.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend: %d", errno);
		return errno;
	}

	/* Watch for wake interrupts while suspended; must be undone on
	 * any failure below.
	 */
	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend noirq: %d", errno);
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		goto bus_resume;
	}

	/* Clear the DP RX busy marker once the device is fully suspended */
	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 0);

	return 0;

bus_resume:
	/* Resume must not fail here, or the device state is undefined */
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}
834 
835 /**
836  * hif_fastpath_resume() - resume fastpath for runtimepm
837  *
838  * ensure that the fastpath write index register is up to date
839  * since runtime pm may cause ce_send_fast to skip the register
840  * write.
841  *
842  * fastpath only applicable to legacy copy engine
843  */
844 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
845 {
846 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
847 	struct CE_state *ce_state;
848 
849 	if (!scn)
850 		return;
851 
852 	if (scn->fastpath_mode_on) {
853 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
854 			return;
855 
856 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
857 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
858 
859 		/*war_ce_src_ring_write_idx_set */
860 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
861 					  ce_state->src_ring->write_index);
862 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
863 		Q_TARGET_ACCESS_END(scn);
864 	}
865 }
866 
/**
 * hif_runtime_resume() - do the bus resume part of a runtime resume
 * @hif_ctx: opaque hif context
 *
 * Runs the noirq resume phase (which must not fail) followed by the
 * regular bus resume.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));

	ret = hif_bus_resume(hif_ctx);
	if (ret)
		hif_err("Failed runtime resume: %d", ret);

	return ret;
}
883 
884 /**
885  * hif_pm_stats_runtime_get_record() - record runtime get statistics
886  * @scn: hif context
887  * @rtpm_dbgid: debug id to trace who use it
888  *
889  *
890  * Return: void
891  */
892 static void hif_pm_stats_runtime_get_record(struct hif_softc *scn,
893 					    wlan_rtpm_dbgid rtpm_dbgid)
894 {
895 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
896 
897 	if (rtpm_dbgid >= RTPM_ID_MAX) {
898 		QDF_BUG(0);
899 		return;
900 	}
901 	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get);
902 	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get_dbgid[rtpm_dbgid]);
903 	rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[rtpm_dbgid] =
904 						qdf_get_log_timestamp();
905 }
906 
907 /**
908  * hif_pm_stats_runtime_put_record() - record runtime put statistics
909  * @scn: hif context
910  * @rtpm_dbgid: dbg_id to trace who use it
911  *
912  *
913  * Return: void
914  */
915 static void hif_pm_stats_runtime_put_record(struct hif_softc *scn,
916 					    wlan_rtpm_dbgid rtpm_dbgid)
917 {
918 	struct device *dev = hif_bus_get_dev(scn);
919 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
920 
921 	if (rtpm_dbgid >= RTPM_ID_MAX) {
922 		QDF_BUG(0);
923 		return;
924 	}
925 
926 	if (atomic_read(&dev->power.usage_count) <= 0) {
927 		QDF_BUG(0);
928 		return;
929 	}
930 
931 	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put);
932 	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put_dbgid[rtpm_dbgid]);
933 	rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[rtpm_dbgid] =
934 						qdf_get_log_timestamp();
935 }
936 
/**
 * hif_pm_runtime_get_sync() - do a get operation with sync resume
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * A get operation will prevent a runtime suspend until a corresponding
 * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
 * resume instead of requesting a resume if it is runtime PM suspended
 * so it can only be called in non-atomic context.
 *
 * Return: 0 if it is runtime PM resumed otherwise an error code.
 */
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
			    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int pm_state;
	int ret;

	if (!rpm_ctx)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	/* Only log when the get will actually trigger a resume */
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);

	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
	ret = pm_runtime_get_sync(dev);

	/* Get can return 1 if the device is already active, just return
	 * success in that case.
	 */
	if (ret > 0)
		ret = 0;

	if (ret) {
		rpm_ctx->pm_stats.runtime_get_err++;
		hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
			qdf_atomic_read(&rpm_ctx->pm_state), ret);
		/* pm_runtime_get_sync() increments the usage count even
		 * on failure; put it back so gets and puts stay balanced.
		 */
		hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
	}

	return ret;
}
988 
989 /**
990  * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
991  * @hif_ctx: pointer of HIF context
992  * @rtpm_dbgid: dbgid to trace who use it
993  *
994  * This API will do a runtime put operation followed by a sync suspend if usage
995  * count is 0 so it can only be called in non-atomic context.
996  *
997  * Return: 0 for success otherwise an error code
998  */
999 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
1000 				    wlan_rtpm_dbgid rtpm_dbgid)
1001 {
1002 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1003 	struct device *dev;
1004 	int usage_count;
1005 	char *err = NULL;
1006 
1007 	if (!scn)
1008 		return -EINVAL;
1009 
1010 	if (!hif_pci_pm_runtime_enabled(scn))
1011 		return 0;
1012 
1013 	dev = hif_bus_get_dev(scn);
1014 	usage_count = atomic_read(&dev->power.usage_count);
1015 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
1016 		err = "Uexpected PUT when runtime PM is disabled";
1017 	else if (usage_count == 0)
1018 		err = "PUT without a GET Operation";
1019 
1020 	if (err) {
1021 		hif_pci_runtime_pm_warn(scn, err);
1022 		return -EINVAL;
1023 	}
1024 
1025 	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
1026 	return pm_runtime_put_sync_suspend(dev);
1027 }
1028 
1029 /**
1030  * hif_pm_runtime_request_resume() - Invoke async runtime resume
1031  * @hif_ctx: hif context
1032  *
1033  * This function will invoke asynchronous runtime resume.
1034  *
1035  * Return: status
1036  */
1037 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
1038 {
1039 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1040 	struct hif_runtime_pm_ctx *rpm_ctx;
1041 	int pm_state;
1042 
1043 	if (!scn)
1044 		return -EINVAL;
1045 
1046 	if (!hif_pci_pm_runtime_enabled(scn))
1047 		return 0;
1048 
1049 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1050 	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
1051 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
1052 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
1053 		hif_info("Runtime PM resume is requested by %ps",
1054 			 (void *)_RET_IP_);
1055 
1056 	rpm_ctx->pm_stats.request_resume++;
1057 	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
1058 
1059 	return hif_pm_request_resume(hif_bus_get_dev(scn));
1060 }
1061 
1062 /**
1063  * hif_pm_runtime_mark_last_busy() - Mark last busy time
1064  * @hif_ctx: hif context
1065  *
1066  * This function will mark the last busy time, this will be used
1067  * to check if auto suspend delay expired or not.
1068  *
1069  * Return: void
1070  */
1071 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
1072 {
1073 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1074 	struct hif_runtime_pm_ctx *rpm_ctx;
1075 
1076 	if (!scn)
1077 		return;
1078 
1079 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1080 	rpm_ctx->pm_stats.last_busy_marker = (void *)_RET_IP_;
1081 	rpm_ctx->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();
1082 
1083 	pm_runtime_mark_last_busy(hif_bus_get_dev(scn));
1084 
1085 	return;
1086 }
1087 
/**
 * hif_pm_runtime_get_noresume() - Inc usage count without resume
 * @hif_ctx: hif context
 * @rtpm_dbgid: Id of the module calling get
 *
 * This function will increment device usage count to avoid runtime
 * suspend, but it would not do resume.
 *
 * Return: void
 */
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
				 wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	if (!hif_pci_pm_runtime_enabled(scn))
		return;

	/* Record the get for debug accounting before taking it */
	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
	pm_runtime_get_noresume(hif_bus_get_dev(scn));
}
1112 
/**
 * hif_pm_runtime_get() - do a get operation on the device
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * A get operation will prevent a runtime suspend until a
 * corresponding put is done.  This api should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
 *
 * return: success if the bus is up and a get has been issued
 *   otherwise an error code.
 */
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
		       wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct device *dev;
	int ret;
	int pm_state;

	if (!scn) {
		hif_err("Could not do runtime get, scn is null");
		return -EFAULT;
	}

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);

	/* Device is active (or runtime PM not started yet): take the get */
	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
	    pm_state == HIF_PM_RUNTIME_STATE_NONE) {
		hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
		ret = __hif_pm_runtime_get(dev);

		/* Get can return 1 if the device is already active, just return
		 * success in that case
		 */
		if (ret > 0)
			ret = 0;

		/* On failure, put back the usage count the get took */
		if (ret)
			hif_pm_runtime_put(hif_ctx, rtpm_dbgid);

		/* -EINPROGRESS (resume already under way) is not counted
		 * as a get error
		 */
		if (ret && ret != -EINPROGRESS) {
			rpm_ctx->pm_stats.runtime_get_err++;
			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
				qdf_atomic_read(&rpm_ctx->pm_state), ret);
		}

		return ret;
	}

	/* Bus suspended or suspending: only request a resume, no get taken */
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);
		ret = -EAGAIN;
	} else {
		ret = -EBUSY;
	}

	rpm_ctx->pm_stats.request_resume++;
	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
	hif_pm_request_resume(dev);

	return ret;
}
1187 
1188 /**
1189  * hif_pm_runtime_put() - do a put operation on the device
1190  * @hif_ctx: pointer of HIF context
1191  * @rtpm_dbgid: dbgid to trace who use it
1192  *
1193  * A put operation will allow a runtime suspend after a corresponding
1194  * get was done.  This api should be used when sending data.
1195  *
1196  * This api will return a failure if runtime pm is stopped
1197  * This api will return failure if it would decrement the usage count below 0.
1198  *
1199  * return: QDF_STATUS_SUCCESS if the put is performed
1200  */
1201 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
1202 		       wlan_rtpm_dbgid rtpm_dbgid)
1203 {
1204 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1205 	struct device *dev;
1206 	int usage_count;
1207 	char *error = NULL;
1208 
1209 	if (!scn) {
1210 		hif_err("Could not do runtime put, scn is null");
1211 		return -EFAULT;
1212 	}
1213 
1214 	if (!hif_pci_pm_runtime_enabled(scn))
1215 		return 0;
1216 
1217 	dev = hif_bus_get_dev(scn);
1218 	usage_count = atomic_read(&dev->power.usage_count);
1219 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
1220 		error = "Unexpected PUT when runtime PM is disabled";
1221 	else if (usage_count == 0)
1222 		error = "PUT without a GET operation";
1223 
1224 	if (error) {
1225 		hif_pci_runtime_pm_warn(scn, error);
1226 		return -EINVAL;
1227 	}
1228 
1229 	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
1230 
1231 	hif_pm_runtime_mark_last_busy(hif_ctx);
1232 	hif_pm_runtime_put_auto(dev);
1233 
1234 	return 0;
1235 }
1236 
1237 /**
1238  * hif_pm_runtime_put_noidle() - do a put operation with no idle
1239  * @hif_ctx: pointer of HIF context
1240  * @rtpm_dbgid: dbgid to trace who use it
1241  *
1242  * This API will do a runtime put no idle operation
1243  *
1244  * Return: 0 for success otherwise an error code
1245  */
1246 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
1247 			      wlan_rtpm_dbgid rtpm_dbgid)
1248 {
1249 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1250 	struct device *dev;
1251 	int usage_count;
1252 	char *err = NULL;
1253 
1254 	if (!scn)
1255 		return -EINVAL;
1256 
1257 	if (!hif_pci_pm_runtime_enabled(scn))
1258 		return 0;
1259 
1260 	dev = hif_bus_get_dev(scn);
1261 	usage_count = atomic_read(&dev->power.usage_count);
1262 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
1263 		err = "Unexpected PUT when runtime PM is disabled";
1264 	else if (usage_count == 0)
1265 		err = "PUT without a GET operation";
1266 
1267 	if (err) {
1268 		hif_pci_runtime_pm_warn(scn, err);
1269 		return -EINVAL;
1270 	}
1271 
1272 	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
1273 	pm_runtime_put_noidle(dev);
1274 
1275 	return 0;
1276 }
1277 
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 *                                      reason
 * @scn: hif context
 * @lock: runtime_pm lock being acquired
 *
 * Called with rpm_ctx->runtime_lock held (see callers).
 *
 * Return 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_softc *scn,
					    struct hif_pm_runtime_lock *lock)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;

	/*
	 * We shouldn't be setting context->timeout to zero here when
	 * context is active as we will have a case where Timeout API's
	 * for the same context called back to back.
	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
	 * API to ensure the timeout version is no more active and
	 * list entry of this context will be deleted during allow suspend.
	 */
	if (lock->active)
		return 0;

	ret = __hif_pm_runtime_get(dev);

	/*
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do a runtime_put here as at a later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */

	if (ret < 0 && ret != -EINPROGRESS) {
		rpm_ctx->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(scn,
					"Prevent Suspend Runtime PM Error");
	}

	/* Track the lock so allow-suspend (or timeout) can release it */
	rpm_ctx->prevent_suspend_cnt++;

	lock->active = true;

	list_add_tail(&lock->list, &rpm_ctx->prevent_suspend_list);

	qdf_atomic_inc(&rpm_ctx->pm_stats.prevent_suspend);

	hif_debug("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  ret);

	return ret;
}
1336 
/**
 * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @scn: hif context
 * @lock: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count.
 *
 * Called with rpm_ctx->runtime_lock held (see callers).
 *
 * Return: status
 */
static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
					  struct hif_pm_runtime_lock *lock)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;
	int usage_count;

	/* No lock is currently preventing suspend: nothing to release */
	if (rpm_ctx->prevent_suspend_cnt == 0)
		return ret;

	/* This lock never did a prevent, or was already released */
	if (!lock->active)
		return ret;

	usage_count = atomic_read(&dev->power.usage_count);

	/*
	 * For runtime PM enabled case, the usage count should never be 0
	 * at this point. For runtime PM disabled case, it should never be
	 * 2 at this point. Catch unexpected PUT without GET here.
	 */
	if ((usage_count == 2 && !scn->hif_config.enable_runtime_pm) ||
	    usage_count == 0) {
		hif_pci_runtime_pm_warn(scn, "PUT without a GET Operation");
		return -EINVAL;
	}

	/* Unlink the lock from the prevent list and deactivate it */
	list_del(&lock->list);

	rpm_ctx->prevent_suspend_cnt--;

	lock->active = false;
	lock->timeout = 0;

	/* Refresh last-busy so the autosuspend delay restarts from now */
	hif_pm_runtime_mark_last_busy(GET_HIF_OPAQUE_HDL(scn));
	ret = hif_pm_runtime_put_auto(dev);

	hif_debug("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  ret);

	qdf_atomic_inc(&rpm_ctx->pm_stats.allow_suspend);
	return ret;
}
1392 
/**
 * hif_pm_runtime_lock_timeout_fn() - callback for the runtime lock timeout
 * @data: callback data that is the hif context
 *
 * if runtime locks are acquired with a timeout, this function releases
 * the locks when the last runtime lock expires.
 */
static void hif_pm_runtime_lock_timeout_fn(void *data)
{
	struct hif_softc *scn = data;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;

	spin_lock_bh(&rpm_ctx->runtime_lock);

	timer_expires = rpm_ctx->runtime_timer_expires;

	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                         CPU1 (timeout function)
	 * ----                         ----------------------
	 * spin_lock_irq
	 *                              timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                              spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		rpm_ctx->runtime_timer_expires = 0;
		/* Release every prevent-suspend lock taken with a timeout */
		list_for_each_entry_safe(context, temp,
					 &rpm_ctx->prevent_suspend_list, list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(scn, context);
				rpm_ctx->pm_stats.allow_suspend_timeout++;
			}
		}
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);
}
1439 
1440 /**
1441  * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1442  * @scn: hif context
1443  * @data: runtime pm lock
1444  *
1445  * This function will prevent runtime suspend, by incrementing
1446  * device's usage count.
1447  *
1448  * Return: status
1449  */
1450 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
1451 				   struct hif_pm_runtime_lock *data)
1452 {
1453 	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
1454 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1455 	struct hif_pm_runtime_lock *context = data;
1456 
1457 	if (!scn->hif_config.enable_runtime_pm)
1458 		return 0;
1459 
1460 	if (!context)
1461 		return -EINVAL;
1462 
1463 	if (in_irq())
1464 		WARN_ON(1);
1465 
1466 	spin_lock_bh(&rpm_ctx->runtime_lock);
1467 	context->timeout = 0;
1468 	__hif_pm_runtime_prevent_suspend(scn, context);
1469 	spin_unlock_bh(&rpm_ctx->runtime_lock);
1470 
1471 	return 0;
1472 }
1473 
1474 /**
1475  * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1476  * @scn: hif context
1477  * @data: runtime pm lock
1478  *
1479  * This function will allow runtime suspend, by decrementing
1480  * device's usage count.
1481  *
1482  * Return: status
1483  */
1484 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
1485 				 struct hif_pm_runtime_lock *data)
1486 {
1487 	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
1488 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1489 	struct hif_pm_runtime_lock *context = data;
1490 
1491 	if (!scn->hif_config.enable_runtime_pm)
1492 		return 0;
1493 
1494 	if (!context)
1495 		return -EINVAL;
1496 
1497 	if (in_irq())
1498 		WARN_ON(1);
1499 
1500 	spin_lock_bh(&rpm_ctx->runtime_lock);
1501 
1502 	__hif_pm_runtime_allow_suspend(scn, context);
1503 
1504 	/* The list can be empty as well in cases where
1505 	 * we have one context in the list and the allow
1506 	 * suspend came before the timer expires and we delete
1507 	 * context above from the list.
1508 	 * When list is empty prevent_suspend count will be zero.
1509 	 */
1510 	if (rpm_ctx->prevent_suspend_cnt == 0 &&
1511 	    rpm_ctx->runtime_timer_expires > 0) {
1512 		qdf_timer_free(&rpm_ctx->runtime_timer);
1513 		rpm_ctx->runtime_timer_expires = 0;
1514 	}
1515 
1516 	spin_unlock_bh(&rpm_ctx->runtime_lock);
1517 
1518 	return 0;
1519 }
1520 
/**
 * hif_runtime_lock_init() - API to initialize Runtime PM context
 * @lock: QDF runtime lock handle to be filled with the new context
 * @name: Context name
 *
 * This API allocates a Runtime PM wakelock context for the caller
 * and stores it in the supplied lock handle.
 *
 * Return: 0 on success, -ENOMEM if the context allocation fails
 */
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{
	struct hif_pm_runtime_lock *context;

	hif_info("Initializing Runtime PM wakelock %s", name);

	context = qdf_mem_malloc(sizeof(*context));
	if (!context)
		return -ENOMEM;

	/* Fall back to a default name if the caller did not supply one */
	context->name = name ? name : "Default";
	lock->lock = context;

	return 0;
}
1545 
/**
 * hif_runtime_lock_deinit() - This API frees the runtime pm context
 * @hif_ctx: HIF context
 * @data: Runtime PM context
 *
 * Return: void
 */
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct hif_pm_runtime_lock *context = data;

	if (!context) {
		hif_err("Runtime PM wakelock context is NULL");
		return;
	}

	hif_info("Deinitializing Runtime PM wakelock %s", context->name);

	/*
	 * Ensure to delete the context list entry and reduce the usage count
	 * before freeing the context if context is active.
	 */
	if (scn) {
		rpm_ctx = hif_bus_get_rpm_ctx(scn);
		spin_lock_bh(&rpm_ctx->runtime_lock);
		__hif_pm_runtime_allow_suspend(scn, context);
		spin_unlock_bh(&rpm_ctx->runtime_lock);
	}

	qdf_mem_free(context);
}
1579 
1580 /**
1581  * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
1582  * @hif_ctx: HIF context
1583  *
1584  * Return: true for runtime suspended, otherwise false
1585  */
1586 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
1587 {
1588 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1589 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1590 
1591 	return qdf_atomic_read(&rpm_ctx->pm_state) ==
1592 					HIF_PM_RUNTIME_STATE_SUSPENDED;
1593 }
1594 
1595 /**
1596  * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
1597  * @hif_ctx: HIF context
1598  *
1599  * monitor_wake_intr variable can be used to indicate if driver expects wake
1600  * MSI for runtime PM
1601  *
1602  * Return: monitor_wake_intr variable
1603  */
1604 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
1605 {
1606 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1607 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1608 
1609 	return qdf_atomic_read(&rpm_ctx->monitor_wake_intr);
1610 }
1611 
1612 /**
1613  * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
1614  * @hif_ctx: HIF context
1615  * @val: value to set
1616  *
1617  * monitor_wake_intr variable can be used to indicate if driver expects wake
1618  * MSI for runtime PM
1619  *
1620  * Return: void
1621  */
1622 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
1623 					  int val)
1624 {
1625 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1626 	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
1627 
1628 	qdf_atomic_set(&rpm_ctx->monitor_wake_intr, val);
1629 }
1630 
/**
 * hif_pm_runtime_check_and_request_resume() - check if the device is runtime
 *					       suspended and request resume.
 * @hif_ctx: HIF context
 *
 * This function is to check if the device is runtime suspended and
 * request for runtime resume.
 *
 * Return: void
 */
void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
{
	/* A set monitor_wake_intr flag indicates a wake is expected */
	if (!hif_pm_runtime_get_monitor_wake_intr(hif_ctx))
		return;

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_pm_runtime_request_resume(hif_ctx);
}
1648 
1649 /**
1650  * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark my data path
1651  * @hif_ctx: HIF context
1652  *
1653  * Return: void
1654  */
1655 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1656 {
1657 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1658 	struct hif_runtime_pm_ctx *rpm_ctx;
1659 
1660 	if (!scn)
1661 		return;
1662 
1663 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1664 	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 1);
1665 	rpm_ctx->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();
1666 
1667 	hif_pm_runtime_mark_last_busy(hif_ctx);
1668 }
1669 
1670 /**
1671  * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy by dp rx
1672  * @hif_ctx: HIF context
1673  *
1674  * Return: dp rx busy set value
1675  */
1676 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
1677 {
1678 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1679 	struct hif_runtime_pm_ctx *rpm_ctx;
1680 
1681 	if (!scn)
1682 		return 0;
1683 
1684 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1685 	return qdf_atomic_read(&rpm_ctx->pm_dp_rx_busy);
1686 }
1687 
1688 /**
1689  * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
1690  * @hif_ctx: HIF context
1691  *
1692  * Return: timestamp of last mark busy by dp rx
1693  */
1694 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
1695 {
1696 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1697 	struct hif_runtime_pm_ctx *rpm_ctx;
1698 
1699 	if (!scn)
1700 		return 0;
1701 
1702 	rpm_ctx = hif_bus_get_rpm_ctx(scn);
1703 	return rpm_ctx->dp_last_busy_timestamp;
1704 }
1705 #endif /* FEATURE_RUNTIME_PM */
1706