/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "hif_io32.h"
#include "hif_runtime_pm.h"
#include "hif.h"
#include "target_type.h"
#include "hif_main.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_bmi.h"
#include "regtable.h"
#include "hif_hw_version.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "qdf_status.h"
#include "qdf_atomic.h"
#include "pld_common.h"
#include "mp_dev.h"
#include "hif_debug.h"

#include "ce_tasklet.h"
#include "targaddrs.h"
#include "hif_exec.h"

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_pci_pm_runtime_enabled() - To check if Runtime PM is enabled
 * @scn: hif context
 *
 * This function will check if Runtime PM is enabled or not.
 *
 * Return: true if Runtime PM is enabled, otherwise false
 */
static bool hif_pci_pm_runtime_enabled(struct hif_softc *scn)
{
	if (scn->hif_config.enable_runtime_pm)
		return true;

	return pm_runtime_enabled(hif_bus_get_dev(scn));
}

/**
 * hif_pm_runtime_state_to_string() - Mapping state into string
 * @state: runtime pm state
 *
 * This function will map the runtime pm state into corresponding
 * string for debug purpose.
 *
 * Return: pointer to the string
 */
static const char *hif_pm_runtime_state_to_string(uint32_t state)
{
	switch (state) {
	case HIF_PM_RUNTIME_STATE_NONE:
		return "INIT_STATE";
	case HIF_PM_RUNTIME_STATE_ON:
		return "ON";
	case HIF_PM_RUNTIME_STATE_RESUMING:
		return "RESUMING";
	case HIF_PM_RUNTIME_STATE_SUSPENDING:
		return "SUSPENDING";
	case HIF_PM_RUNTIME_STATE_SUSPENDED:
		return "SUSPENDED";
	default:
		return "INVALID STATE";
	}
}

#define HIF_PCI_RUNTIME_PM_STATS(_s, _rpm_ctx, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, (_rpm_ctx)->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @scn: hif_softc context
 * @msg: log message
 *
 * log runtime pm stats when something seems off.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_softc *scn,
				    const char *msg)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_pm_runtime_lock *ctx;
	int i;

	hif_nofl_debug("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
		       msg, atomic_read(&dev->power.usage_count),
		       hif_pm_runtime_state_to_string(
				atomic_read(&rpm_ctx->pm_state)),
		       rpm_ctx->prevent_suspend_cnt);

	hif_nofl_debug("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
		       dev->power.runtime_status,
		       dev->power.runtime_error,
		       dev->power.disable_depth,
		       dev->power.autosuspend_delay);

	hif_nofl_debug("runtime_get: %u, runtime_put: %u, request_resume: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get),
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put),
		       rpm_ctx->pm_stats.request_resume);

	hif_nofl_debug("get  put  get-timestamp put-timestamp :DBGID_NAME");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		hif_nofl_debug("%-10d %-10d  0x%-10llx  0x%-10llx :%-30s",
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_get_dbgid[i]),
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_put_dbgid[i]),
			       rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i],
			       rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i],
			       rtpm_string_from_dbgid(i));
	}

	hif_nofl_debug("allow_suspend: %u, prevent_suspend: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend),
		       qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));

	hif_nofl_debug("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
		       rpm_ctx->pm_stats.prevent_suspend_timeout,
		       rpm_ctx->pm_stats.allow_suspend_timeout);

	hif_nofl_debug("suspended: %u, resumed: %u",
		       rpm_ctx->pm_stats.suspended,
		       rpm_ctx->pm_stats.resumed);

	hif_nofl_debug("suspend_err: %u, runtime_get_err: %u",
		       rpm_ctx->pm_stats.suspend_err,
		       rpm_ctx->pm_stats.runtime_get_err);

	hif_nofl_debug("Active Wakeup Sources preventing Runtime Suspend: ");

	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		hif_nofl_debug("source %s; timeout %d ms",
			       ctx->name, ctx->timeout);
	}

	if (qdf_is_fw_down()) {
		hif_err("fw is down");
		return;
	}

	QDF_DEBUG_PANIC("hif_pci_runtime_pm_warn");
}

/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_softc *scn = s->private;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
		"SUSPENDING", "SUSPENDED"};
	unsigned int msecs_age;
	qdf_time_t usecs_age;
	int pm_state = atomic_read(&rpm_ctx->pm_state);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *ctx;
	int i;

	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
	seq_printf(s, "%30s: %ps\n", "Last Resume Caller",
		   rpm_ctx->pm_stats.last_resume_caller);
	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
		   rpm_ctx->pm_stats.last_busy_marker);

	usecs_age = qdf_get_log_timestamp_usecs() -
		rpm_ctx->pm_stats.last_busy_timestamp;
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
		   rpm_ctx->pm_stats.last_busy_timestamp / 1000000,
		   rpm_ctx->pm_stats.last_busy_timestamp % 1000000);
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
		   usecs_age / 1000000, usecs_age % 1000000);

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(jiffies -
					     rpm_ctx->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
			   msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
		   atomic_read(&dev->power.usage_count));

	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
		   rpm_ctx->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, resumed);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, request_resume);
	seq_printf(s, "%30s: %u\n", "prevent_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));
	seq_printf(s, "%30s: %u\n", "allow_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend));

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, runtime_get_err);

	seq_printf(s, "%30s: %u\n", "runtime_get",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get));
	seq_printf(s, "%30s: %u\n", "runtime_put",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put));
	seq_puts(s, "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				 &rpm_ctx->pm_stats.runtime_get_dbgid[i]));
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				 &rpm_ctx->pm_stats.runtime_put_dbgid[i]));
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i]);
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i]);
		seq_printf(s, ":%-30s\n", rtpm_string_from_dbgid(i));
	}

	timer_expires = rpm_ctx->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
			   msecs_age / 1000, msecs_age % 1000);
	}

	spin_lock_bh(&rpm_ctx->runtime_lock);
	if (list_empty(&rpm_ctx->prevent_suspend_list)) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}

#undef HIF_PCI_RUNTIME_PM_STATS

/**
 * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
 * @inode: inode of the debugfs entry
 * @file: file handle being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			inode->i_private);
}

static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_pci_runtime_pm_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};

/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @scn: hif context
 *
 * creates a debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	rpm_ctx->pm_dentry = debugfs_create_file("cnss_runtime_pm",
						 0400, NULL, scn,
						 &hif_pci_runtime_pm_fops);
}

/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @scn: hif context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	debugfs_remove(rpm_ctx->pm_dentry);
}

/**
 * hif_runtime_init() - Initialize Runtime PM
 * @dev: device structure
 * @delay: delay to be configured for auto suspend
 *
 * This function will init all the Runtime PM config.
 *
 * Return: void
 */
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}

/**
 * hif_runtime_exit() - Deinit/Exit Runtime PM
 * @dev: device structure
 *
 * This function will deinit all the Runtime PM config.
 *
 * Return: void
 */
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	/* Symmetric call to make sure default usage count == 2 */
	pm_runtime_forbid(dev);
}
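
/*
 * Illustrative sketch (not driver code): the usage-count bookkeeping
 * of the two helpers above, assuming the count starts at 2 when
 * hif_runtime_init() runs (the "default usage count == 2" referred to
 * in hif_runtime_exit(); the exact baseline depends on the PCI core):
 *
 *	hif_runtime_init(dev, delay);
 *		pm_runtime_allow()        usage count 2 -> 1
 *		pm_runtime_put_noidle()   usage count 1 -> 0
 *	...
 *	hif_runtime_exit(dev);
 *		pm_runtime_get_noresume() usage count 0 -> 1
 *		pm_runtime_forbid()       usage count 1 -> 2
 *
 * With the count at 0 and autosuspend armed, the device may runtime
 * suspend once it has been idle for @delay ms.
 */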

static void hif_pm_runtime_lock_timeout_fn(void *data);

/**
 * hif_pm_runtime_start(): start the runtime pm
 * @scn: hif context
 *
 * After this call, runtime pm will be active.
 */
void hif_pm_runtime_start(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!scn->hif_config.enable_runtime_pm) {
		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
		return;
	}

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE) {
		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING/Monitor mode\n",
			 __func__);
		return;
	}

	qdf_timer_init(NULL, &rpm_ctx->runtime_timer,
		       hif_pm_runtime_lock_timeout_fn,
		       scn, QDF_TIMER_TYPE_WAKE_APPS);

	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
		 scn->hif_config.runtime_pm_delay);

	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_ON);
	hif_runtime_init(dev, scn->hif_config.runtime_pm_delay);
	hif_runtime_pm_debugfs_create(scn);
}

/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @scn: hif context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_pm_runtime_start().
 */
void hif_pm_runtime_stop(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!scn->hif_config.enable_runtime_pm)
		return;

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE)
		return;

	hif_runtime_exit(dev);

	hif_pm_runtime_sync_resume(GET_HIF_OPAQUE_HDL(scn));

	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(scn);
	qdf_timer_free(&rpm_ctx->runtime_timer);
}

/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @scn: hif ctx
 *
 * Early initialization
 */
void hif_pm_runtime_open(struct hif_softc *scn)
{
	int i;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	spin_lock_init(&rpm_ctx->runtime_lock);
	qdf_atomic_init(&rpm_ctx->pm_state);
	hif_runtime_lock_init(&rpm_ctx->prevent_linkdown_lock,
			      "prevent_linkdown_lock");
	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);
	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get);
	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put);
	qdf_atomic_init(&rpm_ctx->pm_stats.allow_suspend);
	qdf_atomic_init(&rpm_ctx->pm_stats.prevent_suspend);
	for (i = 0; i < RTPM_ID_MAX; i++) {
		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
	}
	INIT_LIST_HEAD(&rpm_ctx->prevent_suspend_list);
}

/**
 * hif_check_for_get_put_out_of_sync() - Check if Get/Put is out of sync
 * @scn: hif context
 *
 * This function will check if get and put are out of sync or not.
 *
 * Return: void
 */
static void hif_check_for_get_put_out_of_sync(struct hif_softc *scn)
{
	int32_t i;
	int32_t get_count, put_count;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (qdf_is_fw_down())
		return;

	for (i = 0; i < RTPM_ID_MAX; i++) {
		get_count = qdf_atomic_read(
				&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
		put_count = qdf_atomic_read(
				&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
		if (get_count != put_count) {
			QDF_DEBUG_PANIC("%s get-put out of sync. get %d put %d",
					rtpm_string_from_dbgid(i),
					get_count, put_count);
		}
	}
}

/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize runtime PM gets/puts from driver
 * @scn: hif context
 *
 * Ensure all gets/puts are in sync before exiting runtime PM feature.
 * Also make sure all runtime PM locks are deinitialized properly.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_softc *scn)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	hif_check_for_get_put_out_of_sync(scn);

	spin_lock_bh(&rpm_ctx->runtime_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &rpm_ctx->prevent_suspend_list, list) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(scn), ctx);
		spin_lock_bh(&rpm_ctx->runtime_lock);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);
}

static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
					  struct hif_pm_runtime_lock *lock);

/**
 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
 * @scn: hif context
 *
 * API is used to empty the runtime pm prevent suspend list.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_softc *scn)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	spin_lock_bh(&rpm_ctx->runtime_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &rpm_ctx->prevent_suspend_list, list) {
		__hif_pm_runtime_allow_suspend(scn, ctx);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);
}

/**
 * hif_pm_runtime_close(): close runtime pm
 * @scn: hif ctx
 *
 * ensure runtime_pm is stopped before closing the driver
 */
void hif_pm_runtime_close(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);

	/*
	 * The CDS hif context is already NULL at this point, so call
	 * hif_runtime_lock_deinit() directly instead of
	 * qdf_runtime_lock_deinit(&rpm_ctx->prevent_linkdown_lock).
	 */
	hif_runtime_lock_deinit(hif_ctx, rpm_ctx->prevent_linkdown_lock.lock);

	hif_is_recovery_in_progress(scn) ?
		hif_pm_runtime_sanitize_on_ssr_exit(scn) :
		hif_pm_runtime_sanitize_on_exit(scn);
}
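
/*
 * Illustrative call order (not driver code): how the lifecycle entry
 * points above are meant to nest. The exact call sites live in the
 * bus layer and are an assumption here:
 *
 *	hif_pm_runtime_open(scn);	// early: locks, state, stats
 *	hif_pm_runtime_start(scn);	// target up: arm autosuspend
 *	...				// normal operation
 *	hif_pm_runtime_stop(scn);	// target going down: sync resume
 *	hif_pm_runtime_close(scn);	// late: sanitize remaining locks
 */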

/**
 * hif_pm_runtime_sync_resume() - Invoke synchronous runtime resume.
 * @hif_ctx: hif context
 *
 * This function will invoke synchronous runtime resume.
 *
 * Return: status
 */
int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct device *dev;
	int pm_state;

	if (!scn)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		HIF_INFO("Runtime PM resume is requested by %ps",
			 (void *)_RET_IP_);

	rpm_ctx->pm_stats.request_resume++;
	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;

	return pm_runtime_resume(dev);
}
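
/*
 * Example (illustrative only): hif_pm_runtime_stop() above uses this
 * to force the device awake before tearing runtime PM down. A caller
 * that merely needs the device resumed, and can sleep, would do:
 *
 *	int ret = hif_pm_runtime_sync_resume(hif_ctx);
 *
 *	if (ret < 0)
 *		return ret;
 *	// device is active here, but may suspend again once the
 *	// autosuspend delay expires; take a get to keep it awake
 */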

/**
 * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend
 * @scn: hif context
 * @flag: prevent linkdown if true otherwise allow
 *
 * This keeps the device from runtime suspending (and thus taking the
 * link down); it should only be called as part of bus prevent linkdown.
 */
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (flag)
		qdf_runtime_pm_prevent_suspend(&rpm_ctx->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&rpm_ctx->prevent_linkdown_lock);
}
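
/*
 * Example (illustrative only): the bus layer is expected to bracket
 * sections where the PCIe link must stay up, e.g.:
 *
 *	hif_runtime_prevent_linkdown(scn, true);
 *	... transactions that cannot tolerate a link down ...
 *	hif_runtime_prevent_linkdown(scn, false);
 *
 * Internally this is just a prevent/allow pair on the dedicated
 * prevent_linkdown_lock initialized in hif_pm_runtime_open().
 */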

/**
 * __hif_runtime_pm_set_state(): utility function
 * @scn: hif context
 * @state: state to set
 *
 * sets the runtime pm state atomically.
 */
static void __hif_runtime_pm_set_state(struct hif_softc *scn,
				       enum hif_pm_runtime_state state)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx) {
		HIF_ERROR("%s: HIF_CTX not initialized",
			  __func__);
		return;
	}

	qdf_atomic_set(&rpm_ctx->pm_state, state);
}

/**
 * hif_runtime_pm_set_state_on(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that the runtime pm state is on
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}

/**
 * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm resume has started
 */
static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
}

/**
 * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm suspend has started
 */
static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
}

/**
 * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime suspend attempt has been completed successfully
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}

/**
 * hif_log_runtime_suspend_success() - log a successful runtime suspend
 * @hif_ctx: hif context
 */
static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspended++;
	rpm_ctx->pm_stats.suspend_jiffies = jiffies;
}

/**
 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
 * @hif_ctx: hif context
 *
 * log a failed runtime suspend
 */
static void hif_log_runtime_suspend_failure(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspend_err++;
}

/**
 * hif_log_runtime_resume_success() - log a successful runtime resume
 * @hif_ctx: hif context
 *
 * log a successful runtime resume
 */
static void hif_log_runtime_resume_success(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.resumed++;
}

/**
 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
 * @hif_ctx: hif context
 *
 * Record the failure.
 * Mark last busy to delay a retry.
 * Adjust the runtime_pm state.
 */
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_suspend_failure(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}

/**
 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
 * @hif_ctx: hif context
 *
 * Makes sure that the pci link will be taken down by the suspend operation.
 * If the hif layer is configured to leave the bus on, runtime suspend will
 * not save any power.
 *
 * Set the runtime suspend state to in progress.
 *
 * Return: -EINVAL if the bus won't go down, otherwise 0
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_can_suspend_link(hif_ctx)) {
		HIF_ERROR("Runtime PM not supported for link up suspend");
		return -EINVAL;
	}

	hif_runtime_pm_set_state_suspending(scn);
	return 0;
}

/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: hif context
 *
 * Record the success.
 * Adjust the runtime_pm state.
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(scn);
	hif_log_runtime_suspend_success(scn);
}

/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: hif context
 *
 * update the runtime pm state.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_runtime_pm_set_state_resuming(scn);
}

/**
 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
 * @hif_ctx: hif context
 *
 * Record the success.
 * Adjust the runtime_pm state.
 */
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_resume_success(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}

/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 * @hif_ctx: hif context
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
		return errno;
	}

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		goto bus_resume;
	}

	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 0);

	return 0;

bus_resume:
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}
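
/*
 * Illustrative sketch (not driver code) of the runtime-suspend flow
 * this file expects; the outer calls are made from the bus/platform
 * runtime PM callbacks, which is an assumption here:
 *
 *	if (hif_pre_runtime_suspend(hif_ctx))	// refuse link-up suspend
 *		goto fail;			// state: SUSPENDING
 *	if (hif_runtime_suspend(hif_ctx))	// bus + noirq suspend
 *		goto fail;
 *	hif_process_runtime_suspend_success(hif_ctx);	// state: SUSPENDED
 *	return 0;
 * fail:
 *	hif_process_runtime_suspend_failure(hif_ctx);	// back to ON
 *	return -EAGAIN;
 */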

/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 * @hif_ctx: hif context
 *
 * ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
 *
 * fastpath only applicable to legacy copy engine
 */
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/* war_ce_src_ring_write_idx_set */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}

/**
 * hif_runtime_resume() - do the bus resume part of a runtime resume
 * @hif_ctx: hif context
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
	QDF_BUG(!hif_bus_resume(hif_ctx));
	return 0;
}

/**
 * hif_pm_stats_runtime_get_record() - record runtime get statistics
 * @scn: hif context
 * @rtpm_dbgid: debug id to trace who use it
 *
 * Return: void
 */
static void hif_pm_stats_runtime_get_record(struct hif_softc *scn,
					    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (rtpm_dbgid >= RTPM_ID_MAX) {
		QDF_BUG(0);
		return;
	}
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get);
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get_dbgid[rtpm_dbgid]);
	rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[rtpm_dbgid] =
						qdf_get_log_timestamp();
}

/**
 * hif_pm_stats_runtime_put_record() - record runtime put statistics
 * @scn: hif context
 * @rtpm_dbgid: dbg_id to trace who use it
 *
 * Return: void
 */
static void hif_pm_stats_runtime_put_record(struct hif_softc *scn,
					    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (rtpm_dbgid >= RTPM_ID_MAX) {
		QDF_BUG(0);
		return;
	}

	if (atomic_read(&dev->power.usage_count) <= 0) {
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put);
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put_dbgid[rtpm_dbgid]);
	rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[rtpm_dbgid] =
						qdf_get_log_timestamp();
}

/**
 * hif_pm_runtime_get_sync() - do a get operation with sync resume
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * A get operation will prevent a runtime suspend until a corresponding
 * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
 * resume instead of requesting a resume if it is runtime PM suspended,
 * so it can only be called in non-atomic context.
 *
 * Return: 0 if it is runtime PM resumed, otherwise an error code.
 */
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
			    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct device *dev;
	int pm_state;
	int ret;

	if (!scn)
		return -EINVAL;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	if (!rpm_ctx)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);

	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
	ret = pm_runtime_get_sync(dev);

	/* Get can return 1 if the device is already active, just return
	 * success in that case.
	 */
	if (ret > 0)
		ret = 0;

	if (ret) {
		rpm_ctx->pm_stats.runtime_get_err++;
		hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
			qdf_atomic_read(&rpm_ctx->pm_state), ret);
		hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
	}

	return ret;
}
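
/*
 * Example caller pattern (illustrative only; RTPM_ID_WMI stands in
 * for any valid wlan_rtpm_dbgid): every successful get must be
 * balanced by a put with the same dbgid, or the get/put sanity check
 * in hif_pm_runtime_sanitize_on_exit() will trip:
 *
 *	if (hif_pm_runtime_get_sync(hif_ctx, RTPM_ID_WMI))
 *		return -EBUSY;
 *	... sleepable work that needs the target awake ...
 *	hif_pm_runtime_put(hif_ctx, RTPM_ID_WMI);
 */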

/**
 * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * This API will do a runtime put operation followed by a sync suspend if usage
 * count is 0, so it can only be called in non-atomic context.
 *
 * Return: 0 for success otherwise an error code
 */
int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
				    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev;
	int usage_count;
	char *err = NULL;

	if (!scn)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	usage_count = atomic_read(&dev->power.usage_count);
	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
		err = "Unexpected PUT when runtime PM is disabled";
	else if (usage_count == 0)
		err = "PUT without a GET operation";

	if (err) {
		hif_pci_runtime_pm_warn(scn, err);
		return -EINVAL;
	}

	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
	return pm_runtime_put_sync_suspend(dev);
}

/**
 * hif_pm_runtime_request_resume() - Invoke async runtime resume
 * @hif_ctx: hif context
 *
 * This function will invoke asynchronous runtime resume.
 *
 * Return: status
 */
int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct device *dev;
	int pm_state;

	if (!scn)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		HIF_INFO("Runtime PM resume is requested by %ps",
			 (void *)_RET_IP_);

	rpm_ctx->pm_stats.request_resume++;
	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;

	return hif_pm_request_resume(dev);
}

/**
 * hif_pm_runtime_mark_last_busy() - Mark last busy time
 * @hif_ctx: hif context
 *
 * This function will mark the last busy time, this will be used
 * to check if auto suspend delay expired or not.
 *
 * Return: void
 */
void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct device *dev;

	if (!scn)
		return;

	dev = hif_bus_get_dev(scn);
	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	rpm_ctx->pm_stats.last_busy_marker = (void *)_RET_IP_;
	rpm_ctx->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();

	pm_runtime_mark_last_busy(dev);
}

/**
 * hif_pm_runtime_get_noresume() - Inc usage count without resume
 * @hif_ctx: hif context
 * @rtpm_dbgid: Id of the module calling get
 *
 * This function will increment device usage count to avoid runtime
 * suspend, but it would not do resume.
 *
 * Return: void
 */
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
				 wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev;

	if (!scn)
		return;

	if (!hif_pci_pm_runtime_enabled(scn))
		return;

	dev = hif_bus_get_dev(scn);
	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
	pm_runtime_get_noresume(dev);
}
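
/*
 * Example (illustrative only): get_noresume is useful in atomic
 * context, where a synchronous resume is not allowed; pair it with an
 * async resume request and a later put (RTPM_ID_WMI is a stand-in for
 * any valid wlan_rtpm_dbgid):
 *
 *	hif_pm_runtime_get_noresume(hif_ctx, RTPM_ID_WMI);
 *	hif_pm_runtime_request_resume(hif_ctx);
 *	...
 *	hif_pm_runtime_put(hif_ctx, RTPM_ID_WMI);
 */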

/**
 * hif_pm_runtime_get() - do a get operation on the device
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * A get operation will prevent a runtime suspend until a
 * corresponding put is done.  This api should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND NOT PERFORM A GET!!!
 *
 * Return: success if the bus is up and a get has been issued,
 *   otherwise an error code.
 */
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
		       wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct device *dev;
	int ret;
	int pm_state;

	if (!scn) {
		hif_err("Could not do runtime get, scn is null");
		return -EFAULT;
	}

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);

	if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
	    pm_state == HIF_PM_RUNTIME_STATE_NONE) {
		hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
		ret = __hif_pm_runtime_get(dev);

		/* Get can return 1 if the device is already active, just return
		 * success in that case
		 */
		if (ret > 0)
			ret = 0;

		if (ret)
			hif_pm_runtime_put(hif_ctx, rtpm_dbgid);

		if (ret && ret != -EINPROGRESS) {
			rpm_ctx->pm_stats.runtime_get_err++;
			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
				qdf_atomic_read(&rpm_ctx->pm_state), ret);
		}

		return ret;
	}

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);
		ret = -EAGAIN;
	} else {
		ret = -EBUSY;
	}

	rpm_ctx->pm_stats.request_resume++;
	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
	hif_pm_request_resume(dev);

	return ret;
}
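
/*
 * Example TX-path usage (illustrative only): since this API only
 * *requests* a resume when the bus is suspended, callers must be
 * ready to queue the work and retry later (RTPM_ID_WMI is a stand-in
 * dbgid, and send_frame() a hypothetical TX routine):
 *
 *	ret = hif_pm_runtime_get(hif_ctx, RTPM_ID_WMI);
 *	if (ret == -EAGAIN || ret == -EBUSY) {
 *		// resume requested; queue the frame and send it
 *		// later, e.g. after the runtime resume completes
 *		return ret;
 *	}
 *	if (ret)
 *		return ret;	// real error, no get was taken
 *	send_frame();
 *	hif_pm_runtime_put(hif_ctx, RTPM_ID_WMI);
 */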

/**
 * hif_pm_runtime_put() - do a put operation on the device
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * A put operation will allow a runtime suspend after a corresponding
 * get was done.  This api should be used when sending data.
 *
 * This api will return a failure if runtime pm is stopped.
 * This api will return failure if it would decrement the usage count below 0.
 *
 * Return: 0 if the put is performed, otherwise an error code
 */
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
		       wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev;
	int usage_count;
	char *error = NULL;

	if (!scn) {
		HIF_ERROR("%s: Could not do runtime put, scn is null",
			  __func__);
		return -EFAULT;
	}

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	usage_count = atomic_read(&dev->power.usage_count);
	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
		error = "Unexpected PUT when runtime PM is disabled";
	else if (usage_count == 0)
		error = "PUT without a GET operation";

	if (error) {
		hif_pci_runtime_pm_warn(scn, error);
		return -EINVAL;
	}

	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);

	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_pm_runtime_put_auto(dev);

	return 0;
}

/**
 * hif_pm_runtime_put_noidle() - do a put operation with no idle
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * This API will do a runtime put no idle operation
 *
 * Return: 0 for success otherwise an error code
 */
int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
			      wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev;
	int usage_count;
	char *err = NULL;

	if (!scn)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	usage_count = atomic_read(&dev->power.usage_count);
	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
		err = "Unexpected PUT when runtime PM is disabled";
	else if (usage_count == 0)
		err = "PUT without a GET operation";

	if (err) {
		hif_pci_runtime_pm_warn(scn, err);
		return -EINVAL;
	}

	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
	pm_runtime_put_noidle(dev);

	return 0;
}

/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 *                                      reason
 * @scn: hif context
 * @lock: runtime_pm lock being acquired
 *
 * Return: 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_softc *scn,
					    struct hif_pm_runtime_lock *lock)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;

	/*
	 * We shouldn't set lock->timeout to zero here when the lock is
	 * active, as the timeout API may be called back to back for the
	 * same lock, e.g. echo "1=T:10:T:20" > /d/cnss_runtime_pm.
	 * hif_pm_runtime_prevent_suspend() sets lock->timeout to zero
	 * instead, ensuring the timeout version is no longer active and
	 * the list entry of this lock is deleted during allow suspend.
	 */
	if (lock->active)
		return 0;

	ret = __hif_pm_runtime_get(dev);

	/*
	 * The ret can be -EINPROGRESS if the runtime status is RPM_RESUMING
	 * or RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do a runtime put here, as allow suspend is called
	 * later with the same lock and decrements the usage count there,
	 * which keeps the get/put balanced.
	 */

	if (ret < 0 && ret != -EINPROGRESS) {
		rpm_ctx->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(scn,
					"Prevent Suspend Runtime PM Error");
	}

	rpm_ctx->prevent_suspend_cnt++;

	lock->active = true;

	list_add_tail(&lock->list, &rpm_ctx->prevent_suspend_list);

	qdf_atomic_inc(&rpm_ctx->pm_stats.prevent_suspend);

	hif_debug("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  ret);

	return ret;
}

/**
 * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @scn: hif context
 * @lock: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count.
 *
 * Return: status
 */
static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
					  struct hif_pm_runtime_lock *lock)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;
	int usage_count;

	if (rpm_ctx->prevent_suspend_cnt == 0)
		return ret;

	if (!lock->active)
		return ret;

	usage_count = atomic_read(&dev->power.usage_count);

	/*
	 * For runtime PM enabled case, the usage count should never be 0
	 * at this point. For runtime PM disabled case, it should never be
	 * 2 at this point. Catch unexpected PUT without GET here.
	 */
	if ((usage_count == 2 && !scn->hif_config.enable_runtime_pm) ||
	    usage_count == 0) {
		hif_pci_runtime_pm_warn(scn, "PUT without a GET Operation");
		return -EINVAL;
	}

	list_del(&lock->list);

	rpm_ctx->prevent_suspend_cnt--;

	lock->active = false;
	lock->timeout = 0;

	hif_pm_runtime_mark_last_busy(GET_HIF_OPAQUE_HDL(scn));
	ret = hif_pm_runtime_put_auto(dev);

	hif_debug("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  ret);

	qdf_atomic_inc(&rpm_ctx->pm_stats.allow_suspend);
	return ret;
}

/**
 * hif_pm_runtime_lock_timeout_fn() - runtime lock timeout callback
 * @data: callback data that is the hif context
 *
 * if runtime locks are acquired with a timeout, this function releases
 * the locks when the last runtime lock expires.
 */
static void hif_pm_runtime_lock_timeout_fn(void *data)
{
	struct hif_softc *scn = data;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;

	spin_lock_bh(&rpm_ctx->runtime_lock);

	timer_expires = rpm_ctx->runtime_timer_expires;

	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                         CPU1 (timeout function)
	 * ----                         ----------------------
	 * spin_lock_irq
	 *                              timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                              spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		rpm_ctx->runtime_timer_expires = 0;
		list_for_each_entry_safe(context, temp,
					 &rpm_ctx->prevent_suspend_list, list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(scn, context);
				rpm_ctx->pm_stats.allow_suspend_timeout++;
			}
		}
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);
}

/**
 * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
 * @ol_sc: HIF context
 * @data: runtime pm lock
 *
 * This function will prevent runtime suspend, by incrementing
 * device's usage count.
 *
 * Return: status
 */
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_pm_runtime_lock *context = data;

	if (!scn->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&rpm_ctx->runtime_lock);
	context->timeout = 0;
	__hif_pm_runtime_prevent_suspend(scn, context);
	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}

/**
 * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @ol_sc: HIF context
 * @data: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count.
 *
 * Return: status
 */
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_pm_runtime_lock *context = data;

	if (!scn->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&rpm_ctx->runtime_lock);

	__hif_pm_runtime_allow_suspend(scn, context);

	/* The list can also be empty when there was a single context
	 * in the list and the allow suspend arrived before the timer
	 * expired, so the context was already deleted from the list
	 * above. When the list is empty, prevent_suspend_cnt is zero.
	 */
	if (rpm_ctx->prevent_suspend_cnt == 0 &&
	    rpm_ctx->runtime_timer_expires > 0) {
		qdf_timer_free(&rpm_ctx->runtime_timer);
		rpm_ctx->runtime_timer_expires = 0;
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}
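
/*
 * Example (illustrative only): a protocol layer holds off runtime
 * suspend across a multi-step exchange using a named lock; see
 * hif_runtime_lock_init()/deinit() below for the lock lifecycle:
 *
 *	hif_pm_runtime_prevent_suspend(hif_ctx, lock);
 *	... exchange that must not be interrupted by suspend ...
 *	hif_pm_runtime_allow_suspend(hif_ctx, lock);
 */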

/**
 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
 * @ol_sc: HIF context
 * @lock: which lock is being acquired
 * @delay: Timeout in milliseconds
 *
 * Prevent runtime suspend with a timeout after which runtime suspend would be
 * allowed. This API uses a single timer to allow the suspend and the timer is
 * modified if the timeout is changed before the timer fires.
 * If the timeout is less than autosuspend_delay then use mark_last_busy
 * instead of starting the timer.
 *
 * It is wise to try not to use this API and correct the design if possible.
 *
 * Return: 0 on success and negative error code on failure
 */
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
					   struct hif_pm_runtime_lock *lock,
					   unsigned int delay)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_pm_runtime_lock *context = lock;
	unsigned long expires;
	int ret = 0;

	if (hif_is_load_or_unload_in_progress(scn)) {
		HIF_ERROR("%s: Load/unload in progress, ignore!",
			  __func__);
		return -EINVAL;
	}

	if (hif_is_recovery_in_progress(scn)) {
		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
		return -EINVAL;
	}

	if (!scn->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	/*
	 * Don't use the internal timer if the timeout is less than the
	 * auto suspend delay.
	 */
	if (delay <= dev->power.autosuspend_delay) {
		hif_pm_request_resume(dev);
		hif_pm_runtime_mark_last_busy(ol_sc);
		return ret;
	}

	expires = jiffies + msecs_to_jiffies(delay);
	expires += !expires;

	spin_lock_bh(&rpm_ctx->runtime_lock);

	context->timeout = delay;
	ret = __hif_pm_runtime_prevent_suspend(scn, context);
	rpm_ctx->pm_stats.prevent_suspend_timeout++;

	/* Modify the timer only if the new timeout is after the already
	 * configured timeout
	 */
	if (time_after(expires, rpm_ctx->runtime_timer_expires)) {
		qdf_timer_mod(&rpm_ctx->runtime_timer, delay);
		rpm_ctx->runtime_timer_expires = expires;
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);

	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  delay, ret);

	return ret;
}
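
/*
 * Example (illustrative only): the timeout variant is a safety net
 * for one-shot events where the matching allow call might be missed,
 * e.g. waiting up to 1500 ms for a firmware response:
 *
 *	hif_pm_runtime_prevent_suspend_timeout(hif_ctx, lock, 1500);
 *	...
 *	// on the response path, release early:
 *	hif_pm_runtime_allow_suspend(hif_ctx, lock);
 *
 * If the response never arrives, hif_pm_runtime_lock_timeout_fn()
 * releases the lock when the timer fires.
 */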

/**
 * hif_runtime_lock_init() - API to initialize Runtime PM context
 * @lock: QDF runtime lock to attach the new context to
 * @name: Context name
 *
 * This API initializes the Runtime PM context of the caller and
 * attaches it to the given lock.
 *
 * Return: 0 on success, -ENOMEM on allocation failure
 */
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{
	struct hif_pm_runtime_lock *context;

	HIF_INFO("Initializing Runtime PM wakelock %s", name);

	context = qdf_mem_malloc(sizeof(*context));
	if (!context)
		return -ENOMEM;

	context->name = name ? name : "Default";
	lock->lock = context;

	return 0;
}

/**
 * hif_runtime_lock_deinit() - This API frees the runtime pm context
 * @hif_ctx: HIF context
 * @data: Runtime PM context
 *
 * Return: void
 */
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct hif_pm_runtime_lock *context = data;

	if (!context) {
		HIF_ERROR("Runtime PM wakelock context is NULL");
		return;
	}

	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);

	/*
	 * Ensure to delete the context list entry and reduce the usage count
	 * before freeing the context if context is active.
	 */
	if (scn) {
		rpm_ctx = hif_bus_get_rpm_ctx(scn);
		spin_lock_bh(&rpm_ctx->runtime_lock);
		__hif_pm_runtime_allow_suspend(scn, context);
		spin_unlock_bh(&rpm_ctx->runtime_lock);
	}

	qdf_mem_free(context);
}
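
/*
 * Example lock lifecycle (illustrative only): the qdf_runtime_lock_t
 * wrapper owns the struct hif_pm_runtime_lock allocated above:
 *
 *	qdf_runtime_lock_t lock;
 *
 *	if (hif_runtime_lock_init(&lock, "my_module"))
 *		return -ENOMEM;
 *	hif_pm_runtime_prevent_suspend(hif_ctx, lock.lock);
 *	...
 *	hif_pm_runtime_allow_suspend(hif_ctx, lock.lock);
 *	hif_runtime_lock_deinit(hif_ctx, lock.lock);
 */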

/**
 * hif_pm_runtime_is_suspended() - API to check if driver is runtime suspended
 * @hif_ctx: HIF context
 *
 * Return: true for runtime suspended, otherwise false
 */
bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	return qdf_atomic_read(&rpm_ctx->pm_state) ==
					HIF_PM_RUNTIME_STATE_SUSPENDED;
}

/**
 * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
 * @hif_ctx: HIF context
 *
 * monitor_wake_intr variable can be used to indicate if driver expects wake
 * MSI for runtime PM
 *
 * Return: monitor_wake_intr variable
 */
int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	return qdf_atomic_read(&rpm_ctx->monitor_wake_intr);
}

/**
 * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
 * @hif_ctx: HIF context
 * @val: value to set
 *
 * monitor_wake_intr variable can be used to indicate if driver expects wake
 * MSI for runtime PM
 *
 * Return: void
 */
void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
					  int val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	qdf_atomic_set(&rpm_ctx->monitor_wake_intr, val);
}

/**
 * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark by the data path
 * @hif_ctx: HIF context
 *
 * Return: void
 */
void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;

	if (!scn)
		return;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 1);
	rpm_ctx->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();

	hif_pm_runtime_mark_last_busy(hif_ctx);
}

/**
 * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy was by dp rx
 * @hif_ctx: HIF context
 *
 * Return: dp rx busy set value
 */
int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;

	if (!scn)
		return 0;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	return qdf_atomic_read(&rpm_ctx->pm_dp_rx_busy);
}

/**
 * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
 * @hif_ctx: HIF context
 *
 * Return: timestamp of last mark busy by dp rx
 */
qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;

	if (!scn)
		return 0;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	return rpm_ctx->dp_last_busy_timestamp;
}
#endif /* FEATURE_RUNTIME_PM */