xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_runtime_pm.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8 
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
20 #include <linux/if_arp.h>
21 #include "hif_io32.h"
22 #include "hif_runtime_pm.h"
23 #include "hif.h"
24 #include "target_type.h"
25 #include "hif_main.h"
26 #include "ce_main.h"
27 #include "ce_api.h"
28 #include "ce_internal.h"
29 #include "ce_reg.h"
30 #include "ce_bmi.h"
31 #include "regtable.h"
32 #include "hif_hw_version.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include "qdf_status.h"
36 #include "qdf_atomic.h"
37 #include "pld_common.h"
38 #include "mp_dev.h"
39 #include "hif_debug.h"
40 
41 #include "ce_tasklet.h"
42 #include "targaddrs.h"
43 #include "hif_exec.h"
44 
45 #define CNSS_RUNTIME_FILE "cnss_runtime_pm"
46 #define CNSS_RUNTIME_FILE_PERM QDF_FILE_USR_READ
47 
48 #ifdef FEATURE_RUNTIME_PM
49 
50 static struct hif_rtpm_ctx g_hif_rtpm_ctx;
51 static struct hif_rtpm_ctx *gp_hif_rtpm_ctx;
52 
53 /**
54  * hif_rtpm_id_to_string() - Convert dbgid to respective string
55  * @id: debug id
56  *
57  * Debug support function to convert  dbgid to string.
58  * Please note to add new string in the array at index equal to
59  * its enum value in wlan_rtpm_dbgid.
60  *
61  * Return: String of ID
62  */
63 static const char *hif_rtpm_id_to_string(enum hif_rtpm_client_id id)
64 {
65 	static const char * const strings[] = {
66 					"HIF_RTPM_ID_RESERVED",
67 					"HIF_RTPM_HAL_REO_CMD",
68 					"HIF_RTPM_WMI",
69 					"HIF_RTPM_HTT",
70 					"HIF_RTPM_DP",
71 					"HIF_RTPM_RING_STATS",
72 					"HIF_RTPM_CE",
73 					"HIF_RTPM_FORCE_WAKE",
74 					"HIF_RTPM_ID_PM_QOS_NOTIFY",
75 					"HIF_RTPM_ID_WIPHY_SUSPEND",
76 					"HIF_RTPM_ID_MAX"
77 	};
78 
79 	return strings[id];
80 }
81 
82 /**
83  * hif_rtpm_read_usage_count() - Read device usage count
84  *
85  * Return: current usage count
86  */
87 static inline int hif_rtpm_read_usage_count(void)
88 {
89 	return qdf_atomic_read(&gp_hif_rtpm_ctx->dev->power.usage_count);
90 }
91 
/* Print one unsigned field of (_rtpm_ctx)->stats, labeled with its name,
 * into seq_file _s. Undefined again after hif_rtpm_debugfs_show().
 */
#define HIF_RTPM_STATS(_s, _rtpm_ctx, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, (_rtpm_ctx)->stats._name)
94 
95 /**
96  * hif_rtpm_debugfs_show(): show debug stats for runtimepm
97  * @s: file to print to
98  * @data: unused
99  *
100  * debugging tool added to the debug fs for displaying runtimepm stats
101  *
102  * Return: 0
103  */
104 static int hif_rtpm_debugfs_show(struct seq_file *s, void *data)
105 {
106 	struct hif_rtpm_client *client = NULL;
107 	struct hif_pm_runtime_lock *ctx;
108 	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
109 			"RESUMING_LINKUP", "SUSPENDING", "SUSPENDED"};
110 	int pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
111 	int i;
112 
113 	seq_printf(s, "%30s: %llu\n", "Current timestamp",
114 		   qdf_get_log_timestamp());
115 
116 	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
117 
118 	seq_printf(s, "%30s: %llu\n", "Last Busy timestamp",
119 		   gp_hif_rtpm_ctx->stats.last_busy_ts);
120 
121 	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
122 		   gp_hif_rtpm_ctx->stats.last_busy_marker);
123 
124 	seq_puts(s, "Rx busy marker counts:\n");
125 	seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_DP),
126 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_cnt,
127 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_ts);
128 
129 	seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_CE),
130 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_cnt,
131 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_ts);
132 
133 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, last_busy_id);
134 
135 	if (pm_state == HIF_RTPM_STATE_SUSPENDED) {
136 		seq_printf(s, "%30s: %llx us\n", "Suspended Since",
137 			   gp_hif_rtpm_ctx->stats.suspend_ts);
138 	}
139 
140 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, resume_count);
141 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_count);
142 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_err_count);
143 
144 	seq_printf(s, "%30s: %d\n", "PM Usage count",
145 		   hif_rtpm_read_usage_count());
146 
147 	seq_puts(s, "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
148 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
149 		client = gp_hif_rtpm_ctx->clients[i];
150 		if (!client)
151 			continue;
152 		seq_printf(s, "%-10d ", qdf_atomic_read(&client->get_count));
153 		seq_printf(s, "%-10d ", qdf_atomic_read(&client->put_count));
154 		seq_printf(s, "0x%-10llx ", client->get_ts);
155 		seq_printf(s, "0x%-10llx ", client->put_ts);
156 		seq_printf(s, ":%-2d %-30s\n", i, hif_rtpm_id_to_string(i));
157 	}
158 	seq_puts(s, "\n");
159 
160 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
161 	if (list_empty(&gp_hif_rtpm_ctx->prevent_list)) {
162 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
163 		return 0;
164 	}
165 
166 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
167 	list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list) {
168 		seq_printf(s, "%s", ctx->name);
169 		seq_puts(s, " ");
170 	}
171 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
172 	seq_puts(s, "\n");
173 
174 	return 0;
175 }
176 
177 #undef HIF_RTPM_STATS
178 
179 /**
180  * hif_rtpm_debugfs_open() - open a debug fs file to access the runtime pm stats
181  * @inode:
182  * @file:
183  *
184  * Return: linux error code of single_open.
185  */
186 static int hif_rtpm_debugfs_open(struct inode *inode, struct file *file)
187 {
188 	return single_open(file, hif_rtpm_debugfs_show,
189 			inode->i_private);
190 }
191 
/* File operations for the runtime PM debugfs entry; read path is backed
 * by seq_file via hif_rtpm_debugfs_show().
 */
static const struct file_operations hif_rtpm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_rtpm_debugfs_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};
199 
200 /**
201  * hif_rtpm_debugfs_create() - creates runtimepm debugfs entry
202  *
203  * creates a debugfs entry to debug the runtime pm feature.
204  */
205 static void hif_rtpm_debugfs_create(void)
206 {
207 	gp_hif_rtpm_ctx->pm_dentry = qdf_debugfs_create_entry(CNSS_RUNTIME_FILE,
208 							CNSS_RUNTIME_FILE_PERM,
209 							NULL,
210 							NULL,
211 							&hif_rtpm_fops);
212 }
213 
214 /**
215  * hif_rtpm_debugfs_remove() - removes runtimepm debugfs entry
216  *
217  * removes the debugfs entry to debug the runtime pm feature.
218  */
219 static void hif_rtpm_debugfs_remove(void)
220 {
221 	qdf_debugfs_remove_file(gp_hif_rtpm_ctx->pm_dentry);
222 }
223 
224 /**
225  * hif_rtpm_init() - Initialize Runtime PM
226  * @dev: device structure
227  * @delay: delay to be configured for auto suspend
228  *
229  * This function will init all the Runtime PM config.
230  *
231  * Return: void
232  */
233 static void hif_rtpm_init(struct device *dev, int delay)
234 {
235 	pm_runtime_set_autosuspend_delay(dev, delay);
236 	pm_runtime_use_autosuspend(dev);
237 	pm_runtime_allow(dev);
238 	pm_runtime_mark_last_busy(dev);
239 	pm_runtime_put_noidle(dev);
240 	pm_suspend_ignore_children(dev, true);
241 }
242 
243 /**
244  * hif_rtpm_exit() - Deinit/Exit Runtime PM
245  * @dev: device structure
246  *
247  * This function will deinit all the Runtime PM config.
248  *
249  * Return: void
250  */
251 static void hif_rtpm_exit(struct device *dev)
252 {
253 	pm_runtime_get_noresume(dev);
254 	pm_runtime_set_active(dev);
255 	pm_runtime_forbid(dev);
256 }
257 
258 static void hif_rtpm_alloc_last_busy_hist(void)
259 {
260 	int i;
261 
262 	for (i = 0; i < CE_COUNT_MAX; i++) {
263 		if (i != CE_ID_1 && i != CE_ID_2 && i != CE_ID_7) {
264 			gp_hif_rtpm_ctx->busy_hist[i] = NULL;
265 			continue;
266 		}
267 
268 		gp_hif_rtpm_ctx->busy_hist[i] =
269 			qdf_mem_malloc(sizeof(struct hif_rtpm_last_busy_hist));
270 		if (!gp_hif_rtpm_ctx->busy_hist[i])
271 			return;
272 	}
273 }
274 
275 static void hif_rtpm_free_last_busy_hist(void)
276 {
277 	int i;
278 
279 	for (i = 0; i < CE_COUNT_MAX; i++) {
280 		if (i != CE_ID_1 && i != CE_ID_2 && i != CE_ID_7)
281 			continue;
282 
283 		qdf_mem_free(gp_hif_rtpm_ctx->busy_hist[i]);
284 	}
285 }
286 
/**
 * hif_rtpm_open() - attach and initialize the runtime PM context
 * @scn: hif context
 *
 * Points the global context at the static instance, creates the locks,
 * initializes state atomics to HIF_RTPM_STATE_NONE, registers the
 * always-present CE and FORCE_WAKE clients (no callbacks), and
 * allocates the CE last-busy history buffers.
 */
void hif_rtpm_open(struct hif_softc *scn)
{
	gp_hif_rtpm_ctx = &g_hif_rtpm_ctx;
	gp_hif_rtpm_ctx->dev = scn->qdf_dev->dev;
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_suspend_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->prevent_list_lock);
	qdf_atomic_init(&gp_hif_rtpm_ctx->pm_state);
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
	qdf_atomic_init(&gp_hif_rtpm_ctx->monitor_wake_intr);
	INIT_LIST_HEAD(&gp_hif_rtpm_ctx->prevent_list);
	gp_hif_rtpm_ctx->client_count = 0;
	gp_hif_rtpm_ctx->pending_job = 0;
	/* CE and FORCE_WAKE clients exist for the lifetime of the ctx */
	hif_rtpm_register(HIF_RTPM_ID_CE, NULL);
	hif_rtpm_register(HIF_RTPM_ID_FORCE_WAKE, NULL);
	hif_rtpm_alloc_last_busy_hist();
	hif_info_high("Runtime PM attached");
}
305 
306 static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock);
307 
308 /**
309  * hif_rtpm_sanitize_exit(): sanitize runtime PM gets/puts from driver
310  *
311  * Ensure all gets/puts are in sync before exiting runtime PM feature.
312  * Also make sure all runtime PM locks are deinitialized properly.
313  *
314  * Return: void
315  */
316 static void hif_rtpm_sanitize_exit(void)
317 {
318 	struct hif_pm_runtime_lock *ctx, *tmp;
319 	struct hif_rtpm_client *client;
320 	int i, active_count;
321 
322 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
323 	list_for_each_entry_safe(ctx, tmp,
324 				 &gp_hif_rtpm_ctx->prevent_list, list) {
325 		hif_runtime_lock_deinit(ctx);
326 	}
327 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
328 
329 	/* check if get and put out of sync for all clients */
330 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
331 		client = gp_hif_rtpm_ctx->clients[i];
332 		if (client) {
333 			if (qdf_atomic_read(&client->active_count)) {
334 				active_count =
335 					qdf_atomic_read(&client->active_count);
336 				hif_err("Client active: %u- %s", i,
337 					hif_rtpm_id_to_string(i));
338 				QDF_DEBUG_PANIC("Client active on exit!");
339 				while (active_count--)
340 					__hif_rtpm_put_noidle(
341 							gp_hif_rtpm_ctx->dev);
342 			}
343 			QDF_DEBUG_PANIC("Client not deinitialized");
344 			qdf_mem_free(client);
345 			gp_hif_rtpm_ctx->clients[i] = NULL;
346 		}
347 	}
348 }
349 
350 /**
351  * hif_rtpm_sanitize_ssr_exit() - Empty the suspend list on SSR
352  *
353  * API is used to empty the runtime pm prevent suspend list.
354  *
355  * Return: void
356  */
357 static void hif_rtpm_sanitize_ssr_exit(void)
358 {
359 	struct hif_pm_runtime_lock *ctx, *tmp;
360 
361 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
362 	list_for_each_entry_safe(ctx, tmp,
363 				 &gp_hif_rtpm_ctx->prevent_list, list) {
364 		__hif_pm_runtime_allow_suspend(ctx);
365 	}
366 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
367 }
368 
369 void hif_rtpm_close(struct hif_softc *scn)
370 {
371 	hif_rtpm_free_last_busy_hist();
372 	hif_rtpm_deregister(HIF_RTPM_ID_CE);
373 	hif_rtpm_deregister(HIF_RTPM_ID_FORCE_WAKE);
374 
375 	hif_is_recovery_in_progress(scn) ?
376 		hif_rtpm_sanitize_ssr_exit() :
377 		hif_rtpm_sanitize_exit();
378 
379 	qdf_mem_set(gp_hif_rtpm_ctx, sizeof(*gp_hif_rtpm_ctx), 0);
380 	gp_hif_rtpm_ctx = NULL;
381 	hif_info_high("Runtime PM context detached");
382 }
383 
384 void hif_rtpm_start(struct hif_softc *scn)
385 {
386 	uint32_t mode = hif_get_conparam(scn);
387 
388 	gp_hif_rtpm_ctx->enable_rpm = scn->hif_config.enable_runtime_pm;
389 
390 	if (!gp_hif_rtpm_ctx->enable_rpm) {
391 		hif_info_high("RUNTIME PM is disabled in ini");
392 		return;
393 	}
394 
395 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
396 	    mode == QDF_GLOBAL_MONITOR_MODE) {
397 		hif_info("RUNTIME PM is disabled for FTM/EPPING/MONITOR mode");
398 		return;
399 	}
400 
401 	hif_info_high("Enabling RUNTIME PM, Delay: %d ms",
402 		      scn->hif_config.runtime_pm_delay);
403 
404 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_ON);
405 	hif_rtpm_init(gp_hif_rtpm_ctx->dev, scn->hif_config.runtime_pm_delay);
406 	gp_hif_rtpm_ctx->cfg_delay = scn->hif_config.runtime_pm_delay;
407 	gp_hif_rtpm_ctx->delay = gp_hif_rtpm_ctx->cfg_delay;
408 	hif_rtpm_debugfs_create();
409 }
410 
411 void hif_rtpm_stop(struct hif_softc *scn)
412 {
413 	uint32_t mode = hif_get_conparam(scn);
414 
415 	if (!gp_hif_rtpm_ctx->enable_rpm)
416 		return;
417 
418 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
419 	    mode == QDF_GLOBAL_MONITOR_MODE)
420 		return;
421 
422 	hif_rtpm_exit(gp_hif_rtpm_ctx->dev);
423 
424 	hif_rtpm_sync_resume();
425 
426 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
427 	hif_rtpm_debugfs_remove();
428 }
429 
430 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rtpm_cbk)(void))
431 {
432 	struct hif_rtpm_client *client;
433 
434 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
435 		hif_err("Runtime PM context NULL");
436 		return QDF_STATUS_E_FAILURE;
437 	}
438 
439 	if (id >= HIF_RTPM_ID_MAX || gp_hif_rtpm_ctx->clients[id]) {
440 		hif_err("Invalid client %d", id);
441 		return QDF_STATUS_E_INVAL;
442 	}
443 
444 	client = qdf_mem_malloc(sizeof(struct hif_rtpm_client));
445 	if (!client)
446 		return QDF_STATUS_E_NOMEM;
447 
448 	client->hif_rtpm_cbk = hif_rtpm_cbk;
449 	qdf_atomic_init(&client->active_count);
450 	qdf_atomic_init(&client->get_count);
451 	qdf_atomic_init(&client->put_count);
452 
453 	gp_hif_rtpm_ctx->clients[id] = client;
454 	gp_hif_rtpm_ctx->client_count++;
455 
456 	return QDF_STATUS_SUCCESS;
457 }
458 
459 QDF_STATUS hif_rtpm_deregister(uint32_t id)
460 {
461 	struct hif_rtpm_client *client;
462 	int active_count;
463 
464 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
465 		hif_err("Runtime PM context NULL");
466 		return QDF_STATUS_E_FAILURE;
467 	}
468 
469 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
470 		hif_err("invalid client, id: %u", id);
471 		return QDF_STATUS_E_INVAL;
472 	}
473 
474 	client = gp_hif_rtpm_ctx->clients[id];
475 	if (qdf_atomic_read(&client->active_count)) {
476 		active_count = qdf_atomic_read(&client->active_count);
477 		hif_err("Client: %u-%s Runtime PM active",
478 			id, hif_rtpm_id_to_string(id));
479 		hif_err("last get called: 0x%llx, get count: %d, put count: %d",
480 			client->get_ts, qdf_atomic_read(&client->get_count),
481 			qdf_atomic_read(&client->put_count));
482 		QDF_DEBUG_PANIC("Get and PUT call out of sync!");
483 		while (active_count--)
484 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
485 	}
486 
487 	qdf_mem_free(client);
488 	gp_hif_rtpm_ctx->clients[id] = NULL;
489 
490 	return QDF_STATUS_SUCCESS;
491 }
492 
493 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
494 {
495 	if (delay < HIF_RTPM_DELAY_MIN || delay > HIF_RTPM_DELAY_MAX) {
496 		hif_err("Invalid delay value %d ms", delay);
497 		return QDF_STATUS_E_INVAL;
498 	}
499 
500 	__hif_rtpm_set_autosuspend_delay(gp_hif_rtpm_ctx->dev, delay);
501 	gp_hif_rtpm_ctx->delay = delay;
502 	hif_info_high("RTPM delay set: %d ms", delay);
503 
504 	return QDF_STATUS_SUCCESS;
505 }
506 
507 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
508 {
509 	if (gp_hif_rtpm_ctx->delay == gp_hif_rtpm_ctx->cfg_delay) {
510 		hif_info_rl("RTPM delay already default: %d",
511 			    gp_hif_rtpm_ctx->delay);
512 		return QDF_STATUS_E_ALREADY;
513 	}
514 
515 	__hif_rtpm_set_autosuspend_delay(gp_hif_rtpm_ctx->dev,
516 					 gp_hif_rtpm_ctx->cfg_delay);
517 	gp_hif_rtpm_ctx->delay = gp_hif_rtpm_ctx->cfg_delay;
518 	hif_info_rl("RTPM delay set: %d ms", gp_hif_rtpm_ctx->delay);
519 
520 	return QDF_STATUS_SUCCESS;
521 }
522 
523 int hif_rtpm_get_autosuspend_delay(void)
524 {
525 	return gp_hif_rtpm_ctx->delay;
526 }
527 
528 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
529 {
530 	struct hif_pm_runtime_lock *context;
531 
532 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
533 		hif_err("Runtime PM context NULL");
534 		return QDF_STATUS_E_FAILURE;
535 	}
536 
537 	hif_debug("Initializing Runtime PM wakelock %s", name);
538 
539 	context = qdf_mem_malloc(sizeof(*context));
540 	if (!context)
541 		return -ENOMEM;
542 
543 	context->name = name ? name : "Default";
544 	lock->lock = context;
545 
546 	return 0;
547 }
548 
/**
 * hif_runtime_lock_deinit() - release and free a prevent-suspend lock
 * @lock: lock context allocated by hif_runtime_lock_init()
 *
 * Drops any outstanding suspend vote (removing the lock from the
 * prevent list under prevent_list_lock) before freeing the context.
 * The ctx-NULL check allows deinit after hif_rtpm_close() has
 * detached the global context.
 */
void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *lock)
{
	if (!lock) {
		hif_err("Runtime PM lock already freed");
		return;
	}

	hif_debug("Deinitializing Runtime PM wakelock %s", lock->name);

	if (gp_hif_rtpm_ctx) {
		qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
		__hif_pm_runtime_allow_suspend(lock);
		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	}

	qdf_mem_free(lock);
}
566 
567 /**
568  * hif_rtpm_enabled() - To check if Runtime PM is enabled
569  *
570  * This function will check if Runtime PM is enabled or not.
571  *
572  * Return: void
573  */
574 static bool hif_rtpm_enabled(void)
575 {
576 	if (qdf_unlikely(!gp_hif_rtpm_ctx))
577 		return false;
578 
579 	if (gp_hif_rtpm_ctx->enable_rpm)
580 		return true;
581 
582 	return __hif_rtpm_enabled(gp_hif_rtpm_ctx->dev);
583 }
584 
585 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id)
586 {
587 	struct hif_rtpm_client *client = NULL;
588 	int ret = QDF_STATUS_E_FAILURE;
589 	int pm_state;
590 
591 	if (!hif_rtpm_enabled())
592 		return QDF_STATUS_SUCCESS;
593 
594 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
595 		QDF_DEBUG_PANIC("Invalid client, id: %u", id);
596 		return -QDF_STATUS_E_INVAL;
597 	}
598 
599 	client = gp_hif_rtpm_ctx->clients[id];
600 
601 	if (type != HIF_RTPM_GET_ASYNC) {
602 		switch (type) {
603 		case HIF_RTPM_GET_FORCE:
604 			ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
605 			break;
606 		case HIF_RTPM_GET_SYNC:
607 			ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
608 			break;
609 		case HIF_RTPM_GET_NORESUME:
610 			__hif_rtpm_get_noresume(gp_hif_rtpm_ctx->dev);
611 			ret = 0;
612 			break;
613 		default:
614 			QDF_DEBUG_PANIC("Invalid call type");
615 			return QDF_STATUS_E_BADMSG;
616 		}
617 
618 		if (ret < 0 && ret != -EINPROGRESS) {
619 			hif_err("pm_state: %d ret: %d",
620 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
621 				ret);
622 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
623 		} else {
624 			ret = QDF_STATUS_SUCCESS;
625 		}
626 		goto out;
627 	}
628 
629 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
630 	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP) {
631 		ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
632 		/* Get will return 1 if the device is already active,
633 		 * just return success in that case
634 		 */
635 		if (ret > 0) {
636 			ret = QDF_STATUS_SUCCESS;
637 		} else if (ret == 0 || ret == -EINPROGRESS) {
638 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
639 			pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
640 			if (pm_state >= HIF_RTPM_STATE_RESUMING) {
641 				__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
642 				gp_hif_rtpm_ctx->stats.request_resume_ts =
643 							qdf_get_log_timestamp();
644 				gp_hif_rtpm_ctx->stats.request_resume_id = id;
645 				ret = QDF_STATUS_E_FAILURE;
646 			} else {
647 				ret = QDF_STATUS_SUCCESS;
648 			}
649 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
650 		} else if (ret < 0) {
651 			hif_err("pm_state: %d ret: %d",
652 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
653 				ret);
654 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
655 		}
656 	} else if (pm_state >= HIF_RTPM_STATE_RESUMING) {
657 		/* Do not log in performance path */
658 		if (id != HIF_RTPM_ID_DP)
659 			hif_info_high("request RTPM resume by %d- %s",
660 				      id, hif_rtpm_id_to_string(id));
661 		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
662 		gp_hif_rtpm_ctx->stats.request_resume_ts =
663 						qdf_get_log_timestamp();
664 		gp_hif_rtpm_ctx->stats.request_resume_id = id;
665 		return QDF_STATUS_E_FAILURE;
666 	}
667 
668 out:
669 	if (QDF_IS_STATUS_SUCCESS(ret)) {
670 		qdf_atomic_inc(&client->active_count);
671 		qdf_atomic_inc(&client->get_count);
672 		client->get_ts = qdf_get_log_timestamp();
673 	}
674 
675 	return ret;
676 }
677 
/**
 * hif_rtpm_put() - release a runtime PM reference taken by hif_rtpm_get()
 * @type: HIF_RTPM_PUT_ASYNC / _NOIDLE / _SYNC_SUSPEND
 * @id: client id
 *
 * Validates that a matching get exists before dropping the reference,
 * then marks last-busy and updates the client's put accounting.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF error otherwise.
 */
QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
{
	struct hif_rtpm_client *client;
	int usage_count;

	if (!hif_rtpm_enabled())
		return QDF_STATUS_SUCCESS;

	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
		hif_err("Invalid client, id: %u", id);
		return QDF_STATUS_E_INVAL;
	}

	client = gp_hif_rtpm_ctx->clients[id];

	/*
	 * For runtime PM enabled case, the usage count should never be 0
	 * at this point. For runtime PM disabled case, it should never be
	 * 2 at this point. Catch an unexpected PUT without GET here.
	 */
	usage_count = hif_rtpm_read_usage_count();
	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
		hif_err("Unexpected PUT when runtime PM is disabled");
		QDF_BUG(0);
		return QDF_STATUS_E_CANCELED;
	} else if (!usage_count || !qdf_atomic_read(&client->active_count)) {
		hif_info_high("Put without a Get operation, %u-%s",
			      id, hif_rtpm_id_to_string(id));
		return QDF_STATUS_E_CANCELED;
	}

	switch (type) {
	case HIF_RTPM_PUT_ASYNC:
		__hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_NOIDLE:
		__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_SYNC_SUSPEND:
		__hif_rtpm_put_sync_suspend(gp_hif_rtpm_ctx->dev);
		break;
	default:
		QDF_DEBUG_PANIC("Invalid call type");
		return QDF_STATUS_E_BADMSG;
	}

	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
	qdf_atomic_dec(&client->active_count);
	qdf_atomic_inc(&client->put_count);
	client->put_ts = qdf_get_log_timestamp();
	gp_hif_rtpm_ctx->stats.last_busy_ts = client->put_ts;

	return QDF_STATUS_SUCCESS;
}
727 
728 /**
729  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
730  *                                      reason
731  * @lock: runtime_pm lock being acquired
732  *
733  * Return: 0 if successful.
734  */
735 static int __hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
736 {
737 	int ret = 0;
738 
739 	if (lock->active)
740 		return 0;
741 
742 	ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
743 
744 	/**
745 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
746 	 * RPM_SUSPENDING. Any other negative value is an error.
747 	 * We shouldn't do runtime_put here as in later point allow
748 	 * suspend gets called with the context and there the usage count
749 	 * is decremented, so suspend will be prevented.
750 	 */
751 	if (ret < 0 && ret != -EINPROGRESS) {
752 		gp_hif_rtpm_ctx->stats.runtime_get_err++;
753 		hif_err("pm_state: %d ret: %d",
754 			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
755 			ret);
756 	}
757 
758 	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
759 	lock->active = true;
760 	gp_hif_rtpm_ctx->prevent_cnt++;
761 	gp_hif_rtpm_ctx->stats.prevent_suspend++;
762 	return ret;
763 }
764 
765 /**
766  * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
767  * @lock: runtime pm lock
768  *
769  * This function will allow runtime suspend, by decrementing
770  * device's usage count.
771  *
772  * Return: status
773  */
774 static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
775 {
776 	int ret = 0;
777 	int usage_count;
778 
779 	if (gp_hif_rtpm_ctx->prevent_cnt == 0 || !lock->active)
780 		return ret;
781 
782 	usage_count = hif_rtpm_read_usage_count();
783 	/*
784 	 * For runtime PM enabled case, the usage count should never be 0
785 	 * at this point. For runtime PM disabled case, it should never be
786 	 * 2 at this point. Catch unexpected PUT without GET here.
787 	 */
788 	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
789 		hif_err("Unexpected PUT when runtime PM is disabled");
790 		QDF_BUG(0);
791 		return QDF_STATUS_E_CANCELED;
792 	} else if (!usage_count) {
793 		hif_info_high("Put without a Get operation, %s", lock->name);
794 		return QDF_STATUS_E_CANCELED;
795 	}
796 
797 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
798 	ret = __hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
799 
800 	list_del(&lock->list);
801 	lock->active = false;
802 	gp_hif_rtpm_ctx->prevent_cnt--;
803 	gp_hif_rtpm_ctx->stats.allow_suspend++;
804 	return ret;
805 }
806 
807 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
808 {
809 	if (!hif_rtpm_enabled() || !lock)
810 		return -EINVAL;
811 
812 	if (in_irq())
813 		WARN_ON(1);
814 
815 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
816 	__hif_pm_runtime_prevent_suspend(lock);
817 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
818 
819 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
820 		HIF_RTPM_STATE_SUSPENDING)
821 		hif_info_high("request RTPM resume by %s",
822 			      lock->name);
823 
824 	return 0;
825 }
826 
827 /**
828  * __hif_pm_runtime_prevent_suspend_sync() - synchronized prevent runtime
829  *  suspend for a protocol reason
830  * @lock: runtime_pm lock being acquired
831  *
832  * Return: 0 if successful.
833  */
834 static
835 int __hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
836 {
837 	int ret = 0;
838 
839 	if (lock->active)
840 		return 0;
841 
842 	ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
843 
844 	/**
845 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
846 	 * RPM_SUSPENDING. Any other negative value is an error.
847 	 * We shouldn't do runtime_put here as in later point allow
848 	 * suspend gets called with the context and there the usage count
849 	 * is decremented, so suspend will be prevented.
850 	 */
851 	if (ret < 0 && ret != -EINPROGRESS) {
852 		gp_hif_rtpm_ctx->stats.runtime_get_err++;
853 		hif_err("pm_state: %d ret: %d",
854 			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
855 			ret);
856 	}
857 
858 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
859 	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
860 	lock->active = true;
861 	gp_hif_rtpm_ctx->prevent_cnt++;
862 	gp_hif_rtpm_ctx->stats.prevent_suspend++;
863 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
864 
865 	return ret;
866 }
867 
868 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
869 {
870 	if (!hif_rtpm_enabled())
871 		return 0;
872 
873 	if (!lock)
874 		return -EINVAL;
875 
876 	if (in_irq())
877 		WARN_ON(1);
878 
879 	__hif_pm_runtime_prevent_suspend_sync(lock);
880 
881 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
882 		HIF_RTPM_STATE_SUSPENDING)
883 		hif_info_high("request RTPM resume by %s",
884 			      lock->name);
885 
886 	return 0;
887 }
888 
889 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
890 {
891 	if (!hif_rtpm_enabled())
892 		return 0;
893 
894 	if (!lock)
895 		return -EINVAL;
896 
897 	if (in_irq())
898 		WARN_ON(1);
899 
900 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
901 	__hif_pm_runtime_allow_suspend(lock);
902 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
903 
904 	return 0;
905 }
906 
907 QDF_STATUS hif_rtpm_sync_resume(void)
908 {
909 	struct device *dev;
910 	int pm_state;
911 	int ret;
912 
913 	if (!hif_rtpm_enabled())
914 		return 0;
915 
916 	dev = gp_hif_rtpm_ctx->dev;
917 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
918 
919 	ret = __hif_rtpm_resume(dev);
920 	__hif_rtpm_mark_last_busy(dev);
921 
922 	if (ret >= 0) {
923 		gp_hif_rtpm_ctx->stats.resume_count++;
924 		gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
925 		gp_hif_rtpm_ctx->stats.last_busy_ts =
926 					gp_hif_rtpm_ctx->stats.resume_ts;
927 		return QDF_STATUS_SUCCESS;
928 	}
929 
930 	hif_err("pm_state: %d, err: %d", pm_state, ret);
931 	return QDF_STATUS_E_FAILURE;
932 }
933 
/**
 * hif_rtpm_request_resume() - queue an asynchronous resume request
 *
 * Logs the caller's return address for debugging which path asked for
 * the resume.
 */
void hif_rtpm_request_resume(void)
{
	__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
	hif_info_high("request RTPM resume %s", (char *)_RET_IP_);
}
939 
/**
 * hif_rtpm_check_and_request_resume() - request resume if suspending,
 * else refresh last-busy
 *
 * The state is read under the suspend lock; the lock is released
 * before __hif_rtpm_request_resume() on the suspending path, and after
 * the mark-last-busy on the active path.
 */
void hif_rtpm_check_and_request_resume(void)
{
	hif_rtpm_suspend_lock();
	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
			HIF_RTPM_STATE_SUSPENDING) {
		hif_rtpm_suspend_unlock();
		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.request_resume_ts =
						qdf_get_log_timestamp();
		gp_hif_rtpm_ctx->stats.request_resume_id = HIF_RTPM_ID_RESERVED;
	} else {
		__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
		hif_rtpm_suspend_unlock();
	}
}
956 
957 int hif_rtpm_get_monitor_wake_intr(void)
958 {
959 	return qdf_atomic_read(&gp_hif_rtpm_ctx->monitor_wake_intr);
960 }
961 
962 void hif_rtpm_set_monitor_wake_intr(int val)
963 {
964 	qdf_atomic_set(&gp_hif_rtpm_ctx->monitor_wake_intr, val);
965 }
966 
/**
 * hif_rtpm_display_last_busy_hist() - log last-busy info for debugging
 * @hif_ctx: opaque hif context
 *
 * Logs the global last-busy marker, per-client stats for the CE and DP
 * clients, and sampled entries of each CE's last-busy timestamp ring.
 */
void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn;
	struct hif_rtpm_ctx *rtpm_ctx = gp_hif_rtpm_ctx;
	struct hif_rtpm_last_busy_hist *hist;
	unsigned long cur_idx;
	int i;

	scn = HIF_GET_SOFTC(hif_ctx);
	if (!scn)
		return;

	hif_info_high("RTPM last busy ts:%llu client:%s from:%ps",
		      rtpm_ctx->stats.last_busy_ts,
		      hif_rtpm_id_to_string(rtpm_ctx->stats.last_busy_id),
		      rtpm_ctx->stats.last_busy_marker);

	/*Display CE and DP clients RTPM stats*/
	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		if (!rtpm_ctx->clients[i] ||
		    (i != HIF_RTPM_ID_CE && i != HIF_RTPM_ID_DP))
			continue;
		hif_info_high("RTPM client:%s busy_ts:%llu get_ts:%llu put_ts:%llu get_cnt:%d put_cnt:%d",
			      hif_rtpm_id_to_string(i),
			      rtpm_ctx->clients[i]->last_busy_ts,
			      rtpm_ctx->clients[i]->get_ts,
			      rtpm_ctx->clients[i]->put_ts,
			      qdf_atomic_read(&rtpm_ctx->clients[i]->get_count),
			      qdf_atomic_read(&rtpm_ctx->clients[i]->put_count));
	}

	/* Sample four entries of each CE ring, spaced 4 slots apart */
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hist = gp_hif_rtpm_ctx->busy_hist[i];
		if (!hist)
			continue;
		cur_idx = hist->last_busy_idx;

		hif_info_high("RTPM CE-%u last busy_cnt:%lu cur_idx:%lu ts1:%llu ts2:%llu ts3:%llu ts4:%llu",
			      i, hist->last_busy_cnt, cur_idx,
			      hist->last_busy_ts[cur_idx & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 4) & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 8) & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 12) & HIF_RTPM_BUSY_HIST_MASK]);
	}
}
1012 
1013 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1014 				      unsigned long ce_id)
1015 {
1016 	struct hif_rtpm_last_busy_hist *hist;
1017 	unsigned long idx;
1018 
1019 	if (!scn || !gp_hif_rtpm_ctx->busy_hist[ce_id])
1020 		return;
1021 
1022 	hist = gp_hif_rtpm_ctx->busy_hist[ce_id];
1023 	hist->last_busy_cnt++;
1024 	hist->last_busy_idx++;
1025 	idx = hist->last_busy_idx & HIF_RTPM_BUSY_HIST_MASK;
1026 	hist->last_busy_ts[idx] = qdf_get_log_timestamp();
1027 }
1028 
1029 void hif_rtpm_mark_last_busy(uint32_t id)
1030 {
1031 	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
1032 	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
1033 	gp_hif_rtpm_ctx->stats.last_busy_id = id;
1034 	gp_hif_rtpm_ctx->stats.last_busy_marker = (void *)_RET_IP_;
1035 	if (gp_hif_rtpm_ctx->clients[id]) {
1036 		gp_hif_rtpm_ctx->clients[id]->last_busy_cnt++;
1037 		gp_hif_rtpm_ctx->clients[id]->last_busy_ts =
1038 					gp_hif_rtpm_ctx->stats.last_busy_ts;
1039 	}
1040 }
1041 
1042 void hif_rtpm_set_client_job(uint32_t client_id)
1043 {
1044 	int pm_state;
1045 
1046 	if (!gp_hif_rtpm_ctx->clients[client_id])
1047 		return;
1048 
1049 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1050 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
1051 	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP &&
1052 	    gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk)
1053 		gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk();
1054 	else
1055 		qdf_set_bit(client_id, &gp_hif_rtpm_ctx->pending_job);
1056 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1057 }
1058 
1059 /**
1060  * hif_rtpm_pending_job() - continue jobs when bus resumed
1061  *
1062  * Return: Void
1063  */
1064 static void hif_rtpm_pending_job(void)
1065 {
1066 	int i;
1067 
1068 	for (i = 0; i < gp_hif_rtpm_ctx->client_count; i++) {
1069 		if (qdf_test_and_clear_bit(i, &gp_hif_rtpm_ctx->pending_job)) {
1070 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1071 			if (gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk)
1072 				gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk();
1073 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1074 		}
1075 	}
1076 }
1077 
1078 #define PREVENT_LIST_STRING_LEN 200
1079 
1080 void hif_rtpm_print_prevent_list(void)
1081 {
1082 	struct hif_rtpm_client *client;
1083 	struct hif_pm_runtime_lock *ctx;
1084 	char *str_buf;
1085 	int i, prevent_list_count, len = 0;
1086 
1087 	str_buf = qdf_mem_malloc(PREVENT_LIST_STRING_LEN);
1088 	if (!str_buf)
1089 		return;
1090 
1091 	qdf_spin_lock(&gp_hif_rtpm_ctx->prevent_list_lock);
1092 	prevent_list_count = gp_hif_rtpm_ctx->prevent_cnt;
1093 	if (prevent_list_count) {
1094 		list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list)
1095 			len += qdf_scnprintf(str_buf + len,
1096 				PREVENT_LIST_STRING_LEN - len,
1097 				"%s ", ctx->name);
1098 	}
1099 	qdf_spin_unlock(&gp_hif_rtpm_ctx->prevent_list_lock);
1100 
1101 	if (prevent_list_count)
1102 		hif_info_high("prevent_suspend_cnt %u, prevent_list: %s",
1103 			      prevent_list_count, str_buf);
1104 
1105 	qdf_mem_free(str_buf);
1106 
1107 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
1108 		client = gp_hif_rtpm_ctx->clients[i];
1109 		if (client && qdf_atomic_read(&client->active_count))
1110 			hif_info_high("client: %d: %s- active count: %d", i,
1111 				      hif_rtpm_id_to_string(i),
1112 				      qdf_atomic_read(&client->active_count));
1113 	}
1114 }
1115 
1116 /**
1117  * hif_rtpm_is_suspend_allowed() - Reject suspend if client is active
1118  *
1119  * Return: True if no clients are active
1120  */
1121 static bool hif_rtpm_is_suspend_allowed(void)
1122 {
1123 	if (!gp_hif_rtpm_ctx || !gp_hif_rtpm_ctx->enable_rpm)
1124 		return false;
1125 
1126 	if (!hif_rtpm_read_usage_count())
1127 		return true;
1128 
1129 	return false;
1130 }
1131 
/* Acquire the runtime-suspend spinlock (irqsave variant) */
void hif_rtpm_suspend_lock(void)
{
	qdf_spin_lock_irqsave(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
1136 
/* Release the runtime-suspend spinlock taken by hif_rtpm_suspend_lock() */
void hif_rtpm_suspend_unlock(void)
{
	qdf_spin_unlock_irqrestore(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
1141 
1142 /**
1143  * hif_rtpm_set_state(): utility function
1144  * @state: state to set
1145  *
1146  * Return: Void
1147  */
1148 static inline
1149 void hif_rtpm_set_state(enum hif_rtpm_state state)
1150 {
1151 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, state);
1152 }
1153 
/* Atomically read the current runtime PM state (enum hif_rtpm_state) */
int hif_rtpm_get_state(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
}
1158 
1159 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
1160 {
1161 	if (!hif_can_suspend_link(hif_ctx)) {
1162 		hif_err("Runtime PM not supported for link up suspend");
1163 		return -EINVAL;
1164 	}
1165 
1166 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1167 	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDING);
1168 
1169 	/* keep this after set suspending */
1170 	if (!hif_rtpm_is_suspend_allowed()) {
1171 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1172 		hif_rtpm_print_prevent_list();
1173 		gp_hif_rtpm_ctx->stats.suspend_err_count++;
1174 		gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
1175 		hif_info_high("Runtime PM not allowed now");
1176 		return -EINVAL;
1177 	}
1178 
1179 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1180 
1181 	return QDF_STATUS_SUCCESS;
1182 }
1183 
1184 void hif_process_runtime_suspend_success(void)
1185 {
1186 	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDED);
1187 	gp_hif_rtpm_ctx->stats.suspend_count++;
1188 	gp_hif_rtpm_ctx->stats.suspend_ts = qdf_get_log_timestamp();
1189 }
1190 
1191 void hif_process_runtime_suspend_failure(void)
1192 {
1193 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1194 	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
1195 	hif_rtpm_pending_job();
1196 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1197 
1198 	gp_hif_rtpm_ctx->stats.suspend_err_count++;
1199 	gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
1200 	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
1201 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
1202 }
1203 
1204 void hif_pre_runtime_resume(void)
1205 {
1206 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1207 	hif_rtpm_set_monitor_wake_intr(0);
1208 	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING);
1209 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1210 }
1211 
1212 void hif_process_runtime_resume_linkup(void)
1213 {
1214 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1215 	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING_LINKUP);
1216 	hif_rtpm_pending_job();
1217 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1218 }
1219 
1220 void hif_process_runtime_resume_success(void)
1221 {
1222 	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
1223 	gp_hif_rtpm_ctx->stats.resume_count++;
1224 	gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
1225 	gp_hif_rtpm_ctx->stats.last_busy_ts = gp_hif_rtpm_ctx->stats.resume_ts;
1226 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
1227 }
1228 
/**
 * hif_runtime_suspend() - suspend the bus for runtime PM
 * @hif_ctx: HIF opaque context
 *
 * Suspend the bus, arm wake-interrupt monitoring, then complete the
 * noirq suspend phase. If the noirq phase fails, monitoring is disarmed
 * and the bus is resumed to leave the device in a working state.
 *
 * Return: 0 on success, negative errno on failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	ret = hif_bus_suspend(hif_ctx);
	if (ret) {
		hif_err("Failed bus suspend: %d", ret);
		return ret;
	}

	hif_rtpm_set_monitor_wake_intr(1);

	ret = hif_bus_suspend_noirq(hif_ctx);
	if (!ret)
		return 0;

	/* noirq phase failed: undo monitoring and roll the bus back */
	hif_err("Failed bus suspend noirq: %d", ret);
	hif_rtpm_set_monitor_wake_intr(0);
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return ret;
}
1255 
/**
 * hif_runtime_resume() - resume the bus from runtime suspend
 * @hif_ctx: HIF opaque context
 *
 * Run the noirq resume phase (asserting it cannot fail), then resume
 * the bus proper.
 *
 * Return: 0 on success, negative errno from bus resume on failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));

	ret = hif_bus_resume(hif_ctx);
	if (ret)
		hif_err("Failed runtime resume: %d", ret);

	return ret;
}
1267 
1268 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
1269 {
1270 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1271 	struct CE_state *ce_state;
1272 
1273 	if (!scn)
1274 		return;
1275 
1276 	if (scn->fastpath_mode_on) {
1277 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1278 			return;
1279 
1280 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
1281 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1282 
1283 		/*war_ce_src_ring_write_idx_set */
1284 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1285 					  ce_state->src_ring->write_index);
1286 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1287 		Q_TARGET_ACCESS_END(scn);
1288 	}
1289 }
1290 #endif /* FEATURE_RUNTIME_PM */
1291