xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_runtime_pm.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8 
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
20 #include <linux/if_arp.h>
21 #include "hif_io32.h"
22 #include "hif_runtime_pm.h"
23 #include "hif.h"
24 #include "target_type.h"
25 #include "hif_main.h"
26 #include "ce_main.h"
27 #include "ce_api.h"
28 #include "ce_internal.h"
29 #include "ce_reg.h"
30 #include "ce_bmi.h"
31 #include "regtable.h"
32 #include "hif_hw_version.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include "qdf_status.h"
36 #include "qdf_atomic.h"
37 #include "pld_common.h"
38 #include "mp_dev.h"
39 #include "hif_debug.h"
40 
41 #include "ce_tasklet.h"
42 #include "targaddrs.h"
43 #include "hif_exec.h"
44 
45 #define CNSS_RUNTIME_FILE "cnss_runtime_pm"
46 #define CNSS_RUNTIME_FILE_PERM QDF_FILE_USR_READ
47 
48 #ifdef FEATURE_RUNTIME_PM
49 
50 static struct hif_rtpm_ctx g_hif_rtpm_ctx;
51 static struct hif_rtpm_ctx *gp_hif_rtpm_ctx;
52 
53 /**
54  * hif_rtpm_id_to_string() - Convert dbgid to respective string
55  * @id: debug id
56  *
57  * Debug support function to convert  dbgid to string.
58  * Please note to add new string in the array at index equal to
59  * its enum value in wlan_rtpm_dbgid.
60  *
61  * Return: String of ID
62  */
63 static const char *hif_rtpm_id_to_string(enum hif_rtpm_client_id id)
64 {
65 	static const char * const strings[] = {
66 					"HIF_RTPM_ID_RESERVED",
67 					"HIF_RTPM_HAL_REO_CMD",
68 					"HIF_RTPM_WMI",
69 					"HIF_RTPM_HTT",
70 					"HIF_RTPM_DP",
71 					"HIF_RTPM_RING_STATS",
72 					"HIF_RTPM_CE",
73 					"HIF_RTPM_FORCE_WAKE",
74 					"HIF_RTPM_ID_PM_QOS_NOTIFY",
75 					"HIF_RTPM_ID_WIPHY_SUSPEND",
76 					"HIF_RTPM_ID_MAX"
77 	};
78 
79 	return strings[id];
80 }
81 
82 /**
83  * hif_rtpm_read_usage_count() - Read device usage count
84  *
85  * Return: current usage count
86  */
87 static inline int hif_rtpm_read_usage_count(void)
88 {
89 	return qdf_atomic_read(&gp_hif_rtpm_ctx->dev->power.usage_count);
90 }
91 
92 #define HIF_RTPM_STATS(_s, _rtpm_ctx, _name) \
93 	seq_printf(_s, "%30s: %u\n", #_name, (_rtpm_ctx)->stats._name)
94 
95 /**
96  * hif_rtpm_debugfs_show(): show debug stats for runtimepm
97  * @s: file to print to
98  * @data: unused
99  *
100  * debugging tool added to the debug fs for displaying runtimepm stats
101  *
102  * Return: 0
103  */
104 static int hif_rtpm_debugfs_show(struct seq_file *s, void *data)
105 {
106 	struct hif_rtpm_client *client = NULL;
107 	struct hif_pm_runtime_lock *ctx;
108 	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
109 			"RESUMING_LINKUP", "SUSPENDING", "SUSPENDED"};
110 	int pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
111 	int i;
112 
113 	seq_printf(s, "%30s: %llu\n", "Current timestamp",
114 		   qdf_get_log_timestamp());
115 
116 	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
117 
118 	seq_printf(s, "%30s: %llu\n", "Last Busy timestamp",
119 		   gp_hif_rtpm_ctx->stats.last_busy_ts);
120 
121 	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
122 		   gp_hif_rtpm_ctx->stats.last_busy_marker);
123 
124 	seq_puts(s, "Rx busy marker counts:\n");
125 	seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_DP),
126 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_cnt,
127 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_ts);
128 
129 	seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_CE),
130 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_cnt,
131 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_ts);
132 
133 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, last_busy_id);
134 
135 	if (pm_state == HIF_RTPM_STATE_SUSPENDED) {
136 		seq_printf(s, "%30s: %llx us\n", "Suspended Since",
137 			   gp_hif_rtpm_ctx->stats.suspend_ts);
138 	}
139 
140 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, resume_count);
141 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_count);
142 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_err_count);
143 
144 	seq_printf(s, "%30s: %d\n", "PM Usage count",
145 		   hif_rtpm_read_usage_count());
146 
147 	seq_puts(s, "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
148 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
149 		client = gp_hif_rtpm_ctx->clients[i];
150 		if (!client)
151 			continue;
152 		seq_printf(s, "%-10d ", qdf_atomic_read(&client->get_count));
153 		seq_printf(s, "%-10d ", qdf_atomic_read(&client->put_count));
154 		seq_printf(s, "0x%-10llx ", client->get_ts);
155 		seq_printf(s, "0x%-10llx ", client->put_ts);
156 		seq_printf(s, ":%-2d %-30s\n", i, hif_rtpm_id_to_string(i));
157 	}
158 	seq_puts(s, "\n");
159 
160 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
161 	if (list_empty(&gp_hif_rtpm_ctx->prevent_list)) {
162 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
163 		return 0;
164 	}
165 
166 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
167 	list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list) {
168 		seq_printf(s, "%s", ctx->name);
169 		seq_puts(s, " ");
170 	}
171 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
172 
173 	return 0;
174 }
175 
176 #undef HIF_RTPM_STATS
177 
178 /**
179  * hif_rtpm_debugfs_open() - open a debug fs file to access the runtime pm stats
180  * @inode:
181  * @file:
182  *
183  * Return: linux error code of single_open.
184  */
185 static int hif_rtpm_debugfs_open(struct inode *inode, struct file *file)
186 {
187 	return single_open(file, hif_rtpm_debugfs_show,
188 			inode->i_private);
189 }
190 
/* File operations for the runtime PM debugfs entry; reads are served
 * through the seq_file single_open() helpers.
 */
static const struct file_operations hif_rtpm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_rtpm_debugfs_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};
198 
199 /**
200  * hif_rtpm_debugfs_create() - creates runtimepm debugfs entry
201  *
202  * creates a debugfs entry to debug the runtime pm feature.
203  */
204 static void hif_rtpm_debugfs_create(void)
205 {
206 	gp_hif_rtpm_ctx->pm_dentry = qdf_debugfs_create_entry(CNSS_RUNTIME_FILE,
207 							CNSS_RUNTIME_FILE_PERM,
208 							NULL,
209 							NULL,
210 							&hif_rtpm_fops);
211 }
212 
213 /**
214  * hif_rtpm_debugfs_remove() - removes runtimepm debugfs entry
215  *
216  * removes the debugfs entry to debug the runtime pm feature.
217  */
218 static void hif_rtpm_debugfs_remove(void)
219 {
220 	qdf_debugfs_remove_file(gp_hif_rtpm_ctx->pm_dentry);
221 }
222 
223 /**
224  * hif_rtpm_init() - Initialize Runtime PM
225  * @dev: device structure
226  * @delay: delay to be configured for auto suspend
227  *
228  * This function will init all the Runtime PM config.
229  *
230  * Return: void
231  */
232 static void hif_rtpm_init(struct device *dev, int delay)
233 {
234 	pm_runtime_set_autosuspend_delay(dev, delay);
235 	pm_runtime_use_autosuspend(dev);
236 	pm_runtime_allow(dev);
237 	pm_runtime_mark_last_busy(dev);
238 	pm_runtime_put_noidle(dev);
239 	pm_suspend_ignore_children(dev, true);
240 }
241 
242 /**
243  * hif_rtpm_exit() - Deinit/Exit Runtime PM
244  * @dev: device structure
245  *
246  * This function will deinit all the Runtime PM config.
247  *
248  * Return: void
249  */
250 static void hif_rtpm_exit(struct device *dev)
251 {
252 	pm_runtime_get_noresume(dev);
253 	pm_runtime_set_active(dev);
254 	pm_runtime_forbid(dev);
255 }
256 
257 static void hif_rtpm_alloc_last_busy_hist(void)
258 {
259 	int i;
260 
261 	for (i = 0; i < CE_COUNT_MAX; i++) {
262 		if (i != CE_ID_1 && i != CE_ID_2 && i != CE_ID_7) {
263 			gp_hif_rtpm_ctx->busy_hist[i] = NULL;
264 			continue;
265 		}
266 
267 		gp_hif_rtpm_ctx->busy_hist[i] =
268 			qdf_mem_malloc(sizeof(struct hif_rtpm_last_busy_hist));
269 		if (!gp_hif_rtpm_ctx->busy_hist[i])
270 			return;
271 	}
272 }
273 
274 static void hif_rtpm_free_last_busy_hist(void)
275 {
276 	int i;
277 
278 	for (i = 0; i < CE_COUNT_MAX; i++) {
279 		if (i != CE_ID_1 && i != CE_ID_2 && i != CE_ID_7)
280 			continue;
281 
282 		qdf_mem_free(gp_hif_rtpm_ctx->busy_hist[i]);
283 	}
284 }
285 
/**
 * hif_rtpm_open() - Attach and initialize the global runtime PM context
 * @scn: HIF context
 *
 * Sets up the single global runtime PM context: locks, state atomics,
 * the prevent-suspend list, and the internal CE and FORCE_WAKE clients.
 * Must run before any other hif_rtpm_* API is used.
 */
void hif_rtpm_open(struct hif_softc *scn)
{
	/* Single global context; the module supports one device */
	gp_hif_rtpm_ctx = &g_hif_rtpm_ctx;
	gp_hif_rtpm_ctx->dev = scn->qdf_dev->dev;
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_suspend_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->prevent_list_lock);
	qdf_atomic_init(&gp_hif_rtpm_ctx->pm_state);
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
	qdf_atomic_init(&gp_hif_rtpm_ctx->monitor_wake_intr);
	INIT_LIST_HEAD(&gp_hif_rtpm_ctx->prevent_list);
	gp_hif_rtpm_ctx->client_count = 0;
	gp_hif_rtpm_ctx->pending_job = 0;
	/* HIF-internal clients; external clients register themselves */
	hif_rtpm_register(HIF_RTPM_ID_CE, NULL);
	hif_rtpm_register(HIF_RTPM_ID_FORCE_WAKE, NULL);
	hif_rtpm_alloc_last_busy_hist();
	hif_info_high("Runtime PM attached");
}
304 
305 static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock);
306 
307 /**
308  * hif_rtpm_sanitize_exit(): sanitize runtime PM gets/puts from driver
309  *
310  * Ensure all gets/puts are in sync before exiting runtime PM feature.
311  * Also make sure all runtime PM locks are deinitialized properly.
312  *
313  * Return: void
314  */
315 static void hif_rtpm_sanitize_exit(void)
316 {
317 	struct hif_pm_runtime_lock *ctx, *tmp;
318 	struct hif_rtpm_client *client;
319 	int i, active_count;
320 
321 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
322 	list_for_each_entry_safe(ctx, tmp,
323 				 &gp_hif_rtpm_ctx->prevent_list, list) {
324 		hif_runtime_lock_deinit(ctx);
325 	}
326 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
327 
328 	/* check if get and put out of sync for all clients */
329 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
330 		client = gp_hif_rtpm_ctx->clients[i];
331 		if (client) {
332 			if (qdf_atomic_read(&client->active_count)) {
333 				active_count =
334 					qdf_atomic_read(&client->active_count);
335 				hif_err("Client active: %u- %s", i,
336 					hif_rtpm_id_to_string(i));
337 				QDF_DEBUG_PANIC("Client active on exit!");
338 				while (active_count--)
339 					__hif_rtpm_put_noidle(
340 							gp_hif_rtpm_ctx->dev);
341 			}
342 			QDF_DEBUG_PANIC("Client not deinitialized");
343 			qdf_mem_free(client);
344 			gp_hif_rtpm_ctx->clients[i] = NULL;
345 		}
346 	}
347 }
348 
349 /**
350  * hif_rtpm_sanitize_ssr_exit() - Empty the suspend list on SSR
351  *
352  * API is used to empty the runtime pm prevent suspend list.
353  *
354  * Return: void
355  */
356 static void hif_rtpm_sanitize_ssr_exit(void)
357 {
358 	struct hif_pm_runtime_lock *ctx, *tmp;
359 
360 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
361 	list_for_each_entry_safe(ctx, tmp,
362 				 &gp_hif_rtpm_ctx->prevent_list, list) {
363 		__hif_pm_runtime_allow_suspend(ctx);
364 	}
365 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
366 }
367 
/**
 * hif_rtpm_close() - Detach and reset the global runtime PM context
 * @scn: HIF context
 *
 * Deregisters the HIF-internal clients, drains the prevent-suspend
 * list (leniently on SSR, strictly otherwise) and zeroes the context.
 */
void hif_rtpm_close(struct hif_softc *scn)
{
	hif_rtpm_free_last_busy_hist();
	hif_rtpm_deregister(HIF_RTPM_ID_CE);
	hif_rtpm_deregister(HIF_RTPM_ID_FORCE_WAKE);

	/* On SSR only empty the prevent list; clients clean up after
	 * recovery. On a normal close, enforce get/put balance.
	 */
	hif_is_recovery_in_progress(scn) ?
		hif_rtpm_sanitize_ssr_exit() :
		hif_rtpm_sanitize_exit();

	qdf_mem_set(gp_hif_rtpm_ctx, sizeof(*gp_hif_rtpm_ctx), 0);
	gp_hif_rtpm_ctx = NULL;
	hif_info_high("Runtime PM context detached");
}
382 
383 void hif_rtpm_start(struct hif_softc *scn)
384 {
385 	uint32_t mode = hif_get_conparam(scn);
386 
387 	gp_hif_rtpm_ctx->enable_rpm = scn->hif_config.enable_runtime_pm;
388 
389 	if (!gp_hif_rtpm_ctx->enable_rpm) {
390 		hif_info_high("RUNTIME PM is disabled in ini");
391 		return;
392 	}
393 
394 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
395 	    mode == QDF_GLOBAL_MONITOR_MODE) {
396 		hif_info("RUNTIME PM is disabled for FTM/EPPING/MONITOR mode");
397 		return;
398 	}
399 
400 	hif_info_high("Enabling RUNTIME PM, Delay: %d ms",
401 		      scn->hif_config.runtime_pm_delay);
402 
403 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_ON);
404 	hif_rtpm_init(gp_hif_rtpm_ctx->dev, scn->hif_config.runtime_pm_delay);
405 	gp_hif_rtpm_ctx->cfg_delay = scn->hif_config.runtime_pm_delay;
406 	gp_hif_rtpm_ctx->delay = gp_hif_rtpm_ctx->cfg_delay;
407 	hif_rtpm_debugfs_create();
408 }
409 
410 void hif_rtpm_stop(struct hif_softc *scn)
411 {
412 	uint32_t mode = hif_get_conparam(scn);
413 
414 	if (!gp_hif_rtpm_ctx->enable_rpm)
415 		return;
416 
417 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
418 	    mode == QDF_GLOBAL_MONITOR_MODE)
419 		return;
420 
421 	hif_rtpm_exit(gp_hif_rtpm_ctx->dev);
422 
423 	hif_rtpm_sync_resume();
424 
425 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
426 	hif_rtpm_debugfs_remove();
427 }
428 
429 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rtpm_cbk)(void))
430 {
431 	struct hif_rtpm_client *client;
432 
433 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
434 		hif_err("Runtime PM context NULL");
435 		return QDF_STATUS_E_FAILURE;
436 	}
437 
438 	if (id >= HIF_RTPM_ID_MAX || gp_hif_rtpm_ctx->clients[id]) {
439 		hif_err("Invalid client %d", id);
440 		return QDF_STATUS_E_INVAL;
441 	}
442 
443 	client = qdf_mem_malloc(sizeof(struct hif_rtpm_client));
444 	if (!client)
445 		return QDF_STATUS_E_NOMEM;
446 
447 	client->hif_rtpm_cbk = hif_rtpm_cbk;
448 	qdf_atomic_init(&client->active_count);
449 	qdf_atomic_init(&client->get_count);
450 	qdf_atomic_init(&client->put_count);
451 
452 	gp_hif_rtpm_ctx->clients[id] = client;
453 	gp_hif_rtpm_ctx->client_count++;
454 
455 	return QDF_STATUS_SUCCESS;
456 }
457 
/**
 * hif_rtpm_deregister() - Deregister a runtime PM client
 * @id: client id previously passed to hif_rtpm_register()
 *
 * If the client still holds outstanding gets, logs, panics (debug
 * builds) and rebalances the device usage count before freeing the
 * client so the PM core bookkeeping stays consistent.
 *
 * Return: QDF_STATUS_SUCCESS, or an error status for a missing
 * context or invalid/unregistered id.
 */
QDF_STATUS hif_rtpm_deregister(uint32_t id)
{
	struct hif_rtpm_client *client;
	int active_count;

	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
		hif_err("Runtime PM context NULL");
		return QDF_STATUS_E_FAILURE;
	}

	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
		hif_err("invalid client, id: %u", id);
		return QDF_STATUS_E_INVAL;
	}

	client = gp_hif_rtpm_ctx->clients[id];
	if (qdf_atomic_read(&client->active_count)) {
		active_count = qdf_atomic_read(&client->active_count);
		hif_err("Client: %u-%s Runtime PM active",
			id, hif_rtpm_id_to_string(id));
		hif_err("last get called: 0x%llx, get count: %d, put count: %d",
			client->get_ts, qdf_atomic_read(&client->get_count),
			qdf_atomic_read(&client->put_count));
		QDF_DEBUG_PANIC("Get and PUT call out of sync!");
		/* Drop one usage count per leaked get */
		while (active_count--)
			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
	}

	qdf_mem_free(client);
	gp_hif_rtpm_ctx->clients[id] = NULL;

	return QDF_STATUS_SUCCESS;
}
491 
492 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
493 {
494 	if (delay < HIF_RTPM_DELAY_MIN || delay > HIF_RTPM_DELAY_MAX) {
495 		hif_err("Invalid delay value %d ms", delay);
496 		return QDF_STATUS_E_INVAL;
497 	}
498 
499 	__hif_rtpm_set_autosuspend_delay(gp_hif_rtpm_ctx->dev, delay);
500 	gp_hif_rtpm_ctx->delay = delay;
501 	hif_info_high("RTPM delay set: %d ms", delay);
502 
503 	return QDF_STATUS_SUCCESS;
504 }
505 
506 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
507 {
508 	if (gp_hif_rtpm_ctx->delay == gp_hif_rtpm_ctx->cfg_delay) {
509 		hif_info_rl("RTPM delay already default: %d",
510 			    gp_hif_rtpm_ctx->delay);
511 		return QDF_STATUS_E_ALREADY;
512 	}
513 
514 	__hif_rtpm_set_autosuspend_delay(gp_hif_rtpm_ctx->dev,
515 					 gp_hif_rtpm_ctx->cfg_delay);
516 	gp_hif_rtpm_ctx->delay = gp_hif_rtpm_ctx->cfg_delay;
517 	hif_info_rl("RTPM delay set: %d ms", gp_hif_rtpm_ctx->delay);
518 
519 	return QDF_STATUS_SUCCESS;
520 }
521 
/* Return the currently effective autosuspend delay in ms */
int hif_rtpm_get_autosuspend_delay(void)
{
	return gp_hif_rtpm_ctx->delay;
}
526 
527 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
528 {
529 	struct hif_pm_runtime_lock *context;
530 
531 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
532 		hif_err("Runtime PM context NULL");
533 		return QDF_STATUS_E_FAILURE;
534 	}
535 
536 	hif_debug("Initializing Runtime PM wakelock %s", name);
537 
538 	context = qdf_mem_malloc(sizeof(*context));
539 	if (!context)
540 		return -ENOMEM;
541 
542 	context->name = name ? name : "Default";
543 	lock->lock = context;
544 
545 	return 0;
546 }
547 
/**
 * hif_runtime_lock_deinit() - Release a prevent-suspend lock context
 * @lock: context allocated by hif_runtime_lock_init()
 *
 * Drops the prevent-suspend hold if still active (rebalancing the
 * device usage count) and frees the context. Safe to call after the
 * global runtime PM context is gone; the list removal is then skipped.
 */
void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *lock)
{
	if (!lock) {
		hif_err("Runtime PM lock already freed");
		return;
	}

	hif_debug("Deinitializing Runtime PM wakelock %s", lock->name);

	/* Takes prevent_list_lock: callers must NOT already hold it */
	if (gp_hif_rtpm_ctx) {
		qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
		__hif_pm_runtime_allow_suspend(lock);
		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	}

	qdf_mem_free(lock);
}
565 
566 /**
567  * hif_rtpm_enabled() - To check if Runtime PM is enabled
568  *
569  * This function will check if Runtime PM is enabled or not.
570  *
571  * Return: void
572  */
573 static bool hif_rtpm_enabled(void)
574 {
575 	if (qdf_unlikely(!gp_hif_rtpm_ctx))
576 		return false;
577 
578 	if (gp_hif_rtpm_ctx->enable_rpm)
579 		return true;
580 
581 	return __hif_rtpm_enabled(gp_hif_rtpm_ctx->dev);
582 }
583 
584 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id)
585 {
586 	struct hif_rtpm_client *client = NULL;
587 	int ret = QDF_STATUS_E_FAILURE;
588 	int pm_state;
589 
590 	if (!hif_rtpm_enabled())
591 		return QDF_STATUS_SUCCESS;
592 
593 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
594 		QDF_DEBUG_PANIC("Invalid client, id: %u", id);
595 		return -QDF_STATUS_E_INVAL;
596 	}
597 
598 	client = gp_hif_rtpm_ctx->clients[id];
599 
600 	if (type != HIF_RTPM_GET_ASYNC) {
601 		switch (type) {
602 		case HIF_RTPM_GET_FORCE:
603 			ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
604 			break;
605 		case HIF_RTPM_GET_SYNC:
606 			ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
607 			break;
608 		case HIF_RTPM_GET_NORESUME:
609 			__hif_rtpm_get_noresume(gp_hif_rtpm_ctx->dev);
610 			ret = 0;
611 			break;
612 		default:
613 			QDF_DEBUG_PANIC("Invalid call type");
614 			return QDF_STATUS_E_BADMSG;
615 		}
616 
617 		if (ret < 0 && ret != -EINPROGRESS) {
618 			hif_err("pm_state: %d ret: %d",
619 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
620 				ret);
621 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
622 		} else {
623 			ret = QDF_STATUS_SUCCESS;
624 		}
625 		goto out;
626 	}
627 
628 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
629 	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP) {
630 		ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
631 		/* Get will return 1 if the device is already active,
632 		 * just return success in that case
633 		 */
634 		if (ret > 0) {
635 			ret = QDF_STATUS_SUCCESS;
636 		} else if (ret == 0 || ret == -EINPROGRESS) {
637 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
638 			pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
639 			if (pm_state >= HIF_RTPM_STATE_RESUMING) {
640 				__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
641 				gp_hif_rtpm_ctx->stats.request_resume_ts =
642 							qdf_get_log_timestamp();
643 				gp_hif_rtpm_ctx->stats.request_resume_id = id;
644 				ret = QDF_STATUS_E_FAILURE;
645 			} else {
646 				ret = QDF_STATUS_SUCCESS;
647 			}
648 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
649 		} else if (ret < 0) {
650 			hif_err("pm_state: %d ret: %d",
651 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
652 				ret);
653 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
654 		}
655 	} else if (pm_state >= HIF_RTPM_STATE_RESUMING) {
656 		/* Do not log in performance path */
657 		if (id != HIF_RTPM_ID_DP)
658 			hif_info_high("request RTPM resume by %d- %s",
659 				      id, hif_rtpm_id_to_string(id));
660 		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
661 		gp_hif_rtpm_ctx->stats.request_resume_ts =
662 						qdf_get_log_timestamp();
663 		gp_hif_rtpm_ctx->stats.request_resume_id = id;
664 		return QDF_STATUS_E_FAILURE;
665 	}
666 
667 out:
668 	if (QDF_IS_STATUS_SUCCESS(ret)) {
669 		qdf_atomic_inc(&client->active_count);
670 		qdf_atomic_inc(&client->get_count);
671 		client->get_ts = qdf_get_log_timestamp();
672 	}
673 
674 	return ret;
675 }
676 
/**
 * hif_rtpm_put() - Release a runtime PM reference taken by hif_rtpm_get()
 * @type: HIF_RTPM_PUT_ASYNC/NOIDLE/SYNC_SUSPEND call flavor
 * @id: client id releasing the reference
 *
 * Rejects unbalanced puts (no matching get, or runtime PM disabled).
 * On success decrements the client's active count and refreshes the
 * last-busy timestamp.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */
QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
{
	struct hif_rtpm_client *client;
	int usage_count;

	if (!hif_rtpm_enabled())
		return QDF_STATUS_SUCCESS;

	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
		hif_err("Invalid client, id: %u", id);
		return QDF_STATUS_E_INVAL;
	}

	client = gp_hif_rtpm_ctx->clients[id];

	/* With runtime PM disabled in ini the baseline usage count is 2
	 * (see matching check in __hif_pm_runtime_allow_suspend); a put
	 * at that level would be unbalanced.
	 */
	usage_count = hif_rtpm_read_usage_count();
	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
		hif_err("Unexpected PUT when runtime PM is disabled");
		QDF_BUG(0);
		return QDF_STATUS_E_CANCELED;
	} else if (!usage_count || !qdf_atomic_read(&client->active_count)) {
		hif_info_high("Put without a Get operation, %u-%s",
			      id, hif_rtpm_id_to_string(id));
		return QDF_STATUS_E_CANCELED;
	}

	switch (type) {
	case HIF_RTPM_PUT_ASYNC:
		__hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_NOIDLE:
		__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_SYNC_SUSPEND:
		__hif_rtpm_put_sync_suspend(gp_hif_rtpm_ctx->dev);
		break;
	default:
		QDF_DEBUG_PANIC("Invalid call type");
		return QDF_STATUS_E_BADMSG;
	}

	/* Refresh last-busy so autosuspend restarts its delay window */
	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
	qdf_atomic_dec(&client->active_count);
	qdf_atomic_inc(&client->put_count);
	client->put_ts = qdf_get_log_timestamp();
	gp_hif_rtpm_ctx->stats.last_busy_ts = client->put_ts;

	return QDF_STATUS_SUCCESS;
}
726 
727 /**
728  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
729  *                                      reason
730  * @lock: runtime_pm lock being acquired
731  *
732  * Return: 0 if successful.
733  */
734 static int __hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
735 {
736 	int ret = 0;
737 
738 	if (lock->active)
739 		return 0;
740 
741 	ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
742 
743 	/**
744 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
745 	 * RPM_SUSPENDING. Any other negative value is an error.
746 	 * We shouldn't do runtime_put here as in later point allow
747 	 * suspend gets called with the context and there the usage count
748 	 * is decremented, so suspend will be prevented.
749 	 */
750 	if (ret < 0 && ret != -EINPROGRESS) {
751 		gp_hif_rtpm_ctx->stats.runtime_get_err++;
752 		hif_err("pm_state: %d ret: %d",
753 			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
754 			ret);
755 	}
756 
757 	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
758 	lock->active = true;
759 	gp_hif_rtpm_ctx->prevent_cnt++;
760 	gp_hif_rtpm_ctx->stats.prevent_suspend++;
761 	return ret;
762 }
763 
764 /**
765  * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
766  * @lock: runtime pm lock
767  *
768  * This function will allow runtime suspend, by decrementing
769  * device's usage count.
770  *
771  * Return: status
772  */
773 static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
774 {
775 	int ret = 0;
776 	int usage_count;
777 
778 	if (gp_hif_rtpm_ctx->prevent_cnt == 0 || !lock->active)
779 		return ret;
780 
781 	usage_count = hif_rtpm_read_usage_count();
782 	/*
783 	 * For runtime PM enabled case, the usage count should never be 0
784 	 * at this point. For runtime PM disabled case, it should never be
785 	 * 2 at this point. Catch unexpected PUT without GET here.
786 	 */
787 	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
788 		hif_err("Unexpected PUT when runtime PM is disabled");
789 		QDF_BUG(0);
790 		return QDF_STATUS_E_CANCELED;
791 	} else if (!usage_count) {
792 		hif_info_high("Put without a Get operation, %s", lock->name);
793 		return QDF_STATUS_E_CANCELED;
794 	}
795 
796 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
797 	ret = __hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
798 
799 	list_del(&lock->list);
800 	lock->active = false;
801 	gp_hif_rtpm_ctx->prevent_cnt--;
802 	gp_hif_rtpm_ctx->stats.allow_suspend++;
803 	return ret;
804 }
805 
806 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
807 {
808 	if (!hif_rtpm_enabled() || !lock)
809 		return -EINVAL;
810 
811 	if (in_irq())
812 		WARN_ON(1);
813 
814 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
815 	__hif_pm_runtime_prevent_suspend(lock);
816 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
817 
818 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
819 		HIF_RTPM_STATE_SUSPENDING)
820 		hif_info_high("request RTPM resume by %s",
821 			      lock->name);
822 
823 	return 0;
824 }
825 
826 /**
827  * __hif_pm_runtime_prevent_suspend_sync() - synchronized prevent runtime
828  *  suspend for a protocol reason
829  * @lock: runtime_pm lock being acquired
830  *
831  * Return: 0 if successful.
832  */
833 static
834 int __hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
835 {
836 	int ret = 0;
837 
838 	if (lock->active)
839 		return 0;
840 
841 	ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
842 
843 	/**
844 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
845 	 * RPM_SUSPENDING. Any other negative value is an error.
846 	 * We shouldn't do runtime_put here as in later point allow
847 	 * suspend gets called with the context and there the usage count
848 	 * is decremented, so suspend will be prevented.
849 	 */
850 	if (ret < 0 && ret != -EINPROGRESS) {
851 		gp_hif_rtpm_ctx->stats.runtime_get_err++;
852 		hif_err("pm_state: %d ret: %d",
853 			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
854 			ret);
855 	}
856 
857 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
858 	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
859 	lock->active = true;
860 	gp_hif_rtpm_ctx->prevent_cnt++;
861 	gp_hif_rtpm_ctx->stats.prevent_suspend++;
862 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
863 
864 	return ret;
865 }
866 
867 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
868 {
869 	if (!hif_rtpm_enabled())
870 		return 0;
871 
872 	if (!lock)
873 		return -EINVAL;
874 
875 	if (in_irq())
876 		WARN_ON(1);
877 
878 	__hif_pm_runtime_prevent_suspend_sync(lock);
879 
880 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
881 		HIF_RTPM_STATE_SUSPENDING)
882 		hif_info_high("request RTPM resume by %s",
883 			      lock->name);
884 
885 	return 0;
886 }
887 
888 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
889 {
890 	if (!hif_rtpm_enabled())
891 		return 0;
892 
893 	if (!lock)
894 		return -EINVAL;
895 
896 	if (in_irq())
897 		WARN_ON(1);
898 
899 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
900 	__hif_pm_runtime_allow_suspend(lock);
901 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
902 
903 	return 0;
904 }
905 
/**
 * hif_rtpm_sync_resume() - Synchronously resume the device
 *
 * Blocks until the device is resumed, then refreshes the last-busy
 * timestamp and resume statistics. Returns success immediately when
 * runtime PM is not enabled (0 == QDF_STATUS_SUCCESS).
 *
 * Return: QDF_STATUS_SUCCESS on resume, QDF_STATUS_E_FAILURE otherwise.
 */
QDF_STATUS hif_rtpm_sync_resume(void)
{
	struct device *dev;
	int pm_state;
	int ret;

	if (!hif_rtpm_enabled())
		return 0;

	dev = gp_hif_rtpm_ctx->dev;
	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);

	ret = __hif_rtpm_resume(dev);
	__hif_rtpm_mark_last_busy(dev);

	/* Non-negative return means the device reached active state */
	if (ret >= 0) {
		gp_hif_rtpm_ctx->stats.resume_count++;
		gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
		gp_hif_rtpm_ctx->stats.last_busy_ts =
					gp_hif_rtpm_ctx->stats.resume_ts;
		return QDF_STATUS_SUCCESS;
	}

	hif_err("pm_state: %d, err: %d", pm_state, ret);
	return QDF_STATUS_E_FAILURE;
}
932 
/**
 * hif_rtpm_request_resume() - Queue an asynchronous device resume
 *
 * Logs the caller's return address (_RET_IP_) to identify who
 * requested the resume.
 */
void hif_rtpm_request_resume(void)
{
	__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
	hif_info_high("request RTPM resume %s", (char *)_RET_IP_);
}
938 
/**
 * hif_rtpm_check_and_request_resume() - Request resume only if needed
 *
 * Under the suspend lock, checks the PM state: if a suspend is in
 * progress or complete, requests an async resume (after dropping the
 * lock, since the resume path may need it); otherwise just refreshes
 * the last-busy timestamp to push autosuspend out.
 */
void hif_rtpm_check_and_request_resume(void)
{
	hif_rtpm_suspend_lock();
	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
			HIF_RTPM_STATE_SUSPENDING) {
		/* Drop the lock before requesting resume */
		hif_rtpm_suspend_unlock();
		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.request_resume_ts =
						qdf_get_log_timestamp();
		gp_hif_rtpm_ctx->stats.request_resume_id = HIF_RTPM_ID_RESERVED;
	} else {
		__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
		hif_rtpm_suspend_unlock();
	}
}
955 
/* Read the monitor-wake-interrupt flag (set while waking the target) */
int hif_rtpm_get_monitor_wake_intr(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->monitor_wake_intr);
}
960 
/* Set/clear the monitor-wake-interrupt flag atomically */
void hif_rtpm_set_monitor_wake_intr(int val)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->monitor_wake_intr, val);
}
965 
/**
 * hif_rtpm_display_last_busy_hist() - Log runtime PM last-busy history
 * @hif_ctx: opaque HIF context
 *
 * Logs the global last-busy marker, the CE/DP client counters, and for
 * each CE with history, four timestamps sampled at fixed offsets from
 * the current ring index. Used for suspend/resume debugging.
 */
void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn;
	struct hif_rtpm_ctx *rtpm_ctx = gp_hif_rtpm_ctx;
	struct hif_rtpm_last_busy_hist *hist;
	unsigned long cur_idx;
	int i;

	scn = HIF_GET_SOFTC(hif_ctx);
	if (!scn)
		return;

	hif_info_high("RTPM last busy ts:%llu client:%s from:%ps",
		      rtpm_ctx->stats.last_busy_ts,
		      hif_rtpm_id_to_string(rtpm_ctx->stats.last_busy_id),
		      rtpm_ctx->stats.last_busy_marker);

	/*Display CE and DP clients RTPM stats*/
	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		if (!rtpm_ctx->clients[i] ||
		    (i != HIF_RTPM_ID_CE && i != HIF_RTPM_ID_DP))
			continue;
		hif_info_high("RTPM client:%s busy_ts:%llu get_ts:%llu put_ts:%llu get_cnt:%d put_cnt:%d",
			      hif_rtpm_id_to_string(i),
			      rtpm_ctx->clients[i]->last_busy_ts,
			      rtpm_ctx->clients[i]->get_ts,
			      rtpm_ctx->clients[i]->put_ts,
			      qdf_atomic_read(&rtpm_ctx->clients[i]->get_count),
			      qdf_atomic_read(&rtpm_ctx->clients[i]->put_count));
	}

	/* Only CEs with an allocated history buffer are reported */
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hist = gp_hif_rtpm_ctx->busy_hist[i];
		if (!hist)
			continue;
		cur_idx = hist->last_busy_idx;

		/* Sample the ring at offsets 0/4/8/12 from cur_idx */
		hif_info_high("RTPM CE-%u last busy_cnt:%lu cur_idx:%lu ts1:%llu ts2:%llu ts3:%llu ts4:%llu",
			      i, hist->last_busy_cnt, cur_idx,
			      hist->last_busy_ts[cur_idx & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 4) & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 8) & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 12) & HIF_RTPM_BUSY_HIST_MASK]);
	}
}
1011 
1012 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1013 				      unsigned long ce_id)
1014 {
1015 	struct hif_rtpm_last_busy_hist *hist;
1016 	unsigned long idx;
1017 
1018 	if (!scn || !gp_hif_rtpm_ctx->busy_hist[ce_id])
1019 		return;
1020 
1021 	hist = gp_hif_rtpm_ctx->busy_hist[ce_id];
1022 	hist->last_busy_cnt++;
1023 	hist->last_busy_idx++;
1024 	idx = hist->last_busy_idx & HIF_RTPM_BUSY_HIST_MASK;
1025 	hist->last_busy_ts[idx] = qdf_get_log_timestamp();
1026 }
1027 
1028 void hif_rtpm_mark_last_busy(uint32_t id)
1029 {
1030 	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
1031 	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
1032 	gp_hif_rtpm_ctx->stats.last_busy_id = id;
1033 	gp_hif_rtpm_ctx->stats.last_busy_marker = (void *)_RET_IP_;
1034 	if (gp_hif_rtpm_ctx->clients[id]) {
1035 		gp_hif_rtpm_ctx->clients[id]->last_busy_cnt++;
1036 		gp_hif_rtpm_ctx->clients[id]->last_busy_ts =
1037 					gp_hif_rtpm_ctx->stats.last_busy_ts;
1038 	}
1039 }
1040 
1041 void hif_rtpm_set_client_job(uint32_t client_id)
1042 {
1043 	int pm_state;
1044 
1045 	if (!gp_hif_rtpm_ctx->clients[client_id])
1046 		return;
1047 
1048 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1049 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
1050 	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP &&
1051 	    gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk)
1052 		gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk();
1053 	else
1054 		qdf_set_bit(client_id, &gp_hif_rtpm_ctx->pending_job);
1055 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1056 }
1057 
1058 /**
1059  * hif_rtpm_pending_job() - continue jobs when bus resumed
1060  *
1061  * Return: Void
1062  */
1063 static void hif_rtpm_pending_job(void)
1064 {
1065 	int i;
1066 
1067 	for (i = 0; i < gp_hif_rtpm_ctx->client_count; i++) {
1068 		if (qdf_test_and_clear_bit(i, &gp_hif_rtpm_ctx->pending_job)) {
1069 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1070 			if (gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk)
1071 				gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk();
1072 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1073 		}
1074 	}
1075 }
1076 
1077 #define PREVENT_LIST_STRING_LEN 200
1078 
1079 void hif_rtpm_print_prevent_list(void)
1080 {
1081 	struct hif_rtpm_client *client;
1082 	struct hif_pm_runtime_lock *ctx;
1083 	char *str_buf;
1084 	int i, prevent_list_count, len = 0;
1085 
1086 	str_buf = qdf_mem_malloc(PREVENT_LIST_STRING_LEN);
1087 	if (!str_buf)
1088 		return;
1089 
1090 	qdf_spin_lock(&gp_hif_rtpm_ctx->prevent_list_lock);
1091 	prevent_list_count = gp_hif_rtpm_ctx->prevent_cnt;
1092 	if (prevent_list_count) {
1093 		list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list)
1094 			len += qdf_scnprintf(str_buf + len,
1095 				PREVENT_LIST_STRING_LEN - len,
1096 				"%s ", ctx->name);
1097 	}
1098 	qdf_spin_unlock(&gp_hif_rtpm_ctx->prevent_list_lock);
1099 
1100 	if (prevent_list_count)
1101 		hif_info_high("prevent_suspend_cnt %u, prevent_list: %s",
1102 			      prevent_list_count, str_buf);
1103 
1104 	qdf_mem_free(str_buf);
1105 
1106 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
1107 		client = gp_hif_rtpm_ctx->clients[i];
1108 		if (client && qdf_atomic_read(&client->active_count))
1109 			hif_info_high("client: %d: %s- active count: %d", i,
1110 				      hif_rtpm_id_to_string(i),
1111 				      qdf_atomic_read(&client->active_count));
1112 	}
1113 }
1114 
1115 /**
1116  * hif_rtpm_is_suspend_allowed() - Reject suspend if client is active
1117  *
1118  * Return: True if no clients are active
1119  */
1120 static bool hif_rtpm_is_suspend_allowed(void)
1121 {
1122 	if (!gp_hif_rtpm_ctx || !gp_hif_rtpm_ctx->enable_rpm)
1123 		return false;
1124 
1125 	if (!hif_rtpm_read_usage_count())
1126 		return true;
1127 
1128 	return false;
1129 }
1130 
/* Acquire the runtime-suspend spinlock (irqsave variant) */
void hif_rtpm_suspend_lock(void)
{
	qdf_spin_lock_irqsave(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
1135 
/* Release the runtime-suspend spinlock taken by hif_rtpm_suspend_lock() */
void hif_rtpm_suspend_unlock(void)
{
	qdf_spin_unlock_irqrestore(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
1140 
1141 /**
1142  * hif_rtpm_set_state(): utility function
1143  * @state: state to set
1144  *
1145  * Return: Void
1146  */
1147 static inline
1148 void hif_rtpm_set_state(enum hif_rtpm_state state)
1149 {
1150 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, state);
1151 }
1152 
/* Return the current RTPM state machine value (enum hif_rtpm_state) */
int hif_rtpm_get_state(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
}
1157 
1158 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
1159 {
1160 	if (!hif_can_suspend_link(hif_ctx)) {
1161 		hif_err("Runtime PM not supported for link up suspend");
1162 		return -EINVAL;
1163 	}
1164 
1165 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1166 	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDING);
1167 
1168 	/* keep this after set suspending */
1169 	if (!hif_rtpm_is_suspend_allowed()) {
1170 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1171 		hif_rtpm_print_prevent_list();
1172 		gp_hif_rtpm_ctx->stats.suspend_err_count++;
1173 		gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
1174 		hif_info_high("Runtime PM not allowed now");
1175 		return -EINVAL;
1176 	}
1177 
1178 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1179 
1180 	return QDF_STATUS_SUCCESS;
1181 }
1182 
1183 void hif_process_runtime_suspend_success(void)
1184 {
1185 	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDED);
1186 	gp_hif_rtpm_ctx->stats.suspend_count++;
1187 	gp_hif_rtpm_ctx->stats.suspend_ts = qdf_get_log_timestamp();
1188 }
1189 
1190 void hif_process_runtime_suspend_failure(void)
1191 {
1192 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1193 	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
1194 	hif_rtpm_pending_job();
1195 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1196 
1197 	gp_hif_rtpm_ctx->stats.suspend_err_count++;
1198 	gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
1199 	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
1200 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
1201 }
1202 
1203 void hif_pre_runtime_resume(void)
1204 {
1205 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1206 	hif_rtpm_set_monitor_wake_intr(0);
1207 	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING);
1208 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1209 }
1210 
1211 void hif_process_runtime_resume_linkup(void)
1212 {
1213 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1214 	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING_LINKUP);
1215 	hif_rtpm_pending_job();
1216 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1217 }
1218 
1219 void hif_process_runtime_resume_success(void)
1220 {
1221 	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
1222 	gp_hif_rtpm_ctx->stats.resume_count++;
1223 	gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
1224 	gp_hif_rtpm_ctx->stats.last_busy_ts = gp_hif_rtpm_ctx->stats.resume_ts;
1225 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
1226 }
1227 
/**
 * hif_runtime_suspend() - suspend the bus for runtime PM
 * @hif_ctx: HIF opaque context
 *
 * Suspends the bus, arms the wake-interrupt monitor, then performs the
 * noirq suspend stage. On a noirq failure the monitor is disarmed and
 * the bus is resumed again.
 *
 * Return: 0 on success, negative errno on failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	ret = hif_bus_suspend(hif_ctx);
	if (ret) {
		hif_err("Failed bus suspend: %d", ret);
		return ret;
	}

	hif_rtpm_set_monitor_wake_intr(1);

	ret = hif_bus_suspend_noirq(hif_ctx);
	if (ret) {
		hif_err("Failed bus suspend noirq: %d", ret);
		hif_rtpm_set_monitor_wake_intr(0);
		/* Roll back the earlier bus suspend; resume must not fail */
		QDF_BUG(!hif_bus_resume(hif_ctx));
		return ret;
	}

	return 0;
}
1254 
/**
 * hif_runtime_resume() - resume the bus from runtime suspend
 * @hif_ctx: HIF opaque context
 *
 * Runs the noirq resume stage (must not fail) followed by the full bus
 * resume.
 *
 * Return: 0 on success, negative errno on failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));

	ret = hif_bus_resume(hif_ctx);
	if (ret)
		hif_err("Failed runtime resume: %d", ret);

	return ret;
}
1266 
1267 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
1268 {
1269 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1270 	struct CE_state *ce_state;
1271 
1272 	if (!scn)
1273 		return;
1274 
1275 	if (scn->fastpath_mode_on) {
1276 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1277 			return;
1278 
1279 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
1280 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1281 
1282 		/*war_ce_src_ring_write_idx_set */
1283 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1284 					  ce_state->src_ring->write_index);
1285 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1286 		Q_TARGET_ACCESS_END(scn);
1287 	}
1288 }
1289 #endif /* FEATURE_RUNTIME_PM */
1290