xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_runtime_pm.c (revision 22f89679c1f1aeaf62e34eaee8c5ca99467bd241)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8 
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
20 #include <linux/if_arp.h>
21 #include "hif_io32.h"
22 #include "hif_runtime_pm.h"
23 #include "hif.h"
24 #include "target_type.h"
25 #include "hif_main.h"
26 #include "ce_main.h"
27 #include "ce_api.h"
28 #include "ce_internal.h"
29 #include "ce_reg.h"
30 #include "ce_bmi.h"
31 #include "regtable.h"
32 #include "hif_hw_version.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include "qdf_status.h"
36 #include "qdf_atomic.h"
37 #include "pld_common.h"
38 #include "mp_dev.h"
39 #include "hif_debug.h"
40 
41 #include "ce_tasklet.h"
42 #include "targaddrs.h"
43 #include "hif_exec.h"
44 
45 #define CNSS_RUNTIME_FILE "cnss_runtime_pm"
46 #define CNSS_RUNTIME_FILE_PERM QDF_FILE_USR_READ
47 
48 #ifdef FEATURE_RUNTIME_PM
49 
50 static struct hif_rtpm_ctx g_hif_rtpm_ctx;
51 static struct hif_rtpm_ctx *gp_hif_rtpm_ctx;
52 
53 /**
54  * hif_rtpm_id_to_string() - Convert dbgid to respective string
55  * @id -  debug id
56  *
57  * Debug support function to convert  dbgid to string.
58  * Please note to add new string in the array at index equal to
59  * its enum value in wlan_rtpm_dbgid.
60  *
61  * Return: String of ID
62  */
63 static const char *hif_rtpm_id_to_string(enum hif_rtpm_client_id id)
64 {
65 	static const char * const strings[] = {
66 					"HIF_RTPM_ID_RESERVED",
67 					"HIF_RTPM_HAL_REO_CMD",
68 					"HIF_RTPM_WMI",
69 					"HIF_RTPM_HTT",
70 					"HIF_RTPM_DP",
71 					"HIF_RTPM_RING_STATS",
72 					"HIF_RTPM_CE",
73 					"HIF_RTPM_FORCE_WAKE",
74 					"HIF_RTPM_ID_PM_QOS_NOTIFY",
75 					"HIF_RTPM_ID_WIPHY_SUSPEND",
76 					"HIF_RTPM_ID_MAX"
77 	};
78 
79 	return strings[id];
80 }
81 
/**
 * hif_rtpm_read_usage_count() - Read runtime PM usage count of the device
 *
 * Reads the kernel runtime PM usage counter of the device cached in the
 * global runtime PM context (gp_hif_rtpm_ctx->dev).
 *
 * Return: current usage count
 */
static inline int hif_rtpm_read_usage_count(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->dev->power.usage_count);
}
92 
93 #define HIF_RTPM_STATS(_s, _rtpm_ctx, _name) \
94 	seq_printf(_s, "%30s: %u\n", #_name, (_rtpm_ctx)->stats._name)
95 
96 /**
97  * hif_rtpm_debugfs_show(): show debug stats for runtimepm
98  * @s: file to print to
99  * @data: unused
100  *
101  * debugging tool added to the debug fs for displaying runtimepm stats
102  *
103  * Return: 0
104  */
105 static int hif_rtpm_debugfs_show(struct seq_file *s, void *data)
106 {
107 	struct hif_rtpm_client *client = NULL;
108 	struct hif_pm_runtime_lock *ctx;
109 	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
110 			"RESUMING_LINKUP", "SUSPENDING", "SUSPENDED"};
111 	int pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
112 	int i;
113 
114 	seq_printf(s, "%30s: %llu\n", "Current timestamp",
115 		   qdf_get_log_timestamp());
116 
117 	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
118 
119 	seq_printf(s, "%30s: %llu\n", "Last Busy timestamp",
120 		   gp_hif_rtpm_ctx->stats.last_busy_ts);
121 
122 	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
123 		   gp_hif_rtpm_ctx->stats.last_busy_marker);
124 
125 	seq_puts(s, "Rx busy marker counts:\n");
126 	seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_DP),
127 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_cnt,
128 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_ts);
129 
130 	seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_CE),
131 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_cnt,
132 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_ts);
133 
134 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, last_busy_id);
135 
136 	if (pm_state == HIF_RTPM_STATE_SUSPENDED) {
137 		seq_printf(s, "%30s: %llx us\n", "Suspended Since",
138 			   gp_hif_rtpm_ctx->stats.suspend_ts);
139 	}
140 
141 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, resume_count);
142 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_count);
143 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_err_count);
144 
145 	seq_printf(s, "%30s: %d\n", "PM Usage count",
146 		   hif_rtpm_read_usage_count());
147 
148 	seq_puts(s, "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
149 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
150 		client = gp_hif_rtpm_ctx->clients[i];
151 		if (!client)
152 			continue;
153 		seq_printf(s, "%-10d ", qdf_atomic_read(&client->get_count));
154 		seq_printf(s, "%-10d ", qdf_atomic_read(&client->put_count));
155 		seq_printf(s, "0x%-10llx ", client->get_ts);
156 		seq_printf(s, "0x%-10llx ", client->put_ts);
157 		seq_printf(s, ":%-2d %-30s\n", i, hif_rtpm_id_to_string(i));
158 	}
159 	seq_puts(s, "\n");
160 
161 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
162 	if (list_empty(&gp_hif_rtpm_ctx->prevent_list)) {
163 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
164 		return 0;
165 	}
166 
167 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
168 	list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list) {
169 		seq_printf(s, "%s", ctx->name);
170 		seq_puts(s, " ");
171 	}
172 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
173 
174 	return 0;
175 }
176 
177 #undef HIF_RTPM_STATS
178 
179 /**
180  * hif_rtpm_debugfs_open() - open a debug fs file to access the runtime pm stats
181  * @inode
182  * @file
183  *
184  * Return: linux error code of single_open.
185  */
186 static int hif_rtpm_debugfs_open(struct inode *inode, struct file *file)
187 {
188 	return single_open(file, hif_rtpm_debugfs_show,
189 			inode->i_private);
190 }
191 
192 static const struct file_operations hif_rtpm_fops = {
193 	.owner          = THIS_MODULE,
194 	.open           = hif_rtpm_debugfs_open,
195 	.release        = single_release,
196 	.read           = seq_read,
197 	.llseek         = seq_lseek,
198 };
199 
/**
 * hif_rtpm_debugfs_create() - creates runtimepm debugfs entry
 *
 * creates a debugfs entry to debug the runtime pm feature. The returned
 * dentry is cached in the global context so hif_rtpm_debugfs_remove()
 * can delete it later.
 *
 * Return: void
 */
static void hif_rtpm_debugfs_create(void)
{
	gp_hif_rtpm_ctx->pm_dentry = qdf_debugfs_create_entry(CNSS_RUNTIME_FILE,
							CNSS_RUNTIME_FILE_PERM,
							NULL,
							NULL,
							&hif_rtpm_fops);
}
214 
/**
 * hif_rtpm_debugfs_remove() - removes runtimepm debugfs entry
 *
 * removes the debugfs entry created by hif_rtpm_debugfs_create().
 *
 * Return: void
 */
static void hif_rtpm_debugfs_remove(void)
{
	qdf_debugfs_remove_file(gp_hif_rtpm_ctx->pm_dentry);
}
225 
/**
 * hif_rtpm_init() - Initialize Runtime PM
 * @dev: device structure
 * @delay: delay to be configured for auto suspend
 *
 * This function will init all the Runtime PM config.
 *
 * Return: void
 */
static void hif_rtpm_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	/* Let the PM core auto-control this device */
	pm_runtime_allow(dev);
	/* Restart the autosuspend timer so suspend is not immediate */
	pm_runtime_mark_last_busy(dev);
	/* Drop the usage count without triggering an idle check */
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}
244 
/**
 * hif_rtpm_exit() - Deinit/Exit Runtime PM
 * @dev: device structure
 *
 * This function will deinit all the Runtime PM config.
 *
 * Return: void
 */
static void hif_rtpm_exit(struct device *dev)
{
	/* Re-take the usage count dropped in hif_rtpm_init() */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	/* Disallow further runtime suspend of this device */
	pm_runtime_forbid(dev);
}
259 
260 static void hif_rtpm_alloc_last_busy_hist(void)
261 {
262 	int i;
263 
264 	for (i = 0; i < CE_COUNT_MAX; i++) {
265 		if (i != CE_ID_1 && i != CE_ID_2 && i != CE_ID_7) {
266 			gp_hif_rtpm_ctx->busy_hist[i] = NULL;
267 			continue;
268 		}
269 
270 		gp_hif_rtpm_ctx->busy_hist[i] =
271 			qdf_mem_malloc(sizeof(struct hif_rtpm_last_busy_hist));
272 		if (!gp_hif_rtpm_ctx->busy_hist[i])
273 			return;
274 	}
275 }
276 
277 static void hif_rtpm_free_last_busy_hist(void)
278 {
279 	int i;
280 
281 	for (i = 0; i < CE_COUNT_MAX; i++) {
282 		if (i != CE_ID_1 && i != CE_ID_2 && i != CE_ID_7)
283 			continue;
284 
285 		qdf_mem_free(gp_hif_rtpm_ctx->busy_hist[i]);
286 	}
287 }
288 
/**
 * hif_rtpm_open() - Attach and initialize the global runtime PM context
 * @scn: hif context; its qdf device backs the runtime PM context
 *
 * Return: void
 */
void hif_rtpm_open(struct hif_softc *scn)
{
	gp_hif_rtpm_ctx = &g_hif_rtpm_ctx;
	gp_hif_rtpm_ctx->dev = scn->qdf_dev->dev;
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_suspend_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->prevent_list_lock);
	qdf_atomic_init(&gp_hif_rtpm_ctx->pm_state);
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
	qdf_atomic_init(&gp_hif_rtpm_ctx->monitor_wake_intr);
	INIT_LIST_HEAD(&gp_hif_rtpm_ctx->prevent_list);
	gp_hif_rtpm_ctx->client_count = 0;
	gp_hif_rtpm_ctx->pending_job = 0;
	/* CE and FORCE_WAKE clients are always registered, without callbacks */
	hif_rtpm_register(HIF_RTPM_ID_CE, NULL);
	hif_rtpm_register(HIF_RTPM_ID_FORCE_WAKE, NULL);
	hif_rtpm_alloc_last_busy_hist();
	hif_info_high("Runtime PM attached");
}
307 
308 static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock);
309 
310 /**
311  * hif_rtpm_sanitize_exit(): sanitize runtime PM gets/puts from driver
312  *
313  * Ensure all gets/puts are in sync before exiting runtime PM feature.
314  * Also make sure all runtime PM locks are deinitialized properly.
315  *
316  * Return: void
317  */
318 static void hif_rtpm_sanitize_exit(void)
319 {
320 	struct hif_pm_runtime_lock *ctx, *tmp;
321 	struct hif_rtpm_client *client;
322 	int i, active_count;
323 
324 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
325 	list_for_each_entry_safe(ctx, tmp,
326 				 &gp_hif_rtpm_ctx->prevent_list, list) {
327 		hif_runtime_lock_deinit(ctx);
328 	}
329 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
330 
331 	/* check if get and put out of sync for all clients */
332 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
333 		client = gp_hif_rtpm_ctx->clients[i];
334 		if (client) {
335 			if (qdf_atomic_read(&client->active_count)) {
336 				active_count =
337 					qdf_atomic_read(&client->active_count);
338 				hif_err("Client active: %u- %s", i,
339 					hif_rtpm_id_to_string(i));
340 				QDF_DEBUG_PANIC("Client active on exit!");
341 				while (active_count--)
342 					__hif_rtpm_put_noidle(
343 							gp_hif_rtpm_ctx->dev);
344 			}
345 			QDF_DEBUG_PANIC("Client not deinitialized");
346 			qdf_mem_free(client);
347 			gp_hif_rtpm_ctx->clients[i] = NULL;
348 		}
349 	}
350 }
351 
/**
 * hif_rtpm_sanitize_ssr_exit() - Empty the suspend list on SSR
 *
 * API is used to empty the runtime pm prevent suspend list. The lock
 * contexts themselves are not freed here; their owners deinit them
 * after SSR recovery.
 *
 * Return: void
 */
static void hif_rtpm_sanitize_ssr_exit(void)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &gp_hif_rtpm_ctx->prevent_list, list) {
		__hif_pm_runtime_allow_suspend(ctx);
	}
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
}
370 
371 void hif_rtpm_close(struct hif_softc *scn)
372 {
373 	hif_rtpm_free_last_busy_hist();
374 	hif_rtpm_deregister(HIF_RTPM_ID_CE);
375 	hif_rtpm_deregister(HIF_RTPM_ID_FORCE_WAKE);
376 
377 	hif_is_recovery_in_progress(scn) ?
378 		hif_rtpm_sanitize_ssr_exit() :
379 		hif_rtpm_sanitize_exit();
380 
381 	qdf_mem_set(gp_hif_rtpm_ctx, sizeof(*gp_hif_rtpm_ctx), 0);
382 	gp_hif_rtpm_ctx = NULL;
383 	hif_info_high("Runtime PM context detached");
384 }
385 
386 void hif_rtpm_start(struct hif_softc *scn)
387 {
388 	uint32_t mode = hif_get_conparam(scn);
389 
390 	gp_hif_rtpm_ctx->enable_rpm = scn->hif_config.enable_runtime_pm;
391 
392 	if (!gp_hif_rtpm_ctx->enable_rpm) {
393 		hif_info_high("RUNTIME PM is disabled in ini");
394 		return;
395 	}
396 
397 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
398 	    mode == QDF_GLOBAL_MONITOR_MODE) {
399 		hif_info("RUNTIME PM is disabled for FTM/EPPING/MONITOR mode");
400 		return;
401 	}
402 
403 	hif_info_high("Enabling RUNTIME PM, Delay: %d ms",
404 		      scn->hif_config.runtime_pm_delay);
405 
406 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_ON);
407 	hif_rtpm_init(gp_hif_rtpm_ctx->dev, scn->hif_config.runtime_pm_delay);
408 	gp_hif_rtpm_ctx->cfg_delay = scn->hif_config.runtime_pm_delay;
409 	gp_hif_rtpm_ctx->delay = gp_hif_rtpm_ctx->cfg_delay;
410 	hif_rtpm_debugfs_create();
411 }
412 
413 void hif_rtpm_stop(struct hif_softc *scn)
414 {
415 	uint32_t mode = hif_get_conparam(scn);
416 
417 	if (!gp_hif_rtpm_ctx->enable_rpm)
418 		return;
419 
420 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
421 	    mode == QDF_GLOBAL_MONITOR_MODE)
422 		return;
423 
424 	hif_rtpm_exit(gp_hif_rtpm_ctx->dev);
425 
426 	hif_rtpm_sync_resume();
427 
428 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
429 	hif_rtpm_debugfs_remove();
430 }
431 
432 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rtpm_cbk)(void))
433 {
434 	struct hif_rtpm_client *client;
435 
436 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
437 		hif_err("Runtime PM context NULL");
438 		return QDF_STATUS_E_FAILURE;
439 	}
440 
441 	if (id >= HIF_RTPM_ID_MAX || gp_hif_rtpm_ctx->clients[id]) {
442 		hif_err("Invalid client %d", id);
443 		return QDF_STATUS_E_INVAL;
444 	}
445 
446 	client = qdf_mem_malloc(sizeof(struct hif_rtpm_client));
447 	if (!client)
448 		return QDF_STATUS_E_NOMEM;
449 
450 	client->hif_rtpm_cbk = hif_rtpm_cbk;
451 	qdf_atomic_init(&client->active_count);
452 	qdf_atomic_init(&client->get_count);
453 	qdf_atomic_init(&client->put_count);
454 
455 	gp_hif_rtpm_ctx->clients[id] = client;
456 	gp_hif_rtpm_ctx->client_count++;
457 
458 	return QDF_STATUS_SUCCESS;
459 }
460 
461 QDF_STATUS hif_rtpm_deregister(uint32_t id)
462 {
463 	struct hif_rtpm_client *client;
464 	int active_count;
465 
466 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
467 		hif_err("Runtime PM context NULL");
468 		return QDF_STATUS_E_FAILURE;
469 	}
470 
471 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
472 		hif_err("invalid client, id: %u", id);
473 		return QDF_STATUS_E_INVAL;
474 	}
475 
476 	client = gp_hif_rtpm_ctx->clients[id];
477 	if (qdf_atomic_read(&client->active_count)) {
478 		active_count = qdf_atomic_read(&client->active_count);
479 		hif_err("Client: %u-%s Runtime PM active",
480 			id, hif_rtpm_id_to_string(id));
481 		hif_err("last get called: 0x%llx, get count: %d, put count: %d",
482 			client->get_ts, qdf_atomic_read(&client->get_count),
483 			qdf_atomic_read(&client->put_count));
484 		QDF_DEBUG_PANIC("Get and PUT call out of sync!");
485 		while (active_count--)
486 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
487 	}
488 
489 	qdf_mem_free(client);
490 	gp_hif_rtpm_ctx->clients[id] = NULL;
491 
492 	return QDF_STATUS_SUCCESS;
493 }
494 
495 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
496 {
497 	if (delay < HIF_RTPM_DELAY_MIN || delay > HIF_RTPM_DELAY_MAX) {
498 		hif_err("Invalid delay value %d ms", delay);
499 		return QDF_STATUS_E_INVAL;
500 	}
501 
502 	__hif_rtpm_set_autosuspend_delay(gp_hif_rtpm_ctx->dev, delay);
503 	gp_hif_rtpm_ctx->delay = delay;
504 	hif_info_high("RTPM delay set: %d ms", delay);
505 
506 	return QDF_STATUS_SUCCESS;
507 }
508 
509 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
510 {
511 	if (gp_hif_rtpm_ctx->delay == gp_hif_rtpm_ctx->cfg_delay) {
512 		hif_info_rl("RTPM delay already default: %d",
513 			    gp_hif_rtpm_ctx->delay);
514 		return QDF_STATUS_E_ALREADY;
515 	}
516 
517 	__hif_rtpm_set_autosuspend_delay(gp_hif_rtpm_ctx->dev,
518 					 gp_hif_rtpm_ctx->cfg_delay);
519 	gp_hif_rtpm_ctx->delay = gp_hif_rtpm_ctx->cfg_delay;
520 	hif_info_rl("RTPM delay set: %d ms", gp_hif_rtpm_ctx->delay);
521 
522 	return QDF_STATUS_SUCCESS;
523 }
524 
/* Return the currently configured autosuspend delay in ms */
int hif_rtpm_get_autosuspend_delay(void)
{
	return gp_hif_rtpm_ctx->delay;
}
529 
530 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
531 {
532 	struct hif_pm_runtime_lock *context;
533 
534 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
535 		hif_err("Runtime PM context NULL");
536 		return QDF_STATUS_E_FAILURE;
537 	}
538 
539 	hif_debug("Initializing Runtime PM wakelock %s", name);
540 
541 	context = qdf_mem_malloc(sizeof(*context));
542 	if (!context)
543 		return -ENOMEM;
544 
545 	context->name = name ? name : "Default";
546 	lock->lock = context;
547 
548 	return 0;
549 }
550 
551 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *lock)
552 {
553 	if (!lock) {
554 		hif_err("Runtime PM lock already freed");
555 		return;
556 	}
557 
558 	hif_debug("Deinitializing Runtime PM wakelock %s", lock->name);
559 
560 	if (gp_hif_rtpm_ctx) {
561 		qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
562 		__hif_pm_runtime_allow_suspend(lock);
563 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
564 	}
565 
566 	qdf_mem_free(lock);
567 }
568 
569 /**
570  * hif_rtpm_enabled() - To check if Runtime PM is enabled
571  *
572  * This function will check if Runtime PM is enabled or not.
573  *
574  * Return: void
575  */
576 static bool hif_rtpm_enabled(void)
577 {
578 	if (qdf_unlikely(!gp_hif_rtpm_ctx))
579 		return false;
580 
581 	if (gp_hif_rtpm_ctx->enable_rpm)
582 		return true;
583 
584 	return __hif_rtpm_enabled(gp_hif_rtpm_ctx->dev);
585 }
586 
587 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id)
588 {
589 	struct hif_rtpm_client *client = NULL;
590 	int ret = QDF_STATUS_E_FAILURE;
591 	int pm_state;
592 
593 	if (!hif_rtpm_enabled())
594 		return QDF_STATUS_SUCCESS;
595 
596 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
597 		QDF_DEBUG_PANIC("Invalid client, id: %u", id);
598 		return -QDF_STATUS_E_INVAL;
599 	}
600 
601 	client = gp_hif_rtpm_ctx->clients[id];
602 
603 	if (type != HIF_RTPM_GET_ASYNC) {
604 		switch (type) {
605 		case HIF_RTPM_GET_FORCE:
606 			ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
607 			break;
608 		case HIF_RTPM_GET_SYNC:
609 			ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
610 			break;
611 		case HIF_RTPM_GET_NORESUME:
612 			__hif_rtpm_get_noresume(gp_hif_rtpm_ctx->dev);
613 			ret = 0;
614 			break;
615 		default:
616 			QDF_DEBUG_PANIC("Invalid call type");
617 			return QDF_STATUS_E_BADMSG;
618 		}
619 
620 		if (ret < 0 && ret != -EINPROGRESS) {
621 			hif_err("pm_state: %d ret: %d",
622 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
623 				ret);
624 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
625 		} else {
626 			ret = QDF_STATUS_SUCCESS;
627 		}
628 		goto out;
629 	}
630 
631 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
632 	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP) {
633 		ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
634 		/* Get will return 1 if the device is already active,
635 		 * just return success in that case
636 		 */
637 		if (ret > 0) {
638 			ret = QDF_STATUS_SUCCESS;
639 		} else if (ret == 0 || ret == -EINPROGRESS) {
640 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
641 			pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
642 			if (pm_state >= HIF_RTPM_STATE_RESUMING) {
643 				__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
644 				gp_hif_rtpm_ctx->stats.request_resume_ts =
645 							qdf_get_log_timestamp();
646 				gp_hif_rtpm_ctx->stats.request_resume_id = id;
647 				ret = QDF_STATUS_E_FAILURE;
648 			} else {
649 				ret = QDF_STATUS_SUCCESS;
650 			}
651 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
652 		} else if (ret < 0) {
653 			hif_err("pm_state: %d ret: %d",
654 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
655 				ret);
656 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
657 		}
658 	} else if (pm_state >= HIF_RTPM_STATE_RESUMING) {
659 		/* Do not log in performance path */
660 		if (id != HIF_RTPM_ID_DP)
661 			hif_info_high("request RTPM resume by %d- %s",
662 				      id, hif_rtpm_id_to_string(id));
663 		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
664 		gp_hif_rtpm_ctx->stats.request_resume_ts =
665 						qdf_get_log_timestamp();
666 		gp_hif_rtpm_ctx->stats.request_resume_id = id;
667 		return QDF_STATUS_E_FAILURE;
668 	}
669 
670 out:
671 	if (QDF_IS_STATUS_SUCCESS(ret)) {
672 		qdf_atomic_inc(&client->active_count);
673 		qdf_atomic_inc(&client->get_count);
674 		client->get_ts = qdf_get_log_timestamp();
675 	}
676 
677 	return ret;
678 }
679 
/**
 * hif_rtpm_put() - Release a runtime PM usage count for client @id
 * @type: put call type (HIF_RTPM_PUT_*)
 * @id: runtime PM client id
 *
 * Drops the usage count taken by a prior hif_rtpm_get() and marks the
 * device busy so the autosuspend timer restarts.
 *
 * Return: QDF_STATUS_SUCCESS if the count was released;
 *         QDF_STATUS_E_CANCELED on a put without a matching get;
 *         error status otherwise
 */
QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
{
	struct hif_rtpm_client *client;
	int usage_count;

	if (!hif_rtpm_enabled())
		return QDF_STATUS_SUCCESS;

	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
		hif_err("Invalid client, id: %u", id);
		return QDF_STATUS_E_INVAL;
	}

	client = gp_hif_rtpm_ctx->clients[id];

	/* When runtime PM is disabled the baseline usage count is 2;
	 * dropping from there would be a put without a get.
	 */
	usage_count = hif_rtpm_read_usage_count();
	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
		hif_err("Unexpected PUT when runtime PM is disabled");
		QDF_BUG(0);
		return QDF_STATUS_E_CANCELED;
	} else if (!usage_count || !qdf_atomic_read(&client->active_count)) {
		hif_info_high("Put without a Get operation, %u-%s",
			      id, hif_rtpm_id_to_string(id));
		return QDF_STATUS_E_CANCELED;
	}

	switch (type) {
	case HIF_RTPM_PUT_ASYNC:
		__hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_NOIDLE:
		__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_SYNC_SUSPEND:
		__hif_rtpm_put_sync_suspend(gp_hif_rtpm_ctx->dev);
		break;
	default:
		QDF_DEBUG_PANIC("Invalid call type");
		return QDF_STATUS_E_BADMSG;
	}

	/* Restart the autosuspend timer and book-keep the release */
	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
	qdf_atomic_dec(&client->active_count);
	qdf_atomic_inc(&client->put_count);
	client->put_ts = qdf_get_log_timestamp();
	gp_hif_rtpm_ctx->stats.last_busy_ts = client->put_ts;

	return QDF_STATUS_SUCCESS;
}
729 
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 *                                      reason
 * @lock: runtime_pm lock being acquired
 *
 * Caller must hold prevent_list_lock.
 *
 * Return: 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	/* Already on the prevent list; nothing to do */
	if (lock->active)
		return 0;

	ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);

	/*
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do runtime_put here as in later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		gp_hif_rtpm_ctx->stats.runtime_get_err++;
		hif_err("pm_state: %d ret: %d",
			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
			ret);
	}

	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
	lock->active = true;
	gp_hif_rtpm_ctx->prevent_cnt++;
	gp_hif_rtpm_ctx->stats.prevent_suspend++;
	return ret;
}
766 
/**
 * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @lock: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count. Caller must hold prevent_list_lock.
 *
 * NOTE(review): on the error paths this returns QDF_STATUS_E_CANCELED
 * while the success path returns the kernel put() result — mixed
 * conventions; callers currently ignore the return value.
 *
 * Return: status
 */
static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;
	int usage_count;

	/* Not on the prevent list; nothing to release */
	if (gp_hif_rtpm_ctx->prevent_cnt == 0 || !lock->active)
		return ret;

	usage_count = hif_rtpm_read_usage_count();
	/*
	 * For runtime PM enabled case, the usage count should never be 0
	 * at this point. For runtime PM disabled case, it should never be
	 * 2 at this point. Catch unexpected PUT without GET here.
	 */
	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
		hif_err("Unexpected PUT when runtime PM is disabled");
		QDF_BUG(0);
		return QDF_STATUS_E_CANCELED;
	} else if (!usage_count) {
		hif_info_high("Put without a Get operation, %s", lock->name);
		return QDF_STATUS_E_CANCELED;
	}

	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
	ret = __hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);

	list_del(&lock->list);
	lock->active = false;
	gp_hif_rtpm_ctx->prevent_cnt--;
	gp_hif_rtpm_ctx->stats.allow_suspend++;
	return ret;
}
808 
809 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
810 {
811 	if (!hif_rtpm_enabled() || !lock)
812 		return -EINVAL;
813 
814 	if (in_irq())
815 		WARN_ON(1);
816 
817 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
818 	__hif_pm_runtime_prevent_suspend(lock);
819 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
820 
821 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
822 		HIF_RTPM_STATE_SUSPENDING)
823 		hif_info_high("request RTPM resume by %s",
824 			      lock->name);
825 
826 	return 0;
827 }
828 
/**
 * __hif_pm_runtime_prevent_suspend_sync() - synchronized prevent runtime
 *  suspend for a protocol reason
 * @lock: runtime_pm lock being acquired
 *
 * Unlike the async variant, this blocks until the device has resumed,
 * and takes prevent_list_lock itself.
 *
 * Return: 0 if successful.
 */
static
int __hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	/* Already on the prevent list; nothing to do */
	if (lock->active)
		return 0;

	ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);

	/*
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do runtime_put here as in later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		gp_hif_rtpm_ctx->stats.runtime_get_err++;
		hif_err("pm_state: %d ret: %d",
			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
			ret);
	}

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
	lock->active = true;
	gp_hif_rtpm_ctx->prevent_cnt++;
	gp_hif_rtpm_ctx->stats.prevent_suspend++;
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);

	return ret;
}
869 
870 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
871 {
872 	if (!hif_rtpm_enabled())
873 		return 0;
874 
875 	if (!lock)
876 		return -EINVAL;
877 
878 	if (in_irq())
879 		WARN_ON(1);
880 
881 	__hif_pm_runtime_prevent_suspend_sync(lock);
882 
883 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
884 		HIF_RTPM_STATE_SUSPENDING)
885 		hif_info_high("request RTPM resume by %s",
886 			      lock->name);
887 
888 	return 0;
889 }
890 
891 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
892 {
893 	if (!hif_rtpm_enabled())
894 		return 0;
895 
896 	if (!lock)
897 		return -EINVAL;
898 
899 	if (in_irq())
900 		WARN_ON(1);
901 
902 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
903 	__hif_pm_runtime_allow_suspend(lock);
904 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
905 
906 	return 0;
907 }
908 
909 QDF_STATUS hif_rtpm_sync_resume(void)
910 {
911 	struct device *dev;
912 	int pm_state;
913 	int ret;
914 
915 	if (!hif_rtpm_enabled())
916 		return 0;
917 
918 	dev = gp_hif_rtpm_ctx->dev;
919 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
920 
921 	ret = __hif_rtpm_resume(dev);
922 	__hif_rtpm_mark_last_busy(dev);
923 
924 	if (ret >= 0) {
925 		gp_hif_rtpm_ctx->stats.resume_count++;
926 		gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
927 		gp_hif_rtpm_ctx->stats.last_busy_ts =
928 					gp_hif_rtpm_ctx->stats.resume_ts;
929 		return QDF_STATUS_SUCCESS;
930 	}
931 
932 	hif_err("pm_state: %d, err: %d", pm_state, ret);
933 	return QDF_STATUS_E_FAILURE;
934 }
935 
936 void hif_rtpm_request_resume(void)
937 {
938 	__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
939 	hif_info_high("request RTPM resume %s", (char *)_RET_IP_);
940 }
941 
/**
 * hif_rtpm_check_and_request_resume() - Resume if suspending/suspended,
 * otherwise just mark the device busy
 *
 * Return: void
 */
void hif_rtpm_check_and_request_resume(void)
{
	hif_rtpm_suspend_lock();
	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
			HIF_RTPM_STATE_SUSPENDING) {
		/* Drop the suspend lock before requesting resume */
		hif_rtpm_suspend_unlock();
		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.request_resume_ts =
						qdf_get_log_timestamp();
		gp_hif_rtpm_ctx->stats.request_resume_id = HIF_RTPM_ID_RESERVED;
	} else {
		/* Device active: just restart the autosuspend timer */
		__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
		hif_rtpm_suspend_unlock();
	}
}
958 
/* Read the monitor-wake-interrupt flag from the runtime PM context */
int hif_rtpm_get_monitor_wake_intr(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->monitor_wake_intr);
}
963 
/* Set the monitor-wake-interrupt flag in the runtime PM context */
void hif_rtpm_set_monitor_wake_intr(int val)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->monitor_wake_intr, val);
}
968 
/**
 * hif_rtpm_display_last_busy_hist() - Log runtime PM last-busy history
 * @hif_ctx: hif opaque context (used only to validate the softc)
 *
 * Return: void
 */
void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn;
	struct hif_rtpm_ctx *rtpm_ctx = gp_hif_rtpm_ctx;
	struct hif_rtpm_last_busy_hist *hist;
	unsigned long cur_idx;
	int i;

	scn = HIF_GET_SOFTC(hif_ctx);
	if (!scn)
		return;

	hif_info_high("RTPM last busy ts:%llu client:%s from:%ps",
		      rtpm_ctx->stats.last_busy_ts,
		      hif_rtpm_id_to_string(rtpm_ctx->stats.last_busy_id),
		      rtpm_ctx->stats.last_busy_marker);

	/*Display CE and DP clients RTPM stats*/
	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		if (!rtpm_ctx->clients[i] ||
		    (i != HIF_RTPM_ID_CE && i != HIF_RTPM_ID_DP))
			continue;
		hif_info_high("RTPM client:%s busy_ts:%llu get_ts:%llu put_ts:%llu get_cnt:%d put_cnt:%d",
			      hif_rtpm_id_to_string(i),
			      rtpm_ctx->clients[i]->last_busy_ts,
			      rtpm_ctx->clients[i]->get_ts,
			      rtpm_ctx->clients[i]->put_ts,
			      qdf_atomic_read(&rtpm_ctx->clients[i]->get_count),
			      qdf_atomic_read(&rtpm_ctx->clients[i]->put_count));
	}

	/* Dump 4 evenly spaced samples from each CE's circular history */
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hist = gp_hif_rtpm_ctx->busy_hist[i];
		if (!hist)
			continue;
		cur_idx = hist->last_busy_idx;

		hif_info_high("RTPM CE-%u last busy_cnt:%lu cur_idx:%lu ts1:%llu ts2:%llu ts3:%llu ts4:%llu",
			      i, hist->last_busy_cnt, cur_idx,
			      hist->last_busy_ts[cur_idx & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 4) & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 8) & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 12) & HIF_RTPM_BUSY_HIST_MASK]);
	}
}
1014 
1015 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1016 				      unsigned long ce_id)
1017 {
1018 	struct hif_rtpm_last_busy_hist *hist;
1019 	unsigned long idx;
1020 
1021 	if (!scn || !gp_hif_rtpm_ctx->busy_hist[ce_id])
1022 		return;
1023 
1024 	hist = gp_hif_rtpm_ctx->busy_hist[ce_id];
1025 	hist->last_busy_cnt++;
1026 	hist->last_busy_idx++;
1027 	idx = hist->last_busy_idx & HIF_RTPM_BUSY_HIST_MASK;
1028 	hist->last_busy_ts[idx] = qdf_get_log_timestamp();
1029 }
1030 
1031 void hif_rtpm_mark_last_busy(uint32_t id)
1032 {
1033 	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
1034 	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
1035 	gp_hif_rtpm_ctx->stats.last_busy_id = id;
1036 	gp_hif_rtpm_ctx->stats.last_busy_marker = (void *)_RET_IP_;
1037 	if (gp_hif_rtpm_ctx->clients[id]) {
1038 		gp_hif_rtpm_ctx->clients[id]->last_busy_cnt++;
1039 		gp_hif_rtpm_ctx->clients[id]->last_busy_ts =
1040 					gp_hif_rtpm_ctx->stats.last_busy_ts;
1041 	}
1042 }
1043 
/**
 * hif_rtpm_set_client_job() - Run a client's job now or defer it
 * @client_id: registered client whose callback should be triggered
 *
 * If the runtime PM state is at or below HIF_RTPM_STATE_RESUMING_LINKUP
 * (presumably: the bus link is usable — confirm against the enum order),
 * the client's registered callback runs immediately, with runtime_lock
 * held. Otherwise the job is recorded in the pending_job bitmap and
 * replayed by hif_rtpm_pending_job() once the bus resumes.
 *
 * Return: void
 */
void hif_rtpm_set_client_job(uint32_t client_id)
{
	int pm_state;

	if (!gp_hif_rtpm_ctx->clients[client_id])
		return;

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
	/* Callback runs under runtime_lock; state is re-checked inside
	 * the lock so the decision and the bitmap update are atomic.
	 */
	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP &&
	    gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk)
		gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk();
	else
		qdf_set_bit(client_id, &gp_hif_rtpm_ctx->pending_job);
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}
1060 
1061 /**
1062  * hif_rtpm_pending_job() - continue jobs when bus resumed
1063  *
1064  * Return: Void
1065  */
1066 static void hif_rtpm_pending_job(void)
1067 {
1068 	int i;
1069 
1070 	for (i = 0; i < gp_hif_rtpm_ctx->client_count; i++) {
1071 		if (qdf_test_and_clear_bit(i, &gp_hif_rtpm_ctx->pending_job)) {
1072 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1073 			if (gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk)
1074 				gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk();
1075 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1076 		}
1077 	}
1078 }
1079 
1080 #define PREVENT_LIST_STRING_LEN 200
1081 
1082 void hif_rtpm_print_prevent_list(void)
1083 {
1084 	struct hif_rtpm_client *client;
1085 	struct hif_pm_runtime_lock *ctx;
1086 	char *str_buf;
1087 	int i, prevent_list_count, len = 0;
1088 
1089 	str_buf = qdf_mem_malloc(PREVENT_LIST_STRING_LEN);
1090 	if (!str_buf)
1091 		return;
1092 
1093 	qdf_spin_lock(&gp_hif_rtpm_ctx->prevent_list_lock);
1094 	prevent_list_count = gp_hif_rtpm_ctx->prevent_cnt;
1095 	if (prevent_list_count) {
1096 		list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list)
1097 			len += qdf_scnprintf(str_buf + len,
1098 				PREVENT_LIST_STRING_LEN - len,
1099 				"%s ", ctx->name);
1100 	}
1101 	qdf_spin_unlock(&gp_hif_rtpm_ctx->prevent_list_lock);
1102 
1103 	if (prevent_list_count)
1104 		hif_info_high("prevent_suspend_cnt %u, prevent_list: %s",
1105 			      prevent_list_count, str_buf);
1106 
1107 	qdf_mem_free(str_buf);
1108 
1109 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
1110 		client = gp_hif_rtpm_ctx->clients[i];
1111 		if (client && qdf_atomic_read(&client->active_count))
1112 			hif_info_high("client: %d: %s- active count: %d", i,
1113 				      hif_rtpm_id_to_string(i),
1114 				      qdf_atomic_read(&client->active_count));
1115 	}
1116 }
1117 
1118 /**
1119  * hif_rtpm_is_suspend_allowed() - Reject suspend if client is active
1120  *
1121  * Return: True if no clients are active
1122  */
1123 static bool hif_rtpm_is_suspend_allowed(void)
1124 {
1125 	if (!gp_hif_rtpm_ctx || !gp_hif_rtpm_ctx->enable_rpm)
1126 		return false;
1127 
1128 	if (!hif_rtpm_read_usage_count())
1129 		return true;
1130 
1131 	return false;
1132 }
1133 
/**
 * hif_rtpm_suspend_lock() - Acquire the runtime suspend spinlock
 *
 * Serializes against the runtime suspend path; irqs are saved/disabled.
 * Pair with hif_rtpm_suspend_unlock().
 */
void hif_rtpm_suspend_lock(void)
{
	qdf_spin_lock_irqsave(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
1138 
/**
 * hif_rtpm_suspend_unlock() - Release the runtime suspend spinlock
 *
 * Counterpart of hif_rtpm_suspend_lock(); restores the saved irq state.
 */
void hif_rtpm_suspend_unlock(void)
{
	qdf_spin_unlock_irqrestore(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
1143 
1144 /**
1145  * hif_rtpm_set_state(): utility function
1146  * @state: state to set
1147  *
1148  * Return: Void
1149  */
1150 static inline
1151 void hif_rtpm_set_state(enum hif_rtpm_state state)
1152 {
1153 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, state);
1154 }
1155 
/**
 * hif_rtpm_get_state() - Read the current runtime PM state
 *
 * Return: current value of the global pm_state (enum hif_rtpm_state)
 */
int hif_rtpm_get_state(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
}
1160 
1161 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
1162 {
1163 	if (!hif_can_suspend_link(hif_ctx)) {
1164 		hif_err("Runtime PM not supported for link up suspend");
1165 		return -EINVAL;
1166 	}
1167 
1168 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1169 	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDING);
1170 
1171 	/* keep this after set suspending */
1172 	if (!hif_rtpm_is_suspend_allowed()) {
1173 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1174 		hif_rtpm_print_prevent_list();
1175 		gp_hif_rtpm_ctx->stats.suspend_err_count++;
1176 		gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
1177 		hif_info_high("Runtime PM not allowed now");
1178 		return -EINVAL;
1179 	}
1180 
1181 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1182 
1183 	return QDF_STATUS_SUCCESS;
1184 }
1185 
1186 void hif_process_runtime_suspend_success(void)
1187 {
1188 	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDED);
1189 	gp_hif_rtpm_ctx->stats.suspend_count++;
1190 	gp_hif_rtpm_ctx->stats.suspend_ts = qdf_get_log_timestamp();
1191 }
1192 
1193 void hif_process_runtime_suspend_failure(void)
1194 {
1195 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1196 	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
1197 	hif_rtpm_pending_job();
1198 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1199 
1200 	gp_hif_rtpm_ctx->stats.suspend_err_count++;
1201 	gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
1202 	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
1203 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
1204 }
1205 
1206 void hif_pre_runtime_resume(void)
1207 {
1208 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1209 	hif_rtpm_set_monitor_wake_intr(0);
1210 	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING);
1211 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1212 }
1213 
1214 void hif_process_runtime_resume_linkup(void)
1215 {
1216 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1217 	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING_LINKUP);
1218 	hif_rtpm_pending_job();
1219 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1220 }
1221 
1222 void hif_process_runtime_resume_success(void)
1223 {
1224 	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
1225 	gp_hif_rtpm_ctx->stats.resume_count++;
1226 	gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
1227 	gp_hif_rtpm_ctx->stats.last_busy_ts = gp_hif_rtpm_ctx->stats.resume_ts;
1228 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
1229 }
1230 
/**
 * hif_runtime_suspend() - Suspend the bus for runtime PM
 * @hif_ctx: HIF opaque context
 *
 * Performs the two-stage bus suspend (normal, then noirq) and arms
 * wake-interrupt monitoring in between. If the noirq stage fails, the
 * monitor flag is cleared and the bus is resumed to undo stage one.
 *
 * Return: 0 on success, errno from the failing bus call otherwise
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	ret = hif_bus_suspend(hif_ctx);
	if (ret) {
		hif_err("Failed bus suspend: %d", ret);
		return ret;
	}

	hif_rtpm_set_monitor_wake_intr(1);

	ret = hif_bus_suspend_noirq(hif_ctx);
	if (!ret)
		return 0;

	/* noirq stage failed: disarm monitoring and undo the bus suspend */
	hif_err("Failed bus suspend noirq: %d", ret);
	hif_rtpm_set_monitor_wake_intr(0);
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return ret;
}
1257 
/**
 * hif_runtime_resume() - Resume the bus for runtime PM
 * @hif_ctx: HIF opaque context
 *
 * Runs the noirq resume stage (which must not fail — enforced by
 * QDF_BUG) followed by the normal bus resume.
 *
 * Return: 0 on success, errno from hif_bus_resume() otherwise
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));

	ret = hif_bus_resume(hif_ctx);
	if (ret)
		hif_err("Failed runtime resume: %d", ret);

	return ret;
}
1269 
1270 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
1271 {
1272 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1273 	struct CE_state *ce_state;
1274 
1275 	if (!scn)
1276 		return;
1277 
1278 	if (scn->fastpath_mode_on) {
1279 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1280 			return;
1281 
1282 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
1283 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1284 
1285 		/*war_ce_src_ring_write_idx_set */
1286 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1287 					  ce_state->src_ring->write_index);
1288 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1289 		Q_TARGET_ACCESS_END(scn);
1290 	}
1291 }
1292 #endif /* FEATURE_RUNTIME_PM */
1293