xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_runtime_pm.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8 
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
20 #include <linux/if_arp.h>
21 #include "hif_io32.h"
22 #include "hif_runtime_pm.h"
23 #include "hif.h"
24 #include "target_type.h"
25 #include "hif_main.h"
26 #include "ce_main.h"
27 #include "ce_api.h"
28 #include "ce_internal.h"
29 #include "ce_reg.h"
30 #include "ce_bmi.h"
31 #include "regtable.h"
32 #include "hif_hw_version.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include "qdf_status.h"
36 #include "qdf_atomic.h"
37 #include "pld_common.h"
38 #include "mp_dev.h"
39 #include "hif_debug.h"
40 
41 #include "ce_tasklet.h"
42 #include "targaddrs.h"
43 #include "hif_exec.h"
44 
45 #define CNSS_RUNTIME_FILE "cnss_runtime_pm"
46 #define CNSS_RUNTIME_FILE_PERM QDF_FILE_USR_READ
47 
48 #ifdef FEATURE_RUNTIME_PM
49 
50 static struct hif_rtpm_ctx g_hif_rtpm_ctx;
51 static struct hif_rtpm_ctx *gp_hif_rtpm_ctx;
52 
53 /**
54  * hif_rtpm_id_to_string() - Convert dbgid to respective string
55  * @id -  debug id
56  *
57  * Debug support function to convert  dbgid to string.
58  * Please note to add new string in the array at index equal to
59  * its enum value in wlan_rtpm_dbgid.
60  *
61  * Return: String of ID
62  */
63 static const char *hif_rtpm_id_to_string(enum hif_rtpm_client_id id)
64 {
65 	static const char * const strings[] = {
66 					"HIF_RTPM_ID_RESERVED",
67 					"HIF_RTPM_HAL_REO_CMD",
68 					"HIF_RTPM_WMI",
69 					"HIF_RTPM_HTT",
70 					"HIF_RTPM_DP",
71 					"HIF_RTPM_RING_STATS",
72 					"HIF_RTPM_CE",
73 					"HIF_RTPM_FORCE_WAKE",
74 					"HIF_RTPM_ID_PM_QOS_NOTIFY",
75 					"HIF_RTPM_ID_WIPHY_SUSPEND",
76 					"HIF_RTPM_ID_MAX"
77 	};
78 
79 	return strings[id];
80 }
81 
/**
 * hif_rtpm_read_usage_count() - Read runtime PM usage count of the device
 *
 * Reads dev->power.usage_count of the device cached in the global
 * runtime PM context. Caller must ensure gp_hif_rtpm_ctx is valid.
 *
 * Return: current usage count
 */
static inline int hif_rtpm_read_usage_count(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->dev->power.usage_count);
}
92 
/* Print one named counter from the runtime PM stats block to seq file @_s */
#define HIF_RTPM_STATS(_s, _rtpm_ctx, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, (_rtpm_ctx)->stats._name)
95 
96 /**
97  * hif_rtpm_debugfs_show(): show debug stats for runtimepm
98  * @s: file to print to
99  * @data: unused
100  *
101  * debugging tool added to the debug fs for displaying runtimepm stats
102  *
103  * Return: 0
104  */
105 static int hif_rtpm_debugfs_show(struct seq_file *s, void *data)
106 {
107 	struct hif_rtpm_client *client = NULL;
108 	struct hif_pm_runtime_lock *ctx;
109 	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
110 			"RESUMING_LINKUP", "SUSPENDING", "SUSPENDED"};
111 	int pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
112 	int i;
113 
114 	seq_printf(s, "%30s: %llu\n", "Current timestamp",
115 		   qdf_get_log_timestamp());
116 
117 	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
118 
119 	seq_printf(s, "%30s: %llu\n", "Last Busy timestamp",
120 		   gp_hif_rtpm_ctx->stats.last_busy_ts);
121 
122 	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
123 		   gp_hif_rtpm_ctx->stats.last_busy_marker);
124 
125 	seq_puts(s, "Rx busy marker counts:\n");
126 	seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_DP),
127 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_cnt,
128 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_ts);
129 
130 	seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_CE),
131 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_cnt,
132 		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_ts);
133 
134 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, last_busy_id);
135 
136 	if (pm_state == HIF_RTPM_STATE_SUSPENDED) {
137 		seq_printf(s, "%30s: %llx us\n", "Suspended Since",
138 			   gp_hif_rtpm_ctx->stats.suspend_ts);
139 	}
140 
141 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, resume_count);
142 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_count);
143 	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_err_count);
144 
145 	seq_printf(s, "%30s: %d\n", "PM Usage count",
146 		   hif_rtpm_read_usage_count());
147 
148 	seq_puts(s, "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
149 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
150 		client = gp_hif_rtpm_ctx->clients[i];
151 		if (!client)
152 			continue;
153 		seq_printf(s, "%-10d ", qdf_atomic_read(&client->get_count));
154 		seq_printf(s, "%-10d ", qdf_atomic_read(&client->put_count));
155 		seq_printf(s, "0x%-10llx ", client->get_ts);
156 		seq_printf(s, "0x%-10llx ", client->put_ts);
157 		seq_printf(s, ":%-2d %-30s\n", i, hif_rtpm_id_to_string(i));
158 	}
159 	seq_puts(s, "\n");
160 
161 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
162 	if (list_empty(&gp_hif_rtpm_ctx->prevent_list)) {
163 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
164 		return 0;
165 	}
166 
167 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
168 	list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list) {
169 		seq_printf(s, "%s", ctx->name);
170 		seq_puts(s, " ");
171 	}
172 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
173 
174 	return 0;
175 }
176 
177 #undef HIF_RTPM_STATS
178 
/**
 * hif_rtpm_debugfs_open() - open a debug fs file to access the runtime pm stats
 * @inode: inode of the debugfs entry; i_private is passed to the show handler
 * @file: file being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_rtpm_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_rtpm_debugfs_show,
			inode->i_private);
}
191 
/* File operations for the runtime PM debugfs entry; stats are rendered
 * by hif_rtpm_debugfs_show() through the seq_file single_open() helpers.
 */
static const struct file_operations hif_rtpm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_rtpm_debugfs_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};
199 
/**
 * hif_rtpm_debugfs_create() - creates runtimepm debugfs entry
 *
 * Creates the CNSS_RUNTIME_FILE debugfs entry (user-read only) used to
 * inspect runtime PM stats; the dentry is cached for later removal.
 *
 * Return: void
 */
static void hif_rtpm_debugfs_create(void)
{
	gp_hif_rtpm_ctx->pm_dentry = qdf_debugfs_create_entry(CNSS_RUNTIME_FILE,
							CNSS_RUNTIME_FILE_PERM,
							NULL,
							NULL,
							&hif_rtpm_fops);
}
214 
/**
 * hif_rtpm_debugfs_remove() - removes runtimepm debugfs entry
 *
 * Removes the debugfs entry created by hif_rtpm_debugfs_create().
 *
 * Return: void
 */
static void hif_rtpm_debugfs_remove(void)
{
	qdf_debugfs_remove_file(gp_hif_rtpm_ctx->pm_dentry);
}
225 
/**
 * hif_rtpm_init() - Initialize Runtime PM
 * @dev: device structure
 * @delay: delay (in ms) to be configured for auto suspend
 *
 * This function will init all the Runtime PM config.
 *
 * Return: void
 */
static void hif_rtpm_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	/* NOTE(review): put_noidle presumably drops the usage count taken
	 * earlier by the bus/probe path so autosuspend can trigger;
	 * hif_rtpm_exit() takes it back with get_noresume - confirm.
	 */
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}
244 
/**
 * hif_rtpm_exit() - Deinit/Exit Runtime PM
 * @dev: device structure
 *
 * Undo hif_rtpm_init(): re-take the usage count dropped at init, mark
 * the device active and forbid further runtime power management.
 *
 * Return: void
 */
static void hif_rtpm_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_forbid(dev);
}
259 
/**
 * hif_rtpm_open() - Attach and initialize the global runtime PM context
 * @scn: hif context providing the underlying device
 *
 * Sets up the locks, state atomics and prevent-suspend list, then
 * registers the always-present CE and FORCE_WAKE clients.
 *
 * Return: void
 */
void hif_rtpm_open(struct hif_softc *scn)
{
	gp_hif_rtpm_ctx = &g_hif_rtpm_ctx;
	gp_hif_rtpm_ctx->dev = scn->qdf_dev->dev;
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_suspend_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->prevent_list_lock);
	qdf_atomic_init(&gp_hif_rtpm_ctx->pm_state);
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
	qdf_atomic_init(&gp_hif_rtpm_ctx->monitor_wake_intr);
	INIT_LIST_HEAD(&gp_hif_rtpm_ctx->prevent_list);
	gp_hif_rtpm_ctx->client_count = 0;
	gp_hif_rtpm_ctx->pending_job = 0;
	hif_rtpm_register(HIF_RTPM_ID_CE, NULL);
	hif_rtpm_register(HIF_RTPM_ID_FORCE_WAKE, NULL);
	hif_info_high("Runtime PM attached");
}
277 
278 static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock);
279 
280 /**
281  * hif_rtpm_sanitize_exit(): sanitize runtime PM gets/puts from driver
282  *
283  * Ensure all gets/puts are in sync before exiting runtime PM feature.
284  * Also make sure all runtime PM locks are deinitialized properly.
285  *
286  * Return: void
287  */
288 static void hif_rtpm_sanitize_exit(void)
289 {
290 	struct hif_pm_runtime_lock *ctx, *tmp;
291 	struct hif_rtpm_client *client;
292 	int i, active_count;
293 
294 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
295 	list_for_each_entry_safe(ctx, tmp,
296 				 &gp_hif_rtpm_ctx->prevent_list, list) {
297 		hif_runtime_lock_deinit(ctx);
298 	}
299 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
300 
301 	/* check if get and put out of sync for all clients */
302 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
303 		client = gp_hif_rtpm_ctx->clients[i];
304 		if (client) {
305 			if (qdf_atomic_read(&client->active_count)) {
306 				active_count =
307 					qdf_atomic_read(&client->active_count);
308 				hif_err("Client active: %u- %s", i,
309 					hif_rtpm_id_to_string(i));
310 				QDF_DEBUG_PANIC("Client active on exit!");
311 				while (active_count--)
312 					__hif_rtpm_put_noidle(
313 							gp_hif_rtpm_ctx->dev);
314 			}
315 			QDF_DEBUG_PANIC("Client not deinitialized");
316 			qdf_mem_free(client);
317 			gp_hif_rtpm_ctx->clients[i] = NULL;
318 		}
319 	}
320 }
321 
/**
 * hif_rtpm_sanitize_ssr_exit() - Empty the suspend list on SSR
 *
 * API is used to empty the runtime pm prevent suspend list. Unlike
 * hif_rtpm_sanitize_exit(), the lock objects themselves are not freed
 * here; only the usage counts they hold are dropped.
 *
 * Return: void
 */
static void hif_rtpm_sanitize_ssr_exit(void)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &gp_hif_rtpm_ctx->prevent_list, list) {
		__hif_pm_runtime_allow_suspend(ctx);
	}
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
}
340 
341 void hif_rtpm_close(struct hif_softc *scn)
342 {
343 	hif_rtpm_deregister(HIF_RTPM_ID_CE);
344 	hif_rtpm_deregister(HIF_RTPM_ID_FORCE_WAKE);
345 
346 	hif_is_recovery_in_progress(scn) ?
347 		hif_rtpm_sanitize_ssr_exit() :
348 		hif_rtpm_sanitize_exit();
349 
350 	qdf_mem_set(gp_hif_rtpm_ctx, sizeof(*gp_hif_rtpm_ctx), 0);
351 	gp_hif_rtpm_ctx = NULL;
352 	hif_info_high("Runtime PM context detached");
353 }
354 
355 void hif_rtpm_start(struct hif_softc *scn)
356 {
357 	uint32_t mode = hif_get_conparam(scn);
358 
359 	gp_hif_rtpm_ctx->enable_rpm = scn->hif_config.enable_runtime_pm;
360 
361 	if (!gp_hif_rtpm_ctx->enable_rpm) {
362 		hif_info_high("RUNTIME PM is disabled in ini");
363 		return;
364 	}
365 
366 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
367 	    mode == QDF_GLOBAL_MONITOR_MODE) {
368 		hif_info("RUNTIME PM is disabled for FTM/EPPING/MONITOR mode");
369 		return;
370 	}
371 
372 	hif_info_high("Enabling RUNTIME PM, Delay: %d ms",
373 		      scn->hif_config.runtime_pm_delay);
374 
375 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_ON);
376 	hif_rtpm_init(gp_hif_rtpm_ctx->dev, scn->hif_config.runtime_pm_delay);
377 	hif_rtpm_debugfs_create();
378 }
379 
380 void hif_rtpm_stop(struct hif_softc *scn)
381 {
382 	uint32_t mode = hif_get_conparam(scn);
383 
384 	if (!gp_hif_rtpm_ctx->enable_rpm)
385 		return;
386 
387 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
388 	    mode == QDF_GLOBAL_MONITOR_MODE)
389 		return;
390 
391 	hif_rtpm_exit(gp_hif_rtpm_ctx->dev);
392 
393 	hif_rtpm_sync_resume();
394 
395 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
396 	hif_rtpm_debugfs_remove();
397 }
398 
399 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rtpm_cbk)(void))
400 {
401 	struct hif_rtpm_client *client;
402 
403 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
404 		hif_err("Runtime PM context NULL");
405 		return QDF_STATUS_E_FAILURE;
406 	}
407 
408 	if (id >= HIF_RTPM_ID_MAX || gp_hif_rtpm_ctx->clients[id]) {
409 		hif_err("Invalid client %d", id);
410 		return QDF_STATUS_E_INVAL;
411 	}
412 
413 	client = qdf_mem_malloc(sizeof(struct hif_rtpm_client));
414 	if (!client)
415 		return QDF_STATUS_E_NOMEM;
416 
417 	client->hif_rtpm_cbk = hif_rtpm_cbk;
418 	qdf_atomic_init(&client->active_count);
419 	qdf_atomic_init(&client->get_count);
420 	qdf_atomic_init(&client->put_count);
421 
422 	gp_hif_rtpm_ctx->clients[id] = client;
423 	gp_hif_rtpm_ctx->client_count++;
424 
425 	return QDF_STATUS_SUCCESS;
426 }
427 
428 QDF_STATUS hif_rtpm_deregister(uint32_t id)
429 {
430 	struct hif_rtpm_client *client;
431 	int active_count;
432 
433 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
434 		hif_err("Runtime PM context NULL");
435 		return QDF_STATUS_E_FAILURE;
436 	}
437 
438 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
439 		hif_err("invalid client, id: %u", id);
440 		return QDF_STATUS_E_INVAL;
441 	}
442 
443 	client = gp_hif_rtpm_ctx->clients[id];
444 	if (qdf_atomic_read(&client->active_count)) {
445 		active_count = qdf_atomic_read(&client->active_count);
446 		hif_err("Client: %u-%s Runtime PM active",
447 			id, hif_rtpm_id_to_string(id));
448 		hif_err("last get called: 0x%llx, get count: %d, put count: %d",
449 			client->get_ts, qdf_atomic_read(&client->get_count),
450 			qdf_atomic_read(&client->put_count));
451 		QDF_DEBUG_PANIC("Get and PUT call out of sync!");
452 		while (active_count--)
453 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
454 	}
455 
456 	qdf_mem_free(client);
457 	gp_hif_rtpm_ctx->clients[id] = NULL;
458 
459 	return QDF_STATUS_SUCCESS;
460 }
461 
462 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
463 {
464 	struct hif_pm_runtime_lock *context;
465 
466 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
467 		hif_err("Runtime PM context NULL");
468 		return QDF_STATUS_E_FAILURE;
469 	}
470 
471 	hif_debug("Initializing Runtime PM wakelock %s", name);
472 
473 	context = qdf_mem_malloc(sizeof(*context));
474 	if (!context)
475 		return -ENOMEM;
476 
477 	context->name = name ? name : "Default";
478 	lock->lock = context;
479 
480 	return 0;
481 }
482 
483 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *lock)
484 {
485 	if (!lock) {
486 		hif_err("Runtime PM lock already freed");
487 		return;
488 	}
489 
490 	hif_debug("Deinitializing Runtime PM wakelock %s", lock->name);
491 
492 	if (gp_hif_rtpm_ctx) {
493 		qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
494 		__hif_pm_runtime_allow_suspend(lock);
495 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
496 	}
497 
498 	qdf_mem_free(lock);
499 }
500 
/**
 * hif_rtpm_enabled() - To check if Runtime PM is enabled
 *
 * This function will check if Runtime PM is enabled or not.
 *
 * Return: true if runtime PM is enabled (by ini or by the PM core),
 * false otherwise
 */
static bool hif_rtpm_enabled(void)
{
	if (qdf_unlikely(!gp_hif_rtpm_ctx))
		return false;

	if (gp_hif_rtpm_ctx->enable_rpm)
		return true;

	/* ini said no; defer to the PM core's view of the device */
	return __hif_rtpm_enabled(gp_hif_rtpm_ctx->dev);
}
518 
519 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id)
520 {
521 	struct hif_rtpm_client *client = NULL;
522 	int ret = QDF_STATUS_E_FAILURE;
523 	int pm_state;
524 
525 	if (!hif_rtpm_enabled())
526 		return QDF_STATUS_SUCCESS;
527 
528 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
529 		QDF_DEBUG_PANIC("Invalid client, id: %u", id);
530 		return -QDF_STATUS_E_INVAL;
531 	}
532 
533 	client = gp_hif_rtpm_ctx->clients[id];
534 
535 	if (type != HIF_RTPM_GET_ASYNC) {
536 		switch (type) {
537 		case HIF_RTPM_GET_FORCE:
538 			ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
539 			break;
540 		case HIF_RTPM_GET_SYNC:
541 			ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
542 			break;
543 		case HIF_RTPM_GET_NORESUME:
544 			__hif_rtpm_get_noresume(gp_hif_rtpm_ctx->dev);
545 			ret = 0;
546 			break;
547 		default:
548 			QDF_DEBUG_PANIC("Invalid call type");
549 			return QDF_STATUS_E_BADMSG;
550 		}
551 
552 		if (ret < 0 && ret != -EINPROGRESS) {
553 			hif_err("pm_state: %d ret: %d",
554 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
555 				ret);
556 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
557 		} else {
558 			ret = QDF_STATUS_SUCCESS;
559 		}
560 		goto out;
561 	}
562 
563 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
564 	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP) {
565 		ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
566 		/* Get will return 1 if the device is already active,
567 		 * just return success in that case
568 		 */
569 		if (ret > 0) {
570 			ret = QDF_STATUS_SUCCESS;
571 		} else if (ret == 0 || ret == -EINPROGRESS) {
572 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
573 			pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
574 			if (pm_state >= HIF_RTPM_STATE_RESUMING) {
575 				__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
576 				gp_hif_rtpm_ctx->stats.request_resume_ts =
577 							qdf_get_log_timestamp();
578 				gp_hif_rtpm_ctx->stats.request_resume_id = id;
579 				ret = QDF_STATUS_E_FAILURE;
580 			} else {
581 				ret = QDF_STATUS_SUCCESS;
582 			}
583 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
584 		} else if (ret < 0) {
585 			hif_err("pm_state: %d ret: %d",
586 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
587 				ret);
588 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
589 		}
590 	} else if (pm_state >= HIF_RTPM_STATE_RESUMING) {
591 		/* Do not log in performance path */
592 		if (id != HIF_RTPM_ID_DP)
593 			hif_info_high("request RTPM resume by %d- %s",
594 				      id, hif_rtpm_id_to_string(id));
595 		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
596 		gp_hif_rtpm_ctx->stats.request_resume_ts =
597 						qdf_get_log_timestamp();
598 		gp_hif_rtpm_ctx->stats.request_resume_id = id;
599 		return QDF_STATUS_E_FAILURE;
600 	}
601 
602 out:
603 	if (QDF_IS_STATUS_SUCCESS(ret)) {
604 		qdf_atomic_inc(&client->active_count);
605 		qdf_atomic_inc(&client->get_count);
606 		client->get_ts = qdf_get_log_timestamp();
607 	}
608 
609 	return ret;
610 }
611 
/**
 * hif_rtpm_put() - Drop a usage count taken via hif_rtpm_get()
 * @type: put call type (HIF_RTPM_PUT_ASYNC/NOIDLE/SYNC_SUSPEND)
 * @id: client ID making the call
 *
 * Validates the client and catches PUT-without-GET imbalances before
 * dropping the device usage count and updating last-busy bookkeeping.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
{
	struct hif_rtpm_client *client;
	int usage_count;

	if (!hif_rtpm_enabled())
		return QDF_STATUS_SUCCESS;

	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
		hif_err("Invalid client, id: %u", id);
		return QDF_STATUS_E_INVAL;
	}

	client = gp_hif_rtpm_ctx->clients[id];

	usage_count = hif_rtpm_read_usage_count();
	/* With runtime PM enabled the usage count should never be 0 here;
	 * with runtime PM disabled it should never be 2 (see the matching
	 * check in __hif_pm_runtime_allow_suspend()). Either case means a
	 * PUT without a preceding GET.
	 */
	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
		hif_err("Unexpected PUT when runtime PM is disabled");
		QDF_BUG(0);
		return QDF_STATUS_E_CANCELED;
	} else if (!usage_count || !qdf_atomic_read(&client->active_count)) {
		hif_info_high("Put without a Get operation, %u-%s",
			      id, hif_rtpm_id_to_string(id));
		return QDF_STATUS_E_CANCELED;
	}

	switch (type) {
	case HIF_RTPM_PUT_ASYNC:
		__hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_NOIDLE:
		__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_SYNC_SUSPEND:
		__hif_rtpm_put_sync_suspend(gp_hif_rtpm_ctx->dev);
		break;
	default:
		QDF_DEBUG_PANIC("Invalid call type");
		return QDF_STATUS_E_BADMSG;
	}

	/* Refresh last-busy so autosuspend restarts its delay window */
	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
	qdf_atomic_dec(&client->active_count);
	qdf_atomic_inc(&client->put_count);
	client->put_ts = qdf_get_log_timestamp();
	gp_hif_rtpm_ctx->stats.last_busy_ts = client->put_ts;

	return QDF_STATUS_SUCCESS;

}
662 
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 *                                      reason
 * @lock: runtime_pm lock being acquired
 *
 * Caller must hold prevent_list_lock.
 *
 * Return: 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	/* Already on the prevent list; don't take a second usage count */
	if (lock->active)
		return 0;

	ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);

	/*
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do runtime_put here as in later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		gp_hif_rtpm_ctx->stats.runtime_get_err++;
		hif_err("pm_state: %d ret: %d",
			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
			ret);
	}

	/* Track the lock even on get error (see comment above) */
	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
	lock->active = true;
	gp_hif_rtpm_ctx->prevent_cnt++;
	gp_hif_rtpm_ctx->stats.prevent_suspend++;
	return ret;
}
699 
/**
 * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @lock: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count. Caller must hold prevent_list_lock.
 *
 * Return: status
 */
static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;
	int usage_count;

	/* Nothing to release if the lock isn't on the prevent list */
	if (gp_hif_rtpm_ctx->prevent_cnt == 0 || !lock->active)
		return ret;

	usage_count = hif_rtpm_read_usage_count();
	/*
	 * For runtime PM enabled case, the usage count should never be 0
	 * at this point. For runtime PM disabled case, it should never be
	 * 2 at this point. Catch unexpected PUT without GET here.
	 */
	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
		hif_err("Unexpected PUT when runtime PM is disabled");
		QDF_BUG(0);
		return QDF_STATUS_E_CANCELED;
	} else if (!usage_count) {
		hif_info_high("Put without a Get operation, %s", lock->name);
		return QDF_STATUS_E_CANCELED;
	}

	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
	ret = __hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);

	list_del(&lock->list);
	lock->active = false;
	gp_hif_rtpm_ctx->prevent_cnt--;
	gp_hif_rtpm_ctx->stats.allow_suspend++;
	return ret;
}
741 
742 
743 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
744 {
745 	if (!hif_rtpm_enabled() || !lock)
746 		return -EINVAL;
747 
748 	if (in_irq())
749 		WARN_ON(1);
750 
751 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
752 	__hif_pm_runtime_prevent_suspend(lock);
753 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
754 
755 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
756 		HIF_RTPM_STATE_SUSPENDING)
757 		hif_info_high("request RTPM resume by %s",
758 			      lock->name);
759 
760 	return 0;
761 }
762 
763 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
764 {
765 	if (!hif_rtpm_enabled() || !lock)
766 		return -EINVAL;
767 
768 	if (in_irq())
769 		WARN_ON(1);
770 
771 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
772 	__hif_pm_runtime_allow_suspend(lock);
773 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
774 
775 	return 0;
776 }
777 
778 QDF_STATUS hif_rtpm_sync_resume(void)
779 {
780 	struct device *dev;
781 	int pm_state;
782 	int ret;
783 
784 	if (!hif_rtpm_enabled())
785 		return 0;
786 
787 	dev = gp_hif_rtpm_ctx->dev;
788 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
789 
790 	ret = __hif_rtpm_resume(dev);
791 	__hif_rtpm_mark_last_busy(dev);
792 
793 	if (ret >= 0) {
794 		gp_hif_rtpm_ctx->stats.resume_count++;
795 		gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
796 		gp_hif_rtpm_ctx->stats.last_busy_ts =
797 					gp_hif_rtpm_ctx->stats.resume_ts;
798 		return QDF_STATUS_SUCCESS;
799 	}
800 
801 	hif_err("pm_state: %d, err: %d", pm_state, ret);
802 	return QDF_STATUS_E_FAILURE;
803 }
804 
805 void hif_rtpm_request_resume(void)
806 {
807 	__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
808 	hif_info_high("request RTPM resume %s", (char *)_RET_IP_);
809 }
810 
/**
 * hif_rtpm_check_and_request_resume() - Resume if suspending/suspended,
 * otherwise just refresh last-busy
 *
 * Checks the PM state under the suspend lock; requests a resume when a
 * suspend is underway or complete, else marks the device busy so the
 * autosuspend timer restarts.
 *
 * Return: void
 */
void hif_rtpm_check_and_request_resume(void)
{
	hif_rtpm_suspend_lock();
	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
			HIF_RTPM_STATE_SUSPENDING) {
		/* Drop the lock before the (potentially slow) resume call */
		hif_rtpm_suspend_unlock();
		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.request_resume_ts =
						qdf_get_log_timestamp();
		gp_hif_rtpm_ctx->stats.request_resume_id = HIF_RTPM_ID_RESERVED;
	} else {
		__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
		hif_rtpm_suspend_unlock();
	}
}
827 
/**
 * hif_rtpm_get_monitor_wake_intr() - Read the monitor-wake-interrupt flag
 *
 * Return: current value of the monitor_wake_intr atomic
 */
int hif_rtpm_get_monitor_wake_intr(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->monitor_wake_intr);
}
832 
/**
 * hif_rtpm_set_monitor_wake_intr() - Set the monitor-wake-interrupt flag
 * @val: value to store in the monitor_wake_intr atomic
 *
 * Return: void
 */
void hif_rtpm_set_monitor_wake_intr(int val)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->monitor_wake_intr, val);
}
837 
838 void hif_rtpm_mark_last_busy(uint32_t id)
839 {
840 	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
841 	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
842 	gp_hif_rtpm_ctx->stats.last_busy_id = id;
843 	gp_hif_rtpm_ctx->stats.last_busy_marker = (void *)_RET_IP_;
844 	if (gp_hif_rtpm_ctx->clients[id]) {
845 		gp_hif_rtpm_ctx->clients[id]->last_busy_cnt++;
846 		gp_hif_rtpm_ctx->clients[id]->last_busy_ts =
847 					gp_hif_rtpm_ctx->stats.last_busy_ts;
848 	}
849 }
850 
/**
 * hif_rtpm_set_client_job() - Run or defer a client's job per PM state
 * @client_id: client whose callback should run
 *
 * If the device is usable (state at or below RESUMING_LINKUP) the
 * client callback runs immediately under runtime_lock; otherwise the
 * job is recorded in the pending_job bitmap, to be replayed by
 * hif_rtpm_pending_job() once the bus resumes.
 *
 * Return: void
 */
void hif_rtpm_set_client_job(uint32_t client_id)
{
	int pm_state;

	if (!gp_hif_rtpm_ctx->clients[client_id])
		return;

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP &&
	    gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk)
		gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk();
	else
		qdf_set_bit(client_id, &gp_hif_rtpm_ctx->pending_job);
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}
867 
868 /**
869  * hif_rtpm_pending_job() - continue jobs when bus resumed
870  *
871  * Return: Void
872  */
873 static void hif_rtpm_pending_job(void)
874 {
875 	int i;
876 
877 	for (i = 0; i < gp_hif_rtpm_ctx->client_count; i++) {
878 		if (qdf_test_and_clear_bit(i, &gp_hif_rtpm_ctx->pending_job)) {
879 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
880 			if (gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk)
881 				gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk();
882 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
883 		}
884 	}
885 }
886 
/* Upper bound on the rendered prevent-list string (lock names) */
#define PREVENT_LIST_STRING_LEN 200

/**
 * hif_rtpm_print_prevent_list() - Log prevent-suspend locks and busy clients
 *
 * Builds a space-separated string of all lock names currently on the
 * prevent list, then logs every client with a non-zero active count.
 *
 * Return: void
 */
void hif_rtpm_print_prevent_list(void)
{
	struct hif_rtpm_client *client;
	struct hif_pm_runtime_lock *ctx;
	char *str_buf;
	int i, prevent_list_count, len = 0;

	str_buf = qdf_mem_malloc(PREVENT_LIST_STRING_LEN);
	if (!str_buf)
		return;

	/* NOTE(review): plain qdf_spin_lock here while other paths take
	 * prevent_list_lock with the _bh variant - confirm this caller
	 * can never race with BH-context users of the same lock.
	 */
	qdf_spin_lock(&gp_hif_rtpm_ctx->prevent_list_lock);
	prevent_list_count = gp_hif_rtpm_ctx->prevent_cnt;
	if (prevent_list_count) {
		list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list)
			len += qdf_scnprintf(str_buf + len,
				PREVENT_LIST_STRING_LEN - len,
				"%s ", ctx->name);
	}
	qdf_spin_unlock(&gp_hif_rtpm_ctx->prevent_list_lock);

	if (prevent_list_count)
		hif_info_high("prevent_suspend_cnt %u, prevent_list: %s",
			      prevent_list_count, str_buf);

	qdf_mem_free(str_buf);

	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		client = gp_hif_rtpm_ctx->clients[i];
		if (client && qdf_atomic_read(&client->active_count))
			hif_info_high("client: %d: %s- active count: %d", i,
				      hif_rtpm_id_to_string(i),
				      qdf_atomic_read(&client->active_count));
	}
}
924 
925 /**
926  * hif_rtpm_is_suspend_allowed() - Reject suspend if client is active
927  *
928  * Return: True if no clients are active
929  */
930 static bool hif_rtpm_is_suspend_allowed(void)
931 {
932 	if (!gp_hif_rtpm_ctx || !gp_hif_rtpm_ctx->enable_rpm)
933 		return false;
934 
935 	if (!hif_rtpm_read_usage_count())
936 		return true;
937 
938 	return false;
939 }
940 
/* Acquire the runtime-suspend spinlock (IRQ-save variant) */
void hif_rtpm_suspend_lock(void)
{
	qdf_spin_lock_irqsave(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
945 
/* Release the runtime-suspend spinlock taken by hif_rtpm_suspend_lock() */
void hif_rtpm_suspend_unlock(void)
{
	qdf_spin_unlock_irqrestore(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
950 
/**
 * hif_rtpm_set_state(): utility function to update the PM state machine
 * @state: state to set
 *
 * Return: Void
 */
static inline
void hif_rtpm_set_state(enum hif_rtpm_state state)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, state);
}
962 
963 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
964 {
965 	if (!hif_can_suspend_link(hif_ctx)) {
966 		hif_err("Runtime PM not supported for link up suspend");
967 		return -EINVAL;
968 	}
969 
970 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
971 	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDING);
972 
973 	/* keep this after set suspending */
974 	if (!hif_rtpm_is_suspend_allowed()) {
975 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
976 		hif_rtpm_print_prevent_list();
977 		gp_hif_rtpm_ctx->stats.suspend_err_count++;
978 		gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
979 		hif_info_high("Runtime PM not allowed now");
980 		return -EINVAL;
981 	}
982 
983 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
984 
985 	return QDF_STATUS_SUCCESS;
986 }
987 
/**
 * hif_process_runtime_suspend_success() - Finalize a successful suspend
 *
 * Moves the state machine to SUSPENDED and records suspend stats.
 *
 * Return: void
 */
void hif_process_runtime_suspend_success(void)
{
	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDED);
	gp_hif_rtpm_ctx->stats.suspend_count++;
	gp_hif_rtpm_ctx->stats.suspend_ts = qdf_get_log_timestamp();
}
994 
/**
 * hif_process_runtime_suspend_failure() - Recover from a failed suspend
 *
 * Restores the ON state, replays any jobs deferred during the suspend
 * attempt, records the error and refreshes last-busy so autosuspend
 * restarts its delay window.
 *
 * Return: void
 */
void hif_process_runtime_suspend_failure(void)
{
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
	hif_rtpm_pending_job();
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);

	gp_hif_rtpm_ctx->stats.suspend_err_count++;
	gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
	/* last_busy_ts is also set inside hif_rtpm_mark_last_busy() below;
	 * this earlier assignment is redundant but harmless.
	 */
	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
}
1007 
/**
 * hif_pre_runtime_resume() - Bookkeeping before a runtime resume starts
 *
 * Clears the monitor-wake-interrupt flag and moves the state machine
 * to RESUMING under runtime_lock.
 *
 * Return: void
 */
void hif_pre_runtime_resume(void)
{
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	hif_rtpm_set_monitor_wake_intr(0);
	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING);
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}
1015 
/**
 * hif_process_runtime_resume_linkup() - Handle link-up stage of resume
 *
 * Moves the state machine to RESUMING_LINKUP and replays jobs that were
 * deferred while the bus was down.
 *
 * Return: void
 */
void hif_process_runtime_resume_linkup(void)
{
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING_LINKUP);
	hif_rtpm_pending_job();
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}
1023 
/**
 * hif_process_runtime_resume_success() - Finalize a successful resume
 *
 * Moves the state machine back to ON, records resume stats and marks
 * the device busy so autosuspend restarts its delay window.
 *
 * Return: void
 */
void hif_process_runtime_resume_success(void)
{
	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
	gp_hif_rtpm_ctx->stats.resume_count++;
	gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
	gp_hif_rtpm_ctx->stats.last_busy_ts = gp_hif_rtpm_ctx->stats.resume_ts;
	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
}
1032 
/**
 * hif_runtime_suspend() - Perform the bus stages of a runtime suspend
 * @hif_ctx: HIF context
 *
 * Suspends the bus, arms monitor-wake interrupts, then performs the
 * noirq stage. On noirq failure the bus is resumed again so the device
 * is left fully functional.
 *
 * Return: 0 on success, errno on failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend: %d", errno);
		return errno;
	}

	hif_rtpm_set_monitor_wake_intr(1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend noirq: %d", errno);
		/* Undo the flag before rolling the bus back */
		hif_rtpm_set_monitor_wake_intr(0);
		goto bus_resume;
	}

	return 0;

bus_resume:
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}
1059 
/**
 * hif_runtime_resume() - Perform the bus stages of a runtime resume
 * @hif_ctx: HIF context
 *
 * Runs the noirq resume stage (which must not fail), then resumes the
 * bus itself.
 *
 * Return: 0 on success, errno on failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int errno;

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
	errno = hif_bus_resume(hif_ctx);
	if (errno)
		hif_err("Failed runtime resume: %d", errno);

	return errno;
}
1071 
/**
 * hif_fastpath_resume() - Re-sync the HTT H2T CE ring after resume
 * @hif_ctx: HIF context
 *
 * In fastpath mode, re-writes the cached HTT H2T copy-engine source
 * ring write index to hardware after a resume
 * (war_ce_src_ring_write_idx_set workaround), under target access and
 * the CE index lock.
 *
 * Return: void
 */
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/*war_ce_src_ring_write_idx_set */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}
1094 #endif /* FEATURE_RUNTIME_PM */
1095