xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_runtime_pm.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8 
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
20 #include <linux/if_arp.h>
21 #include "hif_io32.h"
22 #include "hif_runtime_pm.h"
23 #include "hif.h"
24 #include "target_type.h"
25 #include "hif_main.h"
26 #include "ce_main.h"
27 #include "ce_api.h"
28 #include "ce_internal.h"
29 #include "ce_reg.h"
30 #include "ce_bmi.h"
31 #include "regtable.h"
32 #include "hif_hw_version.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include "qdf_status.h"
36 #include "qdf_atomic.h"
37 #include "pld_common.h"
38 #include "mp_dev.h"
39 #include "hif_debug.h"
40 
41 #include "ce_tasklet.h"
42 #include "targaddrs.h"
43 #include "hif_exec.h"
44 
45 #define CNSS_RUNTIME_FILE "cnss_runtime_pm"
46 #define CNSS_RUNTIME_FILE_PERM QDF_FILE_USR_READ
47 
48 #ifdef FEATURE_RUNTIME_PM
49 
50 static struct hif_rtpm_ctx g_hif_rtpm_ctx;
51 static struct hif_rtpm_ctx *gp_hif_rtpm_ctx;
52 
53 /**
54  * hif_rtpm_id_to_string() - Convert dbgid to respective string
55  * @id -  debug id
56  *
57  * Debug support function to convert  dbgid to string.
58  * Please note to add new string in the array at index equal to
59  * its enum value in wlan_rtpm_dbgid.
60  *
61  * Return: String of ID
62  */
63 static const char *hif_rtpm_id_to_string(enum hif_rtpm_client_id id)
64 {
65 	static const char * const strings[] = {
66 					"HIF_RTPM_ID_RESERVED",
67 					"HIF_RTPM_HAL_REO_CMD",
68 					"HIF_RTPM_WMI",
69 					"HIF_RTPM_HTT",
70 					"HIF_RTPM_DP",
71 					"HIF_RTPM_RING_STATS",
72 					"HIF_RTPM_CE",
73 					"HIF_RTPM_FORCE_WAKE",
74 					"HIF_RTPM_ID_PM_QOS_NOTIFY",
75 					"HIF_RTPM_ID_WIPHY_SUSPEND",
76 					"HIF_RTPM_ID_MAX"
77 	};
78 
79 	return strings[id];
80 }
81 
/**
 * hif_rtpm_read_usage_count() - Read device usage count
 *
 * Reads the runtime PM usage counter directly from the power state
 * embedded in the registered struct device (gp_hif_rtpm_ctx->dev).
 *
 * Return: current usage count
 */
static inline int hif_rtpm_read_usage_count(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->dev->power.usage_count);
}
92 
93 #define HIF_RTPM_STATS(_s, _rtpm_ctx, _name) \
94 	seq_printf(_s, "%30s: %u\n", #_name, (_rtpm_ctx)->stats._name)
95 
/**
 * hif_rtpm_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_rtpm_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_rtpm_client *client = NULL;
	struct hif_pm_runtime_lock *ctx;
	/* Indexed by enum hif_rtpm_state; assumes pm_state stays within
	 * the enum range — TODO confirm no wider values are ever set.
	 */
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
			"RESUMING_LINKUP", "SUSPENDING", "SUSPENDED"};
	int pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
	int i;

	seq_printf(s, "%30s: %llu\n", "Current timestamp",
		   qdf_get_log_timestamp());

	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);

	seq_printf(s, "%30s: %llu\n", "Last Busy timestamp",
		   gp_hif_rtpm_ctx->stats.last_busy_ts);

	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
		   gp_hif_rtpm_ctx->stats.last_busy_marker);

	/* DP and CE are the hot-path clients; dump their busy markers. */
	seq_puts(s, "Rx busy marker counts:\n");
	seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_DP),
		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_cnt,
		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_ts);

	seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_CE),
		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_cnt,
		   gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_ts);

	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, last_busy_id);

	if (pm_state == HIF_RTPM_STATE_SUSPENDED) {
		seq_printf(s, "%30s: %llx us\n", "Suspended Since",
			   gp_hif_rtpm_ctx->stats.suspend_ts);
	}

	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, resume_count);
	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_count);
	HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_err_count);

	seq_printf(s, "%30s: %d\n", "PM Usage count",
		   hif_rtpm_read_usage_count());

	/* Per-client get/put counters and timestamps. */
	seq_puts(s, "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		client = gp_hif_rtpm_ctx->clients[i];
		if (!client)
			continue;
		seq_printf(s, "%-10d ", qdf_atomic_read(&client->get_count));
		seq_printf(s, "%-10d ", qdf_atomic_read(&client->put_count));
		seq_printf(s, "0x%-10llx ", client->get_ts);
		seq_printf(s, "0x%-10llx ", client->put_ts);
		seq_printf(s, ":%-2d %-30s\n", i, hif_rtpm_id_to_string(i));
	}
	seq_puts(s, "\n");

	/* Walk the prevent-suspend list under its lock; early return when
	 * empty so the "Active Wakeup_Sources" header is only printed when
	 * there is at least one entry.
	 */
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	if (list_empty(&gp_hif_rtpm_ctx->prevent_list)) {
		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list) {
		seq_printf(s, "%s", ctx->name);
		seq_puts(s, " ");
	}
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);

	return 0;
}
176 
177 #undef HIF_RTPM_STATS
178 
/**
 * hif_rtpm_debugfs_open() - open a debug fs file to access the runtime pm stats
 * @inode: inode of the debugfs entry; i_private is passed to the show handler
 * @file: file being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_rtpm_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_rtpm_debugfs_show,
			inode->i_private);
}
191 
/* File operations for the runtime PM debugfs entry; reads are served
 * through the seq_file single_open/seq_read machinery.
 */
static const struct file_operations hif_rtpm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_rtpm_debugfs_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};
199 
/**
 * hif_rtpm_debugfs_create() - creates runtimepm debugfs entry
 *
 * Creates the CNSS_RUNTIME_FILE debugfs entry (user-read-only) at the
 * debugfs root and stores the dentry for later removal.
 *
 * Return: void
 */
static void hif_rtpm_debugfs_create(void)
{
	gp_hif_rtpm_ctx->pm_dentry = qdf_debugfs_create_entry(CNSS_RUNTIME_FILE,
							CNSS_RUNTIME_FILE_PERM,
							NULL,
							NULL,
							&hif_rtpm_fops);
}
214 
/**
 * hif_rtpm_debugfs_remove() - removes runtimepm debugfs entry
 *
 * Removes the debugfs entry created by hif_rtpm_debugfs_create().
 *
 * Return: void
 */
static void hif_rtpm_debugfs_remove(void)
{
	qdf_debugfs_remove_file(gp_hif_rtpm_ctx->pm_dentry);
}
225 
/**
 * hif_rtpm_init() - Initialize Runtime PM
 * @dev: device structure
 * @delay: delay to be configured for auto suspend
 *
 * This function will init all the Runtime PM config.
 *
 * Return: void
 */
static void hif_rtpm_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	/* Undo the pm_runtime_forbid() applied at probe so autosuspend
	 * can actually kick in.
	 */
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	/* Drop the usage count taken at enable without triggering an
	 * immediate idle check; balanced by get_noresume in hif_rtpm_exit.
	 */
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}
244 
/**
 * hif_rtpm_exit() - Deinit/Exit Runtime PM
 * @dev: device structure
 *
 * This function will deinit all the Runtime PM config.
 *
 * Return: void
 */
static void hif_rtpm_exit(struct device *dev)
{
	/* Re-take the usage count dropped in hif_rtpm_init() and force
	 * the device back to the active, runtime-PM-forbidden state.
	 */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_forbid(dev);
}
259 
/* hif_rtpm_open() - attach the global runtime PM context to @scn's device,
 * initialize its locks, state atomics and prevent list, and register the
 * always-present CE and FORCE_WAKE clients (no callbacks).
 */
void hif_rtpm_open(struct hif_softc *scn)
{
	gp_hif_rtpm_ctx = &g_hif_rtpm_ctx;
	gp_hif_rtpm_ctx->dev = scn->qdf_dev->dev;
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_suspend_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->prevent_list_lock);
	qdf_atomic_init(&gp_hif_rtpm_ctx->pm_state);
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
	qdf_atomic_init(&gp_hif_rtpm_ctx->monitor_wake_intr);
	INIT_LIST_HEAD(&gp_hif_rtpm_ctx->prevent_list);
	gp_hif_rtpm_ctx->client_count = 0;
	gp_hif_rtpm_ctx->pending_job = 0;
	hif_rtpm_register(HIF_RTPM_ID_CE, NULL);
	hif_rtpm_register(HIF_RTPM_ID_FORCE_WAKE, NULL);
	hif_info_high("Runtime PM attached");
}
277 
278 static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock);
279 
280 /**
281  * hif_rtpm_sanitize_exit(): sanitize runtime PM gets/puts from driver
282  *
283  * Ensure all gets/puts are in sync before exiting runtime PM feature.
284  * Also make sure all runtime PM locks are deinitialized properly.
285  *
286  * Return: void
287  */
288 static void hif_rtpm_sanitize_exit(void)
289 {
290 	struct hif_pm_runtime_lock *ctx, *tmp;
291 	struct hif_rtpm_client *client;
292 	int i, active_count;
293 
294 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
295 	list_for_each_entry_safe(ctx, tmp,
296 				 &gp_hif_rtpm_ctx->prevent_list, list) {
297 		hif_runtime_lock_deinit(ctx);
298 	}
299 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
300 
301 	/* check if get and put out of sync for all clients */
302 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
303 		client = gp_hif_rtpm_ctx->clients[i];
304 		if (client) {
305 			if (qdf_atomic_read(&client->active_count)) {
306 				active_count =
307 					qdf_atomic_read(&client->active_count);
308 				hif_err("Client active: %u- %s", i,
309 					hif_rtpm_id_to_string(i));
310 				QDF_DEBUG_PANIC("Client active on exit!");
311 				while (active_count--)
312 					__hif_rtpm_put_noidle(
313 							gp_hif_rtpm_ctx->dev);
314 			}
315 			QDF_DEBUG_PANIC("Client not deinitialized");
316 			qdf_mem_free(client);
317 			gp_hif_rtpm_ctx->clients[i] = NULL;
318 		}
319 	}
320 }
321 
/**
 * hif_rtpm_sanitize_ssr_exit() - Empty the suspend list on SSR
 *
 * API is used to empty the runtime pm prevent suspend list.
 * Uses __hif_pm_runtime_allow_suspend() directly (not
 * hif_runtime_lock_deinit()) so the list lock is held across the
 * whole walk and lock contexts are released but not freed.
 *
 * Return: void
 */
static void hif_rtpm_sanitize_ssr_exit(void)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &gp_hif_rtpm_ctx->prevent_list, list) {
		__hif_pm_runtime_allow_suspend(ctx);
	}
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
}
340 
341 void hif_rtpm_close(struct hif_softc *scn)
342 {
343 	hif_rtpm_deregister(HIF_RTPM_ID_CE);
344 	hif_rtpm_deregister(HIF_RTPM_ID_FORCE_WAKE);
345 
346 	hif_is_recovery_in_progress(scn) ?
347 		hif_rtpm_sanitize_ssr_exit() :
348 		hif_rtpm_sanitize_exit();
349 
350 	qdf_mem_set(gp_hif_rtpm_ctx, sizeof(*gp_hif_rtpm_ctx), 0);
351 	gp_hif_rtpm_ctx = NULL;
352 	hif_info_high("Runtime PM context detached");
353 }
354 
355 void hif_rtpm_start(struct hif_softc *scn)
356 {
357 	uint32_t mode = hif_get_conparam(scn);
358 
359 	gp_hif_rtpm_ctx->enable_rpm = scn->hif_config.enable_runtime_pm;
360 
361 	if (!gp_hif_rtpm_ctx->enable_rpm) {
362 		hif_info_high("RUNTIME PM is disabled in ini");
363 		return;
364 	}
365 
366 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
367 	    mode == QDF_GLOBAL_MONITOR_MODE) {
368 		hif_info("RUNTIME PM is disabled for FTM/EPPING/MONITOR mode");
369 		return;
370 	}
371 
372 	hif_info_high("Enabling RUNTIME PM, Delay: %d ms",
373 		      scn->hif_config.runtime_pm_delay);
374 
375 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_ON);
376 	hif_rtpm_init(gp_hif_rtpm_ctx->dev, scn->hif_config.runtime_pm_delay);
377 	hif_rtpm_debugfs_create();
378 }
379 
380 void hif_rtpm_stop(struct hif_softc *scn)
381 {
382 	uint32_t mode = hif_get_conparam(scn);
383 
384 	if (!gp_hif_rtpm_ctx->enable_rpm)
385 		return;
386 
387 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
388 	    mode == QDF_GLOBAL_MONITOR_MODE)
389 		return;
390 
391 	hif_rtpm_exit(gp_hif_rtpm_ctx->dev);
392 
393 	hif_rtpm_sync_resume();
394 
395 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
396 	hif_rtpm_debugfs_remove();
397 }
398 
399 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rtpm_cbk)(void))
400 {
401 	struct hif_rtpm_client *client;
402 
403 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
404 		hif_err("Runtime PM context NULL");
405 		return QDF_STATUS_E_FAILURE;
406 	}
407 
408 	if (id >= HIF_RTPM_ID_MAX || gp_hif_rtpm_ctx->clients[id]) {
409 		hif_err("Invalid client %d", id);
410 		return QDF_STATUS_E_INVAL;
411 	}
412 
413 	client = qdf_mem_malloc(sizeof(struct hif_rtpm_client));
414 	if (!client)
415 		return QDF_STATUS_E_NOMEM;
416 
417 	client->hif_rtpm_cbk = hif_rtpm_cbk;
418 	qdf_atomic_init(&client->active_count);
419 	qdf_atomic_init(&client->get_count);
420 	qdf_atomic_init(&client->put_count);
421 
422 	gp_hif_rtpm_ctx->clients[id] = client;
423 	gp_hif_rtpm_ctx->client_count++;
424 
425 	return QDF_STATUS_SUCCESS;
426 }
427 
/* hif_rtpm_deregister() - remove a runtime PM client.
 * If the client still holds outstanding gets (active_count != 0), log,
 * panic on debug builds, and rebalance the device usage count before
 * freeing the slot.
 */
QDF_STATUS hif_rtpm_deregister(uint32_t id)
{
	struct hif_rtpm_client *client;
	int active_count;

	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
		hif_err("Runtime PM context NULL");
		return QDF_STATUS_E_FAILURE;
	}

	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
		hif_err("invalid client, id: %u", id);
		return QDF_STATUS_E_INVAL;
	}

	client = gp_hif_rtpm_ctx->clients[id];
	if (qdf_atomic_read(&client->active_count)) {
		active_count = qdf_atomic_read(&client->active_count);
		hif_err("Client: %u-%s Runtime PM active",
			id, hif_rtpm_id_to_string(id));
		hif_err("last get called: 0x%llx, get count: %d, put count: %d",
			client->get_ts, qdf_atomic_read(&client->get_count),
			qdf_atomic_read(&client->put_count));
		QDF_DEBUG_PANIC("Get and PUT call out of sync!");
		/* drop the leaked usage-count references */
		while (active_count--)
			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
	}

	qdf_mem_free(client);
	gp_hif_rtpm_ctx->clients[id] = NULL;

	return QDF_STATUS_SUCCESS;
}
461 
462 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
463 {
464 	struct hif_pm_runtime_lock *context;
465 
466 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
467 		hif_err("Runtime PM context NULL");
468 		return QDF_STATUS_E_FAILURE;
469 	}
470 
471 	hif_debug("Initializing Runtime PM wakelock %s", name);
472 
473 	context = qdf_mem_malloc(sizeof(*context));
474 	if (!context)
475 		return -ENOMEM;
476 
477 	context->name = name ? name : "Default";
478 	lock->lock = context;
479 
480 	return 0;
481 }
482 
/* hif_runtime_lock_deinit() - release and free a prevent-suspend lock.
 * If the runtime PM context is still alive, the lock is first removed
 * from the prevent list (dropping its usage-count reference) under
 * prevent_list_lock; the context memory is then freed unconditionally.
 */
void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *lock)
{
	if (!lock) {
		hif_err("Runtime PM lock already freed");
		return;
	}

	hif_debug("Deinitializing Runtime PM wakelock %s", lock->name);

	if (gp_hif_rtpm_ctx) {
		qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
		__hif_pm_runtime_allow_suspend(lock);
		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	}

	qdf_mem_free(lock);
}
500 
501 /**
502  * hif_rtpm_enabled() - To check if Runtime PM is enabled
503  *
504  * This function will check if Runtime PM is enabled or not.
505  *
506  * Return: void
507  */
508 static bool hif_rtpm_enabled(void)
509 {
510 	if (qdf_unlikely(!gp_hif_rtpm_ctx))
511 		return false;
512 
513 	if (gp_hif_rtpm_ctx->enable_rpm)
514 		return true;
515 
516 	return __hif_rtpm_enabled(gp_hif_rtpm_ctx->dev);
517 }
518 
519 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id)
520 {
521 	struct hif_rtpm_client *client = NULL;
522 	int ret = QDF_STATUS_E_FAILURE;
523 	int pm_state;
524 
525 	if (!hif_rtpm_enabled())
526 		return QDF_STATUS_SUCCESS;
527 
528 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
529 		QDF_DEBUG_PANIC("Invalid client, id: %u", id);
530 		return -QDF_STATUS_E_INVAL;
531 	}
532 
533 	client = gp_hif_rtpm_ctx->clients[id];
534 
535 	if (type != HIF_RTPM_GET_ASYNC) {
536 		switch (type) {
537 		case HIF_RTPM_GET_FORCE:
538 			ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
539 			break;
540 		case HIF_RTPM_GET_SYNC:
541 			ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
542 			break;
543 		case HIF_RTPM_GET_NORESUME:
544 			__hif_rtpm_get_noresume(gp_hif_rtpm_ctx->dev);
545 			ret = 0;
546 			break;
547 		default:
548 			QDF_DEBUG_PANIC("Invalid call type");
549 			return QDF_STATUS_E_BADMSG;
550 		}
551 
552 		if (ret < 0 && ret != -EINPROGRESS) {
553 			hif_err("pm_state: %d ret: %d",
554 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
555 				ret);
556 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
557 		} else {
558 			ret = QDF_STATUS_SUCCESS;
559 		}
560 		goto out;
561 	}
562 
563 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
564 	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP) {
565 		ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
566 		/* Get will return 1 if the device is already active,
567 		 * just return success in that case
568 		 */
569 		if (ret > 0) {
570 			ret = QDF_STATUS_SUCCESS;
571 		} else if (ret == 0 || ret == -EINPROGRESS) {
572 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
573 			pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
574 			if (pm_state >= HIF_RTPM_STATE_RESUMING) {
575 				__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
576 				gp_hif_rtpm_ctx->stats.request_resume_ts =
577 							qdf_get_log_timestamp();
578 				gp_hif_rtpm_ctx->stats.request_resume_id = id;
579 				ret = QDF_STATUS_E_FAILURE;
580 			} else {
581 				ret = QDF_STATUS_SUCCESS;
582 			}
583 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
584 		} else if (ret < 0) {
585 			hif_err("pm_state: %d ret: %d",
586 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
587 				ret);
588 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
589 		}
590 	} else if (pm_state >= HIF_RTPM_STATE_RESUMING) {
591 		/* Do not log in performance path */
592 		if (id != HIF_RTPM_ID_DP)
593 			hif_info_high("request RTPM resume by %d- %s",
594 				      id, hif_rtpm_id_to_string(id));
595 		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
596 		gp_hif_rtpm_ctx->stats.request_resume_ts =
597 						qdf_get_log_timestamp();
598 		gp_hif_rtpm_ctx->stats.request_resume_id = id;
599 		return QDF_STATUS_E_FAILURE;
600 	}
601 
602 out:
603 	if (QDF_IS_STATUS_SUCCESS(ret)) {
604 		qdf_atomic_inc(&client->active_count);
605 		qdf_atomic_inc(&client->get_count);
606 		client->get_ts = qdf_get_log_timestamp();
607 	}
608 
609 	return ret;
610 }
611 
/* hif_rtpm_put() - release a runtime PM reference for client @id.
 * Sanity-checks the device usage count against the client's active
 * count to catch PUT-without-GET, then performs the requested put
 * flavor and marks the device busy.
 */
QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
{
	struct hif_rtpm_client *client;
	int usage_count;

	if (!hif_rtpm_enabled())
		return QDF_STATUS_SUCCESS;

	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
		hif_err("Invalid client, id: %u", id);
		return QDF_STATUS_E_INVAL;
	}

	client = gp_hif_rtpm_ctx->clients[id];

	/* With runtime PM disabled the baseline usage count is 2
	 * (see hif_rtpm_exit/pm_runtime_get_noresume); a put from there
	 * would underflow the baseline. With it enabled, a zero count
	 * means there is no outstanding get to release.
	 */
	usage_count = hif_rtpm_read_usage_count();
	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
		hif_err("Unexpected PUT when runtime PM is disabled");
		QDF_BUG(0);
		return QDF_STATUS_E_CANCELED;
	} else if (!usage_count || !qdf_atomic_read(&client->active_count)) {
		hif_info_high("Put without a Get operation, %u-%s",
			      id, hif_rtpm_id_to_string(id));
		return QDF_STATUS_E_CANCELED;
	}

	switch (type) {
	case HIF_RTPM_PUT_ASYNC:
		__hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_NOIDLE:
		__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_SYNC_SUSPEND:
		__hif_rtpm_put_sync_suspend(gp_hif_rtpm_ctx->dev);
		break;
	default:
		QDF_DEBUG_PANIC("Invalid call type");
		return QDF_STATUS_E_BADMSG;
	}

	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
	qdf_atomic_dec(&client->active_count);
	qdf_atomic_inc(&client->put_count);
	client->put_ts = qdf_get_log_timestamp();
	gp_hif_rtpm_ctx->stats.last_busy_ts = client->put_ts;

	return QDF_STATUS_SUCCESS;

}
662 
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 *                                      reason
 * @lock: runtime_pm lock being acquired
 *
 * Caller must hold gp_hif_rtpm_ctx->prevent_list_lock.
 *
 * Return: 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	/* already on the prevent list — taking it again would leak a
	 * usage-count reference
	 */
	if (lock->active)
		return 0;

	ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);

	/*
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do runtime_put here as in later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		gp_hif_rtpm_ctx->stats.runtime_get_err++;
		hif_err("pm_state: %d ret: %d",
			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
			ret);
	}

	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
	lock->active = true;
	gp_hif_rtpm_ctx->prevent_cnt++;
	gp_hif_rtpm_ctx->stats.prevent_suspend++;
	return ret;
}
699 
/**
 * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @lock: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count. Caller must hold
 * gp_hif_rtpm_ctx->prevent_list_lock.
 *
 * Return: status
 *
 * NOTE(review): mixes errno returns (from __hif_rtpm_put_auto) with
 * positive QDF_STATUS_E_CANCELED; callers currently ignore the value.
 */
static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;
	int usage_count;

	if (gp_hif_rtpm_ctx->prevent_cnt == 0 || !lock->active)
		return ret;

	usage_count = hif_rtpm_read_usage_count();
	/*
	 * For runtime PM enabled case, the usage count should never be 0
	 * at this point. For runtime PM disabled case, it should never be
	 * 2 at this point. Catch unexpected PUT without GET here.
	 */
	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
		hif_err("Unexpected PUT when runtime PM is disabled");
		QDF_BUG(0);
		return QDF_STATUS_E_CANCELED;
	} else if (!usage_count) {
		hif_info_high("Put without a Get operation, %s", lock->name);
		return QDF_STATUS_E_CANCELED;
	}

	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
	ret = __hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);

	list_del(&lock->list);
	lock->active = false;
	gp_hif_rtpm_ctx->prevent_cnt--;
	gp_hif_rtpm_ctx->stats.allow_suspend++;
	return ret;
}
741 
742 
743 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
744 {
745 	if (!hif_rtpm_enabled() || !lock)
746 		return -EINVAL;
747 
748 	if (in_irq())
749 		WARN_ON(1);
750 
751 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
752 	__hif_pm_runtime_prevent_suspend(lock);
753 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
754 
755 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
756 		HIF_RTPM_STATE_SUSPENDING)
757 		hif_info_high("request RTPM resume by %s",
758 			      lock->name);
759 
760 	return 0;
761 }
762 
/**
 * __hif_pm_runtime_prevent_suspend_sync() - synchronized prevent runtime
 *  suspend for a protocol reason
 * @lock: runtime_pm lock being acquired
 *
 * Unlike __hif_pm_runtime_prevent_suspend(), this variant resumes the
 * device synchronously and takes prevent_list_lock itself.
 *
 * Return: 0 if successful.
 */
static
int __hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	if (lock->active)
		return 0;

	ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);

	/*
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do runtime_put here as in later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		gp_hif_rtpm_ctx->stats.runtime_get_err++;
		hif_err("pm_state: %d ret: %d",
			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
			ret);
	}

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
	lock->active = true;
	gp_hif_rtpm_ctx->prevent_cnt++;
	gp_hif_rtpm_ctx->stats.prevent_suspend++;
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);

	return ret;
}
803 
804 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
805 {
806 	if (!hif_rtpm_enabled())
807 		return 0;
808 
809 	if (!lock)
810 		return -EINVAL;
811 
812 	if (in_irq())
813 		WARN_ON(1);
814 
815 	__hif_pm_runtime_prevent_suspend_sync(lock);
816 
817 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
818 		HIF_RTPM_STATE_SUSPENDING)
819 		hif_info_high("request RTPM resume by %s",
820 			      lock->name);
821 
822 	return 0;
823 }
824 
825 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
826 {
827 	if (!hif_rtpm_enabled())
828 		return 0;
829 
830 	if (!lock)
831 		return -EINVAL;
832 
833 	if (in_irq())
834 		WARN_ON(1);
835 
836 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
837 	__hif_pm_runtime_allow_suspend(lock);
838 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
839 
840 	return 0;
841 }
842 
/* hif_rtpm_sync_resume() - synchronously resume the device and mark it
 * busy; updates resume/last-busy statistics on success.
 *
 * Return: QDF_STATUS_SUCCESS on resume (or when runtime PM is disabled),
 * QDF_STATUS_E_FAILURE otherwise.
 */
QDF_STATUS hif_rtpm_sync_resume(void)
{
	struct device *dev;
	int pm_state;
	int ret;

	if (!hif_rtpm_enabled())
		return 0;

	dev = gp_hif_rtpm_ctx->dev;
	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);

	ret = __hif_rtpm_resume(dev);
	__hif_rtpm_mark_last_busy(dev);

	/* resume returns 0 on success and 1 if already active */
	if (ret >= 0) {
		gp_hif_rtpm_ctx->stats.resume_count++;
		gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
		gp_hif_rtpm_ctx->stats.last_busy_ts =
					gp_hif_rtpm_ctx->stats.resume_ts;
		return QDF_STATUS_SUCCESS;
	}

	/* pm_state was sampled before the resume attempt */
	hif_err("pm_state: %d, err: %d", pm_state, ret);
	return QDF_STATUS_E_FAILURE;
}
869 
870 void hif_rtpm_request_resume(void)
871 {
872 	__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
873 	hif_info_high("request RTPM resume %s", (char *)_RET_IP_);
874 }
875 
/* hif_rtpm_check_and_request_resume() - if a suspend is in flight, request
 * an async resume; otherwise just refresh the last-busy marker. The
 * suspend lock serializes the pm_state check against the suspend path.
 */
void hif_rtpm_check_and_request_resume(void)
{
	hif_rtpm_suspend_lock();
	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
			HIF_RTPM_STATE_SUSPENDING) {
		/* drop the lock before requesting resume */
		hif_rtpm_suspend_unlock();
		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.request_resume_ts =
						qdf_get_log_timestamp();
		gp_hif_rtpm_ctx->stats.request_resume_id = HIF_RTPM_ID_RESERVED;
	} else {
		__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
		hif_rtpm_suspend_unlock();
	}
}
892 
/* hif_rtpm_get_monitor_wake_intr() - read the monitor-wake-interrupt flag */
int hif_rtpm_get_monitor_wake_intr(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->monitor_wake_intr);
}

/* hif_rtpm_set_monitor_wake_intr() - set/clear the monitor-wake-interrupt
 * flag (set while runtime-suspended so wake interrupts are watched for)
 */
void hif_rtpm_set_monitor_wake_intr(int val)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->monitor_wake_intr, val);
}
902 
903 void hif_rtpm_mark_last_busy(uint32_t id)
904 {
905 	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
906 	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
907 	gp_hif_rtpm_ctx->stats.last_busy_id = id;
908 	gp_hif_rtpm_ctx->stats.last_busy_marker = (void *)_RET_IP_;
909 	if (gp_hif_rtpm_ctx->clients[id]) {
910 		gp_hif_rtpm_ctx->clients[id]->last_busy_cnt++;
911 		gp_hif_rtpm_ctx->clients[id]->last_busy_ts =
912 					gp_hif_rtpm_ctx->stats.last_busy_ts;
913 	}
914 }
915 
/* hif_rtpm_set_client_job() - run client @client_id's resume callback now
 * if the bus is usable (state <= RESUMING_LINKUP); otherwise latch a
 * pending-job bit serviced by hif_rtpm_pending_job() on resume. The
 * runtime_lock makes the state check and bit set atomic w.r.t. resume.
 */
void hif_rtpm_set_client_job(uint32_t client_id)
{
	int pm_state;

	if (!gp_hif_rtpm_ctx->clients[client_id])
		return;

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP &&
	    gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk)
		gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk();
	else
		qdf_set_bit(client_id, &gp_hif_rtpm_ctx->pending_job);
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}
932 
933 /**
934  * hif_rtpm_pending_job() - continue jobs when bus resumed
935  *
936  * Return: Void
937  */
938 static void hif_rtpm_pending_job(void)
939 {
940 	int i;
941 
942 	for (i = 0; i < gp_hif_rtpm_ctx->client_count; i++) {
943 		if (qdf_test_and_clear_bit(i, &gp_hif_rtpm_ctx->pending_job)) {
944 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
945 			if (gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk)
946 				gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk();
947 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
948 		}
949 	}
950 }
951 
952 #define PREVENT_LIST_STRING_LEN 200
953 
/* hif_rtpm_print_prevent_list() - log the names of all prevent-suspend
 * locks currently held, plus any clients with a non-zero active count.
 * Used when a runtime suspend attempt is rejected.
 */
void hif_rtpm_print_prevent_list(void)
{
	struct hif_rtpm_client *client;
	struct hif_pm_runtime_lock *ctx;
	char *str_buf;
	int i, prevent_list_count, len = 0;

	str_buf = qdf_mem_malloc(PREVENT_LIST_STRING_LEN);
	if (!str_buf)
		return;

	/* NOTE(review): uses qdf_spin_lock, not the _bh variant used by
	 * every other prevent_list_lock taker — confirm this cannot race
	 * with bottom-half context.
	 */
	qdf_spin_lock(&gp_hif_rtpm_ctx->prevent_list_lock);
	prevent_list_count = gp_hif_rtpm_ctx->prevent_cnt;
	if (prevent_list_count) {
		list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list)
			len += qdf_scnprintf(str_buf + len,
				PREVENT_LIST_STRING_LEN - len,
				"%s ", ctx->name);
	}
	qdf_spin_unlock(&gp_hif_rtpm_ctx->prevent_list_lock);

	if (prevent_list_count)
		hif_info_high("prevent_suspend_cnt %u, prevent_list: %s",
			      prevent_list_count, str_buf);

	qdf_mem_free(str_buf);

	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		client = gp_hif_rtpm_ctx->clients[i];
		if (client && qdf_atomic_read(&client->active_count))
			hif_info_high("client: %d: %s- active count: %d", i,
				      hif_rtpm_id_to_string(i),
				      qdf_atomic_read(&client->active_count));
	}
}
989 
990 /**
991  * hif_rtpm_is_suspend_allowed() - Reject suspend if client is active
992  *
993  * Return: True if no clients are active
994  */
995 static bool hif_rtpm_is_suspend_allowed(void)
996 {
997 	if (!gp_hif_rtpm_ctx || !gp_hif_rtpm_ctx->enable_rpm)
998 		return false;
999 
1000 	if (!hif_rtpm_read_usage_count())
1001 		return true;
1002 
1003 	return false;
1004 }
1005 
/* hif_rtpm_suspend_lock() - take the irq-save suspend lock that serializes
 * pm_state checks against the runtime suspend path
 */
void hif_rtpm_suspend_lock(void)
{
	qdf_spin_lock_irqsave(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}

/* hif_rtpm_suspend_unlock() - release the suspend lock */
void hif_rtpm_suspend_unlock(void)
{
	qdf_spin_unlock_irqrestore(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
1015 
/**
 * hif_rtpm_set_state(): utility function
 * @state: state to set
 *
 * Return: Void
 */
static inline
void hif_rtpm_set_state(enum hif_rtpm_state state)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, state);
}

/* hif_rtpm_get_state() - read the current runtime PM state machine value */
int hif_rtpm_get_state(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
}
1032 
1033 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
1034 {
1035 	if (!hif_can_suspend_link(hif_ctx)) {
1036 		hif_err("Runtime PM not supported for link up suspend");
1037 		return -EINVAL;
1038 	}
1039 
1040 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1041 	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDING);
1042 
1043 	/* keep this after set suspending */
1044 	if (!hif_rtpm_is_suspend_allowed()) {
1045 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1046 		hif_rtpm_print_prevent_list();
1047 		gp_hif_rtpm_ctx->stats.suspend_err_count++;
1048 		gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
1049 		hif_info_high("Runtime PM not allowed now");
1050 		return -EINVAL;
1051 	}
1052 
1053 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1054 
1055 	return QDF_STATUS_SUCCESS;
1056 }
1057 
/* hif_process_runtime_suspend_success() - bus suspend completed; move the
 * state machine to SUSPENDED and update suspend statistics.
 */
void hif_process_runtime_suspend_success(void)
{
	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDED);
	gp_hif_rtpm_ctx->stats.suspend_count++;
	gp_hif_rtpm_ctx->stats.suspend_ts = qdf_get_log_timestamp();
}

/* hif_process_runtime_suspend_failure() - bus suspend failed; return to
 * the ON state, flush any client jobs deferred during the attempt, and
 * record the error plus a fresh last-busy mark.
 */
void hif_process_runtime_suspend_failure(void)
{
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
	hif_rtpm_pending_job();
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);

	gp_hif_rtpm_ctx->stats.suspend_err_count++;
	gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
}

/* hif_pre_runtime_resume() - a resume is starting; clear the wake-intr
 * monitor flag and move to RESUMING under the runtime lock.
 */
void hif_pre_runtime_resume(void)
{
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	hif_rtpm_set_monitor_wake_intr(0);
	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING);
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}

/* hif_process_runtime_resume_linkup() - the link is back up; advance to
 * RESUMING_LINKUP and run callbacks for jobs queued while suspended.
 */
void hif_process_runtime_resume_linkup(void)
{
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING_LINKUP);
	hif_rtpm_pending_job();
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}

/* hif_process_runtime_resume_success() - resume fully done; move to ON
 * and refresh resume/last-busy statistics.
 */
void hif_process_runtime_resume_success(void)
{
	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
	gp_hif_rtpm_ctx->stats.resume_count++;
	gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
	gp_hif_rtpm_ctx->stats.last_busy_ts = gp_hif_rtpm_ctx->stats.resume_ts;
	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
}
1102 
/* hif_runtime_suspend() - perform the bus-level runtime suspend sequence:
 * regular suspend, arm wake-interrupt monitoring, then the noirq phase.
 * On noirq failure the regular suspend is rolled back.
 *
 * Return: 0 on success, negative errno on failure.
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend: %d", errno);
		return errno;
	}

	/* watch for wake interrupts while suspended */
	hif_rtpm_set_monitor_wake_intr(1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend noirq: %d", errno);
		hif_rtpm_set_monitor_wake_intr(0);
		goto bus_resume;
	}

	return 0;

bus_resume:
	/* roll back the successful hif_bus_suspend() above */
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}
1129 
/* hif_runtime_resume() - bus-level runtime resume: noirq phase first
 * (must succeed), then the regular bus resume.
 *
 * Return: 0 on success, negative errno from hif_bus_resume().
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int errno;

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
	errno = hif_bus_resume(hif_ctx);
	if (errno)
		hif_err("Failed runtime resume: %d", errno);

	return errno;
}
1141 
/* hif_fastpath_resume() - in fastpath mode, re-push the HTT H2T copy
 * engine's source-ring write index to hardware after resume so any
 * entries queued while suspended are picked up. Requires target access
 * and the CE index lock around the register write.
 */
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/*war_ce_src_ring_write_idx_set */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}
1164 #endif /* FEATURE_RUNTIME_PM */
1165