1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8 
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
20 #include <linux/if_arp.h>
21 #include "hif_io32.h"
22 #include "hif_runtime_pm.h"
23 #include "hif.h"
24 #include "target_type.h"
25 #include "hif_main.h"
26 #include "ce_main.h"
27 #include "ce_api.h"
28 #include "ce_internal.h"
29 #include "ce_reg.h"
30 #include "ce_bmi.h"
31 #include "regtable.h"
32 #include "hif_hw_version.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include "qdf_status.h"
36 #include "qdf_atomic.h"
37 #include "pld_common.h"
38 #include "mp_dev.h"
39 #include "hif_debug.h"
40 
41 #include "ce_tasklet.h"
42 #include "targaddrs.h"
43 #include "hif_exec.h"
44 
45 #define CNSS_RUNTIME_FILE "cnss_runtime_pm"
46 #define CNSS_RUNTIME_FILE_PERM QDF_FILE_USR_READ
47 
48 #ifdef FEATURE_RUNTIME_PM
49 
50 static struct hif_rtpm_ctx g_hif_rtpm_ctx;
51 static struct hif_rtpm_ctx *gp_hif_rtpm_ctx;
52 
53 /**
54  * hif_rtpm_id_to_string() - Convert dbgid to respective string
55  * @id: debug id
56  *
57  * Debug support function to convert  dbgid to string.
58  * Please note to add new string in the array at index equal to
59  * its enum value in wlan_rtpm_dbgid.
60  *
61  * Return: String of ID
62  */
hif_rtpm_id_to_string(enum hif_rtpm_client_id id)63 static const char *hif_rtpm_id_to_string(enum hif_rtpm_client_id id)
64 {
65 	static const char * const strings[] = {
66 					"HIF_RTPM_ID_RESERVED",
67 					"HIF_RTPM_HAL_REO_CMD",
68 					"HIF_RTPM_WMI",
69 					"HIF_RTPM_HTT",
70 					"HIF_RTPM_DP",
71 					"HIF_RTPM_RING_STATS",
72 					"HIF_RTPM_CE",
73 					"HIF_RTPM_FORCE_WAKE",
74 					"HIF_RTPM_ID_PM_QOS_NOTIFY",
75 					"HIF_RTPM_ID_WIPHY_SUSPEND",
76 					"HIF_RTPM_ID_MAX"
77 	};
78 
79 	return strings[id];
80 }
81 
82 /**
83  * hif_rtpm_read_usage_count() - Read device usage count
84  *
85  * Return: current usage count
86  */
hif_rtpm_read_usage_count(void)87 static inline int hif_rtpm_read_usage_count(void)
88 {
89 	return qdf_atomic_read(&gp_hif_rtpm_ctx->dev->power.usage_count);
90 }
91 
92 /**
93  * hif_rtpm_print(): print stats for runtimepm
94  * @type: type of caller
95  * @index: pointer to index to keep track of print position
96  * @buf: pointer of buffer to print to
97  * @fmt: format string
98  *
99  * debugging tool added to allow for unified API for debug/sys fs rtpm printing
100  */
101 static void
hif_rtpm_print(enum hif_rtpm_fill_type type,int * index,void * buf,char * fmt,...)102 hif_rtpm_print(enum hif_rtpm_fill_type type, int *index, void *buf,
103 	       char *fmt, ...)
104 {
105 	va_list args;
106 
107 	va_start(args, fmt);
108 	if (type == HIF_RTPM_FILL_TYPE_SYSFS) {
109 		if (index)
110 			*index += vscnprintf((char *)buf + *index, PAGE_SIZE,
111 					     fmt, args);
112 	} else if (type == HIF_RTPM_FILL_TYPE_DEBUGFS) {
113 		seq_vprintf((struct seq_file *)buf, fmt, args);
114 	}
115 
116 	va_end(args);
117 }
118 
119 #define HIF_RTPM_STATS(_type, _index,  _s, _rtpm_ctx, _name) \
120 	hif_rtpm_print(_type, _index,  _s, "%30s: %u\n", #_name, \
121 		       (_rtpm_ctx)->stats._name)
122 
/**
 * hif_rtpm_log_debug_stats() - Dump runtime PM state and per-client stats
 * @s: destination buffer (sysfs char buffer or debugfs seq_file)
 * @type: destination type selecting how hif_rtpm_print() emits output
 *
 * Formats the current runtime PM state, last-busy/resume bookkeeping,
 * per-client get/put counters and the active prevent-suspend lock names.
 *
 * Return: number of bytes written (sysfs path); for debugfs the index is
 *         unused by the caller.
 */
int hif_rtpm_log_debug_stats(void *s, enum hif_rtpm_fill_type type)
{
	int index = 0;
	struct hif_rtpm_client *client = NULL;
	struct hif_pm_runtime_lock *ctx;
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
			"RESUMING_LINKUP", "SUSPENDING", "SUSPENDED"};
	/* NOTE(review): pm_state indexes autopm_state[] without a range
	 * check — assumes enum hif_rtpm_state has exactly these six values;
	 * confirm if new states are added.
	 */
	int pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
	int i;

	hif_rtpm_print(type, &index, s, "%30s: %llu\n", "Current timestamp",
		       qdf_get_log_timestamp());

	hif_rtpm_print(type, &index, s, "%30s: %s\n", "Runtime PM state",
		       autopm_state[pm_state]);

	hif_rtpm_print(type, &index, s, "%30s: %llu\n", "Last Busy timestamp",
		       gp_hif_rtpm_ctx->stats.last_busy_ts);

	hif_rtpm_print(type, &index, s, "%30s: %llu\n", "Last resume request timestamp",
		       gp_hif_rtpm_ctx->stats.request_resume_ts);

	hif_rtpm_print(type, &index, s, "%30s: %llu\n",
		       "Last resume request by",
		       gp_hif_rtpm_ctx->stats.request_resume_id);

	hif_rtpm_print(type, &index, s, "%30s: %ps\n", "Last Busy Marker",
		       gp_hif_rtpm_ctx->stats.last_busy_marker);

	/* NOTE(review): DP and CE clients are dereferenced without a NULL
	 * check; CE is registered in hif_rtpm_open(), DP presumably by the
	 * datapath — verify callers cannot reach this before registration.
	 */
	hif_rtpm_print(type, &index, s, "Rx busy marker counts:\n");
	hif_rtpm_print(type, &index, s, "%30s: %u %llu\n",
		       hif_rtpm_id_to_string(HIF_RTPM_ID_DP),
		       gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_cnt,
		       gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_ts);

	hif_rtpm_print(type, &index, s, "%30s: %u %llu\n",
		       hif_rtpm_id_to_string(HIF_RTPM_ID_CE),
		       gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_cnt,
		       gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_ts);

	HIF_RTPM_STATS(type, &index, s, gp_hif_rtpm_ctx, last_busy_id);

	if (pm_state == HIF_RTPM_STATE_SUSPENDED) {
		hif_rtpm_print(type, &index, s, "%30s: %llx us\n",
			       "Suspended Since",
			       gp_hif_rtpm_ctx->stats.suspend_ts);
	}

	HIF_RTPM_STATS(type, &index, s, gp_hif_rtpm_ctx, resume_count);
	HIF_RTPM_STATS(type, &index, s, gp_hif_rtpm_ctx, suspend_count);
	HIF_RTPM_STATS(type, &index, s, gp_hif_rtpm_ctx, suspend_err_count);

	hif_rtpm_print(type, &index, s, "%30s: %d\n", "PM Usage count",
		       hif_rtpm_read_usage_count());

	/* One row per registered client: get/put counters and timestamps. */
	hif_rtpm_print(type, &index, s,
		       "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		client = gp_hif_rtpm_ctx->clients[i];
		if (!client)
			continue;
		hif_rtpm_print(type, &index, s, "%-10d ",
			       qdf_atomic_read(&client->get_count));
		hif_rtpm_print(type, &index, s, "%-10d ",
			       qdf_atomic_read(&client->put_count));
		hif_rtpm_print(type, &index, s, "0x%-10llx ", client->get_ts);
		hif_rtpm_print(type, &index, s, "0x%-10llx ", client->put_ts);
		hif_rtpm_print(type, &index, s, ":%-2d %-30s\n", i,
			       hif_rtpm_id_to_string(i));
	}
	hif_rtpm_print(type, &index, s, "\n");

	/* Print active prevent-suspend lock names under the list lock. */
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	if (list_empty(&gp_hif_rtpm_ctx->prevent_list)) {
		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
		return index;
	}

	hif_rtpm_print(type, &index, s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list) {
		hif_rtpm_print(type, &index, s, "%s", ctx->name);
		hif_rtpm_print(type, &index, s, " ");
	}
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	hif_rtpm_print(type, &index, s, "\n");

	return index;
}
211 
212 /**
213  * hif_rtpm_debugfs_show(): show debug stats for runtimepm
214  * @s: file to print to
215  * @data: unused
216  *
217  * debugging tool added to the debug fs for displaying runtimepm stats
218  *
219  * Return: 0
220  */
221 
hif_rtpm_debugfs_show(struct seq_file * s,void * data)222 static int hif_rtpm_debugfs_show(struct seq_file *s, void *data)
223 {
224 	return hif_rtpm_log_debug_stats((void *)s, HIF_RTPM_FILL_TYPE_DEBUGFS);
225 }
226 
227 #undef HIF_RTPM_STATS
228 
229 /**
230  * hif_rtpm_debugfs_open() - open a debug fs file to access the runtime pm stats
231  * @inode:
232  * @file:
233  *
234  * Return: linux error code of single_open.
235  */
hif_rtpm_debugfs_open(struct inode * inode,struct file * file)236 static int hif_rtpm_debugfs_open(struct inode *inode, struct file *file)
237 {
238 	return single_open(file, hif_rtpm_debugfs_show,
239 			inode->i_private);
240 }
241 
/* File operations for the runtime PM debugfs entry; reads are routed
 * through seq_file so hif_rtpm_debugfs_show() formats the stats.
 */
static const struct file_operations hif_rtpm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_rtpm_debugfs_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};
249 
250 /**
251  * hif_rtpm_debugfs_create() - creates runtimepm debugfs entry
252  *
253  * creates a debugfs entry to debug the runtime pm feature.
254  */
hif_rtpm_debugfs_create(void)255 static void hif_rtpm_debugfs_create(void)
256 {
257 	gp_hif_rtpm_ctx->pm_dentry = qdf_debugfs_create_entry(CNSS_RUNTIME_FILE,
258 							CNSS_RUNTIME_FILE_PERM,
259 							NULL,
260 							NULL,
261 							&hif_rtpm_fops);
262 }
263 
264 /**
265  * hif_rtpm_debugfs_remove() - removes runtimepm debugfs entry
266  *
267  * removes the debugfs entry to debug the runtime pm feature.
268  */
hif_rtpm_debugfs_remove(void)269 static void hif_rtpm_debugfs_remove(void)
270 {
271 	qdf_debugfs_remove_file(gp_hif_rtpm_ctx->pm_dentry);
272 }
273 
274 /**
275  * hif_rtpm_init() - Initialize Runtime PM
276  * @dev: device structure
277  * @delay: delay to be configured for auto suspend
278  *
279  * This function will init all the Runtime PM config.
280  *
281  * Return: void
282  */
hif_rtpm_init(struct device * dev,int delay)283 static void hif_rtpm_init(struct device *dev, int delay)
284 {
285 	pm_runtime_set_autosuspend_delay(dev, delay);
286 	pm_runtime_use_autosuspend(dev);
287 	pm_runtime_allow(dev);
288 	pm_runtime_mark_last_busy(dev);
289 	pm_runtime_put_noidle(dev);
290 	pm_suspend_ignore_children(dev, true);
291 }
292 
293 /**
294  * hif_rtpm_exit() - Deinit/Exit Runtime PM
295  * @dev: device structure
296  *
297  * This function will deinit all the Runtime PM config.
298  *
299  * Return: void
300  */
hif_rtpm_exit(struct device * dev)301 static void hif_rtpm_exit(struct device *dev)
302 {
303 	pm_runtime_get_noresume(dev);
304 	pm_runtime_set_active(dev);
305 	pm_runtime_forbid(dev);
306 }
307 
hif_rtpm_alloc_last_busy_hist(void)308 static void hif_rtpm_alloc_last_busy_hist(void)
309 {
310 	int i;
311 
312 	for (i = 0; i < CE_COUNT_MAX; i++) {
313 		if (i != CE_ID_1 && i != CE_ID_2 && i != CE_ID_7) {
314 			gp_hif_rtpm_ctx->busy_hist[i] = NULL;
315 			continue;
316 		}
317 
318 		gp_hif_rtpm_ctx->busy_hist[i] =
319 			qdf_mem_malloc(sizeof(struct hif_rtpm_last_busy_hist));
320 		if (!gp_hif_rtpm_ctx->busy_hist[i])
321 			return;
322 	}
323 }
324 
hif_rtpm_free_last_busy_hist(void)325 static void hif_rtpm_free_last_busy_hist(void)
326 {
327 	int i;
328 
329 	for (i = 0; i < CE_COUNT_MAX; i++) {
330 		if (i != CE_ID_1 && i != CE_ID_2 && i != CE_ID_7)
331 			continue;
332 
333 		qdf_mem_free(gp_hif_rtpm_ctx->busy_hist[i]);
334 	}
335 }
336 
hif_rtpm_open(struct hif_softc * scn)337 void hif_rtpm_open(struct hif_softc *scn)
338 {
339 	gp_hif_rtpm_ctx = &g_hif_rtpm_ctx;
340 	gp_hif_rtpm_ctx->dev = scn->qdf_dev->dev;
341 	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_lock);
342 	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_suspend_lock);
343 	qdf_spinlock_create(&gp_hif_rtpm_ctx->prevent_list_lock);
344 	qdf_atomic_init(&gp_hif_rtpm_ctx->pm_state);
345 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
346 	qdf_atomic_init(&gp_hif_rtpm_ctx->monitor_wake_intr);
347 	INIT_LIST_HEAD(&gp_hif_rtpm_ctx->prevent_list);
348 	gp_hif_rtpm_ctx->client_count = 0;
349 	gp_hif_rtpm_ctx->pending_job = 0;
350 	hif_rtpm_register(HIF_RTPM_ID_CE, NULL);
351 	hif_rtpm_register(HIF_RTPM_ID_FORCE_WAKE, NULL);
352 	hif_rtpm_alloc_last_busy_hist();
353 	hif_info_high("Runtime PM attached");
354 }
355 
356 static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock);
357 
358 /**
359  * hif_rtpm_sanitize_exit(): sanitize runtime PM gets/puts from driver
360  *
361  * Ensure all gets/puts are in sync before exiting runtime PM feature.
362  * Also make sure all runtime PM locks are deinitialized properly.
363  *
364  * Return: void
365  */
hif_rtpm_sanitize_exit(void)366 static void hif_rtpm_sanitize_exit(void)
367 {
368 	struct hif_pm_runtime_lock *ctx, *tmp;
369 	struct hif_rtpm_client *client;
370 	int i, active_count;
371 
372 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
373 	list_for_each_entry_safe(ctx, tmp,
374 				 &gp_hif_rtpm_ctx->prevent_list, list) {
375 		hif_runtime_lock_deinit(ctx);
376 	}
377 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
378 
379 	/* check if get and put out of sync for all clients */
380 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
381 		client = gp_hif_rtpm_ctx->clients[i];
382 		if (client) {
383 			if (qdf_atomic_read(&client->active_count)) {
384 				active_count =
385 					qdf_atomic_read(&client->active_count);
386 				hif_err("Client active: %u- %s", i,
387 					hif_rtpm_id_to_string(i));
388 				QDF_DEBUG_PANIC("Client active on exit!");
389 				while (active_count--)
390 					__hif_rtpm_put_noidle(
391 							gp_hif_rtpm_ctx->dev);
392 			}
393 			QDF_DEBUG_PANIC("Client not deinitialized");
394 			qdf_mem_free(client);
395 			gp_hif_rtpm_ctx->clients[i] = NULL;
396 		}
397 	}
398 }
399 
400 /**
401  * hif_rtpm_sanitize_ssr_exit() - Empty the suspend list on SSR
402  *
403  * API is used to empty the runtime pm prevent suspend list.
404  *
405  * Return: void
406  */
hif_rtpm_sanitize_ssr_exit(void)407 static void hif_rtpm_sanitize_ssr_exit(void)
408 {
409 	struct hif_pm_runtime_lock *ctx, *tmp;
410 
411 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
412 	list_for_each_entry_safe(ctx, tmp,
413 				 &gp_hif_rtpm_ctx->prevent_list, list) {
414 		__hif_pm_runtime_allow_suspend(ctx);
415 	}
416 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
417 }
418 
hif_rtpm_close(struct hif_softc * scn)419 void hif_rtpm_close(struct hif_softc *scn)
420 {
421 	hif_rtpm_free_last_busy_hist();
422 	hif_rtpm_deregister(HIF_RTPM_ID_CE);
423 	hif_rtpm_deregister(HIF_RTPM_ID_FORCE_WAKE);
424 
425 	hif_is_recovery_in_progress(scn) ?
426 		hif_rtpm_sanitize_ssr_exit() :
427 		hif_rtpm_sanitize_exit();
428 
429 	qdf_mem_set(gp_hif_rtpm_ctx, sizeof(*gp_hif_rtpm_ctx), 0);
430 	gp_hif_rtpm_ctx = NULL;
431 	hif_info_high("Runtime PM context detached");
432 }
433 
hif_set_enable_rpm(struct hif_opaque_softc * hif_hdl)434 void hif_set_enable_rpm(struct hif_opaque_softc *hif_hdl)
435 {
436 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
437 
438 	gp_hif_rtpm_ctx->enable_rpm = scn->hif_config.enable_runtime_pm;
439 }
440 
hif_rtpm_start(struct hif_softc * scn)441 void hif_rtpm_start(struct hif_softc *scn)
442 {
443 	uint32_t mode = hif_get_conparam(scn);
444 
445 	if (!gp_hif_rtpm_ctx->enable_rpm) {
446 		hif_info_high("RUNTIME PM is disabled in ini");
447 		return;
448 	}
449 
450 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
451 	    mode == QDF_GLOBAL_MONITOR_MODE) {
452 		hif_info("RUNTIME PM is disabled for FTM/EPPING/MONITOR mode");
453 		return;
454 	}
455 
456 	hif_info_high("Enabling RUNTIME PM, Delay: %d ms",
457 		      scn->hif_config.runtime_pm_delay);
458 
459 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_ON);
460 	hif_rtpm_init(gp_hif_rtpm_ctx->dev, scn->hif_config.runtime_pm_delay);
461 	gp_hif_rtpm_ctx->cfg_delay = scn->hif_config.runtime_pm_delay;
462 	gp_hif_rtpm_ctx->delay = gp_hif_rtpm_ctx->cfg_delay;
463 	hif_rtpm_debugfs_create();
464 }
465 
hif_rtpm_stop(struct hif_softc * scn)466 void hif_rtpm_stop(struct hif_softc *scn)
467 {
468 	uint32_t mode = hif_get_conparam(scn);
469 
470 	if (!gp_hif_rtpm_ctx->enable_rpm)
471 		return;
472 
473 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
474 	    mode == QDF_GLOBAL_MONITOR_MODE)
475 		return;
476 
477 	hif_rtpm_exit(gp_hif_rtpm_ctx->dev);
478 
479 	hif_rtpm_sync_resume();
480 
481 	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
482 	hif_rtpm_debugfs_remove();
483 }
484 
hif_rtpm_register(uint32_t id,void (* hif_rtpm_cbk)(void))485 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rtpm_cbk)(void))
486 {
487 	struct hif_rtpm_client *client;
488 
489 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
490 		hif_err("Runtime PM context NULL");
491 		return QDF_STATUS_E_FAILURE;
492 	}
493 
494 	if (id >= HIF_RTPM_ID_MAX || gp_hif_rtpm_ctx->clients[id]) {
495 		hif_err("Invalid client %d", id);
496 		return QDF_STATUS_E_INVAL;
497 	}
498 
499 	client = qdf_mem_malloc(sizeof(struct hif_rtpm_client));
500 	if (!client)
501 		return QDF_STATUS_E_NOMEM;
502 
503 	client->hif_rtpm_cbk = hif_rtpm_cbk;
504 	qdf_atomic_init(&client->active_count);
505 	qdf_atomic_init(&client->get_count);
506 	qdf_atomic_init(&client->put_count);
507 
508 	gp_hif_rtpm_ctx->clients[id] = client;
509 	gp_hif_rtpm_ctx->client_count++;
510 
511 	return QDF_STATUS_SUCCESS;
512 }
513 
hif_rtpm_deregister(uint32_t id)514 QDF_STATUS hif_rtpm_deregister(uint32_t id)
515 {
516 	struct hif_rtpm_client *client;
517 	int active_count;
518 
519 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
520 		hif_err("Runtime PM context NULL");
521 		return QDF_STATUS_E_FAILURE;
522 	}
523 
524 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
525 		hif_err("invalid client, id: %u", id);
526 		return QDF_STATUS_E_INVAL;
527 	}
528 
529 	client = gp_hif_rtpm_ctx->clients[id];
530 	if (qdf_atomic_read(&client->active_count)) {
531 		active_count = qdf_atomic_read(&client->active_count);
532 		hif_err("Client: %u-%s Runtime PM active",
533 			id, hif_rtpm_id_to_string(id));
534 		hif_err("last get called: 0x%llx, get count: %d, put count: %d",
535 			client->get_ts, qdf_atomic_read(&client->get_count),
536 			qdf_atomic_read(&client->put_count));
537 		QDF_DEBUG_PANIC("Get and PUT call out of sync!");
538 		while (active_count--)
539 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
540 	}
541 
542 	qdf_mem_free(client);
543 	gp_hif_rtpm_ctx->clients[id] = NULL;
544 
545 	return QDF_STATUS_SUCCESS;
546 }
547 
hif_rtpm_set_autosuspend_delay(int delay)548 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
549 {
550 	if (delay < HIF_RTPM_DELAY_MIN || delay > HIF_RTPM_DELAY_MAX) {
551 		hif_err("Invalid delay value %d ms", delay);
552 		return QDF_STATUS_E_INVAL;
553 	}
554 
555 	__hif_rtpm_set_autosuspend_delay(gp_hif_rtpm_ctx->dev, delay);
556 	gp_hif_rtpm_ctx->delay = delay;
557 	hif_info_high("RTPM delay set: %d ms", delay);
558 
559 	return QDF_STATUS_SUCCESS;
560 }
561 
hif_rtpm_restore_autosuspend_delay(void)562 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
563 {
564 	if (gp_hif_rtpm_ctx->delay == gp_hif_rtpm_ctx->cfg_delay) {
565 		hif_info_rl("RTPM delay already default: %d",
566 			    gp_hif_rtpm_ctx->delay);
567 		return QDF_STATUS_E_ALREADY;
568 	}
569 
570 	__hif_rtpm_set_autosuspend_delay(gp_hif_rtpm_ctx->dev,
571 					 gp_hif_rtpm_ctx->cfg_delay);
572 	gp_hif_rtpm_ctx->delay = gp_hif_rtpm_ctx->cfg_delay;
573 	hif_info_rl("RTPM delay set: %d ms", gp_hif_rtpm_ctx->delay);
574 
575 	return QDF_STATUS_SUCCESS;
576 }
577 
hif_rtpm_get_autosuspend_delay(void)578 int hif_rtpm_get_autosuspend_delay(void)
579 {
580 	return gp_hif_rtpm_ctx->delay;
581 }
582 
hif_runtime_lock_init(qdf_runtime_lock_t * lock,const char * name)583 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
584 {
585 	struct hif_pm_runtime_lock *context;
586 
587 	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
588 		hif_err("Runtime PM context NULL");
589 		return QDF_STATUS_E_FAILURE;
590 	}
591 
592 	hif_debug("Initializing Runtime PM wakelock %s", name);
593 
594 	context = qdf_mem_malloc(sizeof(*context));
595 	if (!context)
596 		return -ENOMEM;
597 
598 	context->name = name ? name : "Default";
599 	lock->lock = context;
600 
601 	return 0;
602 }
603 
hif_runtime_lock_deinit(struct hif_pm_runtime_lock * lock)604 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *lock)
605 {
606 	if (!lock) {
607 		hif_err("Runtime PM lock already freed");
608 		return;
609 	}
610 
611 	hif_debug("Deinitializing Runtime PM wakelock %s", lock->name);
612 
613 	if (gp_hif_rtpm_ctx) {
614 		qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
615 		__hif_pm_runtime_allow_suspend(lock);
616 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
617 	}
618 
619 	qdf_mem_free(lock);
620 }
621 
622 /**
623  * hif_rtpm_enabled() - To check if Runtime PM is enabled
624  *
625  * This function will check if Runtime PM is enabled or not.
626  *
627  * Return: void
628  */
hif_rtpm_enabled(void)629 static bool hif_rtpm_enabled(void)
630 {
631 	if (qdf_unlikely(!gp_hif_rtpm_ctx))
632 		return false;
633 
634 	if (gp_hif_rtpm_ctx->enable_rpm)
635 		return true;
636 
637 	return __hif_rtpm_enabled(gp_hif_rtpm_ctx->dev);
638 }
639 
hif_rtpm_get(uint8_t type,uint32_t id)640 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id)
641 {
642 	struct hif_rtpm_client *client = NULL;
643 	int ret = QDF_STATUS_E_FAILURE;
644 	int pm_state;
645 
646 	if (!hif_rtpm_enabled())
647 		return QDF_STATUS_SUCCESS;
648 
649 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
650 		QDF_DEBUG_PANIC("Invalid client, id: %u", id);
651 		return -QDF_STATUS_E_INVAL;
652 	}
653 
654 	client = gp_hif_rtpm_ctx->clients[id];
655 
656 	if (type != HIF_RTPM_GET_ASYNC) {
657 		switch (type) {
658 		case HIF_RTPM_GET_FORCE:
659 			ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
660 			break;
661 		case HIF_RTPM_GET_SYNC:
662 			ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
663 			break;
664 		case HIF_RTPM_GET_NORESUME:
665 			__hif_rtpm_get_noresume(gp_hif_rtpm_ctx->dev);
666 			ret = 0;
667 			break;
668 		default:
669 			QDF_DEBUG_PANIC("Invalid call type");
670 			return QDF_STATUS_E_BADMSG;
671 		}
672 
673 		if (ret < 0 && ret != -EINPROGRESS) {
674 			hif_err("pm_state: %d ret: %d",
675 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
676 				ret);
677 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
678 		} else {
679 			ret = QDF_STATUS_SUCCESS;
680 		}
681 		goto out;
682 	}
683 
684 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
685 	if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP) {
686 		ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
687 		/* Get will return 1 if the device is already active,
688 		 * just return success in that case
689 		 */
690 		if (ret > 0) {
691 			ret = QDF_STATUS_SUCCESS;
692 		} else if (ret == 0 || ret == -EINPROGRESS) {
693 			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
694 			pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
695 			if (pm_state >= HIF_RTPM_STATE_RESUMING) {
696 				__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
697 				gp_hif_rtpm_ctx->stats.request_resume_ts =
698 							qdf_get_log_timestamp();
699 				gp_hif_rtpm_ctx->stats.request_resume_id = id;
700 				ret = QDF_STATUS_E_FAILURE;
701 			} else {
702 				ret = QDF_STATUS_SUCCESS;
703 			}
704 			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
705 		} else if (ret < 0) {
706 			hif_err("pm_state: %d ret: %d",
707 				qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
708 				ret);
709 			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
710 		}
711 	} else if (pm_state >= HIF_RTPM_STATE_RESUMING) {
712 		/* Do not log in performance path */
713 		if (id != HIF_RTPM_ID_DP)
714 			hif_info_high("request RTPM resume by %d- %s",
715 				      id, hif_rtpm_id_to_string(id));
716 		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
717 		gp_hif_rtpm_ctx->stats.request_resume_ts =
718 						qdf_get_log_timestamp();
719 		gp_hif_rtpm_ctx->stats.request_resume_id = id;
720 		return QDF_STATUS_E_FAILURE;
721 	}
722 
723 out:
724 	if (QDF_IS_STATUS_SUCCESS(ret)) {
725 		qdf_atomic_inc(&client->active_count);
726 		qdf_atomic_inc(&client->get_count);
727 		client->get_ts = qdf_get_log_timestamp();
728 	}
729 
730 	return ret;
731 }
732 
hif_rtpm_put(uint8_t type,uint32_t id)733 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
734 {
735 	struct hif_rtpm_client *client;
736 	int usage_count;
737 
738 	if (!hif_rtpm_enabled())
739 		return QDF_STATUS_SUCCESS;
740 
741 	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
742 		hif_err("Invalid client, id: %u", id);
743 		return QDF_STATUS_E_INVAL;
744 	}
745 
746 	client = gp_hif_rtpm_ctx->clients[id];
747 
748 	usage_count = hif_rtpm_read_usage_count();
749 	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
750 		hif_err("Unexpected PUT when runtime PM is disabled");
751 		QDF_BUG(0);
752 		return QDF_STATUS_E_CANCELED;
753 	} else if (!usage_count || !qdf_atomic_read(&client->active_count)) {
754 		hif_info_high("Put without a Get operation, %u-%s",
755 			      id, hif_rtpm_id_to_string(id));
756 		return QDF_STATUS_E_CANCELED;
757 	}
758 
759 	switch (type) {
760 	case HIF_RTPM_PUT_ASYNC:
761 		__hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
762 		break;
763 	case HIF_RTPM_PUT_NOIDLE:
764 		__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
765 		break;
766 	case HIF_RTPM_PUT_SYNC_SUSPEND:
767 		__hif_rtpm_put_sync_suspend(gp_hif_rtpm_ctx->dev);
768 		break;
769 	default:
770 		QDF_DEBUG_PANIC("Invalid call type");
771 		return QDF_STATUS_E_BADMSG;
772 	}
773 
774 	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
775 	qdf_atomic_dec(&client->active_count);
776 	qdf_atomic_inc(&client->put_count);
777 	client->put_ts = qdf_get_log_timestamp();
778 	gp_hif_rtpm_ctx->stats.last_busy_ts = client->put_ts;
779 
780 	return QDF_STATUS_SUCCESS;
781 }
782 
783 /**
784  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
785  *                                      reason
786  * @lock: runtime_pm lock being acquired
787  *
788  * Return: 0 if successful.
789  */
__hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock * lock)790 static int __hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
791 {
792 	int ret = 0;
793 
794 	if (lock->active)
795 		return 0;
796 
797 	ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
798 
799 	/**
800 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
801 	 * RPM_SUSPENDING. Any other negative value is an error.
802 	 * We shouldn't do runtime_put here as in later point allow
803 	 * suspend gets called with the context and there the usage count
804 	 * is decremented, so suspend will be prevented.
805 	 */
806 	if (ret < 0 && ret != -EINPROGRESS) {
807 		gp_hif_rtpm_ctx->stats.runtime_get_err++;
808 		hif_err("pm_state: %d ret: %d",
809 			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
810 			ret);
811 	}
812 
813 	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
814 	lock->active = true;
815 	gp_hif_rtpm_ctx->prevent_cnt++;
816 	gp_hif_rtpm_ctx->stats.prevent_suspend++;
817 	return ret;
818 }
819 
820 /**
821  * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
822  * @lock: runtime pm lock
823  *
824  * This function will allow runtime suspend, by decrementing
825  * device's usage count.
826  *
827  * Return: status
828  */
__hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock * lock)829 static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
830 {
831 	int ret = 0;
832 	int usage_count;
833 
834 	if (gp_hif_rtpm_ctx->prevent_cnt == 0 || !lock->active)
835 		return ret;
836 
837 	usage_count = hif_rtpm_read_usage_count();
838 	/*
839 	 * For runtime PM enabled case, the usage count should never be 0
840 	 * at this point. For runtime PM disabled case, it should never be
841 	 * 2 at this point. Catch unexpected PUT without GET here.
842 	 */
843 	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
844 		hif_err("Unexpected PUT when runtime PM is disabled");
845 		QDF_BUG(0);
846 		return QDF_STATUS_E_CANCELED;
847 	} else if (!usage_count) {
848 		hif_info_high("Put without a Get operation, %s", lock->name);
849 		return QDF_STATUS_E_CANCELED;
850 	}
851 
852 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
853 	ret = __hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
854 
855 	list_del(&lock->list);
856 	lock->active = false;
857 	gp_hif_rtpm_ctx->prevent_cnt--;
858 	gp_hif_rtpm_ctx->stats.allow_suspend++;
859 	return ret;
860 }
861 
hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock * lock)862 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
863 {
864 	if (!hif_rtpm_enabled() || !lock)
865 		return -EINVAL;
866 
867 	if (in_irq())
868 		WARN_ON(1);
869 
870 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
871 	__hif_pm_runtime_prevent_suspend(lock);
872 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
873 
874 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
875 		HIF_RTPM_STATE_SUSPENDING)
876 		hif_info_high("request RTPM resume by %s",
877 			      lock->name);
878 
879 	return 0;
880 }
881 
882 /**
883  * __hif_pm_runtime_prevent_suspend_sync() - synchronized prevent runtime
884  *  suspend for a protocol reason
885  * @lock: runtime_pm lock being acquired
886  *
887  * Return: 0 if successful.
888  */
889 static
__hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock * lock)890 int __hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
891 {
892 	int ret = 0;
893 
894 	if (lock->active)
895 		return 0;
896 
897 	ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
898 
899 	/**
900 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
901 	 * RPM_SUSPENDING. Any other negative value is an error.
902 	 * We shouldn't do runtime_put here as in later point allow
903 	 * suspend gets called with the context and there the usage count
904 	 * is decremented, so suspend will be prevented.
905 	 */
906 	if (ret < 0 && ret != -EINPROGRESS) {
907 		gp_hif_rtpm_ctx->stats.runtime_get_err++;
908 		hif_err("pm_state: %d ret: %d",
909 			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
910 			ret);
911 	}
912 
913 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
914 	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
915 	lock->active = true;
916 	gp_hif_rtpm_ctx->prevent_cnt++;
917 	gp_hif_rtpm_ctx->stats.prevent_suspend++;
918 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
919 
920 	return ret;
921 }
922 
hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock * lock)923 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
924 {
925 	if (!hif_rtpm_enabled())
926 		return 0;
927 
928 	if (!lock)
929 		return -EINVAL;
930 
931 	if (in_irq())
932 		WARN_ON(1);
933 
934 	__hif_pm_runtime_prevent_suspend_sync(lock);
935 
936 	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
937 		HIF_RTPM_STATE_SUSPENDING)
938 		hif_info_high("request RTPM resume by %s",
939 			      lock->name);
940 
941 	return 0;
942 }
943 
hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock * lock)944 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
945 {
946 	if (!hif_rtpm_enabled())
947 		return 0;
948 
949 	if (!lock)
950 		return -EINVAL;
951 
952 	if (in_irq())
953 		WARN_ON(1);
954 
955 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
956 	__hif_pm_runtime_allow_suspend(lock);
957 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
958 
959 	return 0;
960 }
961 
hif_rtpm_sync_resume(void)962 QDF_STATUS hif_rtpm_sync_resume(void)
963 {
964 	struct device *dev;
965 	int pm_state;
966 	int ret;
967 
968 	if (!hif_rtpm_enabled())
969 		return 0;
970 
971 	dev = gp_hif_rtpm_ctx->dev;
972 	pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
973 
974 	ret = __hif_rtpm_resume(dev);
975 	__hif_rtpm_mark_last_busy(dev);
976 
977 	if (ret >= 0) {
978 		gp_hif_rtpm_ctx->stats.resume_count++;
979 		gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
980 		gp_hif_rtpm_ctx->stats.last_busy_ts =
981 					gp_hif_rtpm_ctx->stats.resume_ts;
982 		return QDF_STATUS_SUCCESS;
983 	}
984 
985 	hif_err("pm_state: %d, err: %d", pm_state, ret);
986 	return QDF_STATUS_E_FAILURE;
987 }
988 
hif_rtpm_request_resume(void)989 void hif_rtpm_request_resume(void)
990 {
991 	__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
992 	hif_info_high("request RTPM resume %s", (char *)_RET_IP_);
993 }
994 
/**
 * hif_rtpm_check_and_request_resume() - Request a runtime resume if the
 *  bus is suspended (or suspending, when allowed)
 * @suspend_in_progress: when true, also request resume while the state
 *  is still HIF_RTPM_STATE_SUSPENDING
 *
 * The pm_state is sampled under the suspend lock; request-resume
 * statistics are stamped when a resume is actually requested.
 *
 * Return: void
 */
void hif_rtpm_check_and_request_resume(bool suspend_in_progress)
{
	struct hif_rtpm_ctx *rtpm_ctx = gp_hif_rtpm_ctx;
	enum hif_rtpm_state state;
	bool need_resume;

	hif_rtpm_suspend_lock();
	state = qdf_atomic_read(&rtpm_ctx->pm_state);
	hif_rtpm_suspend_unlock();

	need_resume = (state == HIF_RTPM_STATE_SUSPENDED) ||
		      (suspend_in_progress &&
		       state == HIF_RTPM_STATE_SUSPENDING);
	if (!need_resume)
		return;

	__hif_rtpm_request_resume(rtpm_ctx->dev);
	rtpm_ctx->stats.request_resume_ts = qdf_get_log_timestamp();
	rtpm_ctx->stats.request_resume_id = HIF_RTPM_ID_RESERVED;
}
1011 
/**
 * hif_rtpm_get_monitor_wake_intr() - Read the monitor-wake-interrupt flag
 *
 * Return: current value of the monitor_wake_intr atomic
 */
int hif_rtpm_get_monitor_wake_intr(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->monitor_wake_intr);
}
1016 
/**
 * hif_rtpm_set_monitor_wake_intr() - Set the monitor-wake-interrupt flag
 * @val: value to store in the monitor_wake_intr atomic
 *
 * Return: void
 */
void hif_rtpm_set_monitor_wake_intr(int val)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->monitor_wake_intr, val);
}
1021 
hif_rtpm_display_last_busy_hist(struct hif_opaque_softc * hif_ctx)1022 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx)
1023 {
1024 	struct hif_softc *scn;
1025 	struct hif_rtpm_ctx *rtpm_ctx = gp_hif_rtpm_ctx;
1026 	struct hif_rtpm_last_busy_hist *hist;
1027 	unsigned long cur_idx;
1028 	int i;
1029 
1030 	scn = HIF_GET_SOFTC(hif_ctx);
1031 	if (!scn)
1032 		return;
1033 
1034 	hif_info_high("RTPM last busy ts:%llu client:%s from:%ps",
1035 		      rtpm_ctx->stats.last_busy_ts,
1036 		      hif_rtpm_id_to_string(rtpm_ctx->stats.last_busy_id),
1037 		      rtpm_ctx->stats.last_busy_marker);
1038 
1039 	/*Display CE and DP clients RTPM stats*/
1040 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
1041 		if (!rtpm_ctx->clients[i] ||
1042 		    (i != HIF_RTPM_ID_CE && i != HIF_RTPM_ID_DP))
1043 			continue;
1044 		hif_info_high("RTPM client:%s busy_ts:%llu get_ts:%llu put_ts:%llu get_cnt:%d put_cnt:%d",
1045 			      hif_rtpm_id_to_string(i),
1046 			      rtpm_ctx->clients[i]->last_busy_ts,
1047 			      rtpm_ctx->clients[i]->get_ts,
1048 			      rtpm_ctx->clients[i]->put_ts,
1049 			      qdf_atomic_read(&rtpm_ctx->clients[i]->get_count),
1050 			      qdf_atomic_read(&rtpm_ctx->clients[i]->put_count));
1051 	}
1052 
1053 	for (i = 0; i < CE_COUNT_MAX; i++) {
1054 		hist = gp_hif_rtpm_ctx->busy_hist[i];
1055 		if (!hist)
1056 			continue;
1057 		cur_idx = hist->last_busy_idx;
1058 
1059 		hif_info_high("RTPM CE-%u last busy_cnt:%lu cur_idx:%lu ts1:%llu ts2:%llu ts3:%llu ts4:%llu",
1060 			      i, hist->last_busy_cnt, cur_idx,
1061 			      hist->last_busy_ts[cur_idx & HIF_RTPM_BUSY_HIST_MASK],
1062 			      hist->last_busy_ts[(cur_idx + 4) & HIF_RTPM_BUSY_HIST_MASK],
1063 			      hist->last_busy_ts[(cur_idx + 8) & HIF_RTPM_BUSY_HIST_MASK],
1064 			      hist->last_busy_ts[(cur_idx + 12) & HIF_RTPM_BUSY_HIST_MASK]);
1065 	}
1066 }
1067 
hif_rtpm_record_ce_last_busy_evt(struct hif_softc * scn,unsigned long ce_id)1068 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1069 				      unsigned long ce_id)
1070 {
1071 	struct hif_rtpm_last_busy_hist *hist;
1072 	unsigned long idx;
1073 
1074 	if (!scn || !gp_hif_rtpm_ctx->busy_hist[ce_id])
1075 		return;
1076 
1077 	hist = gp_hif_rtpm_ctx->busy_hist[ce_id];
1078 	hist->last_busy_cnt++;
1079 	hist->last_busy_idx++;
1080 	idx = hist->last_busy_idx & HIF_RTPM_BUSY_HIST_MASK;
1081 	hist->last_busy_ts[idx] = qdf_get_log_timestamp();
1082 }
1083 
/**
 * hif_rtpm_mark_last_busy() - Mark the device last-busy on behalf of a client
 * @id: runtime PM client id (HIF_RTPM_ID_*)
 *
 * Marks the bus device last-busy for the runtime PM autosuspend timer,
 * records the timestamp, client id and caller address, and updates the
 * client's own last-busy statistics when the client is registered.
 *
 * Return: void
 */
void hif_rtpm_mark_last_busy(uint32_t id)
{
	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
	gp_hif_rtpm_ctx->stats.last_busy_id = id;
	gp_hif_rtpm_ctx->stats.last_busy_marker = (void *)_RET_IP_;
	/* Bounds-check id before indexing clients[HIF_RTPM_ID_MAX] */
	if (id < HIF_RTPM_ID_MAX && gp_hif_rtpm_ctx->clients[id]) {
		gp_hif_rtpm_ctx->clients[id]->last_busy_cnt++;
		gp_hif_rtpm_ctx->clients[id]->last_busy_ts =
					gp_hif_rtpm_ctx->stats.last_busy_ts;
	}
}
1096 
/**
 * hif_rtpm_set_client_job() - Run or defer a client's resume job
 * @client_id: runtime PM client id (HIF_RTPM_ID_*)
 *
 * If the link is already up (pm_state at or below RESUMING_LINKUP) and
 * the client registered a callback, invoke it immediately; otherwise
 * mark the job pending so it runs once the bus resumes.
 *
 * Return: void
 */
void hif_rtpm_set_client_job(uint32_t client_id)
{
	int state;

	if (!gp_hif_rtpm_ctx->clients[client_id])
		return;

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
	if (state > HIF_RTPM_STATE_RESUMING_LINKUP ||
	    !gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk)
		qdf_set_bit(client_id, &gp_hif_rtpm_ctx->pending_job);
	else
		gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk();
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}
1113 
/**
 * hif_rtpm_pending_job() - continue jobs when bus resumed
 *
 * Walks the pending_job bitmap and invokes each flagged client's
 * callback. Caller must hold gp_hif_rtpm_ctx->runtime_lock; the lock
 * is dropped around each callback (callbacks may sleep or re-enter
 * runtime PM) and re-acquired before the next iteration.
 *
 * Return: Void
 */
static void hif_rtpm_pending_job(void)
{
	int i;

	for (i = 0; i < gp_hif_rtpm_ctx->client_count; i++) {
		/* test-and-clear is done under the lock; the callback
		 * itself runs unlocked
		 */
		if (qdf_test_and_clear_bit(i, &gp_hif_rtpm_ctx->pending_job)) {
			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
			if (gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk)
				gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk();
			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
		}
	}
}
1132 
1133 #define PREVENT_LIST_STRING_LEN 200
1134 
hif_rtpm_print_prevent_list(void)1135 void hif_rtpm_print_prevent_list(void)
1136 {
1137 	struct hif_rtpm_client *client;
1138 	struct hif_pm_runtime_lock *ctx;
1139 	char *str_buf;
1140 	int i, prevent_list_count, len = 0;
1141 
1142 	str_buf = qdf_mem_malloc(PREVENT_LIST_STRING_LEN);
1143 	if (!str_buf)
1144 		return;
1145 
1146 	qdf_spin_lock(&gp_hif_rtpm_ctx->prevent_list_lock);
1147 	prevent_list_count = gp_hif_rtpm_ctx->prevent_cnt;
1148 	if (prevent_list_count) {
1149 		list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list)
1150 			len += qdf_scnprintf(str_buf + len,
1151 				PREVENT_LIST_STRING_LEN - len,
1152 				"%s ", ctx->name);
1153 	}
1154 	qdf_spin_unlock(&gp_hif_rtpm_ctx->prevent_list_lock);
1155 
1156 	if (prevent_list_count)
1157 		hif_info_high("prevent_suspend_cnt %u, prevent_list: %s",
1158 			      prevent_list_count, str_buf);
1159 
1160 	qdf_mem_free(str_buf);
1161 
1162 	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
1163 		client = gp_hif_rtpm_ctx->clients[i];
1164 		if (client && qdf_atomic_read(&client->active_count))
1165 			hif_info_high("client: %d: %s- active count: %d", i,
1166 				      hif_rtpm_id_to_string(i),
1167 				      qdf_atomic_read(&client->active_count));
1168 	}
1169 }
1170 
1171 /**
1172  * hif_rtpm_is_suspend_allowed() - Reject suspend if client is active
1173  *
1174  * Return: True if no clients are active
1175  */
hif_rtpm_is_suspend_allowed(void)1176 static bool hif_rtpm_is_suspend_allowed(void)
1177 {
1178 	if (!gp_hif_rtpm_ctx || !gp_hif_rtpm_ctx->enable_rpm)
1179 		return false;
1180 
1181 	if (!hif_rtpm_read_usage_count())
1182 		return true;
1183 
1184 	return false;
1185 }
1186 
/**
 * hif_rtpm_suspend_lock() - Acquire the runtime-suspend spinlock
 *
 * IRQ-save variant; pair with hif_rtpm_suspend_unlock().
 *
 * Return: void
 */
void hif_rtpm_suspend_lock(void)
{
	qdf_spin_lock_irqsave(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
1191 
/**
 * hif_rtpm_suspend_unlock() - Release the runtime-suspend spinlock
 *
 * Counterpart of hif_rtpm_suspend_lock().
 *
 * Return: void
 */
void hif_rtpm_suspend_unlock(void)
{
	qdf_spin_unlock_irqrestore(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
1196 
/**
 * hif_rtpm_set_state(): utility function
 * @state: state to set
 *
 * Atomically stores @state into the global runtime PM state.
 *
 * Return: Void
 */
static inline
void hif_rtpm_set_state(enum hif_rtpm_state state)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, state);
}
1208 
/**
 * hif_rtpm_get_state() - Read the current runtime PM state
 *
 * Return: current value of pm_state (enum hif_rtpm_state)
 */
int hif_rtpm_get_state(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
}
1213 
hif_pre_runtime_suspend(struct hif_opaque_softc * hif_ctx)1214 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
1215 {
1216 	if (!hif_can_suspend_link(hif_ctx)) {
1217 		hif_err("Runtime PM not supported for link up suspend");
1218 		return -EINVAL;
1219 	}
1220 
1221 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1222 	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDING);
1223 
1224 	/* keep this after set suspending */
1225 	if (!hif_rtpm_is_suspend_allowed()) {
1226 		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1227 		hif_rtpm_print_prevent_list();
1228 		gp_hif_rtpm_ctx->stats.suspend_err_count++;
1229 		gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
1230 		hif_info_high("Runtime PM not allowed now");
1231 		return -EINVAL;
1232 	}
1233 
1234 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1235 
1236 	return QDF_STATUS_SUCCESS;
1237 }
1238 
hif_process_runtime_suspend_success(void)1239 void hif_process_runtime_suspend_success(void)
1240 {
1241 	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDED);
1242 	gp_hif_rtpm_ctx->stats.suspend_count++;
1243 	gp_hif_rtpm_ctx->stats.suspend_ts = qdf_get_log_timestamp();
1244 }
1245 
hif_process_runtime_suspend_failure(void)1246 void hif_process_runtime_suspend_failure(void)
1247 {
1248 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1249 	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
1250 	hif_rtpm_pending_job();
1251 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1252 
1253 	gp_hif_rtpm_ctx->stats.suspend_err_count++;
1254 	gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
1255 	gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
1256 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
1257 }
1258 
hif_pre_runtime_resume(void)1259 void hif_pre_runtime_resume(void)
1260 {
1261 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1262 	hif_rtpm_set_monitor_wake_intr(0);
1263 	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING);
1264 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1265 }
1266 
hif_process_runtime_resume_linkup(void)1267 void hif_process_runtime_resume_linkup(void)
1268 {
1269 	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1270 	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING_LINKUP);
1271 	hif_rtpm_pending_job();
1272 	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
1273 }
1274 
hif_process_runtime_resume_success(void)1275 void hif_process_runtime_resume_success(void)
1276 {
1277 	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
1278 	gp_hif_rtpm_ctx->stats.resume_count++;
1279 	gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
1280 	gp_hif_rtpm_ctx->stats.last_busy_ts = gp_hif_rtpm_ctx->stats.resume_ts;
1281 	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
1282 }
1283 
/**
 * hif_runtime_suspend() - Suspend the bus for runtime PM
 * @hif_ctx: hif opaque handle
 *
 * Performs the two-stage bus suspend (regular, then noirq). If the
 * noirq stage fails, the monitor-wake-interrupt flag is cleared and
 * the regular suspend is unwound via hif_bus_resume().
 *
 * Return: 0 on success, negative errno on failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	ret = hif_bus_suspend(hif_ctx);
	if (ret) {
		hif_err("Failed bus suspend: %d", ret);
		return ret;
	}

	hif_rtpm_set_monitor_wake_intr(1);

	ret = hif_bus_suspend_noirq(hif_ctx);
	if (ret) {
		hif_err("Failed bus suspend noirq: %d", ret);
		hif_rtpm_set_monitor_wake_intr(0);
		/* unwind the stage-one suspend; resume must not fail */
		QDF_BUG(!hif_bus_resume(hif_ctx));
		return ret;
	}

	return 0;
}
1310 
/**
 * hif_runtime_resume() - Resume the bus for runtime PM
 * @hif_ctx: hif opaque handle
 *
 * Performs the two-stage bus resume (noirq, then regular). The noirq
 * stage is expected never to fail.
 *
 * Return: 0 on success, negative errno on failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
	ret = hif_bus_resume(hif_ctx);
	if (ret)
		hif_err("Failed runtime resume: %d", ret);

	return ret;
}
1322 
/**
 * hif_fastpath_resume() - Re-sync the HTT H2T CE write index after resume
 * @hif_ctx: hif opaque handle
 *
 * In fastpath mode, re-writes the source ring write index of the
 * CE_HTT_H2T_MSG copy engine to the hardware so the target picks up
 * any entries queued while the bus was suspended. No-op when fastpath
 * mode is off or @hif_ctx is invalid.
 *
 * Return: void
 */
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		/* target register access must be bracketed by
		 * Q_TARGET_ACCESS_BEGIN/END
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/*war_ce_src_ring_write_idx_set */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}
1345 #endif /* FEATURE_RUNTIME_PM */
1346