xref: /wlan-dirver/qca-wifi-host-cmn/qdf/inc/qdf_lock.h (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * @file qdf_lock.h
22  * This file abstracts locking operations.
23  */
24 
25 #ifndef _QDF_LOCK_H
26 #define _QDF_LOCK_H
27 
28 #include <qdf_types.h>
29 #include <qdf_mem.h>
30 #include <qdf_time.h>
31 #include <i_qdf_trace.h>
32 
33 #ifndef QDF_LOCK_STATS
34 #define QDF_LOCK_STATS 0
35 #endif
36 #ifndef QDF_LOCK_STATS_DESTROY_PRINT
37 #define QDF_LOCK_STATS_DESTROY_PRINT 0
38 #endif
39 #ifndef QDF_LOCK_STATS_BUG_ON
40 #define QDF_LOCK_STATS_BUG_ON 0
41 #endif
42 #ifndef QDF_LOCK_STATS_LIST
43 #define QDF_LOCK_STATS_LIST 0
44 #endif
45 
46 /* Max hold time in microseconds, 0 to disable detection */
47 #ifdef VCPU_TIMESTOLEN
48 #define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ         400000
49 #else
50 #define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ         10000
51 #endif
52 #define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK                 0
53 
54 #if QDF_LOCK_STATS
55 #define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH        2000000
56 #else
57 #define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH        1000000
58 #endif
59 
60 #if !QDF_LOCK_STATS
61 struct lock_stats {};
62 #define BEFORE_LOCK(x...) do {} while (0)
63 #define AFTER_LOCK(x...) do {} while (0)
64 #define BEFORE_TRYLOCK(x...) do {} while (0)
65 #define AFTER_TRYLOCK(x...) do {} while (0)
66 #define BEFORE_UNLOCK(x...) do {} while (0)
67 #define qdf_lock_stats_create(x...) do {} while (0)
68 #define qdf_lock_stats_destroy(x...) do {} while (0)
69 #define qdf_lock_stats_init(x...) do {} while (0)
70 #define qdf_lock_stats_deinit(x...) do {} while (0)
71 #else
72 void qdf_lock_stats_init(void);
73 void qdf_lock_stats_deinit(void);
74 struct qdf_lock_cookie;
75 struct lock_stats {
76 	const char *initialization_fn;
77 	const char *acquired_by;
78 	int line;
79 	int acquired;
80 	int contended;
81 	uint64_t contention_time;
82 	uint64_t non_contention_time;
83 	uint64_t held_time;
84 	uint64_t last_acquired;
85 	uint64_t max_contention_wait;
86 	uint64_t max_held_time;
87 	int num_large_contentions;
88 	int num_large_holds;
89 	struct qdf_lock_cookie *cookie;
90 };
91 #define LARGE_CONTENTION QDF_LOG_TIMESTAMP_CYCLES_PER_10_US
92 
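/*
 * Note: BEFORE_LOCK() opens a do { } block and declares the timestamp
 * locals; its trailing "do {} while (0)" only consumes the caller's
 * semicolon. AFTER_LOCK() closes that block with "} while (0)", so the
 * two macros must always be used as a matched pair around the underlying
 * lock call, exactly as the qdf_spin_lock*() wrappers below do. The same
 * pairing applies to BEFORE_TRYLOCK()/AFTER_TRYLOCK().
 */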
93 #define BEFORE_LOCK(lock, was_locked) \
94 do { \
95 	uint64_t BEFORE_LOCK_time; \
96 	uint64_t AFTER_LOCK_time;  \
97 	bool BEFORE_LOCK_is_locked = was_locked; \
98 	BEFORE_LOCK_time = qdf_get_log_timestamp_lightweight(); \
99 	do {} while (0)
100 
101 
102 #define AFTER_LOCK(lock, func) \
103 	lock->stats.acquired_by = func; \
104 	AFTER_LOCK_time = qdf_get_log_timestamp_lightweight(); \
105 	lock->stats.acquired++; \
106 	lock->stats.last_acquired = AFTER_LOCK_time; \
107 	if (BEFORE_LOCK_is_locked) { \
108 		lock->stats.contended++; \
109 		lock->stats.contention_time += \
110 			(AFTER_LOCK_time - BEFORE_LOCK_time); \
111 	} else { \
112 		lock->stats.non_contention_time += \
113 			(AFTER_LOCK_time - BEFORE_LOCK_time); \
114 	} \
115 \
116 	if (AFTER_LOCK_time - BEFORE_LOCK_time > LARGE_CONTENTION) \
117 		lock->stats.num_large_contentions++; \
118 \
119 	if (AFTER_LOCK_time - BEFORE_LOCK_time > \
120 	    lock->stats.max_contention_wait) \
121 		lock->stats.max_contention_wait = \
122 			AFTER_LOCK_time - BEFORE_LOCK_time; \
123 } while (0)
124 
125 #define BEFORE_TRYLOCK(lock) \
126 do { \
127 	uint64_t BEFORE_LOCK_time; \
128 	uint64_t AFTER_LOCK_time;  \
129 	BEFORE_LOCK_time = qdf_get_log_timestamp_lightweight(); \
130 	do {} while (0)
131 
132 #define AFTER_TRYLOCK(lock, trylock_return, func) \
133 	AFTER_LOCK_time = qdf_get_log_timestamp_lightweight(); \
134 	if (trylock_return) { \
135 		lock->stats.acquired++; \
136 		lock->stats.last_acquired = AFTER_LOCK_time; \
137 		lock->stats.non_contention_time += \
138 			(AFTER_LOCK_time - BEFORE_LOCK_time); \
139 		lock->stats.acquired_by = func; \
140 	} \
141 } while (0)
142 
143 /* max_hold_time in microseconds (us) */
144 #define BEFORE_UNLOCK(lock, max_hold_time) \
145 do {\
146 	uint64_t BEFORE_UNLOCK_time;  \
147 	uint64_t held_time;  \
148 	BEFORE_UNLOCK_time = qdf_get_log_timestamp_lightweight(); \
149 \
150 	if (unlikely(BEFORE_UNLOCK_time < lock->stats.last_acquired)) \
151 		held_time = 0; \
152 	else \
153 		held_time = BEFORE_UNLOCK_time - lock->stats.last_acquired; \
154 \
155 	lock->stats.held_time += held_time; \
156 \
157 	if (held_time > lock->stats.max_held_time) \
158 		lock->stats.max_held_time = held_time; \
159 \
160 	if (held_time > LARGE_CONTENTION) \
161 		lock->stats.num_large_holds++; \
162 	if (QDF_LOCK_STATS_BUG_ON && max_hold_time && \
163 	    held_time > qdf_usecs_to_log_timestamp(max_hold_time)) { \
164 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, \
165 			"BEFORE_UNLOCK: lock held too long (%lluus)", \
166 			qdf_log_timestamp_to_usecs(held_time)); \
167 		QDF_BUG(0); \
168 	} \
169 	lock->stats.acquired_by = NULL; \
170 } while (0)
171 
172 void qdf_lock_stats_cookie_destroy(struct lock_stats *stats);
173 void qdf_lock_stats_cookie_create(struct lock_stats *stats,
174 				  const char *func, int line);
175 
176 static inline void qdf_lock_stats_destroy(struct lock_stats *stats)
177 {
178 	if (QDF_LOCK_STATS_DESTROY_PRINT) {
179 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
180 			"%s: lock: %s %d \t"
181 			"acquired:\t%d\tcontended:\t%d\t"
182 			"contention_time\t%llu\tmax_contention_wait:\t%llu\t"
183 			"non_contention_time\t%llu\t"
184 			"held_time\t%llu\tmax_held:\t%llu"
185 			, __func__, stats->initialization_fn, stats->line,
186 			stats->acquired, stats->contended,
187 			qdf_log_timestamp_to_usecs(stats->contention_time),
188 			qdf_log_timestamp_to_usecs(stats->max_contention_wait),
189 			qdf_log_timestamp_to_usecs(stats->non_contention_time),
190 			qdf_log_timestamp_to_usecs(stats->held_time),
191 			qdf_log_timestamp_to_usecs(stats->max_held_time));
192 	}
193 
194 	if (QDF_LOCK_STATS_LIST)
195 		qdf_lock_stats_cookie_destroy(stats);
196 }
197 
198 #ifndef MEMORY_DEBUG
199 #define qdf_mem_malloc_debug(x, y, z) qdf_mem_malloc(x)
200 #endif
201 
202 /**
203  * qdf_lock_stats_create() - initialize the lock stats structure
204  */
205 static inline void qdf_lock_stats_create(struct lock_stats *stats,
206 					 const char *func, int line)
207 {
208 	qdf_mem_zero(stats, sizeof(*stats));
209 	stats->initialization_fn = func;
210 	stats->line = line;
211 
212 	if (QDF_LOCK_STATS_LIST)
213 		qdf_lock_stats_cookie_create(stats, func, line);
214 }
215 #endif
216 
217 #include <i_qdf_lock.h>
218 
219 #define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
220 #define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
221 #define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1
222 
223 /**
224  * qdf_semaphore_acquire_timeout() - Take the semaphore, waiting up to @timeout
225  * @m: semaphore to take
226  * @timeout: maximum time to try to take the semaphore
227  * Return: int
228  */
229 static inline int qdf_semaphore_acquire_timeout(struct semaphore *m,
230 						unsigned long timeout)
231 {
232 	return __qdf_semaphore_acquire_timeout(m, timeout);
233 }
234 
235 struct qdf_spinlock {
236 	__qdf_spinlock_t lock;
237 	struct lock_stats stats;
238 };
239 
240 /**
241  * @brief Platform spinlock object
242  */
243 typedef struct qdf_spinlock qdf_spinlock_t;
244 
245 
246 /**
247  * @brief Platform semaphore and mutex objects
248  */
249 typedef __qdf_semaphore_t qdf_semaphore_t;
250 typedef __qdf_mutex_t qdf_mutex_t;
251 
252 /* Function declarations */
253 QDF_STATUS qdf_mutex_create(qdf_mutex_t *m, const char *func, int line);
254 #define qdf_mutex_create(m) qdf_mutex_create(m, __func__, __LINE__)
255 
256 QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *m);
257 
258 QDF_STATUS qdf_mutex_release(qdf_mutex_t *m);
259 
260 QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock);
261 
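/*
 * Minimal usage sketch for the mutex API above (illustrative only; the
 * variable name "ctx_mutex" and the error handling are hypothetical and
 * not part of this header):
 *
 *   static qdf_mutex_t ctx_mutex;
 *
 *   if (qdf_mutex_create(&ctx_mutex) != QDF_STATUS_SUCCESS)
 *       return QDF_STATUS_E_FAILURE;
 *
 *   qdf_mutex_acquire(&ctx_mutex);
 *   ... protected section, may sleep ...
 *   qdf_mutex_release(&ctx_mutex);
 *
 *   qdf_mutex_destroy(&ctx_mutex);
 */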
262 /**
263  * qdf_spinlock_create - Initialize a spinlock
264  * @lock: spinlock object pointer
265  * Return: none
266  */
267 static inline void qdf_spinlock_create(qdf_spinlock_t *lock, const char *func,
268 				       int line)
269 {
270 	__qdf_spinlock_create(&lock->lock);
271 
272 	/* spinlock stats create relies on the spinlock already working */
273 	qdf_lock_stats_create(&lock->stats, func, line);
274 }
275 
276 #define qdf_spinlock_create(x) qdf_spinlock_create(x, __func__, __LINE__)
277 
278 /**
279  * qdf_spinlock_destroy - Delete a spinlock
280  * @lock: spinlock object pointer
281  * Return: none
282  */
283 static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock)
284 {
285 	qdf_lock_stats_destroy(&lock->stats);
286 	__qdf_spinlock_destroy(&lock->lock);
287 }
288 
289 /**
290  * qdf_spin_is_locked() - check if the spinlock is locked
291  * @lock: spinlock object
292  *
293  * Return: nonzero if lock is held.
294  */
295 static inline int qdf_spin_is_locked(qdf_spinlock_t *lock)
296 {
297 	return __qdf_spin_is_locked(&lock->lock);
298 }
299 
300 /**
301  * qdf_spin_trylock_bh() - spin trylock with bottom halves disabled
302  * @lock: spinlock object
303  *
304  * Return: nonzero if lock is acquired
305  */
306 static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock, const char *func)
307 {
308 	int trylock_return;
309 
310 	BEFORE_TRYLOCK(lock);
311 	trylock_return = __qdf_spin_trylock_bh(&lock->lock);
312 	AFTER_TRYLOCK(lock, trylock_return, func);
313 
314 	return trylock_return;
315 }
316 #define qdf_spin_trylock_bh(lock) qdf_spin_trylock_bh(lock, __func__)
317 
318 /**
319  * qdf_spin_trylock() - spin trylock
320  * @lock: spinlock object
321  * Return: nonzero if the lock was acquired
322  */
323 static inline int qdf_spin_trylock(qdf_spinlock_t *lock, const char *func)
324 {
325 	int result = 0;
326 
327 	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
328 	result = __qdf_spin_trylock(&lock->lock);
329 	AFTER_LOCK(lock, func);
330 
331 	return result;
332 }
333 
334 #define qdf_spin_trylock(lock) qdf_spin_trylock(lock, __func__)
335 
336 /**
337  * qdf_spin_lock_bh() - acquire the spinlock, disabling bottom halves (soft IRQs)
338  * @lock: spinlock object pointer
339  * Return: none
340  */
341 static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock, const char *func)
342 {
343 	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
344 	__qdf_spin_lock_bh(&lock->lock);
345 	AFTER_LOCK(lock, func);
346 }
347 
348 #define qdf_spin_lock_bh(lock) qdf_spin_lock_bh(lock, __func__)
349 
350 /**
351  * qdf_spin_unlock_bh() - release the spinlock and re-enable bottom halves
352  * @lock: spinlock object pointer
353  * Return: none
354  */
355 static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock)
356 {
357 	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH);
358 	__qdf_spin_unlock_bh(&lock->lock);
359 }
360 
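/*
 * Minimal usage sketch for the spinlock API above (illustrative only; the
 * variable name "list_lock" is hypothetical and not part of this header):
 *
 *   static qdf_spinlock_t list_lock;
 *
 *   qdf_spinlock_create(&list_lock);
 *
 *   qdf_spin_lock_bh(&list_lock);
 *   ... short, non-sleeping critical section ...
 *   qdf_spin_unlock_bh(&list_lock);
 *
 *   qdf_spinlock_destroy(&list_lock);
 *
 * With QDF_LOCK_STATS enabled, these wrappers record contention and hold
 * times in lock->stats, and BEFORE_UNLOCK() can assert (QDF_BUG) when
 * QDF_LOCK_STATS_BUG_ON is set and a lock is held longer than the
 * configured maximum hold time.
 */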
361 /**
362  * qdf_spinlock_irq_exec - Execute the input function with spinlock held
363  * and interrupt disabled.
364  * @hdl: OS handle
365  * @lock: spinlock to be held for the critical region
366  * @func: critical region function to be executed
367  * @arg: argument passed to the critical region function
368  * Return: Boolean status returned by the critical region function
369  */
370 static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl,
371 					 qdf_spinlock_t *lock,
372 					 qdf_irqlocked_func_t func, void *arg)
373 {
374 	return __qdf_spinlock_irq_exec(hdl, &lock->lock, func, arg);
375 }
376 
377 /**
378  * qdf_spin_lock() - Acquire a spinlock (SMP) and disable preemption
379  * @lock: Lock object
380  *
381  * Return: none
382  */
383 static inline void qdf_spin_lock(qdf_spinlock_t *lock, const char *func)
384 {
385 	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
386 	__qdf_spin_lock(&lock->lock);
387 	AFTER_LOCK(lock, func);
388 }
389 #define qdf_spin_lock(lock) qdf_spin_lock(lock, __func__)
390 
391 /**
392  * qdf_spin_unlock() - Release the spinlock and re-enable preemption
393  * @lock: Lock object
394  *
395  * Return: none
396  */
397 static inline void qdf_spin_unlock(qdf_spinlock_t *lock)
398 {
399 	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK);
400 	__qdf_spin_unlock(&lock->lock);
401 }
402 
403 /**
404  * qdf_spin_lock_irq() - Acquire a spinlock (SMP) and save the IRQ state
405  * @lock: Lock object
406  * @flags: flags
407  *
408  * Return: none
409  */
410 static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags,
411 				     const char *func)
412 {
413 	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
414 	__qdf_spin_lock_irq(&lock->lock.spinlock, flags);
415 	AFTER_LOCK(lock, func);
416 }
417 #define qdf_spin_lock_irq(lock, flags) qdf_spin_lock_irq(lock, flags, __func__)
418 
419 /**
420  * qdf_spin_lock_irqsave() - Acquire a spinlock (SMP), disable preemption,
421  * and disable IRQs while saving the current IRQ state
422  * @lock: Lock object
423  *
424  * Return: none
425  */
426 static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock, const char *func)
427 {
428 	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
429 	__qdf_spin_lock_irqsave(&lock->lock);
430 	AFTER_LOCK(lock, func);
431 }
432 #define qdf_spin_lock_irqsave(lock) qdf_spin_lock_irqsave(lock, __func__)
433 
434 /**
435  * qdf_spin_unlock_irqrestore() - Release the spinlock, re-enable preemption,
436  * and restore the saved IRQ state
437  * @lock: Lock object
438  *
439  * Return: none
440  */
441 static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock)
442 {
443 	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
444 	__qdf_spin_unlock_irqrestore(&lock->lock);
445 }
446 
447 /**
448  * qdf_spin_unlock_irq() - Release the spinlock (SMP) and restore the IRQ state
449  * @lock: Lock object
450  * @flags: flags
451  *
452  * Return: none
453  */
454 static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock,
455 				       unsigned long flags)
456 {
457 	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
458 	__qdf_spin_unlock_irq(&lock->lock.spinlock, flags);
459 }
460 
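/*
 * Illustrative sketch of the IRQ-safe variants above (the lock name
 * "isr_lock" is hypothetical). qdf_spin_lock_irqsave() takes no flags
 * argument here; the saved IRQ state is kept by the underlying
 * __qdf_spinlock_t implementation, while the _irq variants take an
 * explicit flags word.
 *
 *   static qdf_spinlock_t isr_lock;
 *
 *   qdf_spinlock_create(&isr_lock);
 *
 *   qdf_spin_lock_irqsave(&isr_lock);
 *   ... critical section shared with an interrupt handler ...
 *   qdf_spin_unlock_irqrestore(&isr_lock);
 */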
461 /**
462  * qdf_semaphore_init() - initialize a semaphore
463  * @m: Semaphore to initialize
464  * Return: None
465  */
466 static inline void qdf_semaphore_init(qdf_semaphore_t *m)
467 {
468 	__qdf_semaphore_init(m);
469 }
470 
471 /**
472  * qdf_semaphore_acquire() - take the semaphore
473  * @m: Semaphore to take
474  * Return: int
475  */
476 static inline int qdf_semaphore_acquire(qdf_semaphore_t *m)
477 {
478 	return __qdf_semaphore_acquire(m);
479 }
480 
481 /**
482  * qdf_semaphore_release() - give the semaphore
483  * @m: Semaphore to give
484  * Return: None
485  */
486 static inline void qdf_semaphore_release(qdf_semaphore_t *m)
487 {
488 	__qdf_semaphore_release(m);
489 }
490 
491 /**
492  * qdf_semaphore_acquire_intr - Take the semaphore, interruptible version
494  * @m: semaphore to take
495  * Return: int
496  */
497 static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m)
498 {
499 	return __qdf_semaphore_acquire_intr(m);
500 }
501 
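/*
 * Minimal usage sketch for the semaphore wrappers above (illustrative
 * only; "work_sem" is a hypothetical name):
 *
 *   static qdf_semaphore_t work_sem;
 *
 *   qdf_semaphore_init(&work_sem);
 *
 *   qdf_semaphore_acquire(&work_sem);
 *   ... protected, sleepable section ...
 *   qdf_semaphore_release(&work_sem);
 *
 * qdf_semaphore_acquire_intr() and qdf_semaphore_acquire_timeout() offer
 * interruptible and time-bounded waits, respectively.
 */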
502 #ifdef WLAN_WAKE_LOCK_DEBUG
503 /**
504  * qdf_wake_lock_check_for_leaks() - assert no wake lock leaks
505  *
506  * Return: None
507  */
508 void qdf_wake_lock_check_for_leaks(void);
509 
510 /**
511  * qdf_wake_lock_feature_init() - global init logic for wake lock
512  *
513  * Return: None
514  */
515 void qdf_wake_lock_feature_init(void);
516 
517 /**
518  * qdf_wake_lock_feature_deinit() - global de-init logic for wake lock
519  *
520  * Return: None
521  */
522 void qdf_wake_lock_feature_deinit(void);
523 #else
524 static inline void qdf_wake_lock_check_for_leaks(void) { }
525 static inline void qdf_wake_lock_feature_init(void) { }
526 static inline void qdf_wake_lock_feature_deinit(void) { }
527 #endif /* WLAN_WAKE_LOCK_DEBUG */
528 
529 /**
530  * __qdf_wake_lock_create() - initialize a wake lock
531  * @lock: The wake lock to initialize
532  * @name: Name of wake lock
533  * @func: caller function
534  * @line: caller line
535  * Return:
536  * QDF status success: if wake lock is initialized
537  * QDF status failure: if wake lock was not initialized
538  */
539 QDF_STATUS __qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name,
540 				  const char *func, uint32_t line);
541 
542 /**
543  * qdf_wake_lock_create() - initialize a wakeup source lock
544  * @lock: the wakeup source lock to initialize
545  * @name: the name of wakeup source lock
546  *
547  * Return: QDF_STATUS
548  */
549 #define qdf_wake_lock_create(lock, name) \
550 	__qdf_wake_lock_create(lock, name, __func__, __LINE__)
551 
552 QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason);
553 
554 const char *qdf_wake_lock_name(qdf_wake_lock_t *lock);
555 QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock,
556 					 uint32_t msec);
557 
558 QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason);
559 
560 /**
561  * __qdf_wake_lock_destroy() - destroy a wake lock
562  * @lock: The wake lock to destroy
563  * @func: caller function
564  * @line: caller line
565  *
566  * Return: None
567  */
568 void __qdf_wake_lock_destroy(qdf_wake_lock_t *lock,
569 			     const char *func, uint32_t line);
570 
571 /**
572  * qdf_wake_lock_destroy() - deinitialize a wakeup source lock
573  * @lock: the wakeup source lock to de-initialize
574  *
575  * Return: None
576  */
577 #define qdf_wake_lock_destroy(lock) \
578 	__qdf_wake_lock_destroy(lock, __func__, __LINE__)
579 
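/*
 * Minimal usage sketch for the wake lock API above (illustrative only;
 * "scan_wl" and the reason value are hypothetical; real callers pass a
 * module-specific reason code):
 *
 *   static qdf_wake_lock_t scan_wl;
 *
 *   qdf_wake_lock_create(&scan_wl, "scan_wl");
 *
 *   qdf_wake_lock_acquire(&scan_wl, 0);      reason code is caller-defined
 *   ...
 *   qdf_wake_lock_release(&scan_wl, 0);
 *
 *   qdf_wake_lock_destroy(&scan_wl);
 *
 * qdf_wake_lock_timeout_acquire() can be used instead when the hold should
 * expire automatically after a given number of milliseconds.
 */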
580 void qdf_pm_system_wakeup(void);
581 
582 QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock);
583 
584 QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock);
585 
586 /**
587  * enum qdf_rtpm_call_type - runtime PM get and put call types
588  * @QDF_RTPM_GET: Increment usage count and when system is suspended
589  *               schedule resume process, return depends on pm state.
590  * @QDF_RTPM_GET_FORCE: Increment usage count and when system is suspended
591  *                     schedule resume process, returns success irrespective of
592  *                     pm_state.
593  * @QDF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
594  *                    wait till process is resumed.
595  * @QDF_RTPM_GET_NORESUME: Only increments usage count.
596  * @QDF_RTPM_PUT: Decrements usage count and puts system in idle state.
597  * @QDF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
598  *                            suspended state.
599  * @QDF_RTPM_PUT_NOIDLE: Decrements usage count.
600  */
601 enum qdf_rtpm_call_type {
602 	QDF_RTPM_GET,
603 	QDF_RTPM_GET_FORCE,
604 	QDF_RTPM_GET_SYNC,
605 	QDF_RTPM_GET_NORESUME,
606 	QDF_RTPM_PUT,
607 	QDF_RTPM_PUT_SYNC_SUSPEND,
608 	QDF_RTPM_PUT_NOIDLE,
609 };
610 
611 /**
612  * enum qdf_rtpm_client_id - modules registered with runtime pm module
613  * @QDF_RTPM_ID_RESERVED: Reserved ID
614  * @QDF_RTPM_ID_PM_QOS_NOTIFY: PM QOS context
615  * @QDF_RTPM_ID_WIPHY_SUSPEND: wiphy suspend context
616  * @QDF_RTPM_ID_MAX: Max id
617  */
618 enum  qdf_rtpm_client_id {
619 	QDF_RTPM_ID_RESERVED,
620 	QDF_RTPM_ID_PM_QOS_NOTIFY,
621 	QDF_RTPM_ID_WIPHY_SUSPEND,
622 	QDF_RTPM_ID_MAX
623 };
624 
625 #define qdf_runtime_lock_init(lock) __qdf_runtime_lock_init(lock, #lock)
626 
627 #ifdef FEATURE_RUNTIME_PM
628 /**
629  * qdf_rtpm_register() - QDF wrapper to register a module with runtime PM.
630  * @id: ID of the module which needs to be registered
631  * @hif_rpm_cbk: callback to be called when a get is issued while suspended
633  *
634  * Return: success status if registered
635  */
636 QDF_STATUS qdf_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
637 
638 /**
639  * qdf_rtpm_deregister() - QDF wrapper to deregister the module
640  * @id: ID of the module which needs to be de-registered
641  *
642  * Return: success status if successfully de-registered
643  */
644 QDF_STATUS qdf_rtpm_deregister(uint32_t id);
645 
646 /**
647  * __qdf_runtime_lock_init() - initialize runtime lock
648  * @lock: runtime lock to initialize
649  * @name: name of the runtime lock
650  * Initialize a runtime pm lock.  This lock can be used
651  * to prevent the runtime pm system from putting the bus
652  * to sleep.
653  *
654  * Return: Success if lock initialized
655  */
656 QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
657 
658 /**
659  * qdf_runtime_lock_deinit() - deinitialize runtime pm lock
660  * @lock: the lock to deinitialize
661  *
662  * Ensures the lock is released. Frees the runtime lock.
663  *
664  * Return: void
665  */
666 void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock);
667 
668 /**
669  * qdf_rtpm_get() - Increment usage_count on the device to avoid suspend.
670  * @type: get call type from enum qdf_rtpm_call_type
671  * @id: ID of the module calling get()
672  *
673  * Return: success if a get has been issued, else error code.
674  */
675 QDF_STATUS qdf_rtpm_get(uint8_t type, uint32_t id);
676 
677 /**
678  * qdf_rtpm_put() - Decrement usage_count on the device to allow suspend.
679  * @type: put call type from enum qdf_rtpm_call_type
680  * @id: ID of the module calling put()
681  *
682  * Return: success if a put has been issued, else error code.
683  */
684 QDF_STATUS qdf_rtpm_put(uint8_t type, uint32_t id);
685 
686 /**
687  * qdf_runtime_pm_prevent_suspend() - Prevent runtime suspend
688  * @lock: runtime PM lock
689  *
690  * This function prevents runtime suspend by incrementing the
691  * device's usage count.
692  *
693  * Return: status
694  */
695 QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock);
696 
697 /**
698  * qdf_runtime_pm_allow_suspend() - Allow runtime suspend
699  * @lock: runtime PM lock
700  *
701  * This function allows runtime suspend by decrementing the
702  * device's usage count.
703  *
704  * Return: status
705  */
706 QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock);
707 
708 /**
709  * qdf_rtpm_sync_resume() - Invoke synchronous runtime resume.
710  *
711  * This function will invoke synchronous runtime resume.
712  *
713  * Return: Success if state is ON
714  */
715 QDF_STATUS qdf_rtpm_sync_resume(void);
716 
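/*
 * Minimal usage sketch for the runtime PM helpers above (illustrative
 * only; "tx_runtime_lock" is a hypothetical name and error handling is
 * omitted):
 *
 *   static qdf_runtime_lock_t tx_runtime_lock;
 *
 *   qdf_runtime_lock_init(&tx_runtime_lock);
 *
 *   qdf_runtime_pm_prevent_suspend(&tx_runtime_lock);
 *   ... bus must stay awake here ...
 *   qdf_runtime_pm_allow_suspend(&tx_runtime_lock);
 *
 *   qdf_runtime_lock_deinit(&tx_runtime_lock);
 *
 * qdf_rtpm_get()/qdf_rtpm_put() operate on the device usage count
 * directly, e.g. qdf_rtpm_get(QDF_RTPM_GET, <client id>) paired with
 * qdf_rtpm_put(QDF_RTPM_PUT, <client id>) for a client registered via
 * qdf_rtpm_register().
 */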
717 #else
718 static inline
719 QDF_STATUS qdf_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
720 {
721 	return QDF_STATUS_SUCCESS;
722 }
723 
724 static inline
725 QDF_STATUS qdf_rtpm_deregister(uint32_t id)
726 {
727 	return QDF_STATUS_SUCCESS;
728 }
729 
730 static inline
731 QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
732 {
733 	return QDF_STATUS_SUCCESS;
734 }
735 
736 static inline
737 void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock)
738 {
739 }
740 
741 static inline
742 QDF_STATUS qdf_rtpm_get(uint8_t type, uint32_t id)
743 {
744 	return QDF_STATUS_SUCCESS;
745 }
746 
747 static inline
748 QDF_STATUS qdf_rtpm_put(uint8_t type, uint32_t id)
749 {
750 	return QDF_STATUS_SUCCESS;
751 }
752 
753 static inline
754 QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock)
755 {
756 	return QDF_STATUS_SUCCESS;
757 }
758 
759 static inline
760 QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock)
761 {
762 	return QDF_STATUS_SUCCESS;
763 }
764 
765 static inline
766 QDF_STATUS qdf_rtpm_sync_resume(void)
767 {
768 	return QDF_STATUS_SUCCESS;
769 }
770 
771 #endif /* FEATURE_RUNTIME_PM */
772 
773 #endif /* _QDF_LOCK_H */
774