/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_lock.h
 * This file abstracts locking operations.
 */

#ifndef _QDF_LOCK_H
#define _QDF_LOCK_H

#include <qdf_types.h>
#include <qdf_mem.h>
#include <qdf_time.h>
#include <i_qdf_trace.h>

#ifndef QDF_LOCK_STATS
#define QDF_LOCK_STATS 0
#endif
#ifndef QDF_LOCK_STATS_DESTROY_PRINT
#define QDF_LOCK_STATS_DESTROY_PRINT 0
#endif
#ifndef QDF_LOCK_STATS_BUG_ON
#define QDF_LOCK_STATS_BUG_ON 0
#endif
#ifndef QDF_LOCK_STATS_LIST
#define QDF_LOCK_STATS_LIST 0
#endif

/* Max hold time in microseconds, 0 to disable detection */
#ifdef VCPU_TIMESTOLEN
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ         400000
#else
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ         10000
#endif
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK                 0

#if QDF_LOCK_STATS
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH        2000000
#else
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH        1000000
#endif

#if !QDF_LOCK_STATS
struct lock_stats {};
#define BEFORE_LOCK(x...) do {} while (0)
#define AFTER_LOCK(x...) do {} while (0)
#define BEFORE_TRYLOCK(x...) do {} while (0)
#define AFTER_TRYLOCK(x...) do {} while (0)
#define BEFORE_UNLOCK(x...) do {} while (0)
#define qdf_lock_stats_create(x...) do {} while (0)
#define qdf_lock_stats_destroy(x...) do {} while (0)
#define qdf_lock_stats_init(x...) do {} while (0)
#define qdf_lock_stats_deinit(x...) do {} while (0)
#else
void qdf_lock_stats_init(void);
void qdf_lock_stats_deinit(void);
struct qdf_lock_cookie;
struct lock_stats {
	const char *initialization_fn;
	const char *acquired_by;
	int line;
	int acquired;
	int contended;
	uint64_t contention_time;
	uint64_t non_contention_time;
	uint64_t held_time;
	uint64_t last_acquired;
	uint64_t max_contention_wait;
	uint64_t max_held_time;
	int num_large_contentions;
	int num_large_holds;
	struct qdf_lock_cookie *cookie;
};
#define LARGE_CONTENTION QDF_LOG_TIMESTAMP_CYCLES_PER_10_US

#define BEFORE_LOCK(lock, was_locked) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time;  \
	bool BEFORE_LOCK_is_locked = was_locked; \
	BEFORE_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	do {} while (0)


#define AFTER_LOCK(lock, func) \
	lock->stats.acquired_by = func; \
	AFTER_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	lock->stats.acquired++; \
	lock->stats.last_acquired = AFTER_LOCK_time; \
	if (BEFORE_LOCK_is_locked) { \
		lock->stats.contended++; \
		lock->stats.contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} else { \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > LARGE_CONTENTION) \
		lock->stats.num_large_contentions++; \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > \
	    lock->stats.max_contention_wait) \
		lock->stats.max_contention_wait = \
			AFTER_LOCK_time - BEFORE_LOCK_time; \
} while (0)
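
/*
 * Note: BEFORE_LOCK() deliberately leaves its "do {" block open; the trailing
 * "do {} while (0)" only absorbs the caller's semicolon. The matching
 * "} while (0)" is supplied by AFTER_LOCK(), so the two macros must always be
 * used as a pair around the lock call, as in qdf_spin_lock() below:
 *
 *	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
 *	__qdf_spin_lock(&lock->lock);
 *	AFTER_LOCK(lock, func);
 */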

#define BEFORE_TRYLOCK(lock) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time;  \
	BEFORE_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	do {} while (0)

#define AFTER_TRYLOCK(lock, trylock_return, func) \
	AFTER_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	if (trylock_return) { \
		lock->stats.acquired++; \
		lock->stats.last_acquired = AFTER_LOCK_time; \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
		lock->stats.acquired_by = func; \
	} \
} while (0)

/* max_hold_time in US */
#define BEFORE_UNLOCK(lock, max_hold_time) \
do {\
	uint64_t BEFORE_UNLOCK_time;  \
	uint64_t held_time;  \
	BEFORE_UNLOCK_time = qdf_get_log_timestamp_lightweight(); \
\
	if (unlikely(BEFORE_UNLOCK_time < lock->stats.last_acquired)) \
		held_time = 0; \
	else \
		held_time = BEFORE_UNLOCK_time - lock->stats.last_acquired; \
\
	lock->stats.held_time += held_time; \
\
	if (held_time > lock->stats.max_held_time) \
		lock->stats.max_held_time = held_time; \
\
	if (held_time > LARGE_CONTENTION) \
		lock->stats.num_large_holds++; \
	if (QDF_LOCK_STATS_BUG_ON && max_hold_time && \
	    held_time > qdf_usecs_to_log_timestamp(max_hold_time)) { \
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, \
			"BEFORE_UNLOCK: lock held too long (%lluus)", \
			qdf_log_timestamp_to_usecs(held_time)); \
		QDF_BUG(0); \
	} \
	lock->stats.acquired_by = NULL; \
} while (0)

void qdf_lock_stats_cookie_destroy(struct lock_stats *stats);
void qdf_lock_stats_cookie_create(struct lock_stats *stats,
				  const char *func, int line);

static inline void qdf_lock_stats_destroy(struct lock_stats *stats)
{
	if (QDF_LOCK_STATS_DESTROY_PRINT) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
			"%s: lock: %s %d \t"
			"acquired:\t%d\tcontended:\t%d\t"
			"contention_time\t%llu\tmax_contention_wait:\t%llu\t"
			"non_contention_time\t%llu\t"
			"held_time\t%llu\tmax_held:\t%llu"
			, __func__, stats->initialization_fn, stats->line,
			stats->acquired, stats->contended,
			qdf_log_timestamp_to_usecs(stats->contention_time),
			qdf_log_timestamp_to_usecs(stats->max_contention_wait),
			qdf_log_timestamp_to_usecs(stats->non_contention_time),
			qdf_log_timestamp_to_usecs(stats->held_time),
			qdf_log_timestamp_to_usecs(stats->max_held_time));
	}

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_destroy(stats);
}

#ifndef MEMORY_DEBUG
#define qdf_mem_malloc_debug(x, y, z) qdf_mem_malloc(x)
#endif

/**
 * qdf_lock_stats_create() - initialize the lock stats structure
 * @stats: stats to initialize
 * @func: calling function
 * @line: calling line number
 */
static inline void qdf_lock_stats_create(struct lock_stats *stats,
					 const char *func, int line)
{
	qdf_mem_zero(stats, sizeof(*stats));
	stats->initialization_fn = func;
	stats->line = line;

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_create(stats, func, line);
}
#endif

#include <i_qdf_lock.h>

#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1

/**
 * qdf_semaphore_acquire_timeout() - Take the semaphore before timeout
 * @m: semaphore to take
 * @timeout: maximum time to try to take the semaphore
 *
 * Return: int
 */
static inline int qdf_semaphore_acquire_timeout(struct semaphore *m,
						unsigned long timeout)
{
	return __qdf_semaphore_acquire_timeout(m, timeout);
}

struct qdf_spinlock {
	__qdf_spinlock_t lock;
	struct lock_stats stats;
};

/**
 * typedef qdf_spinlock_t - Abstracted spinlock object
 *
 * Abstracted object. Clients must not make any assumptions about the
 * composition of this object
 */
typedef struct qdf_spinlock qdf_spinlock_t;

/**
 * typedef qdf_semaphore_t - Abstracted semaphore object
 *
 * Abstracted object. Clients must not make any assumptions about the
 * composition of this object
 */
typedef __qdf_semaphore_t qdf_semaphore_t;

/**
 * typedef qdf_mutex_t - Abstracted mutex object
 *
 * Abstracted object. Clients must not make any assumptions about the
 * composition of this object
 */
typedef __qdf_mutex_t qdf_mutex_t;

QDF_STATUS qdf_mutex_create(qdf_mutex_t *lock, const char *func, int line);

/**
 * qdf_mutex_create() - Initialize a mutex
 * @lock: pointer to the qdf_mutex_t mutex to initialize
 *
 * Return: QDF_STATUS_SUCCESS on success, else QDF_STATUS failure
 */
#define qdf_mutex_create(lock) qdf_mutex_create(lock, __func__, __LINE__)

/**
 * qdf_mutex_acquire() - acquire a QDF lock
 * @lock: Pointer to the opaque lock object to acquire
 *
 * A lock object is acquired by calling qdf_mutex_acquire().  If the lock
 * is already locked, the calling thread shall block until the lock becomes
 * available. This operation shall return with the lock object referenced by
 * lock in the locked state with the calling thread as its owner.
 *
 * Return:
 * QDF_STATUS_SUCCESS if the lock was successfully acquired
 * QDF failure reason codes if the lock is not initialized and cannot be used
 */
QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *lock);

/**
 * qdf_mutex_release() - release a QDF lock
 * @lock: Pointer to the opaque lock object to be released
 *
 * The qdf_mutex_release() function shall release the lock object
 * referenced by 'lock'.
 *
 * If a thread attempts to release a lock that it has not acquired, or a
 * lock that is not initialized, an error is returned.
 *
 * Return:
 * QDF_STATUS_SUCCESS if the lock was successfully released
 * QDF failure reason codes if the lock is not initialized and cannot be used
 */
QDF_STATUS qdf_mutex_release(qdf_mutex_t *lock);

/**
 * qdf_mutex_destroy() - destroy a QDF lock
 * @lock: Pointer to the opaque lock object to be destroyed
 *
 * This function shall destroy the lock object referenced by lock. After a
 * successful return from qdf_mutex_destroy() the lock object becomes, in
 * effect, uninitialized.
 *
 * A destroyed lock object can be reinitialized using qdf_mutex_create();
 * the results of otherwise referencing the object after it has been destroyed
 * are undefined.  Calls to QDF lock functions to manipulate the lock, such
 * as qdf_mutex_acquire(), will fail if the lock is destroyed.  Therefore,
 * don't use the lock after it has been destroyed until it has
 * been re-initialized.
 *
 * Return:
 * QDF_STATUS_SUCCESS if the lock was successfully destroyed
 * QDF failure reason codes if the lock is not initialized and cannot be used
 */
QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock);
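
/*
 * Example (illustrative sketch only; my_ctx and its fields are hypothetical):
 * typical qdf_mutex_t lifecycle using the create/acquire/release/destroy API
 * declared above.
 *
 *	struct my_ctx {
 *		qdf_mutex_t lock;
 *		uint32_t counter;
 *	};
 *
 *	static QDF_STATUS my_ctx_init(struct my_ctx *ctx)
 *	{
 *		return qdf_mutex_create(&ctx->lock);
 *	}
 *
 *	static void my_ctx_bump(struct my_ctx *ctx)
 *	{
 *		if (qdf_mutex_acquire(&ctx->lock) == QDF_STATUS_SUCCESS) {
 *			ctx->counter++;
 *			qdf_mutex_release(&ctx->lock);
 *		}
 *	}
 *
 *	static void my_ctx_deinit(struct my_ctx *ctx)
 *	{
 *		qdf_mutex_destroy(&ctx->lock);
 *	}
 */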

static inline void qdf_spinlock_create(qdf_spinlock_t *lock, const char *func,
				       int line)
{
	__qdf_spinlock_create(&lock->lock);

	/* spinlock stats create relies on the spinlock already working */
	qdf_lock_stats_create(&lock->stats, func, line);
}

/**
 * qdf_spinlock_create() - Initialize a spinlock
 * @lock: spinlock object pointer
 *
 * Return: none
 */
#define qdf_spinlock_create(lock) qdf_spinlock_create(lock, __func__, __LINE__)

/**
 * qdf_spinlock_destroy() - Delete a spinlock
 * @lock: spinlock object pointer
 *
 * Return: none
 */
static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock)
{
	qdf_lock_stats_destroy(&lock->stats);
	__qdf_spinlock_destroy(&lock->lock);
}

/**
 * qdf_spin_is_locked() - check if the spinlock is locked
 * @lock: spinlock object
 *
 * Return: nonzero if lock is held.
 */
static inline int qdf_spin_is_locked(qdf_spinlock_t *lock)
{
	return __qdf_spin_is_locked(&lock->lock);
}

static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock, const char *func)
{
	int trylock_return;

	BEFORE_TRYLOCK(lock);
	trylock_return = __qdf_spin_trylock_bh(&lock->lock);
	AFTER_TRYLOCK(lock, trylock_return, func);

	return trylock_return;
}

/**
 * qdf_spin_trylock_bh() - spin trylock bottomhalf
 * @lock: spinlock object
 *
 * Return: nonzero if lock is acquired
 */
#define qdf_spin_trylock_bh(lock) qdf_spin_trylock_bh(lock, __func__)

static inline int qdf_spin_trylock(qdf_spinlock_t *lock, const char *func)
{
	int result = 0;

	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	result = __qdf_spin_trylock(&lock->lock);
	AFTER_LOCK(lock, func);

	return result;
}

/**
 * qdf_spin_trylock() - spin trylock
 * @lock: spinlock object
 *
 * Return: nonzero if lock is acquired
 */
#define qdf_spin_trylock(lock) qdf_spin_trylock(lock, __func__)

static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_bh(&lock->lock);
	AFTER_LOCK(lock, func);
}

/**
 * qdf_spin_lock_bh() - acquire the spinlock and disable bottom halves
 *                      (soft IRQ context)
 * @lock: spinlock object pointer
 *
 * Return: none
 */
#define qdf_spin_lock_bh(lock) qdf_spin_lock_bh(lock, __func__)

/**
 * qdf_spin_unlock_bh() - release the spinlock and re-enable bottom halves
 *                        (soft IRQ context)
 * @lock: spinlock object pointer
 *
 * Return: none
 */
static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH);
	__qdf_spin_unlock_bh(&lock->lock);
}
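
/*
 * Example (illustrative sketch only; rx_list_lock is hypothetical): protecting
 * data that is also touched from bottom-half (softirq) context.
 *
 *	static qdf_spinlock_t rx_list_lock;
 *
 *	qdf_spinlock_create(&rx_list_lock);
 *	...
 *	qdf_spin_lock_bh(&rx_list_lock);
 *	(modify the data shared with the bottom half)
 *	qdf_spin_unlock_bh(&rx_list_lock);
 *	...
 *	qdf_spinlock_destroy(&rx_list_lock);
 */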

/**
 * qdf_spinlock_irq_exec() - Execute the input function with spinlock held
 *                           and interrupts disabled.
 * @hdl: OS handle
 * @lock: spinlock to be held for the critical region
 * @func: critical region function to be executed
 * @arg: argument of the critical region function
 *
 * Return: Boolean status returned by the critical region function
 */
static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl,
					 qdf_spinlock_t *lock,
					 qdf_irqlocked_func_t func, void *arg)
{
	return __qdf_spinlock_irq_exec(hdl, &lock->lock, func, arg);
}

static inline void qdf_spin_lock(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock(&lock->lock);
	AFTER_LOCK(lock, func);
}

/**
 * qdf_spin_lock() - acquire a spinlock (SMP) and disable preemption
 *                   (preemptible kernels)
 * @lock: Lock object
 *
 * Return: none
 */
#define qdf_spin_lock(lock) qdf_spin_lock(lock, __func__)

/**
 * qdf_spin_unlock() - release the spinlock and re-enable preemption
 * @lock: Lock object
 *
 * Return: none
 */
static inline void qdf_spin_unlock(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK);
	__qdf_spin_unlock(&lock->lock);
}

static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags,
				     const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irq(&lock->lock.spinlock, flags);
	AFTER_LOCK(lock, func);
}

/**
 * qdf_spin_lock_irq() - acquire a spinlock (SMP) and save the IRQ state
 * @lock: Lock object
 * @flags: flags
 *
 * Return: none
 */
#define qdf_spin_lock_irq(lock, flags) qdf_spin_lock_irq(lock, flags, __func__)

static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irqsave(&lock->lock);
	AFTER_LOCK(lock, func);
}

/**
 * qdf_spin_lock_irqsave() - acquire a spinlock (SMP), disable preemption
 *                           (preemptible kernels) and disable IRQs
 * @lock: Lock object
 *
 * Return: none
 */
#define qdf_spin_lock_irqsave(lock) qdf_spin_lock_irqsave(lock, __func__)

/**
 * qdf_spin_unlock_irqrestore() - release the spinlock, re-enable preemption
 *                                and restore the IRQ state
 * @lock: Lock object
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irqrestore(&lock->lock);
}
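
/*
 * Example (illustrative sketch only; irq_shared_lock is hypothetical):
 * protecting state that is also touched from hard-IRQ context. Note that the
 * irqsave/irqrestore wrappers above keep the saved flags inside the lock
 * object, so no flags variable is passed by the caller.
 *
 *	qdf_spin_lock_irqsave(&irq_shared_lock);
 *	(update the state shared with the interrupt handler)
 *	qdf_spin_unlock_irqrestore(&irq_shared_lock);
 */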

/**
 * qdf_spin_unlock_irq() - release the spinlock (SMP) and restore the IRQ state
 * @lock: Lock object
 * @flags: flags
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock,
				       unsigned long flags)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irq(&lock->lock.spinlock, flags);
}

/**
 * qdf_semaphore_init() - initialize a semaphore
 * @m: Semaphore to initialize
 * Return: None
 */
static inline void qdf_semaphore_init(qdf_semaphore_t *m)
{
	__qdf_semaphore_init(m);
}

/**
 * qdf_semaphore_acquire() - take the semaphore
 * @m: Semaphore to take
 *
 * Return: int
 */
static inline int qdf_semaphore_acquire(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire(m);
}

/**
 * qdf_semaphore_release() - give the semaphore
 * @m: Semaphore to give
 *
 * Return: None
 */
static inline void qdf_semaphore_release(qdf_semaphore_t *m)
{
	__qdf_semaphore_release(m);
}

/**
 * qdf_semaphore_acquire_intr() - Take the semaphore, interruptible
 * @m: semaphore to take
 *
 * This function allows a user-space process that is waiting on a
 * semaphore to be interrupted by the user.  If the operation is
 * interrupted, the function returns a nonzero value, and the caller
 * does not hold the semaphore.  Always check the return value and
 * respond accordingly.
 *
 * Return: 0 if the semaphore was acquired, non-zero if not acquired
 */
static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire_intr(m);
}
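
/*
 * Example (illustrative sketch only; fw_ready_sem is hypothetical): one thread
 * blocking until another signals completion.
 *
 *	static qdf_semaphore_t fw_ready_sem;
 *
 *	qdf_semaphore_init(&fw_ready_sem);
 *	...
 *	(waiter)
 *	if (qdf_semaphore_acquire_intr(&fw_ready_sem))
 *		(interrupted: the caller does not hold the semaphore)
 *	...
 *	(signaller)
 *	qdf_semaphore_release(&fw_ready_sem);
 */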

#ifdef WLAN_WAKE_LOCK_DEBUG
/**
 * qdf_wake_lock_check_for_leaks() - assert no wake lock leaks
 *
 * Return: None
 */
void qdf_wake_lock_check_for_leaks(void);

/**
 * qdf_wake_lock_feature_init() - global init logic for wake lock
 *
 * Return: None
 */
void qdf_wake_lock_feature_init(void);

/**
 * qdf_wake_lock_feature_deinit() - global de-init logic for wake lock
 *
 * Return: None
 */
void qdf_wake_lock_feature_deinit(void);
#else
static inline void qdf_wake_lock_check_for_leaks(void) { }
static inline void qdf_wake_lock_feature_init(void) { }
static inline void qdf_wake_lock_feature_deinit(void) { }
#endif /* WLAN_WAKE_LOCK_DEBUG */

/**
 * __qdf_wake_lock_create() - initialize a wake lock
 * @lock: The wake lock to initialize
 * @name: Name of wake lock
 * @func: caller function
 * @line: caller line
 * Return:
 * QDF status success if wake lock is initialized
 * QDF status failure if wake lock was not initialized
 */
QDF_STATUS __qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name,
				  const char *func, uint32_t line);

/**
 * qdf_wake_lock_create() - initialize a wakeup source lock
 * @lock: the wakeup source lock to initialize
 * @name: the name of wakeup source lock
 *
 * Return: QDF_STATUS
 */
#define qdf_wake_lock_create(lock, name) \
	__qdf_wake_lock_create(lock, name, __func__, __LINE__)

/**
 * qdf_wake_lock_acquire() - acquires a wake lock
 * @lock: The wake lock to acquire
 * @reason: Reason for wakelock
 *
 * Return:
 * QDF status success if wake lock is acquired
 * QDF status failure if wake lock was not acquired
 */
QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason);

/**
 * qdf_wake_lock_name() - get the name of a wakelock
 * @lock: Pointer to the wakelock
 *
 * Return: Pointer to the name if it is valid or a default string
 */
const char *qdf_wake_lock_name(qdf_wake_lock_t *lock);

/**
 * qdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
 * @lock: The wake lock to acquire
 * @msec: timeout in ms (0 for no timeout)
 *
 * Return:
 * QDF status success if wake lock is acquired
 * QDF status failure if wake lock was not acquired
 */
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock,
					 uint32_t msec);

/**
 * qdf_wake_lock_release() - releases a wake lock
 * @lock: the wake lock to release
 * @reason: Reason for wakelock
 *
 * Return:
 * QDF status success if wake lock is released
 * QDF status failure if wake lock was not released
 */
QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason);

/**
 * __qdf_wake_lock_destroy() - destroy a wake lock
 * @lock: The wake lock to destroy
 * @func: caller function
 * @line: caller line
 *
 * Return: None
 */
void __qdf_wake_lock_destroy(qdf_wake_lock_t *lock,
			     const char *func, uint32_t line);

/**
 * qdf_wake_lock_destroy() - deinitialize a wakeup source lock
 * @lock: the wakeup source lock to de-initialize
 *
 * Return: None
 */
#define qdf_wake_lock_destroy(lock) \
	__qdf_wake_lock_destroy(lock, __func__, __LINE__)
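
/*
 * Example (illustrative sketch only; the lock variable, "scan" name and
 * 2000 ms timeout are hypothetical): keeping the system awake across a wakeup
 * event. The reason argument of acquire/release is a driver-defined reason
 * code.
 *
 *	static qdf_wake_lock_t scan_wake_lock;
 *
 *	qdf_wake_lock_create(&scan_wake_lock, "scan");
 *	...
 *	qdf_wake_lock_timeout_acquire(&scan_wake_lock, 2000);
 *	(or pair qdf_wake_lock_acquire(&scan_wake_lock, reason) with
 *	 qdf_wake_lock_release(&scan_wake_lock, reason))
 *	...
 *	qdf_wake_lock_destroy(&scan_wake_lock);
 */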

/**
 * qdf_pm_system_wakeup() - wakeup system
 *
 * Return: None
 */
void qdf_pm_system_wakeup(void);

/**
 * qdf_spinlock_acquire() - acquires a spin lock
 * @lock: Spin lock to acquire
 *
 * Return: QDF status success if the spin lock is acquired
 */
QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock);

/**
 * qdf_spinlock_release() - release a spin lock
 * @lock: Spin lock to release
 *
 * Return: QDF status success if the spin lock is released
 */
QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock);

/**
 * enum qdf_rtpm_call_type - get and put call types
 * @QDF_RTPM_GET: Increment the usage count and, if the system is suspended,
 *                schedule the resume process; the return value depends on
 *                the pm state.
 * @QDF_RTPM_GET_FORCE: Increment the usage count and, if the system is
 *                      suspended, schedule the resume process; returns
 *                      success irrespective of the pm state.
 * @QDF_RTPM_GET_SYNC: Increment the usage count and, if the system is
 *                     suspended, wait until the resume process completes.
 * @QDF_RTPM_GET_NORESUME: Only increment the usage count.
 * @QDF_RTPM_PUT: Decrement the usage count and put the system in the idle
 *                state.
 * @QDF_RTPM_PUT_SYNC_SUSPEND: Decrement the usage count and put the system
 *                             in the suspended state.
 * @QDF_RTPM_PUT_NOIDLE: Only decrement the usage count.
 */
enum qdf_rtpm_call_type {
	QDF_RTPM_GET,
	QDF_RTPM_GET_FORCE,
	QDF_RTPM_GET_SYNC,
	QDF_RTPM_GET_NORESUME,
	QDF_RTPM_PUT,
	QDF_RTPM_PUT_SYNC_SUSPEND,
	QDF_RTPM_PUT_NOIDLE,
};

/**
 * enum qdf_rtpm_client_id - modules registered with runtime pm module
 * @QDF_RTPM_ID_RESERVED: Reserved ID
 * @QDF_RTPM_ID_PM_QOS_NOTIFY: PM QOS context
 * @QDF_RTPM_ID_WIPHY_SUSPEND: APSS Bus suspend context
 * @QDF_RTPM_ID_MAX: Max id
 */
enum qdf_rtpm_client_id {
	QDF_RTPM_ID_RESERVED,
	QDF_RTPM_ID_PM_QOS_NOTIFY,
	QDF_RTPM_ID_WIPHY_SUSPEND,
	QDF_RTPM_ID_MAX
};

/**
 * qdf_runtime_lock_init() - initialize runtime lock
 * @lock: the lock to initialize
 *
 * Initialize a runtime pm lock.  This lock can be used
 * to prevent the runtime pm system from putting the bus
 * to sleep.
 *
 * Return: Success if lock initialized
 */
#define qdf_runtime_lock_init(lock) __qdf_runtime_lock_init(lock, #lock)

#ifdef FEATURE_RUNTIME_PM
/**
 * qdf_rtpm_register() - QDF wrapper to register a module with runtime PM.
 * @id: ID of the module which needs to be registered
 * @hif_rpm_cbk: callback to be called when a get is issued while in the
 *               suspended state.
 *
 * Return: success status if registered
 */
QDF_STATUS qdf_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));

/**
 * qdf_rtpm_deregister() - QDF wrapper to deregister the module
 * @id: ID of the module which needs to be de-registered
 *
 * Return: success status if successfully de-registered
 */
QDF_STATUS qdf_rtpm_deregister(uint32_t id);

/**
 * __qdf_runtime_lock_init() - initialize runtime lock
 * @lock: the lock to initialize
 * @name: name of the runtime lock
 *
 * Initialize a runtime pm lock.  This lock can be used
 * to prevent the runtime pm system from putting the bus
 * to sleep.
 *
 * Return: Success if lock initialized
 */
QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);

/**
 * qdf_runtime_lock_deinit() - deinitialize runtime pm lock
 * @lock: the lock to deinitialize
 *
 * Ensures the lock is released. Frees the runtime lock.
 *
 * Return: void
 */
void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock);

/**
 * qdf_rtpm_get() - Increment usage_count on the device to avoid suspend.
 * @type: get call type from enum qdf_rtpm_call_type
 * @id: ID of the module calling qdf_rtpm_get()
 *
 * Return: success if a get has been issued, else error code.
 */
QDF_STATUS qdf_rtpm_get(uint8_t type, uint32_t id);

/**
 * qdf_rtpm_put() - Decrement usage_count on the device to allow suspend.
 * @type: put call type from enum qdf_rtpm_call_type
 * @id: ID of the module calling qdf_rtpm_put()
 *
 * Return: success if a put has been issued, else error code.
 */
QDF_STATUS qdf_rtpm_put(uint8_t type, uint32_t id);
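
/*
 * Example (illustrative sketch only; the choice of call type and client ID is
 * arbitrary): a registered client bumping the runtime PM usage count around a
 * bus access, using the call types and client IDs defined above.
 *
 *	if (qdf_rtpm_get(QDF_RTPM_GET,
 *			 QDF_RTPM_ID_WIPHY_SUSPEND) == QDF_STATUS_SUCCESS) {
 *		(access the bus)
 *		qdf_rtpm_put(QDF_RTPM_PUT, QDF_RTPM_ID_WIPHY_SUSPEND);
 *	}
 */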

/**
 * qdf_runtime_pm_prevent_suspend() - Prevent Runtime suspend
 * @lock: runtime PM lock
 *
 * This function will prevent runtime suspend, by incrementing
 * device's usage count.
 *
 * Return: status
 */
QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock);

/**
 * qdf_runtime_pm_prevent_suspend_sync() - Synchronized Prevent Runtime suspend
 * @lock: runtime PM lock
 *
 * This function will prevent runtime suspend by incrementing the
 * device's usage count and waiting until the system is in the resumed state.
 *
 * Return: status
 */
QDF_STATUS qdf_runtime_pm_prevent_suspend_sync(qdf_runtime_lock_t *lock);

/**
 * qdf_runtime_pm_allow_suspend() - Allow Runtime suspend
 * @lock: runtime PM lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count.
 *
 * Return: status
 */
QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock);
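
/*
 * Example (illustrative sketch only; scan_rtpm_lock is hypothetical): keeping
 * the bus awake for the duration of an operation with a runtime PM lock.
 *
 *	static qdf_runtime_lock_t scan_rtpm_lock;
 *
 *	qdf_runtime_lock_init(&scan_rtpm_lock);
 *	...
 *	qdf_runtime_pm_prevent_suspend(&scan_rtpm_lock);
 *	(perform the work that requires the bus to stay awake)
 *	qdf_runtime_pm_allow_suspend(&scan_rtpm_lock);
 *	...
 *	qdf_runtime_lock_deinit(&scan_rtpm_lock);
 */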

/**
 * qdf_rtpm_sync_resume() - Invoke synchronous runtime resume.
 *
 * This function will invoke synchronous runtime resume.
 *
 * Return: Success if state is ON
 */
QDF_STATUS qdf_rtpm_sync_resume(void);

#else
static inline
QDF_STATUS qdf_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS qdf_rtpm_deregister(uint32_t id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock)
{
}

static inline
QDF_STATUS qdf_rtpm_get(uint8_t type, uint32_t id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS qdf_rtpm_put(uint8_t type, uint32_t id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS qdf_runtime_pm_prevent_suspend_sync(qdf_runtime_lock_t *lock)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS qdf_rtpm_sync_resume(void)
{
	return QDF_STATUS_SUCCESS;
}

#endif /* FEATURE_RUNTIME_PM */

#endif /* _QDF_LOCK_H */