xref: /wlan-dirver/qca-wifi-host-cmn/qdf/inc/qdf_lock.h (revision 8b7e2ee3720101d16dde046b0345f866abb7a5d8)
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * @file qdf_lock.h
21  * This file abstracts locking operations.
22  */
23 
24 #ifndef _QDF_LOCK_H
25 #define _QDF_LOCK_H
26 
27 #include <qdf_types.h>
28 #include <qdf_mem.h>
29 #include <qdf_time.h>
30 #include <i_qdf_trace.h>
31 
32 #ifndef QDF_LOCK_STATS
33 #define QDF_LOCK_STATS 0
34 #endif
35 #ifndef QDF_LOCK_STATS_DESTROY_PRINT
36 #define QDF_LOCK_STATS_DESTROY_PRINT 0
37 #endif
38 #ifndef QDF_LOCK_STATS_BUG_ON
39 #define QDF_LOCK_STATS_BUG_ON 0
40 #endif
41 #ifndef QDF_LOCK_STATS_LIST
42 #define QDF_LOCK_STATS_LIST 0
43 #endif
44 
/*
 * Max hold time in micro seconds, 0 to disable detection.
 * Compared against the measured hold time in BEFORE_UNLOCK() when
 * QDF_LOCK_STATS_BUG_ON is enabled.
 * NOTE(review): "ALOWED" is a long-standing typo, but these names are
 * public API of this header; renaming would break existing users.
 */
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ         10000
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH        1000000
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK                 0
49 
#if !QDF_LOCK_STATS
/*
 * Lock statistics compiled out: the stats struct is empty and every
 * instrumentation hook below expands to nothing, so the wrappers in this
 * file reduce to plain calls into the platform __qdf_* primitives.
 */
struct lock_stats {};
#define BEFORE_LOCK(x...) do {} while (0)
#define AFTER_LOCK(x...) do {} while (0)
#define BEFORE_TRYLOCK(x...) do {} while (0)
#define AFTER_TRYLOCK(x...) do {} while (0)
#define BEFORE_UNLOCK(x...) do {} while (0)
#define qdf_lock_stats_create(x...) do {} while (0)
#define qdf_lock_stats_destroy(x...) do {} while (0)
#define qdf_lock_stats_init(x...) do {} while (0)
#define qdf_lock_stats_deinit(x...) do {} while (0)
#else
61 #else
void qdf_lock_stats_init(void);
void qdf_lock_stats_deinit(void);
struct qdf_lock_cookie;
/**
 * struct lock_stats - per-lock contention and hold-time accounting
 *
 * All time values are in qdf log-timestamp units (convert with
 * qdf_log_timestamp_to_usecs()).
 */
struct lock_stats {
	const char *initialization_fn; /* __func__ of the lock creator */
	const char *acquired_by;       /* __func__ of the current/last owner; NULL after unlock */
	int line;                      /* line number where the lock was created */
	int acquired;                  /* total successful acquisitions */
	int contended;                 /* acquisitions that found the lock already held */
	uint64_t contention_time;      /* total wait time on contended acquisitions */
	uint64_t non_contention_time;  /* total acquisition time when uncontended */
	uint64_t held_time;            /* total time the lock was held */
	uint64_t last_acquired;        /* timestamp of the most recent acquisition */
	uint64_t max_contention_wait;  /* worst single wait for the lock */
	uint64_t max_held_time;        /* worst single hold duration */
	int num_large_contentions;     /* waits exceeding LARGE_CONTENTION */
	int num_large_holds;           /* holds exceeding LARGE_CONTENTION */
	struct qdf_lock_cookie *cookie; /* tracking cookie when QDF_LOCK_STATS_LIST is set */
};
/* "large" wait/hold threshold: 10 microseconds worth of timestamp cycles */
#define LARGE_CONTENTION QDF_LOG_TIMESTAMP_CYCLES_PER_10_US
82 
/*
 * BEFORE_LOCK() - start timing a lock acquisition.
 * @lock: lock wrapper containing a "stats" member
 * @was_locked: whether the lock was already held when we started waiting
 *
 * Deliberately opens a "do {" scope that the matching AFTER_LOCK() closes
 * with "} while (0)"; the two macros must always be used as a pair in the
 * same function so the timing locals declared here are visible to
 * AFTER_LOCK().  The trailing "do {} while (0)" merely swallows the
 * semicolon the caller writes after BEFORE_LOCK().
 */
#define BEFORE_LOCK(lock, was_locked) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time;  \
	bool BEFORE_LOCK_is_locked = was_locked; \
	BEFORE_LOCK_time = qdf_get_log_timestamp(); \
	do {} while (0)


/*
 * AFTER_LOCK() - record acquisition statistics; closes the scope opened
 * by BEFORE_LOCK().
 * @lock: same lock passed to BEFORE_LOCK()
 * @func: name of the acquiring function (stored in stats.acquired_by)
 *
 * Bumps the acquired counter, stamps last_acquired, splits the elapsed
 * wait into the contended/uncontended accumulators based on the
 * was_locked snapshot, and tracks the largest and "large"
 * (> LARGE_CONTENTION) waits.
 */
#define AFTER_LOCK(lock, func) \
	lock->stats.acquired_by = func; \
	AFTER_LOCK_time = qdf_get_log_timestamp(); \
	lock->stats.acquired++; \
	lock->stats.last_acquired = AFTER_LOCK_time; \
	if (BEFORE_LOCK_is_locked) { \
		lock->stats.contended++; \
		lock->stats.contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} else { \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > LARGE_CONTENTION) \
		lock->stats.num_large_contentions++; \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > \
	    lock->stats.max_contention_wait) \
		lock->stats.max_contention_wait = \
			AFTER_LOCK_time - BEFORE_LOCK_time; \
} while (0)
114 
/*
 * BEFORE_TRYLOCK() - start timing a non-blocking lock attempt.
 * @lock: lock wrapper containing a "stats" member
 *
 * Like BEFORE_LOCK(), opens a scope that AFTER_TRYLOCK() must close;
 * always use the two as a pair in the same function.
 */
#define BEFORE_TRYLOCK(lock) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time;  \
	BEFORE_LOCK_time = qdf_get_log_timestamp(); \
	do {} while (0)

/*
 * AFTER_TRYLOCK() - record trylock statistics; closes the scope opened
 * by BEFORE_TRYLOCK().
 * @lock: same lock passed to BEFORE_TRYLOCK()
 * @trylock_return: nonzero if the trylock succeeded
 * @func: name of the acquiring function
 *
 * Stats are updated only when the lock was actually taken; a failed
 * attempt leaves the counters untouched.
 */
#define AFTER_TRYLOCK(lock, trylock_return, func) \
	AFTER_LOCK_time = qdf_get_log_timestamp(); \
	if (trylock_return) { \
		lock->stats.acquired++; \
		lock->stats.last_acquired = AFTER_LOCK_time; \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
		lock->stats.acquired_by = func; \
	} \
} while (0)
132 
/*
 * BEFORE_UNLOCK() - account for the time the lock was held.
 * @lock: lock wrapper containing a "stats" member
 * @max_hold_time: hold-time limit in microseconds, 0 disables the check
 *
 * Accumulates held_time from stats.last_acquired, tracks the worst and
 * "large" (> LARGE_CONTENTION) holds, and, when QDF_LOCK_STATS_BUG_ON is
 * enabled and the limit was exceeded, logs an error and triggers
 * QDF_BUG().  Clears acquired_by since the lock is about to be released.
 * Unlike BEFORE_LOCK/AFTER_LOCK this macro is self-contained.
 */
#define BEFORE_UNLOCK(lock, max_hold_time) \
do {\
	uint64_t held_time = qdf_get_log_timestamp() - \
		lock->stats.last_acquired; \
	lock->stats.held_time += held_time; \
\
	if (held_time > lock->stats.max_held_time) \
		lock->stats.max_held_time = held_time; \
\
	if (held_time > LARGE_CONTENTION) \
		lock->stats.num_large_holds++; \
	if (QDF_LOCK_STATS_BUG_ON && max_hold_time && \
	    held_time > qdf_usecs_to_log_timestamp(max_hold_time)) { \
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, \
			"BEFORE_UNLOCK: lock held too long (%lluus)", \
			qdf_log_timestamp_to_usecs(held_time)); \
		QDF_BUG(0); \
	} \
	lock->stats.acquired_by = NULL; \
} while (0)
154 
/* cookie bookkeeping, used only when QDF_LOCK_STATS_LIST is enabled */
void qdf_lock_stats_cookie_destroy(struct lock_stats *stats);
void qdf_lock_stats_cookie_create(struct lock_stats *stats,
				  const char *func, int line);

/**
 * qdf_lock_stats_destroy() - tear down a lock's statistics
 * @stats: stats structure being destroyed
 *
 * Optionally (QDF_LOCK_STATS_DESTROY_PRINT) dumps the accumulated
 * counters, converted to microseconds, then releases the tracking cookie
 * when QDF_LOCK_STATS_LIST is enabled.
 */
static inline void qdf_lock_stats_destroy(struct lock_stats *stats)
{
	if (QDF_LOCK_STATS_DESTROY_PRINT) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
			"%s: lock: %s %d \t"
			"acquired:\t%d\tcontended:\t%d\t"
			"contention_time\t%llu\tmax_contention_wait:\t%llu\t"
			"non_contention_time\t%llu\t"
			"held_time\t%llu\tmax_held:\t%llu"
			, __func__, stats->initialization_fn, stats->line,
			stats->acquired, stats->contended,
			qdf_log_timestamp_to_usecs(stats->contention_time),
			qdf_log_timestamp_to_usecs(stats->max_contention_wait),
			qdf_log_timestamp_to_usecs(stats->non_contention_time),
			qdf_log_timestamp_to_usecs(stats->held_time),
			qdf_log_timestamp_to_usecs(stats->max_held_time));
	}

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_destroy(stats);
}
180 
#ifndef MEMORY_DEBUG
/* without MEMORY_DEBUG, the debug allocator collapses to plain malloc */
#define qdf_mem_malloc_debug(x, y, z) qdf_mem_malloc(x)
#endif

/**
 * qdf_lock_stats_create() - initialize the lock stats structure
 * @stats: stats structure to zero and initialize
 * @func: name of the function creating the lock
 * @line: line number at which the lock is created
 *
 * Registers a tracking cookie as well when QDF_LOCK_STATS_LIST is
 * enabled.
 */
static inline void qdf_lock_stats_create(struct lock_stats *stats,
					 const char *func, int line)
{
	qdf_mem_zero(stats, sizeof(*stats));
	stats->initialization_fn = func;
	stats->line = line;

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_create(stats, func, line);
}
198 #endif
199 
200 #include <i_qdf_lock.h>
201 
/*
 * Wake-lock event values: default timeout of 0 plus taken/released
 * markers (presumably consumed when reporting wake-lock activity —
 * verify against the qdf_wake_lock_* callers).
 */
#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1
205 
/**
 * qdf_semaphore_acquire_timeout() - try to take a semaphore, bounded wait
 * @m: semaphore to take
 * @timeout: maximum time to try to take the semaphore
 *
 * Return: result of the underlying platform acquire (see
 *         __qdf_semaphore_acquire_timeout())
 */
static inline int qdf_semaphore_acquire_timeout(struct semaphore *m,
						unsigned long timeout)
{
	int status;

	status = __qdf_semaphore_acquire_timeout(m, timeout);

	return status;
}
217 
/**
 * struct qdf_spinlock - platform spinlock plus lock statistics
 * @lock: underlying platform spinlock
 * @stats: contention/hold accounting (empty struct unless QDF_LOCK_STATS)
 */
struct qdf_spinlock {
	__qdf_spinlock_t lock;
	struct lock_stats stats;
};

/**
 * @brief Platform spinlock object
 */
typedef struct qdf_spinlock qdf_spinlock_t;


/**
 * @brief Platform semaphore and mutex objects
 */
typedef __qdf_semaphore_t qdf_semaphore_t;
typedef __qdf_mutex_t qdf_mutex_t;
234 
/* function Declaration */

/**
 * qdf_mutex_create() - initialize a mutex
 * @m: mutex to initialize
 * @func: caller name, supplied by the wrapper macro below
 * @line: caller line, supplied by the wrapper macro below
 * Return: QDF_STATUS
 */
QDF_STATUS qdf_mutex_create(qdf_mutex_t *m, const char *func, int line);
#define qdf_mutex_create(m) qdf_mutex_create(m, __func__, __LINE__)

/* take the mutex, blocking */
QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *m);

/* release a previously acquired mutex */
QDF_STATUS qdf_mutex_release(qdf_mutex_t *m);

/* destroy a mutex created with qdf_mutex_create() */
QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock);
244 
/**
 * qdf_spinlock_create - Initialize a spinlock
 * @lock: spinlock object pointer
 * @func: caller name, supplied by the wrapper macro below
 * @line: caller line, supplied by the wrapper macro below
 * Return: none
 */
static inline void qdf_spinlock_create(qdf_spinlock_t *lock, const char *func,
				       int line)
{
	__qdf_spinlock_create(&lock->lock);

	/* spinlock stats create relies on the spinlock already working */
	qdf_lock_stats_create(&lock->stats, func, line);
}

#define qdf_spinlock_create(x) qdf_spinlock_create(x, __func__, __LINE__)
260 
/**
 * qdf_spinlock_destroy - Delete a spinlock
 * @lock: spinlock object pointer
 * Return: none
 *
 * Tears down the statistics before the spinlock itself, mirroring the
 * creation order in qdf_spinlock_create().
 */
static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock)
{
	qdf_lock_stats_destroy(&lock->stats);
	__qdf_spinlock_destroy(&lock->lock);
}
271 
272 /**
273  * qdf_spin_is_locked() - check if the spinlock is locked
274  * @lock: spinlock object
275  *
276  * Return: nonzero if lock is held.
277  */
278 static inline int qdf_spin_is_locked(qdf_spinlock_t *lock)
279 {
280 	return __qdf_spin_is_locked(&lock->lock);
281 }
282 
283 /**
284  * qdf_spin_trylock_bh() - spin trylock bottomhalf
285  * @lock: spinlock object
286  *
287  * Return: nonzero if lock is acquired
288  */
289 static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock, const char *func)
290 {
291 	int trylock_return;
292 
293 	BEFORE_TRYLOCK(lock);
294 	trylock_return = __qdf_spin_trylock_bh(&lock->lock);
295 	AFTER_TRYLOCK(lock, trylock_return, func);
296 
297 	return trylock_return;
298 }
299 #define qdf_spin_trylock_bh(lock) qdf_spin_trylock_bh(lock, __func__)
300 
301 int qdf_spin_trylock_bh_outline(qdf_spinlock_t *lock);
302 
303 /**
304  * qdf_spin_trylock() - spin trylock
305  * @lock: spinlock object
306  * Return: int
307  */
308 static inline int qdf_spin_trylock(qdf_spinlock_t *lock, const char *func)
309 {
310 	int result = 0;
311 
312 	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
313 	result = __qdf_spin_trylock(&lock->lock);
314 	AFTER_LOCK(lock, func);
315 
316 	return result;
317 }
318 
319 #define qdf_spin_trylock(lock) qdf_spin_trylock(lock, __func__)
320 
/**
 * qdf_spin_lock_bh() - locks the spinlock mutex in soft irq context
 * @lock: spinlock object pointer
 * @func: calling function name, recorded in the lock statistics
 * Return: none
 *
 * BEFORE_LOCK/AFTER_LOCK are a paired macro construct sharing one scope;
 * they surround the actual platform lock call to time the acquisition.
 */
static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_bh(&lock->lock);
	AFTER_LOCK(lock, func);
}

#define qdf_spin_lock_bh(lock) qdf_spin_lock_bh(lock, __func__)
334 
335 void qdf_spin_lock_bh_outline(qdf_spinlock_t *lock);
336 
/**
 * qdf_spin_unlock_bh() - unlocks the spinlock mutex in soft irq context
 * @lock: spinlock object pointer
 * Return: none
 *
 * Records hold-time stats (and optionally bug-checks holds longer than
 * QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH) before releasing.
 */
static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH);
	__qdf_spin_unlock_bh(&lock->lock);
}
347 
348 void qdf_spin_unlock_bh_outline(qdf_spinlock_t *lock);
349 
/**
 * qdf_spinlock_irq_exec - Execute the input function with spinlock held
 * and interrupt disabled.
 * @hdl: OS handle
 * @lock: spinlock to be held for the critical region
 * @func: critical region function that to be executed
 * @arg: context argument passed through to the critical region function
 * Return: Boolean status returned by the critical region function
 *
 * Note: bypasses the lock-statistics hooks; only the platform helper is
 * invoked here.
 */
static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl,
					 qdf_spinlock_t *lock,
					 qdf_irqlocked_func_t func, void *arg)
{
	return __qdf_spinlock_irq_exec(hdl, &lock->lock, func, arg);
}
365 
/**
 * qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
 * @lock: Lock object
 * @func: calling function name, recorded in the lock statistics
 *
 * Return: none
 */
static inline void qdf_spin_lock(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock(&lock->lock);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock(lock) qdf_spin_lock(lock, __func__)
379 
/**
 * qdf_spin_unlock() - Unlock the spinlock and enables the Preemption
 * @lock: Lock object
 *
 * Return: none
 *
 * Hold-time bug check is disabled here (limit constant is 0).
 */
static inline void qdf_spin_unlock(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK);
	__qdf_spin_unlock(&lock->lock);
}
391 
/**
 * qdf_spin_lock_irq() - Acquire a Spinlock(SMP) & save the irq state
 * @lock: Lock object
 * @flags: flags
 * @func: calling function name, recorded in the lock statistics
 *
 * Return: none
 *
 * NOTE(review): unlike the other wrappers this passes
 * &lock->lock.spinlock, so it assumes the platform __qdf_spinlock_t
 * exposes a .spinlock member — confirm against i_qdf_lock.h.
 */
static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags,
				     const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irq(&lock->lock.spinlock, flags);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock_irq(lock, flags) qdf_spin_lock_irq(lock, flags, __func__)
407 
/**
 * qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
 * (Preemptive) and disable IRQs
 * @lock: Lock object
 * @func: calling function name, recorded in the lock statistics
 *
 * Return: none
 */
static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irqsave(&lock->lock);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock_irqsave(lock) qdf_spin_lock_irqsave(lock, __func__)
422 
/**
 * qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
 * Preemption and enable IRQ
 * @lock: Lock object
 *
 * Return: none
 *
 * Bug-checks (when enabled) holds longer than
 * QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ before releasing.
 */
static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irqrestore(&lock->lock);
}
435 
/**
 * qdf_spin_unlock_irq() - Unlock a Spinlock(SMP) & restore the irq state
 * @lock: Lock object
 * @flags: flags
 *
 * Return: none
 *
 * Counterpart of qdf_spin_lock_irq(); like it, reaches into
 * lock->lock.spinlock.
 */
static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock,
				       unsigned long flags)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irq(&lock->lock.spinlock, flags);
}
449 
/**
 * qdf_semaphore_init() - initialize a semaphore
 * @m: Semaphore to initialize
 * Return: None
 */
static inline void qdf_semaphore_init(qdf_semaphore_t *m)
{
	__qdf_semaphore_init(m);
}
459 
460 /**
461  * qdf_semaphore_acquire() - take the semaphore
462  * @m: Semaphore to take
463  * Return: int
464  */
465 static inline int qdf_semaphore_acquire(qdf_semaphore_t *m)
466 {
467 	return __qdf_semaphore_acquire(m);
468 }
469 
/**
 * qdf_semaphore_release() - give the semaphore
 * @m: Semaphore to give
 * Return: None
 */
static inline void qdf_semaphore_release(qdf_semaphore_t *m)
{
	__qdf_semaphore_release(m);
}
479 
/**
 * qdf_semaphore_acquire_intr - Take the semaphore, interruptible version
 * @m: semaphore to take
 * Return: result of the underlying platform acquire; nonzero presumably
 *         means the wait was interrupted — verify against
 *         __qdf_semaphore_acquire_intr()
 */
static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire_intr(m);
}
490 
/* Wake locks: named references that keep the system awake while held */
QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name);

QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason);

const char *qdf_wake_lock_name(qdf_wake_lock_t *lock);
/* acquire with an automatic release after @msec milliseconds */
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock,
					 uint32_t msec);

QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason);

QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock);

/* Runtime PM: get/put references and suspend prevent/allow votes */
QDF_STATUS qdf_runtime_pm_get(void);
QDF_STATUS qdf_runtime_pm_put(void);
QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock);
QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock);

QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);

/* names the runtime lock after the variable it is stored in */
#define qdf_runtime_lock_init(lock) __qdf_runtime_lock_init(lock, #lock)

void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock);

/* out-of-line spinlock acquire/release returning QDF_STATUS */
QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock);

QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock);
517 #endif /* _QDF_LOCK_H */
518