xref: /wlan-dirver/qca-wifi-host-cmn/qdf/inc/qdf_lock.h (revision bea437e2293c3d4fb1b5704fcf633aedac996962)
1 /*
2  * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * @file qdf_lock.h
21  * This file abstracts locking operations.
22  */
23 
24 #ifndef _QDF_LOCK_H
25 #define _QDF_LOCK_H
26 
27 #include <qdf_types.h>
28 #include <qdf_mem.h>
29 #include <qdf_time.h>
30 #include <i_qdf_trace.h>
31 
/*
 * Lock-statistics build knobs.  Each one defaults to disabled (0) and may
 * be overridden by the build before this header is included.
 */
#ifndef QDF_LOCK_STATS
#define QDF_LOCK_STATS 0
#endif
/* When set, qdf_lock_stats_destroy() logs the accumulated counters */
#ifndef QDF_LOCK_STATS_DESTROY_PRINT
#define QDF_LOCK_STATS_DESTROY_PRINT 0
#endif
/* When set, QDF_BUG() if a lock is held longer than its max hold time */
#ifndef QDF_LOCK_STATS_BUG_ON
#define QDF_LOCK_STATS_BUG_ON 0
#endif
/* When set, create/destroy a debug cookie for each lock's stats */
#ifndef QDF_LOCK_STATS_LIST
#define QDF_LOCK_STATS_LIST 0
#endif

/* Max hold time in micro seconds, 0 to disable detection */
/* NOTE(review): "ALOWED" is a typo for "ALLOWED", but these names are
 * public API used by callers elsewhere, so they cannot be renamed here.
 */
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ         10000
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH        1000000
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK                 0
49 
#if !QDF_LOCK_STATS
/*
 * Stats disabled: provide an empty stats struct and no-op macros so that
 * all instrumentation at the lock call sites compiles away to nothing.
 */
struct lock_stats {};
#define BEFORE_LOCK(x...) do {} while (0)
#define AFTER_LOCK(x...) do {} while (0)
#define BEFORE_TRYLOCK(x...) do {} while (0)
#define AFTER_TRYLOCK(x...) do {} while (0)
#define BEFORE_UNLOCK(x...) do {} while (0)
#define qdf_lock_stats_create(x...) do {} while (0)
#define qdf_lock_stats_destroy(x...) do {} while (0)
#define qdf_lock_stats_init(x...) do {} while (0)
#define qdf_lock_stats_deinit(x...) do {} while (0)
#else
61 #else
62 void qdf_lock_stats_init(void);
63 void qdf_lock_stats_deinit(void);
64 struct qdf_lock_cookie;
65 struct lock_stats {
66 	const char *initialization_fn;
67 	const char *acquired_by;
68 	int line;
69 	int acquired;
70 	int contended;
71 	uint64_t contention_time;
72 	uint64_t non_contention_time;
73 	uint64_t held_time;
74 	uint64_t last_acquired;
75 	uint64_t max_contention_wait;
76 	uint64_t max_held_time;
77 	int num_large_contentions;
78 	int num_large_holds;
79 	struct qdf_lock_cookie *cookie;
80 };
81 #define LARGE_CONTENTION QDF_LOG_TIMESTAMP_CYCLES_PER_10_US
82 
/*
 * BEFORE_LOCK/AFTER_LOCK must be used as a pair around a blocking lock
 * primitive.  BEFORE_LOCK deliberately opens a do { block that AFTER_LOCK
 * closes, so the timing locals remain in scope across both macros; the
 * inner "do {} while (0)" consumes the semicolon at the invocation site.
 * @was_locked is sampled before acquiring so AFTER_LOCK can classify the
 * acquisition as contended or not.
 */
#define BEFORE_LOCK(lock, was_locked) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time;  \
	bool BEFORE_LOCK_is_locked = was_locked; \
	BEFORE_LOCK_time = qdf_get_log_timestamp(); \
	do {} while (0)


/*
 * AFTER_LOCK - closes the block opened by BEFORE_LOCK and updates the
 * acquisition, contention-time and max-wait statistics for @lock.
 * @func is recorded as the current holder.
 */
#define AFTER_LOCK(lock, func) \
	lock->stats.acquired_by = func; \
	AFTER_LOCK_time = qdf_get_log_timestamp(); \
	lock->stats.acquired++; \
	lock->stats.last_acquired = AFTER_LOCK_time; \
	if (BEFORE_LOCK_is_locked) { \
		lock->stats.contended++; \
		lock->stats.contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} else { \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > LARGE_CONTENTION) \
		lock->stats.num_large_contentions++; \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > \
	    lock->stats.max_contention_wait) \
		lock->stats.max_contention_wait = \
			AFTER_LOCK_time - BEFORE_LOCK_time; \
} while (0)
114 
/*
 * BEFORE_TRYLOCK/AFTER_TRYLOCK must be used as a pair around a trylock
 * primitive.  Like BEFORE_LOCK, BEFORE_TRYLOCK opens a do { block that
 * AFTER_TRYLOCK closes.  Stats are only updated when @trylock_return
 * reports success, so a failed attempt is not counted as an acquisition.
 */
#define BEFORE_TRYLOCK(lock) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time;  \
	BEFORE_LOCK_time = qdf_get_log_timestamp(); \
	do {} while (0)

#define AFTER_TRYLOCK(lock, trylock_return, func) \
	AFTER_LOCK_time = qdf_get_log_timestamp(); \
	if (trylock_return) { \
		lock->stats.acquired++; \
		lock->stats.last_acquired = AFTER_LOCK_time; \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
		lock->stats.acquired_by = func; \
	} \
} while (0)
132 
/*
 * BEFORE_UNLOCK - invoked just before releasing a lock; accumulates the
 * held time, tracks the maximum hold and "large" holds, and clears the
 * holder.  If QDF_LOCK_STATS_BUG_ON is set and the lock was held longer
 * than @max_hold_time (in microseconds; 0 disables the check), logs an
 * error and triggers QDF_BUG().  Self-contained do/while, unlike the
 * BEFORE_LOCK/AFTER_LOCK pair.
 */
#define BEFORE_UNLOCK(lock, max_hold_time) \
do {\
	uint64_t held_time = qdf_get_log_timestamp() - \
		lock->stats.last_acquired; \
	lock->stats.held_time += held_time; \
\
	if (held_time > lock->stats.max_held_time) \
		lock->stats.max_held_time = held_time; \
\
	if (held_time > LARGE_CONTENTION) \
		lock->stats.num_large_holds++; \
	if (QDF_LOCK_STATS_BUG_ON && max_hold_time && \
	    held_time > qdf_usecs_to_log_timestamp(max_hold_time)) { \
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, \
			"BEFORE_UNLOCK: lock held too long (%lluus)", \
			qdf_log_timestamp_to_usecs(held_time)); \
		QDF_BUG(0); \
	} \
	lock->stats.acquired_by = NULL; \
} while (0)
154 
/* cookie bookkeeping, implemented out of line; only invoked when
 * QDF_LOCK_STATS_LIST is enabled
 */
void qdf_lock_stats_cookie_destroy(struct lock_stats *stats);
void qdf_lock_stats_cookie_create(struct lock_stats *stats,
				  const char *func, int line);

/**
 * qdf_lock_stats_destroy() - tear down a lock's statistics
 * @stats: stats structure embedded in the lock being destroyed
 *
 * When QDF_LOCK_STATS_DESTROY_PRINT is set, logs the accumulated
 * counters (times converted to microseconds); when QDF_LOCK_STATS_LIST
 * is set, releases the lock's debug cookie.
 *
 * Return: none
 */
static inline void qdf_lock_stats_destroy(struct lock_stats *stats)
{
	if (QDF_LOCK_STATS_DESTROY_PRINT) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
			"%s: lock: %s %d \t"
			"acquired:\t%d\tcontended:\t%d\t"
			"contention_time\t%llu\tmax_contention_wait:\t%llu\t"
			"non_contention_time\t%llu\t"
			"held_time\t%llu\tmax_held:\t%llu"
			, __func__, stats->initialization_fn, stats->line,
			stats->acquired, stats->contended,
			qdf_log_timestamp_to_usecs(stats->contention_time),
			qdf_log_timestamp_to_usecs(stats->max_contention_wait),
			qdf_log_timestamp_to_usecs(stats->non_contention_time),
			qdf_log_timestamp_to_usecs(stats->held_time),
			qdf_log_timestamp_to_usecs(stats->max_held_time));
	}

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_destroy(stats);
}
180 
/* without MEMORY_DEBUG, map the debug allocator onto plain qdf_mem_malloc */
#ifndef MEMORY_DEBUG
#define qdf_mem_malloc_debug(x, y, z) qdf_mem_malloc(x)
#endif

/**
 * qdf_lock_stats_create() - initialize the lock stats structure
 * @stats: stats structure embedded in the lock being created
 * @func: name of the function creating the lock
 * @line: line number at which the lock is created
 *
 * Zeroes all counters and records the creation site; also registers a
 * debug cookie when QDF_LOCK_STATS_LIST is enabled.
 *
 * Return: none
 */
static inline void qdf_lock_stats_create(struct lock_stats *stats,
					 const char *func, int line)
{
	qdf_mem_zero(stats, sizeof(*stats));
	stats->initialization_fn = func;
	stats->line = line;

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_create(stats, func, line);
}
#endif
199 
200 #include <i_qdf_lock.h>
201 
/* wakelock timeout/event values; presumably consumed by the
 * qdf_wake_lock_* reporting path — semantics not visible in this header
 */
#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1
205 
/**
 * qdf_semaphore_acquire_timeout() - Take the semaphore before timeout
 * @m: semaphore to take
 * @timeout: maximum time to try to take the semaphore
 *
 * NOTE(review): this takes a raw `struct semaphore *` while the sibling
 * qdf_semaphore_* APIs below take `qdf_semaphore_t *`; they are likely the
 * same type on Linux builds, but confirm before unifying the signature.
 *
 * Return: int
 */
static inline int qdf_semaphore_acquire_timeout(struct semaphore *m,
						unsigned long timeout)
{
	return __qdf_semaphore_acquire_timeout(m, timeout);
}
217 
/**
 * struct qdf_spinlock - platform spinlock with optional statistics
 * @lock: underlying platform spinlock
 * @stats: contention/hold statistics (empty when QDF_LOCK_STATS is 0)
 */
struct qdf_spinlock {
	__qdf_spinlock_t lock;
	struct lock_stats stats;
};

/**
 * @brief Platform spinlock object
 */
typedef struct qdf_spinlock qdf_spinlock_t;
227 

/**
 * @brief Platform semaphore and mutex objects
 */
typedef __qdf_semaphore_t qdf_semaphore_t;
typedef __qdf_mutex_t qdf_mutex_t;

/* function declarations */

/* initialize a mutex, recording the creation site for diagnostics; the
 * macro below supplies __func__/__LINE__ automatically at call sites
 */
QDF_STATUS qdf_mutex_create(qdf_mutex_t *m, const char *func, int line);
#define qdf_mutex_create(m) qdf_mutex_create(m, __func__, __LINE__)

/* acquire the mutex (may block) */
QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *m);

/* release a previously acquired mutex */
QDF_STATUS qdf_mutex_release(qdf_mutex_t *m);

/* destroy the mutex */
QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock);
244 
/**
 * qdf_spinlock_create - Initialize a spinlock
 * @lock: spinlock object pointer
 * @func: caller function name, recorded in the lock stats
 * @line: caller line number, recorded in the lock stats
 *
 * The macro below supplies __func__/__LINE__ automatically at call sites.
 *
 * Return: none
 */
static inline void qdf_spinlock_create(qdf_spinlock_t *lock, const char *func,
				       int line)
{
	__qdf_spinlock_create(&lock->lock);

	/* spinlock stats create relies on the spinlock working already */
	qdf_lock_stats_create(&lock->stats, func, line);
}

#define qdf_spinlock_create(x) qdf_spinlock_create(x, __func__, __LINE__)
260 
/**
 * qdf_spinlock_destroy - Delete a spinlock
 * @lock: spinlock object pointer
 *
 * Tears down the stats first (which may still log/use the lock state),
 * then destroys the underlying platform spinlock.
 *
 * Return: none
 */
static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock)
{
	qdf_lock_stats_destroy(&lock->stats);
	__qdf_spinlock_destroy(&lock->lock);
}
271 
/**
 * qdf_spin_is_locked() - check if the spinlock is locked
 * @lock: spinlock object
 *
 * Also used internally by the lock wrappers to sample contention state
 * before acquiring.
 *
 * Return: nonzero if lock is held.
 */
static inline int qdf_spin_is_locked(qdf_spinlock_t *lock)
{
	return __qdf_spin_is_locked(&lock->lock);
}
282 
/**
 * qdf_spin_trylock_bh() - spin trylock bottomhalf
 * @lock: spinlock object
 * @func: caller name, recorded in the lock stats on success
 *
 * The macro below supplies __func__ automatically at call sites.
 *
 * Return: nonzero if lock is acquired
 */
static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock, const char *func)
{
	int trylock_return;

	/* TRYLOCK stats macros only count the attempt on success */
	BEFORE_TRYLOCK(lock);
	trylock_return = __qdf_spin_trylock_bh(&lock->lock);
	AFTER_TRYLOCK(lock, trylock_return, func);

	return trylock_return;
}
#define qdf_spin_trylock_bh(lock) qdf_spin_trylock_bh(lock, __func__)
300 
301 /**
302  * qdf_spin_trylock() - spin trylock
303  * @lock: spinlock object
304  * Return: int
305  */
306 static inline int qdf_spin_trylock(qdf_spinlock_t *lock, const char *func)
307 {
308 	int result = 0;
309 
310 	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
311 	result = __qdf_spin_trylock(&lock->lock);
312 	AFTER_LOCK(lock, func);
313 
314 	return result;
315 }
316 
317 #define qdf_spin_trylock(lock) qdf_spin_trylock(lock, __func__)
318 
/**
 * qdf_spin_lock_bh() - locks the spinlock mutex in soft irq context
 * @lock: spinlock object pointer
 * @func: caller name, recorded in the lock stats
 *
 * The macro below supplies __func__ automatically at call sites.
 *
 * Return: none
 */
static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_bh(&lock->lock);
	AFTER_LOCK(lock, func);
}

#define qdf_spin_lock_bh(lock) qdf_spin_lock_bh(lock, __func__)
332 
/**
 * qdf_spin_unlock_bh() - unlocks the spinlock mutex in soft irq context
 * @lock: spinlock object pointer
 *
 * Flags the hold as too long (per QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH)
 * in the stats before releasing.
 *
 * Return: none
 */
static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH);
	__qdf_spin_unlock_bh(&lock->lock);
}
343 
/**
 * qdf_spinlock_irq_exec - Execute the input function with spinlock held
 * and interrupt disabled.
 * @hdl: OS handle
 * @lock: spinlock to be held for the critical region
 * @func: critical region function that to be executed
 * @arg: context argument passed to the critical region function
 *
 * Note: bypasses the lock statistics instrumentation entirely.
 *
 * Return: Boolean status returned by the critical region function
 */
static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl,
					 qdf_spinlock_t *lock,
					 qdf_irqlocked_func_t func, void *arg)
{
	return __qdf_spinlock_irq_exec(hdl, &lock->lock, func, arg);
}
359 
/**
 * qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
 * @lock: Lock object
 * @func: caller name, recorded in the lock stats
 *
 * The macro below supplies __func__ automatically at call sites.
 *
 * Return: none
 */
static inline void qdf_spin_lock(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock(&lock->lock);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock(lock) qdf_spin_lock(lock, __func__)
373 
/**
 * qdf_spin_unlock() - Unlock the spinlock and enables the Preemption
 * @lock: Lock object
 *
 * Max-hold detection is disabled here (QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK
 * is 0).
 *
 * Return: none
 */
static inline void qdf_spin_unlock(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK);
	__qdf_spin_unlock(&lock->lock);
}
385 
/**
 * qdf_spin_lock_irq() - Acquire a Spinlock(SMP) & save the irq state
 * @lock: Lock object
 * @flags: location to save the irq state
 * @func: caller name, recorded in the lock stats
 *
 * NOTE(review): @flags is received by value here; this only saves state
 * correctly if __qdf_spin_lock_irq is a macro expanding in the caller's
 * scope — confirm against i_qdf_lock.h.
 *
 * The macro below supplies __func__ automatically at call sites.
 *
 * Return: none
 */
static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags,
				     const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irq(&lock->lock.spinlock, flags);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock_irq(lock, flags) qdf_spin_lock_irq(lock, flags, __func__)
401 
/**
 * qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
 * (Preemptive) and disable IRQs
 * @lock: Lock object
 * @func: caller name, recorded in the lock stats
 *
 * The macro below supplies __func__ automatically at call sites.
 *
 * Return: none
 */
static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irqsave(&lock->lock);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock_irqsave(lock) qdf_spin_lock_irqsave(lock, __func__)
416 
/**
 * qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
 * Preemption and enable IRQ
 * @lock: Lock object
 *
 * Flags the hold as too long (per QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ)
 * in the stats before releasing.
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irqrestore(&lock->lock);
}
429 
/**
 * qdf_spin_unlock_irq() - Unlock a Spinlock(SMP) & restore the irq state
 * @lock: Lock object
 * @flags: previously saved irq state to restore
 *
 * Counterpart of qdf_spin_lock_irq().
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock,
				       unsigned long flags)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irq(&lock->lock.spinlock, flags);
}
443 
/**
 * qdf_semaphore_init() - initialize a semaphore
 * @m: Semaphore to initialize
 *
 * Thin wrapper over the platform implementation; no stats tracking.
 *
 * Return: None
 */
static inline void qdf_semaphore_init(qdf_semaphore_t *m)
{
	__qdf_semaphore_init(m);
}
453 
/**
 * qdf_semaphore_acquire() - take the semaphore
 * @m: Semaphore to take
 *
 * Thin wrapper over the platform implementation; may block.
 *
 * Return: int
 */
static inline int qdf_semaphore_acquire(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire(m);
}
463 
/**
 * qdf_semaphore_release() - give the semaphore
 * @m: Semaphore to give
 *
 * Thin wrapper over the platform implementation.
 *
 * Return: None
 */
static inline void qdf_semaphore_release(qdf_semaphore_t *m)
{
	__qdf_semaphore_release(m);
}
473 
/**
 * qdf_semaphore_acquire_intr - Take the semaphore, interruptible version
 * @m: semaphore to take
 *
 * Return: int
 */
static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire_intr(m);
}
484 
/* create a wakelock with the given debug name */
QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name);

/* acquire a wakelock; @reason presumably one of the WIFI_POWER_EVENT_*
 * values — confirm against the implementation
 */
QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason);

/* return the debug name associated with the wakelock */
const char *qdf_wake_lock_name(qdf_wake_lock_t *lock);

/* acquire a wakelock that auto-releases after @msec milliseconds */
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock,
					 uint32_t msec);

/* release a previously acquired wakelock */
QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason);

/* destroy the wakelock */
QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock);

void qdf_pm_system_wakeup(void);

/* runtime power-management reference counting and suspend gating */
QDF_STATUS qdf_runtime_pm_get(void);
QDF_STATUS qdf_runtime_pm_put(void);
QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock);
QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock);

QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);

/* the macro stringifies the lock variable to use as its debug name */
#define qdf_runtime_lock_init(lock) __qdf_runtime_lock_init(lock, #lock)

void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock);

QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock);

QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock);
#endif /* _QDF_LOCK_H */
513 #endif /* _QDF_LOCK_H */
514