/* xref: /wlan-dirver/qca-wifi-host-cmn/qdf/inc/qdf_lock.h (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a) */
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * @file qdf_lock.h
21  * This file abstracts locking operations.
22  */
23 
24 #ifndef _QDF_LOCK_H
25 #define _QDF_LOCK_H
26 
27 #include <qdf_types.h>
28 #include <qdf_mem.h>
29 #include <qdf_time.h>
30 #include <i_qdf_trace.h>
31 
32 #ifndef QDF_LOCK_STATS
33 #define QDF_LOCK_STATS 0
34 #endif
35 #ifndef QDF_LOCK_STATS_DESTROY_PRINT
36 #define QDF_LOCK_STATS_DESTROY_PRINT 0
37 #endif
38 #ifndef QDF_LOCK_STATS_BUG_ON
39 #define QDF_LOCK_STATS_BUG_ON 0
40 #endif
41 #ifndef QDF_LOCK_STATS_LIST
42 #define QDF_LOCK_STATS_LIST 0
43 #endif
44 
/* Max hold time in microseconds, 0 to disable detection */
46 #define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ         10000
47 #define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH        1000000
48 #define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK                 0
49 
#if !QDF_LOCK_STATS
/*
 * Lock statistics disabled: provide an empty stats struct and compile
 * every instrumentation hook away to a no-op, so lock call sites need
 * no conditional compilation of their own.
 */
struct lock_stats {};
#define BEFORE_LOCK(x...) do {} while (0)
#define AFTER_LOCK(x...) do {} while (0)
#define BEFORE_TRYLOCK(x...) do {} while (0)
#define AFTER_TRYLOCK(x...) do {} while (0)
#define BEFORE_UNLOCK(x...) do {} while (0)
#define qdf_lock_stats_create(x...) do {} while (0)
#define qdf_lock_stats_destroy(x...) do {} while (0)
#define qdf_lock_stats_init(x...) do {} while (0)
#define qdf_lock_stats_deinit(x...) do {} while (0)
61 #else
void qdf_lock_stats_init(void);
void qdf_lock_stats_deinit(void);
/* Opaque linkage of a lock's stats into the global stats list
 * (only used when QDF_LOCK_STATS_LIST is enabled).
 */
struct qdf_lock_cookie;
/**
 * struct lock_stats - per-lock contention/hold-time accounting
 * @initialization_fn: name of the function that created the lock
 * @acquired_by: function currently holding the lock (NULL when free)
 * @line: source line where the lock was created
 * @acquired: total number of recorded acquisitions
 * @contended: acquisitions that found the lock already held
 * @contention_time: cumulative contended wait time (log-timestamp units)
 * @non_contention_time: cumulative uncontended acquisition time
 * @held_time: cumulative time the lock was held
 * @last_acquired: timestamp of the most recent acquisition
 * @max_contention_wait: longest single wait for the lock
 * @max_held_time: longest single hold of the lock
 * @num_large_contentions: waits exceeding LARGE_CONTENTION
 * @num_large_holds: holds exceeding LARGE_CONTENTION
 * @cookie: stats-list linkage (QDF_LOCK_STATS_LIST only)
 */
struct lock_stats {
	const char *initialization_fn;
	const char *acquired_by;
	int line;
	int acquired;
	int contended;
	uint64_t contention_time;
	uint64_t non_contention_time;
	uint64_t held_time;
	uint64_t last_acquired;
	uint64_t max_contention_wait;
	uint64_t max_held_time;
	int num_large_contentions;
	int num_large_holds;
	struct qdf_lock_cookie *cookie;
};
/* Threshold above which a wait or hold counts as "large":
 * 10 microseconds worth of log-timestamp cycles.
 */
#define LARGE_CONTENTION QDF_LOG_TIMESTAMP_CYCLES_PER_10_US
82 
/*
 * BEFORE_LOCK()/AFTER_LOCK() must be used as a pair in the same scope:
 * BEFORE_LOCK opens a do { block (the braces are deliberately
 * unbalanced here) which AFTER_LOCK closes with its } while (0).
 * BEFORE_LOCK samples a start timestamp and latches whether the lock
 * was already held; the trailing "do {} while (0)" merely swallows the
 * caller's semicolon.
 */
#define BEFORE_LOCK(lock, was_locked) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time;  \
	bool BEFORE_LOCK_is_locked = was_locked; \
	BEFORE_LOCK_time = qdf_get_log_timestamp(); \
	do {} while (0)


/*
 * AFTER_LOCK() - record the acquisition that just completed.
 * @func is remembered as the current owner; the time elapsed since
 * BEFORE_LOCK is charged to contention or non-contention stats, and
 * the large/maximum contention trackers are updated.
 */
#define AFTER_LOCK(lock, func) \
	lock->stats.acquired_by = func; \
	AFTER_LOCK_time = qdf_get_log_timestamp(); \
	lock->stats.acquired++; \
	lock->stats.last_acquired = AFTER_LOCK_time; \
	if (BEFORE_LOCK_is_locked) { \
		lock->stats.contended++; \
		lock->stats.contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} else { \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > LARGE_CONTENTION) \
		lock->stats.num_large_contentions++; \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > \
	    lock->stats.max_contention_wait) \
		lock->stats.max_contention_wait = \
			AFTER_LOCK_time - BEFORE_LOCK_time; \
} while (0)
114 
/*
 * BEFORE_TRYLOCK()/AFTER_TRYLOCK() are brace-paired like
 * BEFORE_LOCK/AFTER_LOCK: BEFORE_TRYLOCK opens the do { block and
 * AFTER_TRYLOCK closes it. Stats are only updated when the trylock
 * actually succeeded (@trylock_return nonzero).
 */
#define BEFORE_TRYLOCK(lock) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time;  \
	BEFORE_LOCK_time = qdf_get_log_timestamp(); \
	do {} while (0)

#define AFTER_TRYLOCK(lock, trylock_return, func) \
	AFTER_LOCK_time = qdf_get_log_timestamp(); \
	if (trylock_return) { \
		lock->stats.acquired++; \
		lock->stats.last_acquired = AFTER_LOCK_time; \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
		lock->stats.acquired_by = func; \
	} \
} while (0)
132 
/*
 * BEFORE_UNLOCK() - account the just-ended hold before releasing @lock
 * @max_hold_time: maximum allowed hold time in microseconds; 0 disables
 * the overlong-hold check.
 *
 * Unlike BEFORE_LOCK/AFTER_LOCK this macro is a self-contained
 * do/while block. If QDF_LOCK_STATS_BUG_ON is set and the hold
 * exceeded @max_hold_time, it warns and triggers QDF_BUG().
 */
#define BEFORE_UNLOCK(lock, max_hold_time) \
do {\
	uint64_t held_time = qdf_get_log_timestamp() - \
		lock->stats.last_acquired; \
	lock->stats.held_time += held_time; \
\
	if (held_time > lock->stats.max_held_time) \
		lock->stats.max_held_time = held_time; \
\
	if (held_time > LARGE_CONTENTION) \
		lock->stats.num_large_holds++; \
	if (QDF_LOCK_STATS_BUG_ON && max_hold_time && \
	    held_time > qdf_usecs_to_log_timestamp(max_hold_time)) { \
		qdf_warn("BEFORE_UNLOCK: lock held too long (%lluus)", \
		       qdf_log_timestamp_to_usecs(held_time)); \
		QDF_BUG(0); \
	} \
	lock->stats.acquired_by = NULL; \
} while (0)
153 
154 void qdf_lock_stats_cookie_destroy(struct lock_stats *stats);
155 void qdf_lock_stats_cookie_create(struct lock_stats *stats,
156 				  const char *func, int line);
157 
/**
 * qdf_lock_stats_destroy() - tear down a lock's stats bookkeeping
 * @stats: stats structure embedded in the lock being destroyed
 *
 * Optionally (QDF_LOCK_STATS_DESTROY_PRINT) dumps the accumulated
 * counters, then removes the stats-list cookie when
 * QDF_LOCK_STATS_LIST is enabled.
 */
static inline void qdf_lock_stats_destroy(struct lock_stats *stats)
{
	if (QDF_LOCK_STATS_DESTROY_PRINT) {
		qdf_debug("%s: lock: %s %d \t"
			"acquired:\t%d\tcontended:\t%d\t"
			"contention_time\t%llu\tmax_contention_wait:\t%llu\t"
			"non_contention_time\t%llu\t"
			"held_time\t%llu\tmax_held:\t%llu"
			, __func__, stats->initialization_fn, stats->line,
			stats->acquired, stats->contended,
			qdf_log_timestamp_to_usecs(stats->contention_time),
			qdf_log_timestamp_to_usecs(stats->max_contention_wait),
			qdf_log_timestamp_to_usecs(stats->non_contention_time),
			qdf_log_timestamp_to_usecs(stats->held_time),
			qdf_log_timestamp_to_usecs(stats->max_held_time));
	}

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_destroy(stats);
}
178 
/* When memory debugging is disabled, map the debug allocator onto the
 * plain allocator; the extra (func, line) arguments are dropped.
 */
#ifndef MEMORY_DEBUG
#define qdf_mem_malloc_debug(x, y, z) qdf_mem_malloc(x)
#endif
182 
/**
 * qdf_lock_stats_create() - initialize the lock stats structure
 * @stats: stats structure embedded in the lock
 * @func: name of the function creating the lock
 * @line: source line of the creation site
 */
static inline void qdf_lock_stats_create(struct lock_stats *stats,
					 const char *func, int line)
{
	qdf_mem_zero(stats, sizeof(*stats));
	stats->initialization_fn = func;
	stats->line = line;

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_create(stats, func, line);
}
196 #endif
197 
198 #include <i_qdf_lock.h>
199 
200 #define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
201 #define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
202 #define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1
203 
/**
 * qdf_semaphore_acquire_timeout() - take the semaphore before timeout
 * @m: semaphore to take
 * @timeout: maximum time to try to take the semaphore
 *
 * NOTE(review): takes a raw struct semaphore * rather than
 * qdf_semaphore_t like the other semaphore wrappers — confirm this
 * asymmetry is intentional.
 *
 * Return: result of __qdf_semaphore_acquire_timeout() (platform
 * defined; see i_qdf_lock.h)
 */
static inline int qdf_semaphore_acquire_timeout(struct semaphore *m,
						unsigned long timeout)
{
	return __qdf_semaphore_acquire_timeout(m, timeout);
}
215 
/**
 * struct qdf_spinlock - platform-independent spinlock with statistics
 * @lock: underlying platform spinlock
 * @stats: contention/hold accounting (empty unless QDF_LOCK_STATS)
 */
struct qdf_spinlock {
	__qdf_spinlock_t lock;
	struct lock_stats stats;
};

/**
 * @brief Platform spinlock object
 */
typedef struct qdf_spinlock qdf_spinlock_t;
225 
226 
/**
 * @brief Platform mutex object
 */
typedef __qdf_semaphore_t qdf_semaphore_t;
typedef __qdf_mutex_t qdf_mutex_t;

/* Mutex API, implemented out of line. The qdf_mutex_create macro
 * shadows the function so callers automatically record their
 * __func__/__LINE__ for lock-stats bookkeeping.
 */
QDF_STATUS qdf_mutex_create(qdf_mutex_t *m, const char *func, int line);
#define qdf_mutex_create(m) qdf_mutex_create(m, __func__, __LINE__)

QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *m);

QDF_STATUS qdf_mutex_release(qdf_mutex_t *m);

QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock);
242 
/**
 * qdf_spinlock_create() - initialize a spinlock
 * @lock: spinlock object pointer
 * @func: name of the creating function (supplied by the macro below)
 * @line: source line of the creation site (supplied by the macro below)
 *
 * Return: none
 */
static inline void qdf_spinlock_create(qdf_spinlock_t *lock, const char *func,
				       int line)
{
	__qdf_spinlock_create(&lock->lock);

	/* spinlock stats create relies on the spinlock working already */
	qdf_lock_stats_create(&lock->stats, func, line);
}

/* Shadow the function so callers automatically pass __func__/__LINE__ */
#define qdf_spinlock_create(x) qdf_spinlock_create(x, __func__, __LINE__)
258 
/**
 * qdf_spinlock_destroy() - delete a spinlock
 * @lock: spinlock object pointer
 *
 * Stats bookkeeping is torn down first, while the lock is still valid.
 *
 * Return: none
 */
static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock)
{
	qdf_lock_stats_destroy(&lock->stats);
	__qdf_spinlock_destroy(&lock->lock);
}
269 
/**
 * qdf_spin_is_locked() - check if the spinlock is locked
 * @lock: spinlock object
 *
 * Thin wrapper over the platform __qdf_spin_is_locked().
 *
 * Return: nonzero if lock is held
 */
static inline int qdf_spin_is_locked(qdf_spinlock_t *lock)
{
	return __qdf_spin_is_locked(&lock->lock);
}
280 
/**
 * qdf_spin_trylock_bh() - spin trylock, bottom-half safe
 * @lock: spinlock object
 * @func: caller name recorded in the stats (supplied by the macro below)
 *
 * Stats are only charged when the trylock succeeds (see AFTER_TRYLOCK).
 *
 * Return: nonzero if lock is acquired
 */
static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock, const char *func)
{
	int trylock_return;

	BEFORE_TRYLOCK(lock);
	trylock_return = __qdf_spin_trylock_bh(&lock->lock);
	AFTER_TRYLOCK(lock, trylock_return, func);

	return trylock_return;
}
/* Shadow the function so callers automatically pass __func__ */
#define qdf_spin_trylock_bh(lock) qdf_spin_trylock_bh(lock, __func__)
298 
299 int qdf_spin_trylock_bh_outline(qdf_spinlock_t *lock);
300 
/**
 * qdf_spin_trylock() - attempt to acquire a spinlock without spinning
 * @lock: spinlock object
 * @func: caller name recorded in the stats (supplied by the macro below)
 *
 * NOTE(review): unlike qdf_spin_trylock_bh(), this path uses
 * BEFORE_LOCK/AFTER_LOCK, so stats.acquired and stats.acquired_by are
 * updated even when the trylock fails — confirm whether that skew is
 * intentional.
 *
 * Return: result of __qdf_spin_trylock() (nonzero on success by
 * spin_trylock convention — confirm in the platform layer)
 */
static inline int qdf_spin_trylock(qdf_spinlock_t *lock, const char *func)
{
	int result = 0;

	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	result = __qdf_spin_trylock(&lock->lock);
	AFTER_LOCK(lock, func);

	return result;
}

/* Shadow the function so callers automatically pass __func__ */
#define qdf_spin_trylock(lock) qdf_spin_trylock(lock, __func__)
318 
/**
 * qdf_spin_lock_bh() - acquire the spinlock in soft-irq (bottom-half) context
 * @lock: spinlock object pointer
 * @func: caller name recorded in the stats (supplied by the macro below)
 *
 * Return: none
 */
static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_bh(&lock->lock);
	AFTER_LOCK(lock, func);
}

/* Shadow the function so callers automatically pass __func__ */
#define qdf_spin_lock_bh(lock) qdf_spin_lock_bh(lock, __func__)
332 
333 void qdf_spin_lock_bh_outline(qdf_spinlock_t *lock);
334 
/**
 * qdf_spin_unlock_bh() - release a spinlock taken with qdf_spin_lock_bh()
 * @lock: spinlock object pointer
 *
 * Hold-time stats are recorded first, against the BH hold-time limit.
 *
 * Return: none
 */
static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH);
	__qdf_spin_unlock_bh(&lock->lock);
}
345 
346 void qdf_spin_unlock_bh_outline(qdf_spinlock_t *lock);
347 
/**
 * qdf_spinlock_irq_exec() - execute a function with the spinlock held
 * and interrupts disabled
 * @hdl: OS handle
 * @lock: spinlock to be held for the critical region
 * @func: critical region function to be executed
 * @arg: argument passed through to the critical region function
 *
 * Return: Boolean status returned by the critical region function
 */
static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl,
					 qdf_spinlock_t *lock,
					 qdf_irqlocked_func_t func, void *arg)
{
	return __qdf_spinlock_irq_exec(hdl, &lock->lock, func, arg);
}
363 
/**
 * qdf_spin_lock() - acquire a spinlock (SMP) and disable preemption
 * @lock: Lock object
 * @func: caller name recorded in the stats (supplied by the macro below)
 *
 * Return: none
 */
static inline void qdf_spin_lock(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock(&lock->lock);
	AFTER_LOCK(lock, func);
}
/* Shadow the function so callers automatically pass __func__ */
#define qdf_spin_lock(lock) qdf_spin_lock(lock, __func__)
377 
/**
 * qdf_spin_unlock() - release the spinlock and re-enable preemption
 * @lock: Lock object
 *
 * The plain-spinlock hold-time limit is 0, i.e. overlong-hold
 * detection is disabled on this path.
 *
 * Return: none
 */
static inline void qdf_spin_unlock(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK);
	__qdf_spin_unlock(&lock->lock);
}
389 
/**
 * qdf_spin_lock_irq() - acquire a spinlock (SMP) and save the IRQ state
 * @lock: Lock object
 * @flags: storage for the saved interrupt state
 * @func: caller name recorded in the stats (supplied by the macro below)
 *
 * NOTE(review): @flags is passed by value here, so __qdf_spin_lock_irq
 * is presumably a macro that writes the saved state back — confirm in
 * i_qdf_lock.h. Note it also reaches into lock->lock.spinlock directly.
 *
 * Return: none
 */
static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags,
				     const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irq(&lock->lock.spinlock, flags);
	AFTER_LOCK(lock, func);
}
/* Shadow the function so callers automatically pass __func__ */
#define qdf_spin_lock_irq(lock, flags) qdf_spin_lock_irq(lock, flags, __func__)
405 
/**
 * qdf_spin_lock_irqsave() - acquire a spinlock (SMP), disable preemption
 * and disable IRQs, saving the previous IRQ state in the lock
 * @lock: Lock object
 * @func: caller name recorded in the stats (supplied by the macro below)
 *
 * Return: none
 */
static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irqsave(&lock->lock);
	AFTER_LOCK(lock, func);
}
/* Shadow the function so callers automatically pass __func__ */
#define qdf_spin_lock_irqsave(lock) qdf_spin_lock_irqsave(lock, __func__)
420 
/**
 * qdf_spin_unlock_irqrestore() - release the spinlock, re-enable
 * preemption and restore the saved IRQ state
 * @lock: Lock object
 *
 * Hold-time stats are recorded against the IRQ hold-time limit.
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irqrestore(&lock->lock);
}
433 
/**
 * qdf_spin_unlock_irq() - release a spinlock (SMP) and restore the
 * IRQ state from @flags
 * @lock: Lock object
 * @flags: interrupt state saved by the matching qdf_spin_lock_irq()
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock,
				       unsigned long flags)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irq(&lock->lock.spinlock, flags);
}
447 
/**
 * qdf_semaphore_init() - initialize a semaphore
 * @m: Semaphore to initialize
 *
 * Thin wrapper over the platform __qdf_semaphore_init().
 *
 * Return: None
 */
static inline void qdf_semaphore_init(qdf_semaphore_t *m)
{
	__qdf_semaphore_init(m);
}
457 
/**
 * qdf_semaphore_acquire() - take the semaphore
 * @m: Semaphore to take
 *
 * Return: result of __qdf_semaphore_acquire() (platform defined)
 */
static inline int qdf_semaphore_acquire(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire(m);
}
467 
/**
 * qdf_semaphore_release() - give the semaphore
 * @m: Semaphore to give
 *
 * Return: None
 */
static inline void qdf_semaphore_release(qdf_semaphore_t *m)
{
	__qdf_semaphore_release(m);
}
477 
/**
 * qdf_semaphore_acquire_intr() - take the semaphore, interruptible version
 * @m: semaphore to take
 *
 * Return: result of __qdf_semaphore_acquire_intr() (platform defined)
 */
static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire_intr(m);
}
488 
489 QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name);
490 
491 QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason);
492 
493 const char *qdf_wake_lock_name(qdf_wake_lock_t *lock);
494 QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock,
495 					 uint32_t msec);
496 
497 QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason);
498 
499 QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock);
500 
501 QDF_STATUS qdf_runtime_pm_get(void);
502 QDF_STATUS qdf_runtime_pm_put(void);
503 QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock);
504 QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock);
505 
506 QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
507 
508 #define qdf_runtime_lock_init(lock) __qdf_runtime_lock_init(lock, #lock)
509 
510 void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock);
511 
512 QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock);
513 
514 QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock);
515 #endif /* _QDF_LOCK_H */
516