/*
 * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file qdf_lock.h
 * This file abstracts locking operations.
 */

#ifndef _QDF_LOCK_H
#define _QDF_LOCK_H

#include <qdf_types.h>
#include <qdf_mem.h>
#include <qdf_time.h>
#include <i_qdf_trace.h>

#ifndef QDF_LOCK_STATS
#define QDF_LOCK_STATS 0
#endif
#ifndef QDF_LOCK_STATS_DESTROY_PRINT
#define QDF_LOCK_STATS_DESTROY_PRINT 0
#endif
#ifndef QDF_LOCK_STATS_BUG_ON
#define QDF_LOCK_STATS_BUG_ON 0
#endif
#ifndef QDF_LOCK_STATS_LIST
#define QDF_LOCK_STATS_LIST 0
#endif

/* Max hold time in microseconds, 0 to disable detection */
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ         10000
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH        1000000
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK                 0

#if !QDF_LOCK_STATS
struct lock_stats {};
#define BEFORE_LOCK(x...) do {} while (0)
#define AFTER_LOCK(x...) do {} while (0)
#define BEFORE_TRYLOCK(x...) do {} while (0)
#define AFTER_TRYLOCK(x...) do {} while (0)
#define BEFORE_UNLOCK(x...) do {} while (0)
#define qdf_lock_stats_create(x...) do {} while (0)
#define qdf_lock_stats_destroy(x...) do {} while (0)
#define qdf_lock_stats_init(x...) do {} while (0)
#define qdf_lock_stats_deinit(x...) do {} while (0)
#else
void qdf_lock_stats_init(void);
void qdf_lock_stats_deinit(void);
struct qdf_lock_cookie;
struct lock_stats {
	const char *initialization_fn;
	const char *acquired_by;
	int line;
	int acquired;
	int contended;
	uint64_t contention_time;
	uint64_t non_contention_time;
	uint64_t held_time;
	uint64_t last_acquired;
	uint64_t max_contention_wait;
	uint64_t max_held_time;
	int num_large_contentions;
	int num_large_holds;
	struct qdf_lock_cookie *cookie;
};
#define LARGE_CONTENTION QDF_LOG_TIMESTAMP_CYCLES_PER_10_US

#define BEFORE_LOCK(lock, was_locked) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time;  \
	bool BEFORE_LOCK_is_locked = was_locked; \
	BEFORE_LOCK_time = qdf_get_log_timestamp(); \
	do {} while (0)


#define AFTER_LOCK(lock, func) \
	lock->stats.acquired_by = func; \
	AFTER_LOCK_time = qdf_get_log_timestamp(); \
	lock->stats.acquired++; \
	lock->stats.last_acquired = AFTER_LOCK_time; \
	if (BEFORE_LOCK_is_locked) { \
		lock->stats.contended++; \
		lock->stats.contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} else { \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > LARGE_CONTENTION) \
		lock->stats.num_large_contentions++; \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > \
	    lock->stats.max_contention_wait) \
		lock->stats.max_contention_wait = \
			AFTER_LOCK_time - BEFORE_LOCK_time; \
} while (0)

#define BEFORE_TRYLOCK(lock) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time;  \
	BEFORE_LOCK_time = qdf_get_log_timestamp(); \
	do {} while (0)

#define AFTER_TRYLOCK(lock, trylock_return, func) \
	AFTER_LOCK_time = qdf_get_log_timestamp(); \
	if (trylock_return) { \
		lock->stats.acquired++; \
		lock->stats.last_acquired = AFTER_LOCK_time; \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
		lock->stats.acquired_by = func; \
	} \
} while (0)

/* max_hold_time in microseconds (us) */
#define BEFORE_UNLOCK(lock, max_hold_time) \
do {\
	uint64_t held_time = qdf_get_log_timestamp() - \
		lock->stats.last_acquired; \
	lock->stats.held_time += held_time; \
\
	if (held_time > lock->stats.max_held_time) \
		lock->stats.max_held_time = held_time; \
\
	if (held_time > LARGE_CONTENTION) \
		lock->stats.num_large_holds++; \
	if (QDF_LOCK_STATS_BUG_ON && max_hold_time && \
	    held_time > qdf_usecs_to_log_timestamp(max_hold_time)) { \
		qdf_print("BEFORE_UNLOCK: lock held too long (%lluus)\n", \
		       qdf_log_timestamp_to_usecs(held_time)); \
		QDF_BUG(0); \
	} \
	lock->stats.acquired_by = NULL; \
} while (0)
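
/*
 * Usage note: BEFORE_LOCK() deliberately opens a scope ("do {" plus local
 * timestamp variables) that AFTER_LOCK() closes with "} while (0)"; the
 * trailing "do {} while (0)" in BEFORE_LOCK() merely consumes the semicolon
 * at the invocation site. The two must therefore always be used as a pair
 * within one wrapper, as the qdf_spin_* wrappers below do (sketch):
 *
 *	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
 *	__qdf_spin_lock(&lock->lock);
 *	AFTER_LOCK(lock, func);
 *
 * BEFORE_TRYLOCK()/AFTER_TRYLOCK() follow the same pattern, while
 * BEFORE_UNLOCK() is self-contained.
 */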

void qdf_lock_stats_cookie_destroy(struct lock_stats *stats);
void qdf_lock_stats_cookie_create(struct lock_stats *stats,
				  const char *func, int line);

static inline void qdf_lock_stats_destroy(struct lock_stats *stats)
{
	if (QDF_LOCK_STATS_DESTROY_PRINT) {
		qdf_print("%s: lock: %s %d \t"
			"acquired:\t%d\tcontended:\t%d\t"
			"contention_time\t%llu\tmax_contention_wait:\t%llu\t"
			"non_contention_time\t%llu\t"
			"held_time\t%llu\tmax_held:\t%llu\t\n"
			, __func__, stats->initialization_fn, stats->line,
			stats->acquired, stats->contended,
			qdf_log_timestamp_to_usecs(stats->contention_time),
			qdf_log_timestamp_to_usecs(stats->max_contention_wait),
			qdf_log_timestamp_to_usecs(stats->non_contention_time),
			qdf_log_timestamp_to_usecs(stats->held_time),
			qdf_log_timestamp_to_usecs(stats->max_held_time));
	}

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_destroy(stats);
}

#ifndef MEMORY_DEBUG
#define qdf_mem_malloc_debug(x, y, z) qdf_mem_malloc(x)
#endif

/**
 * qdf_lock_stats_create() - initialize the lock stats structure
 * @stats: lock stats structure to initialize
 * @func: name of the function creating the lock
 * @line: line number of the call site
 */
static inline void qdf_lock_stats_create(struct lock_stats *stats,
					 const char *func, int line)
{
	qdf_mem_zero(stats, sizeof(*stats));
	stats->initialization_fn = func;
	stats->line = line;

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_create(stats, func, line);
}
#endif

#include <i_qdf_lock.h>

#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1

/**
 * qdf_semaphore_acquire_timeout() - Take the semaphore before timeout
 * @m: semaphore to take
 * @timeout: maximum time to try to take the semaphore
 * Return: int
 */
static inline int qdf_semaphore_acquire_timeout(struct semaphore *m,
						unsigned long timeout)
{
	return __qdf_semaphore_acquire_timeout(m, timeout);
}

struct qdf_spinlock {
	__qdf_spinlock_t lock;
	struct lock_stats stats;
};

/**
 * @brief Platform spinlock object
 */
typedef struct qdf_spinlock qdf_spinlock_t;


/**
 * @brief Platform semaphore and mutex objects
 */
typedef __qdf_semaphore_t qdf_semaphore_t;
typedef __qdf_mutex_t qdf_mutex_t;

/* Function declarations */
QDF_STATUS qdf_mutex_create(qdf_mutex_t *m, const char *func, int line);
#define qdf_mutex_create(m) qdf_mutex_create(m, __func__, __LINE__)

QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *m);

QDF_STATUS qdf_mutex_release(qdf_mutex_t *m);

QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock);
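
/*
 * Illustrative mutex usage (a sketch; everything except the qdf_mutex_*
 * APIs above is hypothetical). Note that qdf_mutex_create() is wrapped by a
 * macro that records the caller's __func__ and __LINE__:
 *
 *	static qdf_mutex_t cfg_mutex;
 *
 *	QDF_STATUS example_init(void)
 *	{
 *		return qdf_mutex_create(&cfg_mutex);
 *	}
 *
 *	void example_update(void)
 *	{
 *		if (qdf_mutex_acquire(&cfg_mutex) != QDF_STATUS_SUCCESS)
 *			return;
 *		... critical section, may sleep ...
 *		qdf_mutex_release(&cfg_mutex);
 *	}
 *
 *	void example_deinit(void)
 *	{
 *		qdf_mutex_destroy(&cfg_mutex);
 *	}
 */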

/**
 * qdf_spinlock_create() - Initialize a spinlock
 * @lock: spinlock object pointer
 * @func: name of the calling function (supplied by the wrapper macro)
 * @line: line number of the call site (supplied by the wrapper macro)
 * Return: none
 */
static inline void qdf_spinlock_create(qdf_spinlock_t *lock, const char *func,
				       int line)
{
	__qdf_spinlock_create(&lock->lock);

	/* creating the spinlock stats relies on the spinlock already working */
	qdf_lock_stats_create(&lock->stats, func, line);
}

#define qdf_spinlock_create(x) qdf_spinlock_create(x, __func__, __LINE__)

/**
 * qdf_spinlock_destroy() - Delete a spinlock
 * @lock: spinlock object pointer
 * Return: none
 */
static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock)
{
	qdf_lock_stats_destroy(&lock->stats);
	__qdf_spinlock_destroy(&lock->lock);
}

/**
 * qdf_spin_is_locked() - check if the spinlock is locked
 * @lock: spinlock object
 *
 * Return: nonzero if lock is held.
 */
static inline int qdf_spin_is_locked(qdf_spinlock_t *lock)
{
	return __qdf_spin_is_locked(&lock->lock);
}

/**
 * qdf_spin_trylock_bh() - try to acquire the spinlock with bottom halves disabled
 * @lock: spinlock object
 * @func: name of the calling function (supplied by the wrapper macro)
 *
 * Return: nonzero if the lock was acquired
 */
static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock, const char *func)
{
	int trylock_return;

	BEFORE_TRYLOCK(lock);
	trylock_return = __qdf_spin_trylock_bh(&lock->lock);
	AFTER_TRYLOCK(lock, trylock_return, func);

	return trylock_return;
}
#define qdf_spin_trylock_bh(lock) qdf_spin_trylock_bh(lock, __func__)

int qdf_spin_trylock_bh_outline(qdf_spinlock_t *lock);

/**
 * qdf_spin_lock_bh() - acquire the spinlock, disabling bottom halves (softirqs)
 * @lock: spinlock object pointer
 * @func: name of the calling function (supplied by the wrapper macro)
 * Return: none
 */
static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_bh(&lock->lock);
	AFTER_LOCK(lock, func);
}

#define qdf_spin_lock_bh(lock) qdf_spin_lock_bh(lock, __func__)

void qdf_spin_lock_bh_outline(qdf_spinlock_t *lock);

/**
 * qdf_spin_unlock_bh() - release the spinlock and re-enable bottom halves
 * @lock: spinlock object pointer
 * Return: none
 */
static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH);
	__qdf_spin_unlock_bh(&lock->lock);
}

void qdf_spin_unlock_bh_outline(qdf_spinlock_t *lock);

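/*
 * Illustrative qdf_spinlock_t usage (a sketch; everything except the
 * qdf_spin* APIs declared in this file is hypothetical):
 *
 *	static qdf_spinlock_t q_lock;
 *	static uint32_t q_depth;
 *
 *	void example_attach(void)
 *	{
 *		qdf_spinlock_create(&q_lock);
 *	}
 *
 *	void example_enqueue(void)
 *	{
 *		qdf_spin_lock_bh(&q_lock);
 *		q_depth++;
 *		qdf_spin_unlock_bh(&q_lock);
 *	}
 *
 *	bool example_try_enqueue(void)
 *	{
 *		if (!qdf_spin_trylock_bh(&q_lock))
 *			return false;
 *		q_depth++;
 *		qdf_spin_unlock_bh(&q_lock);
 *		return true;
 *	}
 *
 *	void example_detach(void)
 *	{
 *		qdf_spinlock_destroy(&q_lock);
 *	}
 */
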
/**
 * qdf_spinlock_irq_exec() - Execute the input function with the spinlock held
 * and interrupts disabled
 * @hdl: OS handle
 * @lock: spinlock to be held for the critical region
 * @func: critical region function to be executed
 * @arg: context (argument) passed to the critical region function
 * Return: Boolean status returned by the critical region function
 */
static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl,
					 qdf_spinlock_t *lock,
					 qdf_irqlocked_func_t func, void *arg)
{
	return __qdf_spinlock_irq_exec(hdl, &lock->lock, func, arg);
}

/**
 * qdf_spin_lock() - Acquire a spinlock (SMP) and disable preemption
 * @lock: Lock object
 * @func: name of the calling function (supplied by the wrapper macro)
 *
 * Return: none
 */
static inline void qdf_spin_lock(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock(&lock->lock);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock(lock) qdf_spin_lock(lock, __func__)

/**
 * qdf_spin_unlock() - Release the spinlock and re-enable preemption
 * @lock: Lock object
 *
 * Return: none
 */
static inline void qdf_spin_unlock(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK);
	__qdf_spin_unlock(&lock->lock);
}

/**
 * qdf_spin_lock_irq() - Acquire a spinlock (SMP) and save the IRQ state
 * @lock: Lock object
 * @flags: flags
 * @func: name of the calling function (supplied by the wrapper macro)
 *
 * Return: none
 */
static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags,
				     const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irq(&lock->lock.spinlock, flags);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock_irq(lock, flags) qdf_spin_lock_irq(lock, flags, __func__)

/**
 * qdf_spin_lock_irqsave() - Acquire a spinlock (SMP), disable preemption
 * and disable IRQs, saving the current IRQ state
 * @lock: Lock object
 * @func: name of the calling function (supplied by the wrapper macro)
 *
 * Return: none
 */
static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irqsave(&lock->lock);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock_irqsave(lock) qdf_spin_lock_irqsave(lock, __func__)

/**
 * qdf_spin_unlock_irqrestore() - Release the spinlock, re-enable preemption
 * and restore the saved IRQ state
 * @lock: Lock object
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irqrestore(&lock->lock);
}

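/*
 * Illustrative irqsave usage for data shared with a hard-IRQ path (a sketch;
 * names other than the qdf_spin* APIs are hypothetical). The saved IRQ state
 * is managed by the underlying __qdf_spinlock_t implementation, so the
 * caller does not pass a flags variable:
 *
 *	static qdf_spinlock_t irq_lock;
 *	static uint32_t irq_events;
 *
 *	void example_note_irq_event(void)
 *	{
 *		qdf_spin_lock_irqsave(&irq_lock);
 *		irq_events++;
 *		qdf_spin_unlock_irqrestore(&irq_lock);
 *	}
 */
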
/**
 * qdf_spin_unlock_irq() - Unlock a spinlock (SMP) and restore the IRQ state
 * @lock: Lock object
 * @flags: flags
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock,
				       unsigned long flags)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irq(&lock->lock.spinlock, flags);
}

/**
 * qdf_semaphore_init() - initialize a semaphore
 * @m: Semaphore to initialize
 * Return: None
 */
static inline void qdf_semaphore_init(qdf_semaphore_t *m)
{
	__qdf_semaphore_init(m);
}

/**
 * qdf_semaphore_acquire() - take the semaphore
 * @m: Semaphore to take
 * Return: int
 */
static inline int qdf_semaphore_acquire(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire(m);
}

/**
 * qdf_semaphore_release() - give the semaphore
 * @m: Semaphore to give
 * Return: None
 */
static inline void qdf_semaphore_release(qdf_semaphore_t *m)
{
	__qdf_semaphore_release(m);
}

/**
 * qdf_semaphore_acquire_intr() - take the semaphore, interruptible version
 * @m: Semaphore to take
 * Return: int
 */
static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire_intr(m);
}

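/*
 * Illustrative semaphore usage (a sketch; everything except the
 * qdf_semaphore_* APIs above is hypothetical, and the mutual exclusion shown
 * assumes the platform __qdf_semaphore_init() creates a binary semaphore):
 *
 *	static qdf_semaphore_t cmd_sem;
 *
 *	void example_init(void)
 *	{
 *		qdf_semaphore_init(&cmd_sem);
 *	}
 *
 *	void example_send_cmd(void)
 *	{
 *		qdf_semaphore_acquire(&cmd_sem);
 *		... issue the command; may sleep while waiting ...
 *		qdf_semaphore_release(&cmd_sem);
 *	}
 *
 * qdf_semaphore_acquire_intr() behaves like qdf_semaphore_acquire() but can
 * be interrupted by a signal, and qdf_semaphore_acquire_timeout() (declared
 * earlier in this file) gives up after a timeout.
 */
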
QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name);

QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason);

const char *qdf_wake_lock_name(qdf_wake_lock_t *lock);
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock,
					 uint32_t msec);

QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason);

QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock);

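/*
 * Illustrative wake lock usage (a sketch; the lock name, the functions and
 * the scan_reason code below are hypothetical, driver-specific values, not
 * part of this API):
 *
 *	static qdf_wake_lock_t scan_wake_lock;
 *
 *	void example_start(void)
 *	{
 *		qdf_wake_lock_create(&scan_wake_lock, "scan_wake_lock");
 *	}
 *
 *	void example_scan(uint32_t scan_reason)
 *	{
 *		qdf_wake_lock_acquire(&scan_wake_lock, scan_reason);
 *		... keep the SoC awake while the scan completes ...
 *		qdf_wake_lock_release(&scan_wake_lock, scan_reason);
 *	}
 *
 *	void example_stop(void)
 *	{
 *		qdf_wake_lock_destroy(&scan_wake_lock);
 *	}
 *
 * qdf_wake_lock_timeout_acquire() can be used instead when the wake lock
 * should be released automatically after the given number of milliseconds.
 */
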
QDF_STATUS qdf_runtime_pm_get(void);
QDF_STATUS qdf_runtime_pm_put(void);
QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock);
QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock);

QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);

#define qdf_runtime_lock_init(lock) __qdf_runtime_lock_init(lock, #lock)

void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock);

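/*
 * Illustrative runtime PM lock usage (a sketch; everything except the
 * qdf_runtime_* APIs above is hypothetical). qdf_runtime_lock_init()
 * stringifies its argument to use as the lock's name:
 *
 *	static qdf_runtime_lock_t tx_runtime_lock;
 *
 *	void example_attach(void)
 *	{
 *		qdf_runtime_lock_init(&tx_runtime_lock);
 *	}
 *
 *	void example_tx_start(void)
 *	{
 *		qdf_runtime_pm_prevent_suspend(&tx_runtime_lock);
 *	}
 *
 *	void example_tx_complete(void)
 *	{
 *		qdf_runtime_pm_allow_suspend(&tx_runtime_lock);
 *	}
 *
 *	void example_detach(void)
 *	{
 *		qdf_runtime_lock_deinit(&tx_runtime_lock);
 *	}
 */
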
QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock);

QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock);
#endif /* _QDF_LOCK_H */