xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lock.h (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2014-2018, 2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_lock.h
21  * Linux-specific definitions for QDF Lock API's
22  */
23 
24 #if !defined(__I_QDF_LOCK_H)
25 #define __I_QDF_LOCK_H
26 
27 /* Include Files */
28 #include <qdf_types.h>
29 #include <qdf_status.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/sched.h>
33 #include <linux/device.h>
34 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
35 #include <asm/semaphore.h>
36 #else
37 #include <linux/semaphore.h>
38 #endif
39 #include <linux/interrupt.h>
40 #include <linux/pm_wakeup.h>
41 
42 /* define for flag */
43 #define QDF_LINUX_UNLOCK_BH  1
44 
45 #ifdef __cplusplus
46 extern "C" {
47 #endif /* __cplusplus */
48 
/*
 * Lock life-cycle states. The improbable base value (0x11223344) acts as a
 * magic pattern so that uninitialized or corrupted state fields are easy to
 * spot when debugging.
 */
enum {
	LOCK_RELEASED = 0x11223344,
	LOCK_ACQUIRED,
	LOCK_DESTROYED
};
54 
/**
 * struct qdf_lock_s - mutex wrapper with debug bookkeeping
 * @m_lock: Mutex lock
 * @cookie: Lock cookie, set to LINUX_LOCK_COOKIE when initialized
 * @process_id: Process ID to track lock
 * @state: Lock status (one of LOCK_RELEASED/LOCK_ACQUIRED/LOCK_DESTROYED)
 * @refcount: Reference count for recursive lock
 * @stats: a structure that contains usage statistics
 */
struct qdf_lock_s {
	struct mutex m_lock;
	uint32_t cookie;
	int process_id;
	uint32_t state;
	uint8_t refcount;
	struct lock_stats stats;
};

/* Platform mutex type exposed to the common QDF lock layer */
typedef struct qdf_lock_s __qdf_mutex_t;
74 
/**
 * typedef __qdf_spinlock_t - spinlock wrapper
 * @spinlock: Spin lock
 * @flags: saved IRQ flags, also carries the QDF_LINUX_UNLOCK_BH bit used by
 *         the *_bh lock/unlock helpers below
 */
typedef struct __qdf_spinlock {
	spinlock_t spinlock;
	unsigned long flags;
} __qdf_spinlock_t;

/* The Linux semaphore is used directly as the QDF semaphore type */
typedef struct semaphore __qdf_semaphore_t;
86 
/**
 * typedef qdf_wake_lock_t - wakeup-source wrapper
 * @lock: this lock needs to be used in kernel version < 5.4
 * @priv: this lock pointer needs to be used in kernel version >= 5.4
 */
typedef struct qdf_wake_lock {
	struct wakeup_source lock;
	struct wakeup_source *priv;
} qdf_wake_lock_t;

/* Opaque handle implemented by the HIF runtime-PM layer */
struct hif_pm_runtime_lock;
/**
 * typedef qdf_runtime_lock_t - runtime PM lock wrapper
 * @lock: pointer to the underlying HIF runtime-PM lock
 */
typedef struct qdf_runtime_lock {
	struct hif_pm_runtime_lock *lock;
} qdf_runtime_lock_t;

/* Magic stored in qdf_lock_s.cookie to mark a properly initialized lock */
#define LINUX_LOCK_COOKIE 0x12345678
103 
/* Function declarations and documentation */
105 
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
/**
 * __qdf_semaphore_init() - initialize the semaphore as a binary semaphore
 * @m: Semaphore object
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
{
	/* init_MUTEX() was removed in 2.6.37; sema_init(m, 1) replaces it */
	init_MUTEX(m);
	return QDF_STATUS_SUCCESS;
}
#else
/**
 * __qdf_semaphore_init() - initialize the semaphore as a binary semaphore
 * @m: Semaphore object
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
{
	sema_init(m, 1);
	return QDF_STATUS_SUCCESS;
}
#endif
125 
/**
 * __qdf_semaphore_acquire() - acquire semaphore, blocking uninterruptibly
 * @m: Semaphore object
 *
 * Return: 0 always; down() blocks until the semaphore is obtained and
 *         does not report failure
 */
static inline int __qdf_semaphore_acquire(struct semaphore *m)
{
	down(m);
	return 0;
}
137 
/**
 * __qdf_semaphore_acquire_intr() - acquire semaphore, interruptible by signal
 * @m: Semaphore object
 *
 * down_interruptible() allows a process waiting on the semaphore to be
 * interrupted by a signal. If the operation is interrupted, the function
 * returns a nonzero value and the caller does NOT hold the semaphore;
 * callers must always check the return value and respond accordingly.
 *
 * Return: 0 on success (semaphore held), nonzero if interrupted
 */
static inline int __qdf_semaphore_acquire_intr(struct semaphore *m)
{
	return down_interruptible(m);
}
153 
/**
 * __qdf_semaphore_release() - release semaphore
 * @m: Semaphore object
 *
 * Return: none
 */
static inline void __qdf_semaphore_release(struct semaphore *m)
{
	up(m);
}
164 
/**
 * __qdf_semaphore_acquire_timeout() - take the semaphore within a time limit
 * @m: semaphore to take
 * @timeout: maximum time to try to take the semaphore, in milliseconds
 *
 * Return: 0 if the semaphore was taken, negative error code otherwise
 */
static inline int __qdf_semaphore_acquire_timeout(struct semaphore *m,
						  unsigned long timeout)
{
	/* callers pass milliseconds; down_timeout() expects jiffies */
	return down_timeout(m, msecs_to_jiffies(timeout));
}
178 
/**
 * __qdf_spinlock_create() - initialize spin lock
 * @lock: Spin lock object
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS __qdf_spinlock_create(__qdf_spinlock_t *lock)
{
	spin_lock_init(&lock->spinlock);
	/* clear saved IRQ flags and the QDF_LINUX_UNLOCK_BH bookkeeping bit */
	lock->flags = 0;
	return QDF_STATUS_SUCCESS;
}

/* Linux spinlocks require no teardown, so destroy is a no-op */
#define __qdf_spinlock_destroy(lock)
193 
/**
 * __qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
 * @lock: Lock object
 *
 * Return: none
 */
static inline void __qdf_spin_lock(__qdf_spinlock_t *lock)
{
	spin_lock(&lock->spinlock);
}
204 
/**
 * __qdf_spin_unlock() - Unlock the spinlock and enables the Preemption
 * @lock: Lock object
 *
 * Return: none
 */
static inline void __qdf_spin_unlock(__qdf_spinlock_t *lock)
{
	spin_unlock(&lock->spinlock);
}
215 
/**
 * __qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
 * (Preemptive) and disable IRQs
 * @lock: Lock object
 *
 * NOTE(review): the saved IRQ flags live inside the lock object itself, so
 * this pairs only with __qdf_spin_unlock_irqrestore() on the same lock.
 *
 * Return: none
 */
static inline void __qdf_spin_lock_irqsave(__qdf_spinlock_t *lock)
{
	spin_lock_irqsave(&lock->spinlock, lock->flags);
}
227 
/**
 * __qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
 * Preemption and enable IRQ
 * @lock: Lock object
 *
 * Restores the IRQ flags saved in @lock by __qdf_spin_lock_irqsave().
 *
 * Return: none
 */
static inline void __qdf_spin_unlock_irqrestore(__qdf_spinlock_t *lock)
{
	spin_unlock_irqrestore(&lock->spinlock, lock->flags);
}
239 
/*
 * Synchronous versions - only for OS' that have interrupt disable.
 * Unlike the irqsave/irqrestore pair above, these take a caller-provided
 * flags variable and a raw spinlock_t pointer.
 */
#define __qdf_spin_lock_irq(_p_lock, _flags) spin_lock_irqsave(_p_lock, _flags)
#define __qdf_spin_unlock_irq(_p_lock, _flags) \
	spin_unlock_irqrestore(_p_lock, _flags)
246 
/**
 * __qdf_spin_is_locked() - check if the spinlock is currently held
 * @lock: spinlock object
 *
 * Return: nonzero if lock is held.
 */
static inline int __qdf_spin_is_locked(__qdf_spinlock_t *lock)
{
	return spin_is_locked(&lock->spinlock);
}
257 
258 /**
259  * __qdf_spin_trylock_bh() - spin trylock bottomhalf
260  * @lock: spinlock object
261  *
262  * Return: nonzero if lock is acquired
263  */
264 static inline int __qdf_spin_trylock_bh(__qdf_spinlock_t *lock)
265 {
266 	if (likely(irqs_disabled() || in_irq() || in_softirq()))
267 		return spin_trylock(&lock->spinlock);
268 
269 	if (spin_trylock_bh(&lock->spinlock)) {
270 		lock->flags |= QDF_LINUX_UNLOCK_BH;
271 		return 1;
272 	}
273 
274 	return 0;
275 }
276 
/**
 * __qdf_spin_trylock() - spin trylock
 * @lock: spinlock object
 *
 * Return: nonzero if the lock was acquired, 0 otherwise
 */
static inline int __qdf_spin_trylock(__qdf_spinlock_t *lock)
{
	return spin_trylock(&lock->spinlock);
}
287 
/**
 * __qdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves
 * @lock: Lock object
 *
 * In IRQ/softirq/irqs-off context bottom halves are already implicitly
 * disabled, so a plain spin_lock() is used; otherwise spin_lock_bh() is
 * used and the QDF_LINUX_UNLOCK_BH bit records that the matching
 * __qdf_spin_unlock_bh() must re-enable bottom halves.
 *
 * Return: none
 */
static inline void __qdf_spin_lock_bh(__qdf_spinlock_t *lock)
{
	if (likely(irqs_disabled() || in_irq() || in_softirq())) {
		spin_lock(&lock->spinlock);
	} else {
		spin_lock_bh(&lock->spinlock);
		lock->flags |= QDF_LINUX_UNLOCK_BH;
	}
}
303 
304 /**
305  * __qdf_spin_unlock_bh() - Release the spinlock and enable bottom halves
306  * @lock: Lock object
307  *
308  * Return: none
309  */
310 static inline void __qdf_spin_unlock_bh(__qdf_spinlock_t *lock)
311 {
312 	if (unlikely(lock->flags & QDF_LINUX_UNLOCK_BH)) {
313 		lock->flags &= (unsigned long)~QDF_LINUX_UNLOCK_BH;
314 		spin_unlock_bh(&lock->spinlock);
315 	} else
316 		spin_unlock(&lock->spinlock);
317 }
318 
/**
 * __qdf_spinlock_irq_exec() - Execute the input function with spinlock held
 * and interrupts disabled.
 * @hdl: OS handle (unused on Linux)
 * @lock: spinlock to be held for the critical region
 * @func: critical region function that to be executed
 * @arg: argument passed through to @func
 *
 * Return: Boolean status returned by the critical region function
 */
static inline bool __qdf_spinlock_irq_exec(qdf_handle_t hdl,
			__qdf_spinlock_t *lock,
			qdf_irqlocked_func_t func,
			void *arg)
{
	/* flags are kept on the stack here, not in the lock object */
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&lock->spinlock, flags);
	ret = func(arg);
	spin_unlock_irqrestore(&lock->spinlock, flags);

	return ret;
}
341 
/**
 * __qdf_in_softirq() - in soft irq context
 *
 * Return: true if in softirq context else false
 */
static inline bool __qdf_in_softirq(void)
{
	return in_softirq();
}
351 
352 #ifdef __cplusplus
353 }
354 #endif /* __cplusplus */
355 
356 #endif /* __I_QDF_LOCK_H */
357