xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lock.h (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2014-2018, 2020 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: i_qdf_lock.h
22  * Linux-specific definitions for QDF Lock API's
23  */
24 
25 #if !defined(__I_QDF_LOCK_H)
26 #define __I_QDF_LOCK_H
27 
28 /* Include Files */
29 #include <qdf_types.h>
30 #include <qdf_status.h>
31 #include <linux/mutex.h>
32 #include <linux/spinlock.h>
33 #include <linux/sched.h>
34 #include <linux/device.h>
35 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
36 #include <asm/semaphore.h>
37 #else
38 #include <linux/semaphore.h>
39 #endif
40 #include <linux/interrupt.h>
41 #include <linux/pm_wakeup.h>
42 
/*
 * Flag bit set in __qdf_spinlock.flags when the lock was acquired with
 * bottom halves disabled (spin_lock_bh), so the matching unlock knows to
 * re-enable them via spin_unlock_bh.
 */
#define QDF_LINUX_UNLOCK_BH  1
45 
46 #ifdef __cplusplus
47 extern "C" {
48 #endif /* __cplusplus */
49 
/*
 * Lock life-cycle states stored in qdf_lock_s.state.
 * NOTE(review): the magic base value 0x11223344 is presumably chosen so
 * that uninitialized memory is unlikely to read as a valid state — confirm
 * against the qdf_mutex implementation.
 */
enum {
	LOCK_RELEASED = 0x11223344,
	LOCK_ACQUIRED,
	LOCK_DESTROYED
};
55 
/**
 * struct qdf_lock_s - mutex wrapper with debug bookkeeping
 * @m_lock: underlying Linux mutex
 * @cookie: lock-validity cookie
 * @process_id: process ID to track lock
 * @state: lock status (see LOCK_RELEASED/LOCK_ACQUIRED/LOCK_DESTROYED)
 * @refcount: reference count for recursive lock
 * @stats: a structure that contains usage statistics
 */
struct qdf_lock_s {
	struct mutex m_lock;
	uint32_t cookie;
	int process_id;
	uint32_t state;
	uint8_t refcount;
	struct lock_stats stats;
};

/* Platform-private mutex type exposed to the common QDF lock API */
typedef struct qdf_lock_s __qdf_mutex_t;
75 
/**
 * struct __qdf_spinlock - spinlock plus saved context state
 * @spinlock: Spin lock
 * @flags: saved IRQ flags (irqsave variants) and the QDF_LINUX_UNLOCK_BH
 *         marker (bottom-half variants)
 */
typedef struct __qdf_spinlock {
	spinlock_t spinlock;
	unsigned long flags;
} __qdf_spinlock_t;
85 
/* Native Linux semaphore backing the QDF semaphore abstraction */
typedef struct semaphore __qdf_semaphore_t;
87 
/**
 * struct qdf_wake_lock - wakeup-source abstraction
 * @lock: this lock needs to be used in kernel version < 5.4
 * @priv: this lock pointer needs to be used in kernel version >= 5.4
 */
typedef struct qdf_wake_lock {
	struct wakeup_source lock;
	struct wakeup_source *priv;
} qdf_wake_lock_t;
97 
/* Opaque runtime-PM lock, defined by the HIF layer */
struct hif_pm_runtime_lock;
/**
 * struct qdf_runtime_lock - handle wrapping the HIF runtime-PM lock
 * @lock: pointer to the underlying HIF runtime-PM lock
 */
typedef struct qdf_runtime_lock {
	struct hif_pm_runtime_lock *lock;
} qdf_runtime_lock_t;
102 
/* NOTE(review): presumably the value stored in qdf_lock_s.cookie to mark an
 * initialized lock — verify against the qdf_mutex create/destroy paths.
 */
#define LINUX_LOCK_COOKIE 0x12345678
104 
105 /* Function declarations and documentation */
106 
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
/**
 * __qdf_semaphore_init() - initialize the semaphore
 * @m: Semaphore object
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
{
	init_MUTEX(m);
	return QDF_STATUS_SUCCESS;
}
#else
/**
 * __qdf_semaphore_init() - initialize the semaphore with a count of 1
 * @m: Semaphore object
 *
 * sema_init() replaced init_MUTEX() when the latter was removed in
 * kernel 2.6.37; both arms produce a binary (mutex-like) semaphore.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
{
	sema_init(m, 1);
	return QDF_STATUS_SUCCESS;
}
#endif
126 
/**
 * __qdf_semaphore_acquire() - take the semaphore, sleeping until available
 * @sem: semaphore to take
 *
 * Uninterruptible wait; the caller always holds the semaphore on return.
 *
 * Return: 0
 */
static inline int __qdf_semaphore_acquire(struct semaphore *sem)
{
	down(sem);

	return 0;
}
138 
/**
 * __qdf_semaphore_acquire_intr() - down_interruptible allows a user-space
 * process that is waiting on a semaphore to be interrupted by the user.
 * If the operation is interrupted, the function returns a nonzero value,
 * and the caller does not hold the semaphore.
 * Always check the return value and respond accordingly.
 * @m: Semaphore object
 *
 * Return: 0 if the semaphore was acquired, nonzero if the wait was
 *         interrupted
 */
static inline int __qdf_semaphore_acquire_intr(struct semaphore *m)
{
	return down_interruptible(m);
}
154 
/**
 * __qdf_semaphore_release() - release semaphore
 * @m: Semaphore object
 *
 * Return: none
 */
static inline void __qdf_semaphore_release(struct semaphore *m)
{
	up(m);
}
165 
/**
 * __qdf_semaphore_acquire_timeout() - take the semaphore before timeout
 * @m: semaphore to take
 * @timeout: maximum time to wait for the semaphore, in milliseconds
 *
 * Return: 0 if the semaphore was taken, nonzero on timeout or error
 */
static inline int __qdf_semaphore_acquire_timeout(struct semaphore *m,
						  unsigned long timeout)
{
	/* down_timeout() expects jiffies, callers pass milliseconds */
	return down_timeout(m, msecs_to_jiffies(timeout));
}
179 
180 /**
181  * __qdf_spinlock_create() - initialize spin lock
182  * @lock: Spin lock object
183  *
184  * Return: QDF_STATUS_SUCCESS
185  */
186 static inline QDF_STATUS __qdf_spinlock_create(__qdf_spinlock_t *lock)
187 {
188 	spin_lock_init(&lock->spinlock);
189 	lock->flags = 0;
190 	return QDF_STATUS_SUCCESS;
191 }
192 
/* Linux spinlocks need no teardown, so destroy is a no-op */
#define __qdf_spinlock_destroy(lock)
194 
/**
 * __qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
 * @lock: Lock object
 *
 * Plain variant: does not touch IRQ or bottom-half state.
 *
 * Return: none
 */
static inline void __qdf_spin_lock(__qdf_spinlock_t *lock)
{
	spin_lock(&lock->spinlock);
}
205 
/**
 * __qdf_spin_unlock() - Unlock the spinlock and enables the Preemption
 * @lock: Lock object
 *
 * Counterpart of __qdf_spin_lock(); does not touch IRQ or BH state.
 *
 * Return: none
 */
static inline void __qdf_spin_unlock(__qdf_spinlock_t *lock)
{
	spin_unlock(&lock->spinlock);
}
216 
/**
 * __qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
 * (Preemptive) and disable IRQs
 * @lock: Lock object
 *
 * The previous IRQ state is saved in @lock->flags for the matching
 * __qdf_spin_unlock_irqrestore() call.
 *
 * Return: none
 */
static inline void __qdf_spin_lock_irqsave(__qdf_spinlock_t *lock)
{
	spin_lock_irqsave(&lock->spinlock, lock->flags);
}
228 
/**
 * __qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
 * Preemption and enable IRQ
 * @lock: Lock object
 *
 * Restores the IRQ state saved by __qdf_spin_lock_irqsave() in
 * @lock->flags.
 *
 * Return: none
 */
static inline void __qdf_spin_unlock_irqrestore(__qdf_spinlock_t *lock)
{
	spin_unlock_irqrestore(&lock->spinlock, lock->flags);
}
240 
/*
 * Synchronous versions - only for OS' that have interrupt disable.
 * Unlike the wrappers above, these operate on a raw spinlock_t and a
 * caller-supplied flags variable.
 */
#define __qdf_spin_lock_irq(_p_lock, _flags) spin_lock_irqsave(_p_lock, _flags)
#define __qdf_spin_unlock_irq(_p_lock, _flags) \
	spin_unlock_irqrestore(_p_lock, _flags)
247 
/**
 * __qdf_spin_is_locked() - check whether the spinlock is currently held
 * @lock: spinlock object
 *
 * Return: nonzero if lock is held
 */
static inline int __qdf_spin_is_locked(__qdf_spinlock_t *lock)
{
	return spin_is_locked(&lock->spinlock);
}
258 
259 /**
260  * __qdf_spin_trylock_bh() - spin trylock bottomhalf
261  * @lock: spinlock object
262  *
263  * Return: nonzero if lock is acquired
264  */
265 static inline int __qdf_spin_trylock_bh(__qdf_spinlock_t *lock)
266 {
267 	if (likely(irqs_disabled() || in_irq() || in_softirq()))
268 		return spin_trylock(&lock->spinlock);
269 
270 	if (spin_trylock_bh(&lock->spinlock)) {
271 		lock->flags |= QDF_LINUX_UNLOCK_BH;
272 		return 1;
273 	}
274 
275 	return 0;
276 }
277 
/**
 * __qdf_spin_trylock() - try to take the spinlock without blocking
 * @lock: spinlock object
 *
 * Return: nonzero if the lock was acquired, 0 otherwise
 */
static inline int __qdf_spin_trylock(__qdf_spinlock_t *lock)
{
	return spin_trylock(&lock->spinlock);
}
288 
289 /**
290  * __qdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves
291  * @lock: Lock object
292  *
293  * Return: none
294  */
295 static inline void __qdf_spin_lock_bh(__qdf_spinlock_t *lock)
296 {
297 	if (likely(irqs_disabled() || in_irq() || in_softirq())) {
298 		spin_lock(&lock->spinlock);
299 	} else {
300 		spin_lock_bh(&lock->spinlock);
301 		lock->flags |= QDF_LINUX_UNLOCK_BH;
302 	}
303 }
304 
305 /**
306  * __qdf_spin_unlock_bh() - Release the spinlock and enable bottom halves
307  * @lock: Lock object
308  *
309  * Return: none
310  */
311 static inline void __qdf_spin_unlock_bh(__qdf_spinlock_t *lock)
312 {
313 	if (unlikely(lock->flags & QDF_LINUX_UNLOCK_BH)) {
314 		lock->flags &= (unsigned long)~QDF_LINUX_UNLOCK_BH;
315 		spin_unlock_bh(&lock->spinlock);
316 	} else
317 		spin_unlock(&lock->spinlock);
318 }
319 
/**
 * __qdf_spinlock_irq_exec() - Execute the input function with spinlock held
 * and interrupts disabled
 * @hdl: OS handle (unused)
 * @lock: spinlock to be held for the critical region
 * @func: critical region function that is to be executed
 * @arg: argument passed to the critical region function
 *
 * Return: Boolean status returned by the critical region function
 */
static inline bool __qdf_spinlock_irq_exec(qdf_handle_t hdl,
			__qdf_spinlock_t *lock,
			qdf_irqlocked_func_t func,
			void *arg)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&lock->spinlock, flags);
	ret = func(arg);
	spin_unlock_irqrestore(&lock->spinlock, flags);

	return ret;
}
342 
/**
 * __qdf_in_softirq() - in soft irq context
 *
 * Return: true if in softirq context else false
 */
static inline bool __qdf_in_softirq(void)
{
	return in_softirq();
}
352 
353 #ifdef __cplusplus
354 }
355 #endif /* __cplusplus */
356 
357 #endif /* __I_QDF_LOCK_H */
358