/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_lock.h
 * Linux-specific definitions for QDF Lock API's
 */

#if !defined(__I_QDF_LOCK_H)
#define __I_QDF_LOCK_H

/* Include Files */
#include <qdf_types.h>
#include <qdf_status.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
/* semaphore.h moved from asm/ to linux/ in kernel 2.6.27 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
#include <asm/semaphore.h>
#else
#include <linux/semaphore.h>
#endif
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

/*
 * Flag bit stored in __qdf_spinlock::flags by __qdf_spin_lock_bh() and
 * __qdf_spin_trylock_bh() to record that bottom halves were disabled by
 * the lock call and must be re-enabled by __qdf_spin_unlock_bh().
 */
#define QDF_LINUX_UNLOCK_BH 1

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/*
 * Mutex debug-state values. The large magic base value makes a valid
 * state word distinguishable from zeroed or corrupted memory.
 */
enum {
	LOCK_RELEASED = 0x11223344,
	LOCK_ACQUIRED,
	LOCK_DESTROYED
};

/**
 * struct qdf_lock_s - mutex wrapper with debug/tracking state
 * (typedef'd below as __qdf_mutex_t)
 * @m_lock: underlying Linux mutex
 * @cookie: lock cookie, used to validate the lock was initialized
 * @process_id: process ID tracking the current lock holder
 * @state: lock status (one of LOCK_RELEASED/LOCK_ACQUIRED/LOCK_DESTROYED)
 * @refcount: reference count for recursive lock use
 * @stats: usage statistics
 *
 * NOTE(review): struct lock_stats is not defined in this header —
 * presumably declared in qdf_lock.h; confirm the include order guarantees
 * its visibility here.
 */
struct qdf_lock_s {
	struct mutex m_lock;
	uint32_t cookie;
	int process_id;
	uint32_t state;
	uint8_t refcount;
	struct lock_stats stats;
};

typedef struct qdf_lock_s __qdf_mutex_t;

/**
 * typedef __qdf_spinlock_t - spinlock plus saved lock-time state
 * @spinlock: underlying Linux spinlock
 * @flags: dual-purpose state word: saved IRQ flags for the
 *         irqsave/irqrestore variants, and the QDF_LINUX_UNLOCK_BH
 *         marker bit for the _bh variants
 */
typedef struct __qdf_spinlock {
	spinlock_t spinlock;
	unsigned long flags;
} __qdf_spinlock_t;

typedef struct semaphore __qdf_semaphore_t;

typedef struct wakeup_source qdf_wake_lock_t;

/* Opaque runtime-PM lock; defined by the HIF layer elsewhere. */
struct hif_pm_runtime_lock;
typedef struct qdf_runtime_lock {
	struct hif_pm_runtime_lock *lock;
} qdf_runtime_lock_t;

/*
 * Magic cookie value — presumably written into qdf_lock_s.cookie when a
 * mutex is created and checked on acquire/release; confirm in qdf_lock.c.
 */
#define LINUX_LOCK_COOKIE 0x12345678

/* Function declarations and documentation */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
/**
 * __qdf_semaphore_init() - initialize the semaphore with a count of 1
 * @m: Semaphore object
 *
 * Uses init_MUTEX() on pre-2.6.37 kernels, where sema_init() was not
 * yet the standard initializer.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
{
	init_MUTEX(m);
	return QDF_STATUS_SUCCESS;
}
#else
/**
 * __qdf_semaphore_init() - initialize the semaphore with a count of 1
 * @m: Semaphore object
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
{
	sema_init(m, 1);
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * __qdf_semaphore_acquire() - acquire semaphore, blocking uninterruptibly
 * @m: Semaphore object
 *
 * Return: 0 (always; down() does not fail)
 */
static inline int __qdf_semaphore_acquire(struct semaphore *m)
{
	down(m);
	return 0;
}

/**
 * __qdf_semaphore_acquire_intr() - acquire semaphore, interruptible wait
 * @m: Semaphore object
 *
 * down_interruptible() allows a process waiting on the semaphore to be
 * interrupted by a signal. If the wait is interrupted, a nonzero value
 * is returned and the caller does NOT hold the semaphore — callers must
 * always check the return value and respond accordingly.
 *
 * Return: 0 if the semaphore was acquired, nonzero if interrupted
 */
static inline int __qdf_semaphore_acquire_intr(struct semaphore *m)
{
	return down_interruptible(m);
}

/**
 * __qdf_semaphore_release() - release semaphore
 * @m: Semaphore object
 *
 * Return: none
 */
static inline void __qdf_semaphore_release(struct semaphore *m)
{
	up(m);
}

/**
 * __qdf_semaphore_acquire_timeout() - try to take the semaphore,
 * giving up after a timeout
 * @m: semaphore to take
 * @timeout: maximum time to wait, in milliseconds (converted to jiffies
 *           before being passed to down_timeout())
 *
 * Return: 0 if acquired, negative error code on timeout/failure
 *         (result of down_timeout())
 */
static inline int __qdf_semaphore_acquire_timeout(struct semaphore *m,
						  unsigned long timeout)
{
	unsigned long jiffie_val = msecs_to_jiffies(timeout);

	return down_timeout(m, jiffie_val);
}

/**
 * __qdf_spinlock_create() - initialize spin lock
 * @lock: Spin lock object
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS __qdf_spinlock_create(__qdf_spinlock_t *lock)
{
	spin_lock_init(&lock->spinlock);
	lock->flags = 0;
	return QDF_STATUS_SUCCESS;
}

/* No-op: Linux spinlocks require no teardown */
#define __qdf_spinlock_destroy(lock)

/**
 * __qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
 * @lock: Lock object
 *
 * Return: none
 */
static inline void __qdf_spin_lock(__qdf_spinlock_t *lock)
{
	spin_lock(&lock->spinlock);
}

/**
 * __qdf_spin_unlock() - Unlock the spinlock and enables the Preemption
 * @lock: Lock object
 *
 * Return: none
 */
static inline void __qdf_spin_unlock(__qdf_spinlock_t *lock)
{
	spin_unlock(&lock->spinlock);
}

/**
 * __qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
 * (Preemptive) and disable IRQs
 * @lock: Lock object
 *
 * The saved IRQ flags are stored in @lock itself, so the matching
 * __qdf_spin_unlock_irqrestore() must be called on the same lock.
 *
 * Return: none
 */
static inline void __qdf_spin_lock_irqsave(__qdf_spinlock_t *lock)
{
	spin_lock_irqsave(&lock->spinlock, lock->flags);
}

/**
 * __qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
 * Preemption and enable IRQ
 * @lock: Lock object
 *
 * Restores the IRQ flags saved by __qdf_spin_lock_irqsave().
 *
 * Return: none
 */
static inline void __qdf_spin_unlock_irqrestore(__qdf_spinlock_t *lock)
{
	spin_unlock_irqrestore(&lock->spinlock, lock->flags);
}

/*
 * Synchronous versions - only for OS' that have interrupt disable.
 * Unlike the wrappers above, these take the caller-supplied flags word.
 */
#define __qdf_spin_lock_irq(_p_lock, _flags) spin_lock_irqsave(_p_lock, _flags)
#define __qdf_spin_unlock_irq(_p_lock, _flags) \
	spin_unlock_irqrestore(_p_lock, _flags)

/**
 * __qdf_spin_is_locked() - test whether the spinlock is currently held
 * @lock: spinlock object
 *
 * Return: nonzero if lock is held.
 */
static inline int __qdf_spin_is_locked(__qdf_spinlock_t *lock)
{
	return spin_is_locked(&lock->spinlock);
}

/**
 * __qdf_spin_trylock_bh() - spin trylock bottomhalf
 * @lock: spinlock object
 *
 * In IRQ/softirq context, or with IRQs already disabled, bottom halves
 * cannot preempt us, so a plain trylock suffices; otherwise bottom
 * halves are disabled and that fact is recorded in @lock->flags so
 * __qdf_spin_unlock_bh() can re-enable them.
 *
 * Return: nonzero if lock is acquired
 */
static inline int __qdf_spin_trylock_bh(__qdf_spinlock_t *lock)
{
	if (likely(irqs_disabled() || in_irq() || in_softirq()))
		return spin_trylock(&lock->spinlock);

	if (spin_trylock_bh(&lock->spinlock)) {
		lock->flags |= QDF_LINUX_UNLOCK_BH;
		return 1;
	}

	return 0;
}

/**
 * __qdf_spin_trylock() - spin trylock
 * @lock: spinlock object
 *
 * Return: nonzero if lock is acquired, 0 otherwise
 */
static inline int __qdf_spin_trylock(__qdf_spinlock_t *lock)
{
	return spin_trylock(&lock->spinlock);
}

/**
 * __qdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves
 * @lock: Lock object
 *
 * If bottom halves are already excluded by the current context
 * (IRQs disabled, hard-IRQ, or softirq context) a plain spin_lock()
 * is used; otherwise bottom halves are disabled and the
 * QDF_LINUX_UNLOCK_BH flag is set so the matching unlock knows to
 * re-enable them.
 *
 * Return: none
 */
static inline void __qdf_spin_lock_bh(__qdf_spinlock_t *lock)
{
	if (likely(irqs_disabled() || in_irq() || in_softirq())) {
		spin_lock(&lock->spinlock);
	} else {
		spin_lock_bh(&lock->spinlock);
		lock->flags |= QDF_LINUX_UNLOCK_BH;
	}
}

/**
 * __qdf_spin_unlock_bh() - Release the spinlock and enable bottom halves
 * @lock: Lock object
 *
 * Re-enables bottom halves only if the matching __qdf_spin_lock_bh()
 * disabled them (tracked via QDF_LINUX_UNLOCK_BH in @lock->flags).
 *
 * Return: none
 */
static inline void __qdf_spin_unlock_bh(__qdf_spinlock_t *lock)
{
	if (unlikely(lock->flags & QDF_LINUX_UNLOCK_BH)) {
		lock->flags &= (unsigned long)~QDF_LINUX_UNLOCK_BH;
		spin_unlock_bh(&lock->spinlock);
	} else
		spin_unlock(&lock->spinlock);
}

/**
 * __qdf_spinlock_irq_exec() - Execute the input function with spinlock held
 * and interrupt disabled.
 * @hdl: OS handle (unused here)
 * @lock: spinlock to be held for the critical region
 * @func: critical region function to be executed
 * @arg: argument passed to the critical region function
 *
 * Return: Boolean status returned by the critical region function
 */
static inline bool __qdf_spinlock_irq_exec(qdf_handle_t hdl,
					   __qdf_spinlock_t *lock,
					   qdf_irqlocked_func_t func,
					   void *arg)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&lock->spinlock, flags);
	ret = func(arg);
	spin_unlock_irqrestore(&lock->spinlock, flags);

	return ret;
}

/**
 * __qdf_in_softirq() - in soft irq context
 *
 * Return: true if in softirq context else false
 */
static inline bool __qdf_in_softirq(void)
{
	return in_softirq();
}

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* __I_QDF_LOCK_H */