/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file qdf_lock.h
 * This file abstracts locking operations.
 */

#ifndef _QDF_LOCK_H
#define _QDF_LOCK_H

#include <qdf_types.h>
#include <qdf_mem.h>
#include <qdf_time.h>
#include <i_qdf_trace.h>

/*
 * Compile-time knobs for the lock-statistics machinery.
 * All default to disabled unless the build defines them.
 */
#ifndef QDF_LOCK_STATS
#define QDF_LOCK_STATS 0
#endif
#ifndef QDF_LOCK_STATS_DESTROY_PRINT
#define QDF_LOCK_STATS_DESTROY_PRINT 0
#endif
#ifndef QDF_LOCK_STATS_BUG_ON
#define QDF_LOCK_STATS_BUG_ON 0
#endif
#ifndef QDF_LOCK_STATS_LIST
#define QDF_LOCK_STATS_LIST 0
#endif

/* Max hold time in micro seconds, 0 to disable detection */
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ 10000
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK 0

/* Stats bookkeeping itself costs time, so allow a longer BH hold time
 * when QDF_LOCK_STATS is enabled.
 */
#if QDF_LOCK_STATS
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH 2000000
#else
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH 1000000
#endif

#if !QDF_LOCK_STATS
/* Stats disabled: empty struct and no-op hooks keep call sites unchanged */
struct lock_stats {};
#define BEFORE_LOCK(x...) do {} while (0)
#define AFTER_LOCK(x...) do {} while (0)
#define BEFORE_TRYLOCK(x...) do {} while (0)
#define AFTER_TRYLOCK(x...) do {} while (0)
#define BEFORE_UNLOCK(x...) do {} while (0)
#define qdf_lock_stats_create(x...) do {} while (0)
#define qdf_lock_stats_destroy(x...) do {} while (0)
#define qdf_lock_stats_init(x...) do {} while (0)
#define qdf_lock_stats_deinit(x...) do {} while (0)
#else
void qdf_lock_stats_init(void);
void qdf_lock_stats_deinit(void);
struct qdf_lock_cookie;

/**
 * struct lock_stats - per-lock contention and hold-time accounting
 * @initialization_fn: function that created the lock
 * @acquired_by: function currently holding the lock (NULL when unlocked)
 * @line: line number at which the lock was created
 * @acquired: total number of acquisitions
 * @contended: number of acquisitions that found the lock already held
 * @contention_time: accumulated wait time for contended acquisitions
 * @non_contention_time: accumulated wait time for uncontended acquisitions
 * @held_time: accumulated time the lock was held
 * @last_acquired: timestamp of the most recent acquisition
 * @max_contention_wait: longest single wait for the lock
 * @max_held_time: longest single hold of the lock
 * @num_large_contentions: waits exceeding LARGE_CONTENTION
 * @num_large_holds: holds exceeding LARGE_CONTENTION
 * @cookie: tracking cookie used by the QDF_LOCK_STATS_LIST feature
 *
 * All time values are in qdf log-timestamp units.
 */
struct lock_stats {
	const char *initialization_fn;
	const char *acquired_by;
	int line;
	int acquired;
	int contended;
	uint64_t contention_time;
	uint64_t non_contention_time;
	uint64_t held_time;
	uint64_t last_acquired;
	uint64_t max_contention_wait;
	uint64_t max_held_time;
	int num_large_contentions;
	int num_large_holds;
	struct qdf_lock_cookie *cookie;
};

/* Threshold (log-timestamp units) above which a wait/hold counts as "large" */
#define LARGE_CONTENTION QDF_LOG_TIMESTAMP_CYCLES_PER_10_US

/*
 * BEFORE_LOCK/AFTER_LOCK must be used as a pair in the same scope:
 * BEFORE_LOCK opens a do{ block and declares the timestamp locals that
 * AFTER_LOCK consumes before closing the block with }while(0).
 */
#define BEFORE_LOCK(lock, was_locked) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time; \
	bool BEFORE_LOCK_is_locked = was_locked; \
	BEFORE_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	do {} while (0)


#define AFTER_LOCK(lock, func) \
	lock->stats.acquired_by = func; \
	AFTER_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	lock->stats.acquired++; \
	lock->stats.last_acquired = AFTER_LOCK_time; \
	if (BEFORE_LOCK_is_locked) { \
		lock->stats.contended++; \
		lock->stats.contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} else { \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > LARGE_CONTENTION) \
		lock->stats.num_large_contentions++; \
\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > \
	    lock->stats.max_contention_wait) \
		lock->stats.max_contention_wait = \
			AFTER_LOCK_time - BEFORE_LOCK_time; \
} while (0)

/* BEFORE_TRYLOCK/AFTER_TRYLOCK form a pair like BEFORE_LOCK/AFTER_LOCK */
#define BEFORE_TRYLOCK(lock) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time; \
	BEFORE_LOCK_time = 
qdf_get_log_timestamp_lightweight(); \
	do {} while (0)

#define AFTER_TRYLOCK(lock, trylock_return, func) \
	AFTER_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	if (trylock_return) { \
		lock->stats.acquired++; \
		lock->stats.last_acquired = AFTER_LOCK_time; \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
		lock->stats.acquired_by = func; \
	} \
} while (0)

/* max_hold_time in US; 0 disables the hold-time assertion */
#define BEFORE_UNLOCK(lock, max_hold_time) \
do {\
	uint64_t held_time = qdf_get_log_timestamp_lightweight() - \
		lock->stats.last_acquired; \
	lock->stats.held_time += held_time; \
\
	if (held_time > lock->stats.max_held_time) \
		lock->stats.max_held_time = held_time; \
\
	if (held_time > LARGE_CONTENTION) \
		lock->stats.num_large_holds++; \
	if (QDF_LOCK_STATS_BUG_ON && max_hold_time && \
	    held_time > qdf_usecs_to_log_timestamp(max_hold_time)) { \
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, \
			"BEFORE_UNLOCK: lock held too long (%lluus)", \
			qdf_log_timestamp_to_usecs(held_time)); \
		QDF_BUG(0); \
	} \
	lock->stats.acquired_by = NULL; \
} while (0)

void qdf_lock_stats_cookie_destroy(struct lock_stats *stats);
void qdf_lock_stats_cookie_create(struct lock_stats *stats,
				  const char *func, int line);

/**
 * qdf_lock_stats_destroy() - tear down the stats for a lock
 * @stats: lock stats to destroy
 *
 * Optionally prints the accumulated statistics (when
 * QDF_LOCK_STATS_DESTROY_PRINT is enabled) and releases the tracking
 * cookie (when QDF_LOCK_STATS_LIST is enabled).
 *
 * Return: none
 */
static inline void qdf_lock_stats_destroy(struct lock_stats *stats)
{
	if (QDF_LOCK_STATS_DESTROY_PRINT) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
			"%s: lock: %s %d \t"
			"acquired:\t%d\tcontended:\t%d\t"
			"contention_time\t%llu\tmax_contention_wait:\t%llu\t"
			"non_contention_time\t%llu\t"
			"held_time\t%llu\tmax_held:\t%llu"
			, __func__, stats->initialization_fn, stats->line,
			stats->acquired, stats->contended,
			qdf_log_timestamp_to_usecs(stats->contention_time),
			qdf_log_timestamp_to_usecs(stats->max_contention_wait),
			qdf_log_timestamp_to_usecs(stats->non_contention_time),
			qdf_log_timestamp_to_usecs(stats->held_time),
			qdf_log_timestamp_to_usecs(stats->max_held_time));
	}

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_destroy(stats);
}

#ifndef MEMORY_DEBUG
#define qdf_mem_malloc_debug(x, y, z) qdf_mem_malloc(x)
#endif

/* qdf_lock_stats_create() - initialize the lock stats structure
 * @stats: lock stats to initialize
 * @func: creating function, recorded for later reporting
 * @line: creating line number, recorded for later reporting
 */
static inline void qdf_lock_stats_create(struct lock_stats *stats,
					 const char *func, int line)
{
	qdf_mem_zero(stats, sizeof(*stats));
	stats->initialization_fn = func;
	stats->line = line;

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_create(stats, func, line);
}
#endif

#include <i_qdf_lock.h>

#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1

/**
 * qdf_semaphore_acquire_timeout() - Take the semaphore before timeout
 * @m: semaphore to take
 * @timeout: maximum time to try to take the semaphore
 * Return: int
 */
static inline int qdf_semaphore_acquire_timeout(struct semaphore *m,
						unsigned long timeout)
{
	return __qdf_semaphore_acquire_timeout(m, timeout);
}

/* Platform spinlock plus its (possibly empty) statistics block */
struct qdf_spinlock {
	__qdf_spinlock_t lock;
	struct lock_stats stats;
};

/**
 * @brief Platform spinlock object
 */
typedef struct qdf_spinlock qdf_spinlock_t;


/**
 * @brief Platform mutex object
 */
typedef __qdf_semaphore_t qdf_semaphore_t;
typedef __qdf_mutex_t qdf_mutex_t;

/* function Declaration */
QDF_STATUS qdf_mutex_create(qdf_mutex_t *m, const char *func, int line);
/* Callers use the macro so creation site is recorded automatically */
#define qdf_mutex_create(m) qdf_mutex_create(m, __func__, __LINE__)

QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *m);

QDF_STATUS qdf_mutex_release(qdf_mutex_t *m);

QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock);

/**
 * qdf_spinlock_create - Initialize a spinlock
 * @lock: spinlock object pointer
 * @func: caller function, recorded in the lock stats
 * @line: caller line, recorded in the lock stats
 * Return: none
 */
static inline void qdf_spinlock_create(qdf_spinlock_t *lock, const char *func,
				       int line)
{
	__qdf_spinlock_create(&lock->lock);

	/* spinlock stats create relies on the spinlock working already */
	qdf_lock_stats_create(&lock->stats, func, line);
}

/* Callers use the macro so creation site is recorded automatically */
#define qdf_spinlock_create(x) qdf_spinlock_create(x, __func__, __LINE__)

/**
 * qdf_spinlock_destroy - Delete a spinlock
 * @lock: spinlock object pointer
 * Return: none
 */
static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock)
{
	qdf_lock_stats_destroy(&lock->stats);
	__qdf_spinlock_destroy(&lock->lock);
}

/**
 * qdf_spin_is_locked() - check if the spinlock is locked
 * @lock: spinlock object
 *
 * Return: nonzero if lock is held.
 */
static inline int qdf_spin_is_locked(qdf_spinlock_t *lock)
{
	return __qdf_spin_is_locked(&lock->lock);
}

/**
 * qdf_spin_trylock_bh() - spin trylock bottomhalf
 * @lock: spinlock object
 * @func: caller function, recorded in the lock stats
 *
 * Return: nonzero if lock is acquired
 */
static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock, const char *func)
{
	int trylock_return;

	BEFORE_TRYLOCK(lock);
	trylock_return = __qdf_spin_trylock_bh(&lock->lock);
	AFTER_TRYLOCK(lock, trylock_return, func);

	return trylock_return;
}
#define qdf_spin_trylock_bh(lock) qdf_spin_trylock_bh(lock, __func__)

/**
 * qdf_spin_trylock() - spin trylock
 * @lock: spinlock object
 * @func: caller function, recorded in the lock stats
 * Return: int
 *
 * NOTE(review): unlike qdf_spin_trylock_bh(), this uses the
 * BEFORE_LOCK/AFTER_LOCK pair, so AFTER_LOCK updates the "acquired"
 * statistics even when __qdf_spin_trylock() fails — confirm whether
 * the AFTER_TRYLOCK path was intended here.
 */
static inline int qdf_spin_trylock(qdf_spinlock_t *lock, const char *func)
{
	int result = 0;

	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	result = __qdf_spin_trylock(&lock->lock);
	AFTER_LOCK(lock, func);

	return result;
}

#define qdf_spin_trylock(lock) qdf_spin_trylock(lock, __func__)

/**
 * 
qdf_spin_lock_bh() - locks the spinlock mutex in soft irq context
 * @lock: spinlock object pointer
 * @func: caller function, recorded in the lock stats
 * Return: none
 */
static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_bh(&lock->lock);
	AFTER_LOCK(lock, func);
}

#define qdf_spin_lock_bh(lock) qdf_spin_lock_bh(lock, __func__)

/**
 * qdf_spin_unlock_bh() - unlocks the spinlock mutex in soft irq context
 * @lock: spinlock object pointer
 * Return: none
 */
static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock)
{
	/* BH locks tolerate a longer hold than IRQ locks */
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH);
	__qdf_spin_unlock_bh(&lock->lock);
}

/**
 * qdf_spinlock_irq_exec - Execute the input function with spinlock held
 * and interrupt disabled.
 * @hdl: OS handle
 * @lock: spinlock to be held for the critical region
 * @func: critical region function that to be executed
 * @arg: context of the critical region function
 * Return: Boolean status returned by the critical region function
 */
static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl,
					 qdf_spinlock_t *lock,
					 qdf_irqlocked_func_t func, void *arg)
{
	return __qdf_spinlock_irq_exec(hdl, &lock->lock, func, arg);
}

/**
 * qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
 * @lock: Lock object
 * @func: caller function, recorded in the lock stats
 *
 * Return: none
 */
static inline void qdf_spin_lock(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock(&lock->lock);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock(lock) qdf_spin_lock(lock, __func__)

/**
 * qdf_spin_unlock() - Unlock the spinlock and enables the Preemption
 * @lock: Lock object
 *
 * Return: none
 */
static inline void qdf_spin_unlock(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, 
QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK);
	__qdf_spin_unlock(&lock->lock);
}

/**
 * qdf_spin_lock_irq() - Acquire a Spinlock(SMP) & save the irq state
 * @lock: Lock object
 * @flags: flags
 * @func: caller function, recorded in the lock stats
 *
 * Return: none
 */
static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags,
				     const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irq(&lock->lock.spinlock, flags);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock_irq(lock, flags) qdf_spin_lock_irq(lock, flags, __func__)

/**
 * qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
 * (Preemptive) and disable IRQs
 * @lock: Lock object
 * @func: caller function, recorded in the lock stats
 *
 * Return: none
 */
static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irqsave(&lock->lock);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock_irqsave(lock) qdf_spin_lock_irqsave(lock, __func__)

/**
 * qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
 * Preemption and enable IRQ
 * @lock: Lock object
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock)
{
	/* IRQ locks have the tightest hold-time budget */
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irqrestore(&lock->lock);
}

/**
 * qdf_spin_unlock_irq() - Unlock a Spinlock(SMP) & save the restore state
 * @lock: Lock object
 * @flags: flags
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock,
				       unsigned long flags)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irq(&lock->lock.spinlock, flags);
}

/**
 * qdf_semaphore_init() - initialize a semaphore
 * @m: Semaphore to initialize
 * Return: None
 */
static inline void qdf_semaphore_init(qdf_semaphore_t *m)
{
	__qdf_semaphore_init(m);
}

/**
 * qdf_semaphore_acquire() - take the semaphore
 * @m: Semaphore to take
 * Return: int
 */
static inline int qdf_semaphore_acquire(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire(m);
}

/**
 * qdf_semaphore_release() - give the semaphore
 * @m: Semaphore to give
 * Return: None
 */
static inline void qdf_semaphore_release(qdf_semaphore_t *m)
{
	__qdf_semaphore_release(m);
}

/**
 * qdf_semaphore_acquire_intr - Take the semaphore, interruptible version
 * @m: mutex to take
 * Return: int
 */
static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire_intr(m);
}

#ifdef WLAN_WAKE_LOCK_DEBUG
/**
 * qdf_wake_lock_check_for_leaks() - assert no wake lock leaks
 *
 * Return: None
 */
void qdf_wake_lock_check_for_leaks(void);

/**
 * qdf_wake_lock_feature_init() - global init logic for wake lock
 *
 * Return: None
 */
void qdf_wake_lock_feature_init(void);

/**
 * qdf_wake_lock_feature_deinit() - global de-init logic for wake lock
 *
 * Return: None
 */
void qdf_wake_lock_feature_deinit(void);
#else
/* Debug feature disabled: all hooks are no-ops */
static inline void qdf_wake_lock_check_for_leaks(void) { }
static inline void qdf_wake_lock_feature_init(void) { }
static inline void qdf_wake_lock_feature_deinit(void) { }
#endif /* WLAN_WAKE_LOCK_DEBUG */

/**
 * __qdf_wake_lock_create() - initialize a wake lock
 * @lock: The wake lock to initialize
 * @name: Name of wake lock
 * @func: caller function
 * @line: caller line
 * Return:
 * QDF status success: if wake lock is initialized
 * QDF status failure: if wake lock was not initialized
 */
QDF_STATUS __qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name,
				  const char *func, uint32_t line);

/**
 * 
qdf_wake_lock_create() - initialized a wakeup source lock
 * @lock: the wakeup source lock to initialize
 * @name: the name of wakeup source lock
 *
 * Return: QDF_STATUS
 */
#define qdf_wake_lock_create(lock, name) \
	__qdf_wake_lock_create(lock, name, __func__, __LINE__)

QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason);

const char *qdf_wake_lock_name(qdf_wake_lock_t *lock);
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock,
					 uint32_t msec);

QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason);

/**
 * __qdf_wake_lock_destroy() - destroy a wake lock
 * @lock: The wake lock to destroy
 * @func: caller function
 * @line: caller line
 *
 * Return: None
 */
void __qdf_wake_lock_destroy(qdf_wake_lock_t *lock,
			     const char *func, uint32_t line);

/**
 * qdf_wake_lock_destroy() - deinitialize a wakeup source lock
 * @lock: the wakeup source lock to de-initialize
 *
 * Return: None
 */
#define qdf_wake_lock_destroy(lock) \
	__qdf_wake_lock_destroy(lock, __func__, __LINE__)

void qdf_pm_system_wakeup(void);

/* Runtime power management: get/put references and per-lock suspend votes */
QDF_STATUS qdf_runtime_pm_get(void);
QDF_STATUS qdf_runtime_pm_put(void);
QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock);
QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock);

QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);

/* Stringize the lock variable itself as the runtime lock's name */
#define qdf_runtime_lock_init(lock) __qdf_runtime_lock_init(lock, #lock)

void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock);

QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock);

QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock);
#endif /* _QDF_LOCK_H */