/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file qdf_lock.h
 * This file abstracts locking operations.
 */

#ifndef _QDF_LOCK_H
#define _QDF_LOCK_H

#include <qdf_types.h>
#include <qdf_mem.h>
#include <qdf_time.h>
#include <i_qdf_trace.h>

/*
 * Compile-time knobs for the lock statistics machinery.
 * All default to disabled unless the build overrides them.
 */
#ifndef QDF_LOCK_STATS
#define QDF_LOCK_STATS 0
#endif
#ifndef QDF_LOCK_STATS_DESTROY_PRINT
#define QDF_LOCK_STATS_DESTROY_PRINT 0
#endif
#ifndef QDF_LOCK_STATS_BUG_ON
#define QDF_LOCK_STATS_BUG_ON 0
#endif
#ifndef QDF_LOCK_STATS_LIST
#define QDF_LOCK_STATS_LIST 0
#endif

/*
 * Max hold time in micro seconds, 0 to disable detection.
 * NOTE: "ALOWED" (sic) is kept as-is; these names are part of the
 * public API and renaming them would break existing callers.
 */
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ 10000
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK 0

/* Stats collection itself adds overhead, so allow a longer BH hold time */
#if QDF_LOCK_STATS
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH 2000000
#else
#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH 1000000
#endif

#if !QDF_LOCK_STATS
/* Stats disabled: every hook compiles away to nothing */
struct lock_stats {};
#define BEFORE_LOCK(x...) do {} while (0)
#define AFTER_LOCK(x...) do {} while (0)
#define BEFORE_TRYLOCK(x...) do {} while (0)
#define AFTER_TRYLOCK(x...) do {} while (0)
#define BEFORE_UNLOCK(x...) do {} while (0)
#define qdf_lock_stats_create(x...) do {} while (0)
#define qdf_lock_stats_destroy(x...) do {} while (0)
#define qdf_lock_stats_init(x...) do {} while (0)
#define qdf_lock_stats_deinit(x...) do {} while (0)
#else
void qdf_lock_stats_init(void);
void qdf_lock_stats_deinit(void);
struct qdf_lock_cookie;

/**
 * struct lock_stats - per-lock contention and hold-time statistics
 * @initialization_fn: name of the function that created the lock
 * @acquired_by: name of the function currently holding the lock
 *               (NULL when not held)
 * @line: source line of the creation site
 * @acquired: total number of successful acquisitions
 * @contended: number of acquisitions that found the lock already held
 * @contention_time: accumulated wait time for contended acquisitions
 *                   (log-timestamp ticks)
 * @non_contention_time: accumulated acquire overhead when uncontended
 * @held_time: accumulated time the lock was held
 * @last_acquired: timestamp of the most recent acquisition
 * @max_contention_wait: longest single wait for the lock
 * @max_held_time: longest single hold of the lock
 * @num_large_contentions: waits exceeding LARGE_CONTENTION
 * @num_large_holds: holds exceeding LARGE_CONTENTION
 * @cookie: tracking cookie used when QDF_LOCK_STATS_LIST is enabled
 */
struct lock_stats {
	const char *initialization_fn;
	const char *acquired_by;
	int line;
	int acquired;
	int contended;
	uint64_t contention_time;
	uint64_t non_contention_time;
	uint64_t held_time;
	uint64_t last_acquired;
	uint64_t max_contention_wait;
	uint64_t max_held_time;
	int num_large_contentions;
	int num_large_holds;
	struct qdf_lock_cookie *cookie;
};

/* Threshold (in log-timestamp ticks) for a "large" contention or hold */
#define LARGE_CONTENTION QDF_LOG_TIMESTAMP_CYCLES_PER_10_US

/*
 * BEFORE_LOCK/AFTER_LOCK must always be used as a pair bracketing the
 * acquire call: BEFORE_LOCK deliberately opens a "do {" scope (its
 * trailing "do {} while (0)" only swallows the caller's semicolon) and
 * AFTER_LOCK closes that scope with "} while (0)". The locals declared
 * here are visible to AFTER_LOCK for timing the acquisition.
 */
#define BEFORE_LOCK(lock, was_locked) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time; \
	bool BEFORE_LOCK_is_locked = was_locked; \
	BEFORE_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	do {} while (0)

#define AFTER_LOCK(lock, func) \
	lock->stats.acquired_by = func; \
	AFTER_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	lock->stats.acquired++; \
	lock->stats.last_acquired = AFTER_LOCK_time; \
	if (BEFORE_LOCK_is_locked) { \
		lock->stats.contended++; \
		lock->stats.contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} else { \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
	} \
	\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > LARGE_CONTENTION) \
		lock->stats.num_large_contentions++; \
	\
	if (AFTER_LOCK_time - BEFORE_LOCK_time > \
	    lock->stats.max_contention_wait) \
		lock->stats.max_contention_wait = \
			AFTER_LOCK_time - BEFORE_LOCK_time; \
} while (0)

/* BEFORE_TRYLOCK/AFTER_TRYLOCK pair up the same way as BEFORE/AFTER_LOCK */
#define BEFORE_TRYLOCK(lock) \
do { \
	uint64_t BEFORE_LOCK_time; \
	uint64_t AFTER_LOCK_time; \
	BEFORE_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	do {} while (0)

#define AFTER_TRYLOCK(lock, trylock_return, func) \
	AFTER_LOCK_time = qdf_get_log_timestamp_lightweight(); \
	if (trylock_return) { \
		lock->stats.acquired++; \
		lock->stats.last_acquired = AFTER_LOCK_time; \
		lock->stats.non_contention_time += \
			(AFTER_LOCK_time - BEFORE_LOCK_time); \
		lock->stats.acquired_by = func; \
	} \
} while (0)

/*
 * Record hold time at release; max_hold_time is in US.
 * Unlike the lock pair above, this macro is self-contained.
 * When QDF_LOCK_STATS_BUG_ON is set and the lock was held longer than
 * max_hold_time, this traces an error and asserts.
 */
#define BEFORE_UNLOCK(lock, max_hold_time) \
do {\
	uint64_t BEFORE_UNLOCK_time; \
	uint64_t held_time; \
	BEFORE_UNLOCK_time = qdf_get_log_timestamp_lightweight(); \
	\
	if (unlikely(BEFORE_UNLOCK_time < lock->stats.last_acquired)) \
		held_time = 0; \
	else \
		held_time = BEFORE_UNLOCK_time - lock->stats.last_acquired; \
	\
	lock->stats.held_time += held_time; \
	\
	if (held_time > lock->stats.max_held_time) \
		lock->stats.max_held_time = held_time; \
	\
	if (held_time > LARGE_CONTENTION) \
		lock->stats.num_large_holds++; \
	if (QDF_LOCK_STATS_BUG_ON && max_hold_time && \
	    held_time > qdf_usecs_to_log_timestamp(max_hold_time)) { \
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, \
			"BEFORE_UNLOCK: lock held too long (%lluus)", \
			qdf_log_timestamp_to_usecs(held_time)); \
		QDF_BUG(0); \
	} \
	lock->stats.acquired_by = NULL; \
} while (0)

void qdf_lock_stats_cookie_destroy(struct lock_stats *stats);
void qdf_lock_stats_cookie_create(struct lock_stats *stats,
				  const char *func, int line);

/**
 * qdf_lock_stats_destroy() - tear down a stats block, optionally dumping it
 * @stats: the stats block to destroy
 *
 * When QDF_LOCK_STATS_DESTROY_PRINT is enabled, the accumulated counters
 * are emitted via QDF_TRACE before teardown.
 */
static inline void qdf_lock_stats_destroy(struct lock_stats *stats)
{
	if (QDF_LOCK_STATS_DESTROY_PRINT) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
			"%s: lock: %s %d \t"
			"acquired:\t%d\tcontended:\t%d\t"
			"contention_time\t%llu\tmax_contention_wait:\t%llu\t"
			"non_contention_time\t%llu\t"
			"held_time\t%llu\tmax_held:\t%llu"
			, __func__, stats->initialization_fn, stats->line,
			stats->acquired, stats->contended,
			qdf_log_timestamp_to_usecs(stats->contention_time),
			qdf_log_timestamp_to_usecs(stats->max_contention_wait),
			qdf_log_timestamp_to_usecs(stats->non_contention_time),
			qdf_log_timestamp_to_usecs(stats->held_time),
			qdf_log_timestamp_to_usecs(stats->max_held_time));
	}

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_destroy(stats);
}

#ifndef MEMORY_DEBUG
#define qdf_mem_malloc_debug(x, y, z) qdf_mem_malloc(x)
#endif

/**
 * qdf_lock_stats_create() - initialize the lock stats structure
 * @stats: the stats block to initialize
 * @func: name of the function creating the lock
 * @line: source line of the creation site
 */
static inline void qdf_lock_stats_create(struct lock_stats *stats,
					 const char *func, int line)
{
	qdf_mem_zero(stats, sizeof(*stats));
	stats->initialization_fn = func;
	stats->line = line;

	if (QDF_LOCK_STATS_LIST)
		qdf_lock_stats_cookie_create(stats, func, line);
}
#endif /* QDF_LOCK_STATS */

#include <i_qdf_lock.h>

#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1

/**
 * qdf_semaphore_acquire_timeout() - Take the semaphore before timeout
 * @m: semaphore to take
 * @timeout: maximum time to try to take the semaphore
 * Return: int
 */
static inline int qdf_semaphore_acquire_timeout(struct semaphore *m,
						unsigned long timeout)
{
	return __qdf_semaphore_acquire_timeout(m, timeout);
}

/**
 * struct qdf_spinlock - platform spinlock plus its statistics block
 * @lock: the underlying platform spinlock
 * @stats: contention/hold statistics (empty struct when stats disabled)
 */
struct qdf_spinlock {
	__qdf_spinlock_t lock;
	struct lock_stats stats;
};

/**
 * @brief Platform spinlock object
 */
typedef struct qdf_spinlock qdf_spinlock_t;


/**
 * @brief Platform mutex object
 */
typedef __qdf_semaphore_t qdf_semaphore_t;
typedef __qdf_mutex_t qdf_mutex_t;

/* function Declaration */

/**
 * qdf_mutex_create() - initialize a mutex
 * @m: mutex to initialize
 * @func: caller function (captured by the wrapper macro below)
 * @line: caller line (captured by the wrapper macro below)
 * Return: QDF_STATUS
 */
QDF_STATUS qdf_mutex_create(qdf_mutex_t *m, const char *func, int line);
/* Shadowing macro auto-fills the caller's __func__/__LINE__ */
#define qdf_mutex_create(m) qdf_mutex_create(m, __func__, __LINE__)
QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *m);

QDF_STATUS qdf_mutex_release(qdf_mutex_t *m);

QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock);

/**
 * qdf_spinlock_create - Initialize a spinlock
 * @lock: spinlock object pointer
 * @func: caller function (captured by the wrapper macro below)
 * @line: caller line (captured by the wrapper macro below)
 * Return: none
 */
static inline void qdf_spinlock_create(qdf_spinlock_t *lock, const char *func,
				       int line)
{
	__qdf_spinlock_create(&lock->lock);

	/* spinlock stats create relies on the spinlock working already */
	qdf_lock_stats_create(&lock->stats, func, line);
}

/* Shadowing macro auto-fills the caller's __func__/__LINE__ */
#define qdf_spinlock_create(x) qdf_spinlock_create(x, __func__, __LINE__)

/**
 * qdf_spinlock_destroy - Delete a spinlock
 * @lock: spinlock object pointer
 * Return: none
 */
static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock)
{
	qdf_lock_stats_destroy(&lock->stats);
	__qdf_spinlock_destroy(&lock->lock);
}

/**
 * qdf_spin_is_locked() - check if the spinlock is locked
 * @lock: spinlock object
 *
 * Return: nonzero if lock is held.
 */
static inline int qdf_spin_is_locked(qdf_spinlock_t *lock)
{
	return __qdf_spin_is_locked(&lock->lock);
}

/**
 * qdf_spin_trylock_bh() - spin trylock bottomhalf
 * @lock: spinlock object
 * @func: caller function (captured by the wrapper macro below)
 *
 * Return: nonzero if lock is acquired
 */
static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock, const char *func)
{
	int trylock_return;

	BEFORE_TRYLOCK(lock);
	trylock_return = __qdf_spin_trylock_bh(&lock->lock);
	AFTER_TRYLOCK(lock, trylock_return, func);

	return trylock_return;
}
/* Shadowing macro auto-fills the caller's __func__ for stats */
#define qdf_spin_trylock_bh(lock) qdf_spin_trylock_bh(lock, __func__)

/**
 * qdf_spin_trylock() - spin trylock
 * @lock: spinlock object
 * @func: caller function (captured by the wrapper macro below)
 * Return: int
 */
static inline int qdf_spin_trylock(qdf_spinlock_t *lock, const char *func)
{
	int result = 0;

	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	result = __qdf_spin_trylock(&lock->lock);
	AFTER_LOCK(lock, func);

	return result;
}

#define qdf_spin_trylock(lock) qdf_spin_trylock(lock, __func__)

/**
 * qdf_spin_lock_bh() - locks the spinlock mutex in soft irq context
 * @lock: spinlock object pointer
 * @func: caller function (captured by the wrapper macro below)
 * Return: none
 */
static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_bh(&lock->lock);
	AFTER_LOCK(lock, func);
}

#define qdf_spin_lock_bh(lock) qdf_spin_lock_bh(lock, __func__)

/**
 * qdf_spin_unlock_bh() - unlocks the spinlock mutex in soft irq context
 * @lock: spinlock object pointer
 * Return: none
 */
static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH);
	__qdf_spin_unlock_bh(&lock->lock);
}

/**
 * qdf_spinlock_irq_exec - Execute the input function with spinlock held
 * and interrupt disabled.
 * @hdl: OS handle
 * @lock: spinlock to be held for the critical region
 * @func: critical region function that to be executed
 * @arg: context of the critical region function
 * Return: Boolean status returned by the critical region function
 */
static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl,
					 qdf_spinlock_t *lock,
					 qdf_irqlocked_func_t func, void *arg)
{
	return __qdf_spinlock_irq_exec(hdl, &lock->lock, func, arg);
}

/**
 * qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
 * @lock: Lock object
 * @func: caller function (captured by the wrapper macro below)
 *
 * Return: none
 */
static inline void qdf_spin_lock(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock(&lock->lock);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock(lock) qdf_spin_lock(lock, __func__)

/**
 * qdf_spin_unlock() - Unlock the spinlock and enables the Preemption
 * @lock: Lock object
 *
 * Return: none
 */
static inline void qdf_spin_unlock(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK);
	__qdf_spin_unlock(&lock->lock);
}

/**
 * qdf_spin_lock_irq() - Acquire a Spinlock(SMP) & save the irq state
 * @lock: Lock object
 * @flags: flags
 * @func: caller function (captured by the wrapper macro below)
 *
 * Return: none
 */
static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags,
				     const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irq(&lock->lock.spinlock, flags);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock_irq(lock, flags) qdf_spin_lock_irq(lock, flags, __func__)

/**
 * qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
 * (Preemptive) and disable IRQs
 * @lock: Lock object
 * @func: caller function (captured by the wrapper macro below)
 *
 * Return: none
 */
static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock, const char *func)
{
	BEFORE_LOCK(lock, qdf_spin_is_locked(lock));
	__qdf_spin_lock_irqsave(&lock->lock);
	AFTER_LOCK(lock, func);
}
#define qdf_spin_lock_irqsave(lock) qdf_spin_lock_irqsave(lock, __func__)

/**
 * qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
 * Preemption and enable IRQ
 * @lock: Lock object
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irqrestore(&lock->lock);
}

/**
 * qdf_spin_unlock_irq() - Unlock a Spinlock(SMP) & save the restore state
 * @lock: Lock object
 * @flags: flags
 *
 * Return: none
 */
static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock,
				       unsigned long flags)
{
	BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ);
	__qdf_spin_unlock_irq(&lock->lock.spinlock, flags);
}

/**
 * qdf_semaphore_init() - initialize a semaphore
 * @m: Semaphore to initialize
 * Return: None
 */
static inline void qdf_semaphore_init(qdf_semaphore_t *m)
{
	__qdf_semaphore_init(m);
}

/**
 * qdf_semaphore_acquire() - take the semaphore
 * @m: Semaphore to take
 * Return: int
 */
static inline int qdf_semaphore_acquire(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire(m);
}

/**
 * qdf_semaphore_release() - give the semaphore
 * @m: Semaphore to give
 * Return: None
 */
static inline void qdf_semaphore_release(qdf_semaphore_t *m)
{
	__qdf_semaphore_release(m);
}

/**
 * qdf_semaphore_acquire_intr - Take the semaphore, interruptible version
 * @m: mutex to take
 * Return: int
 */
static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m)
{
	return __qdf_semaphore_acquire_intr(m);
}

#ifdef WLAN_WAKE_LOCK_DEBUG
/**
 * qdf_wake_lock_check_for_leaks() - assert no wake lock leaks
 *
 * Return: None
 */
void qdf_wake_lock_check_for_leaks(void);

/**
 * qdf_wake_lock_feature_init() - global init logic for wake lock
 *
 * Return: None
 */
void qdf_wake_lock_feature_init(void);

/**
 * qdf_wake_lock_feature_deinit() - global de-init logic for wake lock
 *
 * Return: None
 */
void qdf_wake_lock_feature_deinit(void);
#else
static inline void qdf_wake_lock_check_for_leaks(void) { }
static inline void qdf_wake_lock_feature_init(void) { }
static inline void qdf_wake_lock_feature_deinit(void) { }
#endif /* WLAN_WAKE_LOCK_DEBUG */

/**
 * __qdf_wake_lock_create() - initialize a wake lock
 * @lock: The wake lock to initialize
 * @name: Name of wake lock
 * @func: caller function
 * @line: caller line
 * Return:
 * QDF status success: if wake lock is initialized
 * QDF status failure: if wake lock was not initialized
 */
QDF_STATUS __qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name,
				  const char *func, uint32_t line);

/**
 * qdf_wake_lock_create() - initialized a wakeup source lock
 * @lock: the wakeup source lock to initialize
 * @name: the name of wakeup source lock
 *
 * Return: QDF_STATUS
 */
#define qdf_wake_lock_create(lock, name) \
	__qdf_wake_lock_create(lock, name, __func__, __LINE__)

QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason);

const char *qdf_wake_lock_name(qdf_wake_lock_t *lock);
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock,
					 uint32_t msec);

QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason);

/**
 * __qdf_wake_lock_destroy() - destroy a wake lock
 * @lock: The wake lock to destroy
 * @func: caller function
 * @line: caller line
 *
 * Return: None
 */
void __qdf_wake_lock_destroy(qdf_wake_lock_t *lock,
			     const char *func, uint32_t line);

/**
 * qdf_wake_lock_destroy() - deinitialize a wakeup source lock
 * @lock: the wakeup source lock to de-initialize
 *
 * Return: None
 */
#define qdf_wake_lock_destroy(lock) \
	__qdf_wake_lock_destroy(lock, __func__, __LINE__)

void qdf_pm_system_wakeup(void);

/* Runtime PM reference counting and per-client suspend prevention */
QDF_STATUS qdf_runtime_pm_get(void);
QDF_STATUS qdf_runtime_pm_put(void);
QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock);
QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock);

QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);

/* Names the runtime lock after the variable passed in */
#define qdf_runtime_lock_init(lock) __qdf_runtime_lock_init(lock, #lock)

void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock);

QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock);

QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock);
#endif /* _QDF_LOCK_H */