/*
 * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
26 */ 27 28 #include <linux/module.h> 29 #include <qdf_lock.h> 30 #include <qdf_trace.h> 31 32 #include <qdf_types.h> 33 #ifdef CONFIG_MCL 34 #include <i_host_diag_core_event.h> 35 #include <hif.h> 36 #include <cds_api.h> 37 #endif 38 #include <i_qdf_lock.h> 39 40 /** 41 * qdf_mutex_create() - Initialize a mutex 42 * @m: mutex to initialize 43 * 44 * Returns: QDF_STATUS 45 * =0 success 46 * else fail status 47 */ 48 #undef qdf_mutex_create 49 QDF_STATUS qdf_mutex_create(qdf_mutex_t *lock, const char *func, int line) 50 { 51 /* check for invalid pointer */ 52 if (lock == NULL) { 53 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 54 "%s: NULL pointer passed in", __func__); 55 return QDF_STATUS_E_FAULT; 56 } 57 /* check for 'already initialized' lock */ 58 if (LINUX_LOCK_COOKIE == lock->cookie) { 59 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 60 "%s: already initialized lock", __func__); 61 return QDF_STATUS_E_BUSY; 62 } 63 64 if (in_interrupt()) { 65 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 66 "%s cannot be called from interrupt context!!!", 67 __func__); 68 return QDF_STATUS_E_FAULT; 69 } 70 71 qdf_lock_stats_create(&lock->stats, func, line); 72 73 /* initialize new lock */ 74 mutex_init(&lock->m_lock); 75 lock->cookie = LINUX_LOCK_COOKIE; 76 lock->state = LOCK_RELEASED; 77 lock->process_id = 0; 78 lock->refcount = 0; 79 80 return QDF_STATUS_SUCCESS; 81 } 82 EXPORT_SYMBOL(qdf_mutex_create); 83 84 /** 85 * qdf_mutex_acquire() - acquire a QDF lock 86 * @lock: Pointer to the opaque lock object to acquire 87 * 88 * A lock object is acquired by calling qdf_mutex_acquire(). If the lock 89 * is already locked, the calling thread shall block until the lock becomes 90 * available. This operation shall return with the lock object referenced by 91 * lock in the locked state with the calling thread as its owner. 
92 * 93 * Return: 94 * QDF_STATUS_SUCCESS: lock was successfully initialized 95 * QDF failure reason codes: lock is not initialized and can't be used 96 */ 97 QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *lock) 98 { 99 int rc; 100 /* check for invalid pointer */ 101 if (lock == NULL) { 102 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 103 "%s: NULL pointer passed in", __func__); 104 QDF_ASSERT(0); 105 return QDF_STATUS_E_FAULT; 106 } 107 /* check if lock refers to an initialized object */ 108 if (LINUX_LOCK_COOKIE != lock->cookie) { 109 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 110 "%s: uninitialized lock", __func__); 111 QDF_ASSERT(0); 112 return QDF_STATUS_E_INVAL; 113 } 114 115 if (in_interrupt()) { 116 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 117 "%s cannot be called from interrupt context!!!", 118 __func__); 119 QDF_ASSERT(0); 120 return QDF_STATUS_E_FAULT; 121 } 122 if ((lock->process_id == current->pid) && 123 (lock->state == LOCK_ACQUIRED)) { 124 lock->refcount++; 125 #ifdef QDF_NESTED_LOCK_DEBUG 126 pe_err("%s: %x %d %d", __func__, lock, current->pid, 127 lock->refcount); 128 #endif 129 return QDF_STATUS_SUCCESS; 130 } 131 132 BEFORE_LOCK(lock, mutex_is_locked(&lock->m_lock)); 133 /* acquire a Lock */ 134 mutex_lock(&lock->m_lock); 135 AFTER_LOCK(lock, __func__); 136 rc = mutex_is_locked(&lock->m_lock); 137 if (rc == 0) { 138 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 139 "%s: unable to lock mutex (rc = %d)", __func__, rc); 140 QDF_ASSERT(0); 141 return QDF_STATUS_E_FAILURE; 142 } 143 #ifdef QDF_NESTED_LOCK_DEBUG 144 pe_err("%s: %x %d", __func__, lock, current->pid); 145 #endif 146 if (LOCK_DESTROYED != lock->state) { 147 lock->process_id = current->pid; 148 lock->refcount++; 149 lock->state = LOCK_ACQUIRED; 150 return QDF_STATUS_SUCCESS; 151 } 152 153 /* lock is already destroyed */ 154 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 155 "%s: Lock is already destroyed", __func__); 156 mutex_unlock(&lock->m_lock); 157 
QDF_ASSERT(0); 158 return QDF_STATUS_E_FAILURE; 159 } 160 EXPORT_SYMBOL(qdf_mutex_acquire); 161 162 /** 163 * qdf_mutex_release() - release a QDF lock 164 * @lock: Pointer to the opaque lock object to be released 165 * 166 * qdf_mutex_release() function shall release the lock object 167 * referenced by 'lock'. 168 * 169 * If a thread attempts to release a lock that it unlocked or is not 170 * initialized, an error is returned. 171 * 172 * Return: 173 * QDF_STATUS_SUCCESS: lock was successfully initialized 174 * QDF failure reason codes: lock is not initialized and can't be used 175 */ 176 QDF_STATUS qdf_mutex_release(qdf_mutex_t *lock) 177 { 178 /* check for invalid pointer */ 179 if (lock == NULL) { 180 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 181 "%s: NULL pointer passed in", __func__); 182 QDF_ASSERT(0); 183 return QDF_STATUS_E_FAULT; 184 } 185 186 /* check if lock refers to an uninitialized object */ 187 if (LINUX_LOCK_COOKIE != lock->cookie) { 188 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 189 "%s: uninitialized lock", __func__); 190 QDF_ASSERT(0); 191 return QDF_STATUS_E_INVAL; 192 } 193 194 if (in_interrupt()) { 195 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 196 "%s cannot be called from interrupt context!!!", 197 __func__); 198 QDF_ASSERT(0); 199 return QDF_STATUS_E_FAULT; 200 } 201 202 /* current_thread = get_current_thread_id(); 203 * Check thread ID of caller against thread ID 204 * of the thread which acquire the lock 205 */ 206 if (lock->process_id != current->pid) { 207 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 208 "%s: current task pid does not match original task pid!!", 209 __func__); 210 #ifdef QDF_NESTED_LOCK_DEBUG 211 pe_err("%s: Lock held by=%d being released by=%d", 212 __func__, lock->process_id, current->pid); 213 #endif 214 QDF_ASSERT(0); 215 return QDF_STATUS_E_PERM; 216 } 217 if ((lock->process_id == current->pid) && 218 (lock->state == LOCK_ACQUIRED)) { 219 if (lock->refcount > 0) 220 
lock->refcount--; 221 } 222 #ifdef QDF_NESTED_LOCK_DEBUG 223 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: %x %d %d", __func__, lock, lock->process_id, 224 lock->refcount); 225 #endif 226 if (lock->refcount) 227 return QDF_STATUS_SUCCESS; 228 229 lock->process_id = 0; 230 lock->refcount = 0; 231 lock->state = LOCK_RELEASED; 232 /* release a Lock */ 233 BEFORE_UNLOCK(lock, 0); 234 mutex_unlock(&lock->m_lock); 235 #ifdef QDF_NESTED_LOCK_DEBUG 236 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: Freeing lock %x %d %d", lock, lock->process_id, 237 lock->refcount); 238 #endif 239 return QDF_STATUS_SUCCESS; 240 } 241 EXPORT_SYMBOL(qdf_mutex_release); 242 243 /** 244 * qdf_wake_lock_name() - This function returns the name of the wakelock 245 * @lock: Pointer to the wakelock 246 * 247 * This function returns the name of the wakelock 248 * 249 * Return: Pointer to the name if it is valid or a default string 250 */ 251 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) 252 const char *qdf_wake_lock_name(qdf_wake_lock_t *lock) 253 { 254 if (lock->name) 255 return lock->name; 256 return "UNNAMED_WAKELOCK"; 257 } 258 #else 259 const char *qdf_wake_lock_name(qdf_wake_lock_t *lock) 260 { 261 return "NO_WAKELOCK_SUPPORT"; 262 } 263 #endif 264 EXPORT_SYMBOL(qdf_wake_lock_name); 265 266 /** 267 * qdf_wake_lock_create() - initializes a wake lock 268 * @lock: The wake lock to initialize 269 * @name: Name of wake lock 270 * 271 * Return: 272 * QDF status success: if wake lock is initialized 273 * QDF status failure: if wake lock was not initialized 274 */ 275 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) 276 QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name) 277 { 278 wakeup_source_init(lock, name); 279 return QDF_STATUS_SUCCESS; 280 } 281 #else 282 QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name) 283 { 284 return QDF_STATUS_SUCCESS; 285 } 286 #endif 287 EXPORT_SYMBOL(qdf_wake_lock_create); 288 289 /** 290 * 
qdf_wake_lock_acquire() - acquires a wake lock 291 * @lock: The wake lock to acquire 292 * @reason: Reason for wakelock 293 * 294 * Return: 295 * QDF status success: if wake lock is acquired 296 * QDF status failure: if wake lock was not acquired 297 */ 298 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) 299 QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason) 300 { 301 #ifdef CONFIG_MCL 302 host_diag_log_wlock(reason, qdf_wake_lock_name(lock), 303 WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT, 304 WIFI_POWER_EVENT_WAKELOCK_TAKEN); 305 #endif 306 __pm_stay_awake(lock); 307 return QDF_STATUS_SUCCESS; 308 } 309 #else 310 QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason) 311 { 312 return QDF_STATUS_SUCCESS; 313 } 314 #endif 315 EXPORT_SYMBOL(qdf_wake_lock_acquire); 316 317 /** 318 * qdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout 319 * @lock: The wake lock to acquire 320 * @reason: Reason for wakelock 321 * 322 * Return: 323 * QDF status success: if wake lock is acquired 324 * QDF status failure: if wake lock was not acquired 325 */ 326 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) 327 QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec) 328 { 329 /* Wakelock for Rx is frequent. 
330 * It is reported only during active debug 331 */ 332 __pm_wakeup_event(lock, msec); 333 return QDF_STATUS_SUCCESS; 334 } 335 #else 336 QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec) 337 { 338 return QDF_STATUS_SUCCESS; 339 } 340 #endif 341 EXPORT_SYMBOL(qdf_wake_lock_timeout_acquire); 342 343 /** 344 * qdf_wake_lock_release() - releases a wake lock 345 * @lock: the wake lock to release 346 * @reason: Reason for wakelock 347 * 348 * Return: 349 * QDF status success: if wake lock is acquired 350 * QDF status failure: if wake lock was not acquired 351 */ 352 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) 353 QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason) 354 { 355 #ifdef CONFIG_MCL 356 host_diag_log_wlock(reason, qdf_wake_lock_name(lock), 357 WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT, 358 WIFI_POWER_EVENT_WAKELOCK_RELEASED); 359 #endif 360 __pm_relax(lock); 361 return QDF_STATUS_SUCCESS; 362 } 363 #else 364 QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason) 365 { 366 return QDF_STATUS_SUCCESS; 367 } 368 #endif 369 EXPORT_SYMBOL(qdf_wake_lock_release); 370 371 /** 372 * qdf_wake_lock_destroy() - destroys a wake lock 373 * @lock: The wake lock to destroy 374 * 375 * Return: 376 * QDF status success: if wake lock is acquired 377 * QDF status failure: if wake lock was not acquired 378 */ 379 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) 380 QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock) 381 { 382 wakeup_source_trash(lock); 383 return QDF_STATUS_SUCCESS; 384 } 385 #else 386 QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock) 387 { 388 return QDF_STATUS_SUCCESS; 389 } 390 #endif 391 EXPORT_SYMBOL(qdf_wake_lock_destroy); 392 393 #ifdef CONFIG_MCL 394 /** 395 * qdf_runtime_pm_get() - do a get opperation on the device 396 * 397 * A get opperation will prevent a runtime suspend untill a 398 * corresponding put is done. This api should be used when sending 399 * data. 
400 * 401 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED, 402 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!! 403 * 404 * return: success if the bus is up and a get has been issued 405 * otherwise an error code. 406 */ 407 QDF_STATUS qdf_runtime_pm_get(void) 408 { 409 void *ol_sc; 410 int ret; 411 412 ol_sc = cds_get_context(QDF_MODULE_ID_HIF); 413 414 if (ol_sc == NULL) { 415 QDF_ASSERT(0); 416 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 417 "%s: HIF context is null!", __func__); 418 return QDF_STATUS_E_INVAL; 419 } 420 421 ret = hif_pm_runtime_get(ol_sc); 422 423 if (ret) 424 return QDF_STATUS_E_FAILURE; 425 return QDF_STATUS_SUCCESS; 426 } 427 EXPORT_SYMBOL(qdf_runtime_pm_get); 428 429 /** 430 * qdf_runtime_pm_put() - do a put opperation on the device 431 * 432 * A put opperation will allow a runtime suspend after a corresponding 433 * get was done. This api should be used when sending data. 434 * 435 * This api will return a failure if the hif module hasn't been 436 * initialized 437 * 438 * return: QDF_STATUS_SUCCESS if the put is performed 439 */ 440 QDF_STATUS qdf_runtime_pm_put(void) 441 { 442 void *ol_sc; 443 int ret; 444 445 ol_sc = cds_get_context(QDF_MODULE_ID_HIF); 446 447 if (ol_sc == NULL) { 448 QDF_ASSERT(0); 449 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 450 "%s: HIF context is null!", __func__); 451 return QDF_STATUS_E_INVAL; 452 } 453 454 ret = hif_pm_runtime_put(ol_sc); 455 456 if (ret) 457 return QDF_STATUS_E_FAILURE; 458 return QDF_STATUS_SUCCESS; 459 } 460 EXPORT_SYMBOL(qdf_runtime_pm_put); 461 462 /** 463 * qdf_runtime_pm_prevent_suspend() - prevent a runtime bus suspend 464 * @lock: an opaque context for tracking 465 * 466 * The lock can only be acquired once per lock context and is tracked. 467 * 468 * return: QDF_STATUS_SUCCESS or failure code. 
469 */ 470 QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock) 471 { 472 void *ol_sc; 473 int ret; 474 475 ol_sc = cds_get_context(QDF_MODULE_ID_HIF); 476 477 if (ol_sc == NULL) { 478 QDF_ASSERT(0); 479 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 480 "%s: HIF context is null!", __func__); 481 return QDF_STATUS_E_INVAL; 482 } 483 484 ret = hif_pm_runtime_prevent_suspend(ol_sc, lock->lock); 485 486 if (ret) 487 return QDF_STATUS_E_FAILURE; 488 return QDF_STATUS_SUCCESS; 489 } 490 EXPORT_SYMBOL(qdf_runtime_pm_prevent_suspend); 491 492 /** 493 * qdf_runtime_pm_allow_suspend() - prevent a runtime bus suspend 494 * @lock: an opaque context for tracking 495 * 496 * The lock can only be acquired once per lock context and is tracked. 497 * 498 * return: QDF_STATUS_SUCCESS or failure code. 499 */ 500 QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock) 501 { 502 void *ol_sc; 503 int ret; 504 505 ol_sc = cds_get_context(QDF_MODULE_ID_HIF); 506 if (ol_sc == NULL) { 507 QDF_ASSERT(0); 508 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 509 "%s: HIF context is null!", __func__); 510 return QDF_STATUS_E_INVAL; 511 } 512 513 ret = hif_pm_runtime_allow_suspend(ol_sc, lock->lock); 514 if (ret) 515 return QDF_STATUS_E_FAILURE; 516 517 return QDF_STATUS_SUCCESS; 518 } 519 EXPORT_SYMBOL(qdf_runtime_pm_allow_suspend); 520 521 /** 522 * qdf_runtime_lock_init() - initialize runtime lock 523 * @name: name of the runtime lock 524 * 525 * Initialize a runtime pm lock. This lock can be used 526 * to prevent the runtime pm system from putting the bus 527 * to sleep. 
528 * 529 * Return: runtime_pm_lock_t 530 */ 531 QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name) 532 { 533 int ret = hif_runtime_lock_init(lock, name); 534 535 if (ret) 536 return QDF_STATUS_E_NOMEM; 537 538 return QDF_STATUS_SUCCESS; 539 } 540 EXPORT_SYMBOL(__qdf_runtime_lock_init); 541 542 /** 543 * qdf_runtime_lock_deinit() - deinitialize runtime pm lock 544 * @lock: the lock to deinitialize 545 * 546 * Ensures the lock is released. Frees the runtime lock. 547 * 548 * Return: void 549 */ 550 void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock) 551 { 552 void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF); 553 hif_runtime_lock_deinit(hif_ctx, lock->lock); 554 } 555 EXPORT_SYMBOL(qdf_runtime_lock_deinit); 556 557 #else 558 559 QDF_STATUS qdf_runtime_pm_get(void) 560 { 561 return QDF_STATUS_SUCCESS; 562 } 563 EXPORT_SYMBOL(qdf_runtime_pm_get); 564 565 QDF_STATUS qdf_runtime_pm_put(void) 566 { 567 return QDF_STATUS_SUCCESS; 568 } 569 EXPORT_SYMBOL(qdf_runtime_pm_put); 570 571 QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock) 572 { 573 return QDF_STATUS_SUCCESS; 574 } 575 EXPORT_SYMBOL(qdf_runtime_pm_prevent_suspend); 576 577 QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock) 578 { 579 return QDF_STATUS_SUCCESS; 580 } 581 EXPORT_SYMBOL(qdf_runtime_pm_allow_suspend); 582 583 QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name) 584 { 585 return QDF_STATUS_SUCCESS; 586 } 587 EXPORT_SYMBOL(__qdf_runtime_lock_init); 588 589 void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock) 590 { 591 } 592 EXPORT_SYMBOL(qdf_runtime_lock_deinit); 593 594 #endif /* CONFIG_MCL */ 595 596 /** 597 * qdf_spinlock_acquire() - acquires a spin lock 598 * @lock: Spin lock to acquire 599 * 600 * Return: 601 * QDF status success: if wake lock is acquired 602 */ 603 QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock) 604 { 605 spin_lock(&lock->lock.spinlock); 606 return QDF_STATUS_SUCCESS; 607 } 608 
EXPORT_SYMBOL(qdf_spinlock_acquire); 609 610 611 /** 612 * qdf_spinlock_release() - release a spin lock 613 * @lock: Spin lock to release 614 * 615 * Return: 616 * QDF status success : if wake lock is acquired 617 */ 618 QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock) 619 { 620 spin_unlock(&lock->lock.spinlock); 621 return QDF_STATUS_SUCCESS; 622 } 623 EXPORT_SYMBOL(qdf_spinlock_release); 624 625 /** 626 * qdf_mutex_destroy() - destroy a QDF lock 627 * @lock: Pointer to the opaque lock object to be destroyed 628 * 629 * function shall destroy the lock object referenced by lock. After a 630 * successful return from qdf_mutex_destroy() 631 * the lock object becomes, in effect, uninitialized. 632 * 633 * A destroyed lock object can be reinitialized using qdf_mutex_create(); 634 * the results of otherwise referencing the object after it has been destroyed 635 * are undefined. Calls to QDF lock functions to manipulate the lock such 636 * as qdf_mutex_acquire() will fail if the lock is destroyed. Therefore, 637 * don't use the lock after it has been destroyed until it has 638 * been re-initialized. 
639 * 640 * Return: 641 * QDF_STATUS_SUCCESS: lock was successfully initialized 642 * QDF failure reason codes: lock is not initialized and can't be used 643 */ 644 QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock) 645 { 646 /* check for invalid pointer */ 647 if (NULL == lock) { 648 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 649 "%s: NULL pointer passed in", __func__); 650 return QDF_STATUS_E_FAULT; 651 } 652 653 if (LINUX_LOCK_COOKIE != lock->cookie) { 654 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 655 "%s: uninitialized lock", __func__); 656 return QDF_STATUS_E_INVAL; 657 } 658 659 if (in_interrupt()) { 660 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 661 "%s cannot be called from interrupt context!!!", 662 __func__); 663 return QDF_STATUS_E_FAULT; 664 } 665 666 /* check if lock is released */ 667 if (!mutex_trylock(&lock->m_lock)) { 668 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 669 "%s: lock is not released", __func__); 670 return QDF_STATUS_E_BUSY; 671 } 672 lock->cookie = 0; 673 lock->state = LOCK_DESTROYED; 674 lock->process_id = 0; 675 lock->refcount = 0; 676 677 qdf_lock_stats_destroy(&lock->stats); 678 mutex_unlock(&lock->m_lock); 679 680 return QDF_STATUS_SUCCESS; 681 } 682 EXPORT_SYMBOL(qdf_mutex_destroy); 683 684 /** 685 * qdf_spin_trylock_bh_outline() - spin trylock bottomhalf 686 * @lock: spinlock object 687 * Return: nonzero if lock is acquired 688 */ 689 int qdf_spin_trylock_bh_outline(qdf_spinlock_t *lock) 690 { 691 return qdf_spin_trylock_bh(lock); 692 } 693 EXPORT_SYMBOL(qdf_spin_trylock_bh_outline); 694 695 /** 696 * qdf_spin_lock_bh_outline() - locks the spinlock in soft irq context 697 * @lock: spinlock object pointer 698 * Return: none 699 */ 700 void qdf_spin_lock_bh_outline(qdf_spinlock_t *lock) 701 { 702 qdf_spin_lock_bh(lock); 703 } 704 EXPORT_SYMBOL(qdf_spin_lock_bh_outline); 705 706 /** 707 * qdf_spin_unlock_bh_outline() - unlocks spinlock in soft irq context 708 * @lock: spinlock object pointer 709 * 
Return: none 710 */ 711 void qdf_spin_unlock_bh_outline(qdf_spinlock_t *lock) 712 { 713 qdf_spin_unlock_bh(lock); 714 } 715 EXPORT_SYMBOL(qdf_spin_unlock_bh_outline); 716 717 #if QDF_LOCK_STATS_LIST 718 struct qdf_lock_cookie { 719 union { 720 struct { 721 struct lock_stats *stats; 722 const char *func; 723 int line; 724 } cookie; 725 struct { 726 struct qdf_lock_cookie *next; 727 } empty_node; 728 } u; 729 }; 730 731 #ifndef QDF_LOCK_STATS_LIST_SIZE 732 #define QDF_LOCK_STATS_LIST_SIZE 256 733 #endif 734 735 static qdf_spinlock_t qdf_lock_list_spinlock; 736 static struct qdf_lock_cookie lock_cookies[QDF_LOCK_STATS_LIST_SIZE]; 737 static struct qdf_lock_cookie *lock_cookie_freelist; 738 static qdf_atomic_t lock_cookie_get_failures; 739 static qdf_atomic_t lock_cookie_untracked_num; 740 /* dummy value */ 741 #define DUMMY_LOCK_COOKIE 0xc00c1e 742 743 /** 744 * qdf_is_lock_cookie - check if memory is a valid lock cookie 745 * 746 * return true if the memory is within the range of the lock cookie 747 * memory. 748 */ 749 static bool qdf_is_lock_cookie(struct qdf_lock_cookie *lock_cookie) 750 { 751 return lock_cookie >= &lock_cookies[0] && 752 lock_cookie <= &lock_cookies[QDF_LOCK_STATS_LIST_SIZE-1]; 753 } 754 755 /** 756 * qdf_is_lock_cookie_free() - check if the lock cookie is on the freelist 757 * @lock_cookie: lock cookie to check 758 * 759 * Check that the next field of the lock cookie points to a lock cookie. 760 * currently this is only true if the cookie is on the freelist. 761 * 762 * Checking for the function and line being NULL and 0 should also have worked. 
763 */ 764 static bool qdf_is_lock_cookie_free(struct qdf_lock_cookie *lock_cookie) 765 { 766 struct qdf_lock_cookie *tmp = lock_cookie->u.empty_node.next; 767 768 return qdf_is_lock_cookie(tmp) || (tmp == NULL); 769 } 770 771 static struct qdf_lock_cookie *qdf_get_lock_cookie(void) 772 { 773 struct qdf_lock_cookie *lock_cookie; 774 775 qdf_spin_lock_bh(&qdf_lock_list_spinlock); 776 lock_cookie = lock_cookie_freelist; 777 if (lock_cookie_freelist) 778 lock_cookie_freelist = lock_cookie_freelist->u.empty_node.next; 779 qdf_spin_unlock_bh(&qdf_lock_list_spinlock); 780 return lock_cookie; 781 } 782 783 static void __qdf_put_lock_cookie(struct qdf_lock_cookie *lock_cookie) 784 { 785 if (!qdf_is_lock_cookie(lock_cookie)) 786 QDF_BUG(0); 787 788 lock_cookie->u.empty_node.next = lock_cookie_freelist; 789 lock_cookie_freelist = lock_cookie; 790 } 791 792 static void qdf_put_lock_cookie(struct qdf_lock_cookie *lock_cookie) 793 { 794 qdf_spin_lock_bh(&qdf_lock_list_spinlock); 795 __qdf_put_lock_cookie(lock_cookie); 796 qdf_spin_unlock_bh(&qdf_lock_list_spinlock); 797 } 798 799 void qdf_lock_stats_init(void) 800 { 801 int i; 802 803 for (i = 0; i < QDF_LOCK_STATS_LIST_SIZE; i++) 804 __qdf_put_lock_cookie(&lock_cookies[i]); 805 806 /* stats must be allocated for the spinlock before the cookie, 807 * otherwise this qdf_lock_list_spinlock wouldnt get initialized 808 * properly 809 */ 810 qdf_spinlock_create(&qdf_lock_list_spinlock); 811 qdf_atomic_init(&lock_cookie_get_failures); 812 qdf_atomic_init(&lock_cookie_untracked_num); 813 } 814 815 void qdf_lock_stats_deinit(void) 816 { 817 int i; 818 819 qdf_spinlock_destroy(&qdf_lock_list_spinlock); 820 for (i = 0; i < QDF_LOCK_STATS_LIST_SIZE; i++) { 821 if (!qdf_is_lock_cookie_free(&lock_cookies[i])) 822 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, 823 "%s: lock_not_destroyed, fun: %s, line %d", 824 __func__, lock_cookies[i].u.cookie.func, 825 lock_cookies[i].u.cookie.line); 826 } 827 } 828 829 /* allocated separate memory 
in case the lock memory is freed without 830 * running the deinitialization code. The cookie list will not be 831 * corrupted. 832 */ 833 void qdf_lock_stats_cookie_create(struct lock_stats *stats, 834 const char *func, int line) 835 { 836 struct qdf_lock_cookie *cookie = qdf_get_lock_cookie(); 837 838 if (cookie == NULL) { 839 int count; 840 841 qdf_atomic_inc(&lock_cookie_get_failures); 842 count = qdf_atomic_inc_return(&lock_cookie_untracked_num); 843 stats->cookie = (void *) DUMMY_LOCK_COOKIE; 844 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, 845 "%s: cookie allocation failure, using dummy (%s:%d) count %d", 846 __func__, func, line, count); 847 return; 848 } 849 850 stats->cookie = cookie; 851 stats->cookie->u.cookie.stats = stats; 852 stats->cookie->u.cookie.func = func; 853 stats->cookie->u.cookie.line = line; 854 } 855 856 void qdf_lock_stats_cookie_destroy(struct lock_stats *stats) 857 { 858 struct qdf_lock_cookie *cookie = stats->cookie; 859 860 if (cookie == NULL) { 861 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 862 "%s: Double cookie destroy", __func__); 863 QDF_ASSERT(0); 864 return; 865 } 866 867 stats->cookie = NULL; 868 if (cookie == (void *)DUMMY_LOCK_COOKIE) { 869 qdf_atomic_dec(&lock_cookie_untracked_num); 870 return; 871 } 872 873 cookie->u.cookie.stats = NULL; 874 cookie->u.cookie.func = NULL; 875 cookie->u.cookie.line = 0; 876 877 qdf_put_lock_cookie(cookie); 878 } 879 #endif 880