1 /* 2 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 3 * 4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 5 * 6 * 7 * Permission to use, copy, modify, and/or distribute this software for 8 * any purpose with or without fee is hereby granted, provided that the 9 * above copyright notice and this permission notice appear in all 10 * copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 19 * PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 /* 23 * This file was originally distributed by Qualcomm Atheros, Inc. 24 * under proprietary terms before Copyright ownership was assigned 25 * to the Linux Foundation. 
26 */ 27 28 #include <linux/module.h> 29 #include <qdf_lock.h> 30 #include <qdf_trace.h> 31 #include <qdf_module.h> 32 33 #include <qdf_types.h> 34 #ifdef CONFIG_MCL 35 #include <i_host_diag_core_event.h> 36 #include <hif.h> 37 #include <cds_api.h> 38 #endif 39 #include <i_qdf_lock.h> 40 41 /** 42 * qdf_mutex_create() - Initialize a mutex 43 * @m: mutex to initialize 44 * 45 * Returns: QDF_STATUS 46 * =0 success 47 * else fail status 48 */ 49 #undef qdf_mutex_create 50 QDF_STATUS qdf_mutex_create(qdf_mutex_t *lock, const char *func, int line) 51 { 52 /* check for invalid pointer */ 53 if (lock == NULL) { 54 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 55 "%s: NULL pointer passed in", __func__); 56 return QDF_STATUS_E_FAULT; 57 } 58 /* check for 'already initialized' lock */ 59 if (LINUX_LOCK_COOKIE == lock->cookie) { 60 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 61 "%s: already initialized lock", __func__); 62 return QDF_STATUS_E_BUSY; 63 } 64 65 if (in_interrupt()) { 66 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 67 "%s cannot be called from interrupt context!!!", 68 __func__); 69 return QDF_STATUS_E_FAULT; 70 } 71 72 qdf_lock_stats_create(&lock->stats, func, line); 73 74 /* initialize new lock */ 75 mutex_init(&lock->m_lock); 76 lock->cookie = LINUX_LOCK_COOKIE; 77 lock->state = LOCK_RELEASED; 78 lock->process_id = 0; 79 lock->refcount = 0; 80 81 return QDF_STATUS_SUCCESS; 82 } 83 qdf_export_symbol(qdf_mutex_create); 84 85 /** 86 * qdf_mutex_acquire() - acquire a QDF lock 87 * @lock: Pointer to the opaque lock object to acquire 88 * 89 * A lock object is acquired by calling qdf_mutex_acquire(). If the lock 90 * is already locked, the calling thread shall block until the lock becomes 91 * available. This operation shall return with the lock object referenced by 92 * lock in the locked state with the calling thread as its owner. 
93 * 94 * Return: 95 * QDF_STATUS_SUCCESS: lock was successfully initialized 96 * QDF failure reason codes: lock is not initialized and can't be used 97 */ 98 QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *lock) 99 { 100 int rc; 101 /* check for invalid pointer */ 102 if (lock == NULL) { 103 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 104 "%s: NULL pointer passed in", __func__); 105 QDF_ASSERT(0); 106 return QDF_STATUS_E_FAULT; 107 } 108 /* check if lock refers to an initialized object */ 109 if (LINUX_LOCK_COOKIE != lock->cookie) { 110 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 111 "%s: uninitialized lock", __func__); 112 QDF_ASSERT(0); 113 return QDF_STATUS_E_INVAL; 114 } 115 116 if (in_interrupt()) { 117 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 118 "%s cannot be called from interrupt context!!!", 119 __func__); 120 QDF_ASSERT(0); 121 return QDF_STATUS_E_FAULT; 122 } 123 if ((lock->process_id == current->pid) && 124 (lock->state == LOCK_ACQUIRED)) { 125 lock->refcount++; 126 #ifdef QDF_NESTED_LOCK_DEBUG 127 pe_err("%s: %x %d %d", __func__, lock, current->pid, 128 lock->refcount); 129 #endif 130 return QDF_STATUS_SUCCESS; 131 } 132 133 BEFORE_LOCK(lock, mutex_is_locked(&lock->m_lock)); 134 /* acquire a Lock */ 135 mutex_lock(&lock->m_lock); 136 AFTER_LOCK(lock, __func__); 137 rc = mutex_is_locked(&lock->m_lock); 138 if (rc == 0) { 139 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 140 "%s: unable to lock mutex (rc = %d)", __func__, rc); 141 QDF_ASSERT(0); 142 return QDF_STATUS_E_FAILURE; 143 } 144 #ifdef QDF_NESTED_LOCK_DEBUG 145 pe_err("%s: %x %d", __func__, lock, current->pid); 146 #endif 147 if (LOCK_DESTROYED != lock->state) { 148 lock->process_id = current->pid; 149 lock->refcount++; 150 lock->state = LOCK_ACQUIRED; 151 return QDF_STATUS_SUCCESS; 152 } 153 154 /* lock is already destroyed */ 155 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 156 "%s: Lock is already destroyed", __func__); 157 mutex_unlock(&lock->m_lock); 158 
QDF_ASSERT(0); 159 return QDF_STATUS_E_FAILURE; 160 } 161 qdf_export_symbol(qdf_mutex_acquire); 162 163 /** 164 * qdf_mutex_release() - release a QDF lock 165 * @lock: Pointer to the opaque lock object to be released 166 * 167 * qdf_mutex_release() function shall release the lock object 168 * referenced by 'lock'. 169 * 170 * If a thread attempts to release a lock that it unlocked or is not 171 * initialized, an error is returned. 172 * 173 * Return: 174 * QDF_STATUS_SUCCESS: lock was successfully initialized 175 * QDF failure reason codes: lock is not initialized and can't be used 176 */ 177 QDF_STATUS qdf_mutex_release(qdf_mutex_t *lock) 178 { 179 /* check for invalid pointer */ 180 if (lock == NULL) { 181 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 182 "%s: NULL pointer passed in", __func__); 183 QDF_ASSERT(0); 184 return QDF_STATUS_E_FAULT; 185 } 186 187 /* check if lock refers to an uninitialized object */ 188 if (LINUX_LOCK_COOKIE != lock->cookie) { 189 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 190 "%s: uninitialized lock", __func__); 191 QDF_ASSERT(0); 192 return QDF_STATUS_E_INVAL; 193 } 194 195 if (in_interrupt()) { 196 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 197 "%s cannot be called from interrupt context!!!", 198 __func__); 199 QDF_ASSERT(0); 200 return QDF_STATUS_E_FAULT; 201 } 202 203 /* current_thread = get_current_thread_id(); 204 * Check thread ID of caller against thread ID 205 * of the thread which acquire the lock 206 */ 207 if (lock->process_id != current->pid) { 208 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 209 "%s: current task pid does not match original task pid!!", 210 __func__); 211 #ifdef QDF_NESTED_LOCK_DEBUG 212 pe_err("%s: Lock held by=%d being released by=%d", 213 __func__, lock->process_id, current->pid); 214 #endif 215 QDF_ASSERT(0); 216 return QDF_STATUS_E_PERM; 217 } 218 if ((lock->process_id == current->pid) && 219 (lock->state == LOCK_ACQUIRED)) { 220 if (lock->refcount > 0) 221 
lock->refcount--; 222 } 223 #ifdef QDF_NESTED_LOCK_DEBUG 224 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: %x %d %d", __func__, lock, lock->process_id, 225 lock->refcount); 226 #endif 227 if (lock->refcount) 228 return QDF_STATUS_SUCCESS; 229 230 lock->process_id = 0; 231 lock->refcount = 0; 232 lock->state = LOCK_RELEASED; 233 /* release a Lock */ 234 BEFORE_UNLOCK(lock, 0); 235 mutex_unlock(&lock->m_lock); 236 #ifdef QDF_NESTED_LOCK_DEBUG 237 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: Freeing lock %x %d %d", lock, lock->process_id, 238 lock->refcount); 239 #endif 240 return QDF_STATUS_SUCCESS; 241 } 242 qdf_export_symbol(qdf_mutex_release); 243 244 /** 245 * qdf_wake_lock_name() - This function returns the name of the wakelock 246 * @lock: Pointer to the wakelock 247 * 248 * This function returns the name of the wakelock 249 * 250 * Return: Pointer to the name if it is valid or a default string 251 */ 252 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) 253 const char *qdf_wake_lock_name(qdf_wake_lock_t *lock) 254 { 255 if (lock->name) 256 return lock->name; 257 return "UNNAMED_WAKELOCK"; 258 } 259 #else 260 const char *qdf_wake_lock_name(qdf_wake_lock_t *lock) 261 { 262 return "NO_WAKELOCK_SUPPORT"; 263 } 264 #endif 265 qdf_export_symbol(qdf_wake_lock_name); 266 267 /** 268 * qdf_wake_lock_create() - initializes a wake lock 269 * @lock: The wake lock to initialize 270 * @name: Name of wake lock 271 * 272 * Return: 273 * QDF status success: if wake lock is initialized 274 * QDF status failure: if wake lock was not initialized 275 */ 276 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) 277 QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name) 278 { 279 wakeup_source_init(lock, name); 280 return QDF_STATUS_SUCCESS; 281 } 282 #else 283 QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name) 284 { 285 return QDF_STATUS_SUCCESS; 286 } 287 #endif 288 qdf_export_symbol(qdf_wake_lock_create); 289 290 
/**
 * qdf_wake_lock_acquire() - acquires a wake lock
 * @lock: The wake lock to acquire
 * @reason: Reason for wakelock (logged to host diag when CONFIG_MCL)
 *
 * Return:
 * QDF status success: if wake lock is acquired
 * QDF status failure: if wake lock was not acquired
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason)
{
#ifdef CONFIG_MCL
	host_diag_log_wlock(reason, qdf_wake_lock_name(lock),
			    WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
			    WIFI_POWER_EVENT_WAKELOCK_TAKEN);
#endif
	__pm_stay_awake(lock);
	return QDF_STATUS_SUCCESS;
}
#else
/* stub for kernels without wakeup-source support */
QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason)
{
	return QDF_STATUS_SUCCESS;
}
#endif
qdf_export_symbol(qdf_wake_lock_acquire);

/**
 * qdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
 * @lock: The wake lock to acquire
 * @msec: timeout for the wake lock, in milliseconds
 *
 * Return:
 * QDF status success: if wake lock is acquired
 * QDF status failure: if wake lock was not acquired
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec)
{
	/* Wakelock for Rx is frequent.
	 * It is reported only during active debug
	 */
	__pm_wakeup_event(lock, msec);
	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec)
{
	return QDF_STATUS_SUCCESS;
}
#endif
qdf_export_symbol(qdf_wake_lock_timeout_acquire);

/**
 * qdf_wake_lock_release() - releases a wake lock
 * @lock: the wake lock to release
 * @reason: Reason for wakelock (logged to host diag when CONFIG_MCL)
 *
 * Return:
 * QDF status success: if wake lock is released
 * QDF status failure: if wake lock was not released
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason)
{
#ifdef CONFIG_MCL
	host_diag_log_wlock(reason, qdf_wake_lock_name(lock),
			    WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
			    WIFI_POWER_EVENT_WAKELOCK_RELEASED);
#endif
	__pm_relax(lock);
	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason)
{
	return QDF_STATUS_SUCCESS;
}
#endif
qdf_export_symbol(qdf_wake_lock_release);

/**
 * qdf_wake_lock_destroy() - destroys a wake lock
 * @lock: The wake lock to destroy
 *
 * Return:
 * QDF status success: if wake lock is destroyed
 * QDF status failure: if wake lock was not destroyed
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock)
{
	wakeup_source_trash(lock);
	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock)
{
	return QDF_STATUS_SUCCESS;
}
#endif
qdf_export_symbol(qdf_wake_lock_destroy);

#ifdef CONFIG_MCL
/**
 * qdf_runtime_pm_get() - do a get operation on the device
 *
 * A get operation will prevent a runtime suspend until a
 * corresponding put is done. This api should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
 *
 * return: success if the bus is up and a get has been issued
 *   otherwise an error code.
 */
QDF_STATUS qdf_runtime_pm_get(void)
{
	void *ol_sc;
	int ret;

	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);

	if (ol_sc == NULL) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: HIF context is null!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	ret = hif_pm_runtime_get(ol_sc);

	if (ret)
		return QDF_STATUS_E_FAILURE;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_runtime_pm_get);

/**
 * qdf_runtime_pm_put() - do a put operation on the device
 *
 * A put operation will allow a runtime suspend after a corresponding
 * get was done. This api should be used when sending data.
 *
 * This api will return a failure if the hif module hasn't been
 * initialized
 *
 * return: QDF_STATUS_SUCCESS if the put is performed
 */
QDF_STATUS qdf_runtime_pm_put(void)
{
	void *ol_sc;
	int ret;

	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);

	if (ol_sc == NULL) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: HIF context is null!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	ret = hif_pm_runtime_put(ol_sc);

	if (ret)
		return QDF_STATUS_E_FAILURE;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_runtime_pm_put);

/**
 * qdf_runtime_pm_prevent_suspend() - prevent a runtime bus suspend
 * @lock: an opaque context for tracking
 *
 * The lock can only be acquired once per lock context and is tracked.
 *
 * return: QDF_STATUS_SUCCESS or failure code.
 */
QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock)
{
	void *ol_sc;
	int ret;

	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);

	if (ol_sc == NULL) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: HIF context is null!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	ret = hif_pm_runtime_prevent_suspend(ol_sc, lock->lock);

	if (ret)
		return QDF_STATUS_E_FAILURE;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_runtime_pm_prevent_suspend);

/**
 * qdf_runtime_pm_allow_suspend() - allow a runtime bus suspend
 * @lock: an opaque context for tracking
 *
 * The lock can only be acquired once per lock context and is tracked.
 *
 * return: QDF_STATUS_SUCCESS or failure code.
 */
QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock)
{
	void *ol_sc;
	int ret;

	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);
	if (ol_sc == NULL) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: HIF context is null!", __func__);
		return QDF_STATUS_E_INVAL;
	}

	ret = hif_pm_runtime_allow_suspend(ol_sc, lock->lock);
	if (ret)
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_runtime_pm_allow_suspend);

/**
 * __qdf_runtime_lock_init() - initialize runtime lock
 * @lock: the runtime lock to initialize
 * @name: name of the runtime lock
 *
 * Initialize a runtime pm lock. This lock can be used
 * to prevent the runtime pm system from putting the bus
 * to sleep.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_NOMEM if the
 * underlying hif allocation fails
 */
QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{
	int ret = hif_runtime_lock_init(lock, name);

	if (ret)
		return QDF_STATUS_E_NOMEM;

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_runtime_lock_init);

/**
 * qdf_runtime_lock_deinit() - deinitialize runtime pm lock
 * @lock: the lock to deinitialize
 *
 * Ensures the lock is released. Frees the runtime lock.
 *
 * NOTE(review): unlike the sibling runtime-pm APIs above, hif_ctx is not
 * checked for NULL before being passed on — confirm that
 * hif_runtime_lock_deinit() tolerates a NULL context, or a lock could be
 * leaked/dereferenced during teardown.
 *
 * Return: void
 */
void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock)
{
	void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
	hif_runtime_lock_deinit(hif_ctx, lock->lock);
}
qdf_export_symbol(qdf_runtime_lock_deinit);

#else

/* No-op stubs used when runtime PM support (CONFIG_MCL) is not built in. */

QDF_STATUS qdf_runtime_pm_get(void)
{
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_runtime_pm_get);

QDF_STATUS qdf_runtime_pm_put(void)
{
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_runtime_pm_put);

QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock)
{
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_runtime_pm_prevent_suspend);

QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock)
{
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_runtime_pm_allow_suspend);

QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_runtime_lock_init);

void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock)
{
}
qdf_export_symbol(qdf_runtime_lock_deinit);

#endif /* CONFIG_MCL */

/**
 * qdf_spinlock_acquire() - acquires a spin lock
 * @lock: Spin lock to acquire
 *
 * Return:
 * QDF status success: if spin lock is acquired
 */
QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock)
{
	spin_lock(&lock->lock.spinlock);
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_spinlock_acquire);


/**
 * qdf_spinlock_release() - release a spin lock
 * @lock: Spin lock to release
 *
 * Return:
 * QDF status success : if spin lock is released
 */
QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock)
{
	spin_unlock(&lock->lock.spinlock);
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_spinlock_release);

/**
 * qdf_mutex_destroy() - destroy a QDF lock
 * @lock: Pointer to the opaque lock object to be destroyed
 *
 * function shall destroy the lock object referenced by lock. After a
 * successful return from qdf_mutex_destroy()
 * the lock object becomes, in effect, uninitialized.
 *
 * A destroyed lock object can be reinitialized using qdf_mutex_create();
 * the results of otherwise referencing the object after it has been destroyed
 * are undefined. Calls to QDF lock functions to manipulate the lock such
 * as qdf_mutex_acquire() will fail if the lock is destroyed. Therefore,
 * don't use the lock after it has been destroyed until it has
 * been re-initialized.
 *
 * Return:
 * QDF_STATUS_SUCCESS: lock was successfully destroyed
 * QDF failure reason codes: lock is not initialized and can't be used
 */
QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock)
{
	/* check for invalid pointer */
	if (NULL == lock) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed in", __func__);
		return QDF_STATUS_E_FAULT;
	}

	if (LINUX_LOCK_COOKIE != lock->cookie) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: uninitialized lock", __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (in_interrupt()) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		return QDF_STATUS_E_FAULT;
	}

	/* check if lock is released; trylock fails if someone holds it */
	if (!mutex_trylock(&lock->m_lock)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: lock is not released", __func__);
		return QDF_STATUS_E_BUSY;
	}
	lock->cookie = 0;
	lock->state = LOCK_DESTROYED;
	lock->process_id = 0;
	lock->refcount = 0;

	qdf_lock_stats_destroy(&lock->stats);
	mutex_unlock(&lock->m_lock);

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_mutex_destroy);

/**
 * qdf_spin_trylock_bh_outline() - spin trylock bottomhalf
 * @lock: spinlock object
 * Return: nonzero if lock is acquired
 */
int qdf_spin_trylock_bh_outline(qdf_spinlock_t *lock)
{
	return qdf_spin_trylock_bh(lock);
}
qdf_export_symbol(qdf_spin_trylock_bh_outline);

/**
 * qdf_spin_lock_bh_outline() - locks the spinlock in soft irq context
 * @lock: spinlock object pointer
 * Return: none
 */
void qdf_spin_lock_bh_outline(qdf_spinlock_t *lock)
{
	qdf_spin_lock_bh(lock);
}
qdf_export_symbol(qdf_spin_lock_bh_outline);

/**
 * qdf_spin_unlock_bh_outline() - unlocks spinlock in soft irq context
 * @lock: spinlock object pointer
 * Return: none
 */
void qdf_spin_unlock_bh_outline(qdf_spinlock_t *lock)
{
	qdf_spin_unlock_bh(lock);
}
qdf_export_symbol(qdf_spin_unlock_bh_outline);

#if QDF_LOCK_STATS_LIST
/* Tracking cookie: either records which lock/call-site owns it (cookie)
 * or links into the freelist (empty_node) — never both at once.
 */
struct qdf_lock_cookie {
	union {
		struct {
			struct lock_stats *stats;
			const char *func;
			int line;
		} cookie;
		struct {
			struct qdf_lock_cookie *next;
		} empty_node;
	} u;
};

#ifndef QDF_LOCK_STATS_LIST_SIZE
#define QDF_LOCK_STATS_LIST_SIZE 256
#endif

static qdf_spinlock_t qdf_lock_list_spinlock;
static struct qdf_lock_cookie lock_cookies[QDF_LOCK_STATS_LIST_SIZE];
static struct qdf_lock_cookie *lock_cookie_freelist;
static qdf_atomic_t lock_cookie_get_failures;
static qdf_atomic_t lock_cookie_untracked_num;
/* dummy value */
#define DUMMY_LOCK_COOKIE 0xc00c1e

/**
 * qdf_is_lock_cookie - check if memory is a valid lock cookie
 * @lock_cookie: pointer to check
 *
 * return true if the memory is within the range of the lock cookie
 * memory.
 */
static bool qdf_is_lock_cookie(struct qdf_lock_cookie *lock_cookie)
{
	return lock_cookie >= &lock_cookies[0] &&
		lock_cookie <= &lock_cookies[QDF_LOCK_STATS_LIST_SIZE-1];
}

/**
 * qdf_is_lock_cookie_free() - check if the lock cookie is on the freelist
 * @lock_cookie: lock cookie to check
 *
 * Check that the next field of the lock cookie points to a lock cookie.
 * currently this is only true if the cookie is on the freelist.
 *
 * Checking for the function and line being NULL and 0 should also have worked.
 */
static bool qdf_is_lock_cookie_free(struct qdf_lock_cookie *lock_cookie)
{
	struct qdf_lock_cookie *tmp = lock_cookie->u.empty_node.next;

	return qdf_is_lock_cookie(tmp) || (tmp == NULL);
}

/* Pop a cookie off the freelist; returns NULL when the pool is exhausted. */
static struct qdf_lock_cookie *qdf_get_lock_cookie(void)
{
	struct qdf_lock_cookie *lock_cookie;

	qdf_spin_lock_bh(&qdf_lock_list_spinlock);
	lock_cookie = lock_cookie_freelist;
	if (lock_cookie_freelist)
		lock_cookie_freelist = lock_cookie_freelist->u.empty_node.next;
	qdf_spin_unlock_bh(&qdf_lock_list_spinlock);
	return lock_cookie;
}

/* Push a cookie back on the freelist. Caller must hold
 * qdf_lock_list_spinlock (or be in single-threaded init).
 * NOTE(review): QDF_BUG(0) does not necessarily stop execution here, so an
 * out-of-range cookie would still be linked in — confirm QDF_BUG semantics.
 */
static void __qdf_put_lock_cookie(struct qdf_lock_cookie *lock_cookie)
{
	if (!qdf_is_lock_cookie(lock_cookie))
		QDF_BUG(0);

	lock_cookie->u.empty_node.next = lock_cookie_freelist;
	lock_cookie_freelist = lock_cookie;
}

/* Locked wrapper around __qdf_put_lock_cookie(). */
static void qdf_put_lock_cookie(struct qdf_lock_cookie *lock_cookie)
{
	qdf_spin_lock_bh(&qdf_lock_list_spinlock);
	__qdf_put_lock_cookie(lock_cookie);
	qdf_spin_unlock_bh(&qdf_lock_list_spinlock);
}

/* Build the cookie freelist, then create the list spinlock and counters. */
void qdf_lock_stats_init(void)
{
	int i;

	for (i = 0; i < QDF_LOCK_STATS_LIST_SIZE; i++)
		__qdf_put_lock_cookie(&lock_cookies[i]);

	/* stats must be allocated for the spinlock before the cookie,
	 * otherwise this qdf_lock_list_spinlock wouldn't get initialized
	 * properly
	 */
	qdf_spinlock_create(&qdf_lock_list_spinlock);
	qdf_atomic_init(&lock_cookie_get_failures);
	qdf_atomic_init(&lock_cookie_untracked_num);
}

/* Tear down the list spinlock, then report any cookies still in use
 * (i.e. locks that were never destroyed), with their creation site.
 */
void qdf_lock_stats_deinit(void)
{
	int i;

	qdf_spinlock_destroy(&qdf_lock_list_spinlock);
	for (i = 0; i < QDF_LOCK_STATS_LIST_SIZE; i++) {
		if (!qdf_is_lock_cookie_free(&lock_cookies[i]))
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
				  "%s: lock_not_destroyed, fun: %s, line %d",
				  __func__, lock_cookies[i].u.cookie.func,
				  lock_cookies[i].u.cookie.line);
	}
}

/* allocated separate memory in case the lock memory is freed without
 * running the deinitialization code. The cookie list will not be
 * corrupted.
 */
void qdf_lock_stats_cookie_create(struct lock_stats *stats,
				  const char *func, int line)
{
	struct qdf_lock_cookie *cookie = qdf_get_lock_cookie();

	if (cookie == NULL) {
		int count;

		qdf_atomic_inc(&lock_cookie_get_failures);
		count = qdf_atomic_inc_return(&lock_cookie_untracked_num);
		/* pool exhausted: mark the lock with a sentinel so destroy
		 * knows not to return it to the freelist
		 */
		stats->cookie = (void *) DUMMY_LOCK_COOKIE;
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
			  "%s: cookie allocation failure, using dummy (%s:%d) count %d",
			  __func__, func, line, count);
		return;
	}

	stats->cookie = cookie;
	stats->cookie->u.cookie.stats = stats;
	stats->cookie->u.cookie.func = func;
	stats->cookie->u.cookie.line = line;
}

/* Release the tracking cookie for a lock being destroyed; asserts on
 * double destroy and handles the DUMMY_LOCK_COOKIE sentinel case.
 */
void qdf_lock_stats_cookie_destroy(struct lock_stats *stats)
{
	struct qdf_lock_cookie *cookie = stats->cookie;

	if (cookie == NULL) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: Double cookie destroy", __func__);
		QDF_ASSERT(0);
		return;
	}

	stats->cookie = NULL;
	if (cookie == (void *)DUMMY_LOCK_COOKIE) {
		qdf_atomic_dec(&lock_cookie_untracked_num);
		return;
	}

	cookie->u.cookie.stats = NULL;
	cookie->u.cookie.func = NULL;
	cookie->u.cookie.line = 0;

	qdf_put_lock_cookie(cookie);
}
#endif