Lines Matching +full:de +full:- +full:serialized

1 // SPDX-License-Identifier: GPL-2.0-only
11 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
14 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15 * from the -rt tree, where it was originally implemented for rtmutexes
19 * Also see Documentation/locking/mutex-design.rst.
48 atomic_long_set(&lock->owner, 0); in __mutex_init()
49 raw_spin_lock_init(&lock->wait_lock); in __mutex_init()
50 INIT_LIST_HEAD(&lock->wait_list); in __mutex_init()
52 osq_lock_init(&lock->osq); in __mutex_init()
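
The hits above are the body of __mutex_init(): it zeroes the owner word, sets up wait_lock and wait_list, and (with CONFIG_MUTEX_SPIN_ON_OWNER) the OSQ. A minimal usage sketch of the two standard ways to obtain an initialized mutex is below; struct my_device and its fields are hypothetical and only illustrate DEFINE_MUTEX() versus mutex_init().

#include <linux/mutex.h>

static DEFINE_MUTEX(global_lock);        /* statically initialized mutex */

struct my_device {                       /* hypothetical driver state */
	struct mutex io_lock;
	int value;
};

static void my_device_setup(struct my_device *dev)
{
	mutex_init(&dev->io_lock);       /* dynamic init; must run before first lock */
}
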
64 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
65 * Bit1 indicates unlock needs to hand the lock to the top-waiter
81 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS); in __mutex_owner()
107 owner = atomic_long_read(&lock->owner); in __mutex_trylock_common()
129 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) { in __mutex_trylock_common()
171 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) in __mutex_trylock_fast()
181 return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL); in __mutex_unlock_fast()
187 atomic_long_or(flag, &lock->owner); in __mutex_set_flag()
192 atomic_long_andnot(flag, &lock->owner); in __mutex_clear_flag()
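
The owner word read and updated in the hits above packs the owning task_struct pointer together with the low flag bits described at lines 64-65 (masked by MUTEX_FLAGS). A rough, standalone sketch of that encoding, using made-up names and plain C11 instead of the kernel's atomics, is:

#include <stdio.h>

#define FLAG_WAITERS 0x01UL          /* stands in for MUTEX_FLAG_WAITERS */
#define FLAG_HANDOFF 0x02UL          /* stands in for MUTEX_FLAG_HANDOFF */
#define FLAGS_MASK   0x07UL          /* stands in for MUTEX_FLAGS */

struct task { int dummy; } __attribute__((aligned(8)));  /* low 3 bits free */

static struct task *owner_task(unsigned long owner)
{
	return (struct task *)(owner & ~FLAGS_MASK);     /* like __mutex_owner() */
}

static unsigned long owner_flags(unsigned long owner)
{
	return owner & FLAGS_MASK;                       /* like __owner_flags() */
}

int main(void)
{
	static struct task t;
	unsigned long owner = (unsigned long)&t | FLAG_WAITERS;

	printf("task=%p waiters=%lu\n", (void *)owner_task(owner),
	       owner_flags(owner) & FLAG_WAITERS);
	return 0;
}
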
197 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter; in __mutex_waiter_is_first()
210 list_add_tail(&waiter->list, list); in __mutex_add_waiter()
218 list_del(&waiter->list); in __mutex_remove_waiter()
219 if (likely(list_empty(&lock->wait_list))) in __mutex_remove_waiter()
233 unsigned long owner = atomic_long_read(&lock->owner); in __mutex_handoff()
246 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new)) in __mutex_handoff()
256 * branch is predicted by the CPU as default-untaken.
261 * mutex_lock - acquire the mutex
272 * (or statically defined) before it can be locked. memset()-ing
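
A hedged usage sketch for the mutex_lock()/mutex_unlock() pair documented above, reusing the hypothetical struct my_device from the init sketch earlier; the lock may sleep, so this is process-context code only.

#include <linux/mutex.h>

static int my_device_bump(struct my_device *dev)     /* hypothetical */
{
	mutex_lock(&dev->io_lock);    /* may sleep until the mutex is free */
	dev->value++;                 /* critical section */
	mutex_unlock(&dev->io_lock);  /* must be released by the acquiring task */
	return 0;
}
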
312 * If ww->ctx is set the contents are undefined, only in ww_mutex_spin_on_owner()
322 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) in ww_mutex_spin_on_owner()
332 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) in ww_mutex_spin_on_owner()
361 * Ensure we emit the owner->on_cpu, dereference _after_ in mutex_spin_on_owner()
362 * checking lock->owner still matches owner. And we already in mutex_spin_on_owner()
363 * disabled preemption which is equal to the RCU read-side in mutex_spin_on_owner()
403 * We already disabled preemption which is equal to the RCU read-side in mutex_can_spin_on_owner()
412 * If lock->owner is not set, the mutex has been released. Return true in mutex_can_spin_on_owner()
436 * queue. The waiter-spinner will spin on the lock directly and concurrently
448 * in case spinning isn't possible. As a waiter-spinner in mutex_optimistic_spin()
460 if (!osq_lock(&lock->osq)) in mutex_optimistic_spin()
481 * everything in this loop to be re-loaded. We don't need in mutex_optimistic_spin()
489 osq_unlock(&lock->osq); in mutex_optimistic_spin()
496 osq_unlock(&lock->osq); in mutex_optimistic_spin()
501 * reschedule now, before we try-lock the mutex. This avoids getting in mutex_optimistic_spin()
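
The fragments above come from the midpath: before sleeping, a contender spins as long as the current owner is still running on a CPU, with the OSQ (osq_lock()/osq_unlock()) keeping the spinner queue orderly and need_resched() bounding the spin. The userspace-only toy below is an assumption-laden simplification of that idea, not the kernel code; every name in it is made up and the OSQ and rescheduling checks are reduced to a simple spin budget.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_mutex {
	_Atomic unsigned long owner;          /* 0 == unlocked, else owner id */
};

struct toy_task {
	unsigned long id;
	_Atomic bool on_cpu;                  /* stands in for owner->on_cpu */
};

static bool toy_trylock(struct toy_mutex *m, unsigned long me)
{
	unsigned long unlocked = 0;
	return atomic_compare_exchange_strong(&m->owner, &unlocked, me);
}

/* Spin for at most 'budget' rounds while the owner is still on a CPU;
 * return true if we grabbed the lock, false meaning "take the slowpath". */
static bool toy_optimistic_spin(struct toy_mutex *m, struct toy_task *owner,
				unsigned long me, int budget)
{
	while (budget-- > 0 && atomic_load(&owner->on_cpu)) {
		if (toy_trylock(m, me))
			return true;
	}
	return false;
}

int main(void)
{
	struct toy_task prev = { .id = 1, .on_cpu = true };
	struct toy_mutex m = { .owner = 1 };  /* prev holds the lock... */

	atomic_store(&m.owner, 0);            /* ...and then releases it */
	printf("acquired=%d\n", toy_optimistic_spin(&m, &prev, 2, 128));
	return 0;
}
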
527 * mutex_unlock - release the mutex
536 * returned - mutex_unlock() can NOT directly be used to release an object such
553 * ww_mutex_unlock - release the w/w mutex
566 mutex_unlock(&lock->base); in ww_mutex_unlock()
587 MUTEX_WARN_ON(lock->magic != lock); in __mutex_lock_common()
591 if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) in __mutex_lock_common()
592 return -EALREADY; in __mutex_lock_common()
599 if (ww_ctx->acquired == 0) in __mutex_lock_common()
600 ww_ctx->wounded = 0; in __mutex_lock_common()
603 nest_lock = &ww_ctx->dep_map; in __mutex_lock_common()
608 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); in __mutex_lock_common()
614 lock_acquired(&lock->dep_map, ip); in __mutex_lock_common()
622 raw_spin_lock(&lock->wait_lock); in __mutex_lock_common()
638 lock_contended(&lock->dep_map, ip); in __mutex_lock_common()
642 __mutex_add_waiter(lock, &waiter, &lock->wait_list); in __mutex_lock_common()
659 * Once we hold wait_lock, we're serialized against in __mutex_lock_common()
670 * against mutex_unlock() and wake-ups do not go missing. in __mutex_lock_common()
673 ret = -EINTR; in __mutex_lock_common()
683 raw_spin_unlock(&lock->wait_lock); in __mutex_lock_common()
704 raw_spin_lock(&lock->wait_lock); in __mutex_lock_common()
706 raw_spin_lock(&lock->wait_lock); in __mutex_lock_common()
712 * Wound-Wait; we stole the lock (!first_waiter), check the in __mutex_lock_common()
715 if (!ww_ctx->is_wait_die && in __mutex_lock_common()
725 /* got the lock - cleanup and rejoice! */ in __mutex_lock_common()
726 lock_acquired(&lock->dep_map, ip); in __mutex_lock_common()
732 raw_spin_unlock(&lock->wait_lock); in __mutex_lock_common()
741 raw_spin_unlock(&lock->wait_lock); in __mutex_lock_common()
743 mutex_release(&lock->dep_map, ip); in __mutex_lock_common()
763 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
771 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
778 return mutex_trylock(&ww->base); in ww_mutex_trylock()
780 MUTEX_WARN_ON(ww->base.magic != &ww->base); in ww_mutex_trylock()
787 if (ww_ctx->acquired == 0) in ww_mutex_trylock()
788 ww_ctx->wounded = 0; in ww_mutex_trylock()
790 if (__mutex_trylock(&ww->base)) { in ww_mutex_trylock()
792 mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_); in ww_mutex_trylock()
850 if (ctx->deadlock_inject_countdown-- == 0) { in ww_mutex_deadlock_injection()
851 tmp = ctx->deadlock_inject_interval; in ww_mutex_deadlock_injection()
857 ctx->deadlock_inject_interval = tmp; in ww_mutex_deadlock_injection()
858 ctx->deadlock_inject_countdown = tmp; in ww_mutex_deadlock_injection()
859 ctx->contending_lock = lock; in ww_mutex_deadlock_injection()
863 return -EDEADLK; in ww_mutex_deadlock_injection()
876 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, in ww_mutex_lock()
878 if (!ret && ctx && ctx->acquired > 1) in ww_mutex_lock()
891 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, in ww_mutex_lock_interruptible()
894 if (!ret && ctx && ctx->acquired > 1) in ww_mutex_lock_interruptible()
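
The ctx->acquired > 1 checks above belong to the wait/wound machinery; the way callers are expected to consume it is the acquire-context and backoff dance described in Documentation/locking/ww-mutex-design.rst. A hedged two-lock sketch of that pattern follows; my_class, my_obj and the helper names are hypothetical, and each object's lock is assumed to have been set up with ww_mutex_init(&obj->lock, &my_class).

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_class);            /* hypothetical ww class */

struct my_obj {                              /* hypothetical object */
	struct ww_mutex lock;
	int state;
};

/* Lock two distinct objects in either order without deadlocking: on
 * -EDEADLK drop everything, sleep on the contended lock with
 * ww_mutex_lock_slow(), and retry with that lock already held. */
static int lock_both(struct my_obj *a, struct my_obj *b,
		     struct ww_acquire_ctx *ctx)
{
	struct my_obj *contended = NULL;
	int ret;

retry:
	if (contended)
		ww_mutex_lock_slow(&contended->lock, ctx);

	if (a != contended) {
		ret = ww_mutex_lock(&a->lock, ctx);
		if (ret) {
			if (contended == b)
				ww_mutex_unlock(&b->lock);
			contended = a;
			goto err;
		}
	}

	if (b != contended) {
		ret = ww_mutex_lock(&b->lock, ctx);
		if (ret) {
			ww_mutex_unlock(&a->lock);
			contended = b;
			goto err;
		}
	}

	return 0;                            /* both locks held */

err:
	if (ret == -EDEADLK)
		goto retry;                  /* we hold nothing; back off and retry */
	return ret;
}

static int update_pair(struct my_obj *x, struct my_obj *y)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_class);
	ret = lock_both(x, y, &ctx);
	if (!ret) {
		ww_acquire_done(&ctx);       /* no further locks in this context */
		x->state = y->state = 0;     /* critical section on both objects */
		ww_mutex_unlock(&x->lock);
		ww_mutex_unlock(&y->lock);
	}
	ww_acquire_fini(&ctx);
	return ret;
}
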
912 mutex_release(&lock->dep_map, ip); in __mutex_unlock_slowpath()
921 owner = atomic_long_read(&lock->owner); in __mutex_unlock_slowpath()
929 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) { in __mutex_unlock_slowpath()
937 raw_spin_lock(&lock->wait_lock); in __mutex_unlock_slowpath()
939 if (!list_empty(&lock->wait_list)) { in __mutex_unlock_slowpath()
940 /* get the first entry from the wait-list: */ in __mutex_unlock_slowpath()
942 list_first_entry(&lock->wait_list, in __mutex_unlock_slowpath()
945 next = waiter->task; in __mutex_unlock_slowpath()
954 raw_spin_unlock(&lock->wait_lock); in __mutex_unlock_slowpath()
961 * Here come the less common (and hence less performance-critical) APIs:
971 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
979 * Return: 0 if the lock was successfully acquired or %-EINTR if a
995 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1003 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1018 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
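
A hedged sketch of how the interruptible variant documented above is usually consumed from process context (reusing the hypothetical struct my_device from the earlier sketches); the -EINTR from the lock attempt is commonly translated to -ERESTARTSYS on syscall paths.

#include <linux/errno.h>
#include <linux/mutex.h>

static int my_device_wait_op(struct my_device *dev)   /* hypothetical */
{
	if (mutex_lock_interruptible(&dev->io_lock))
		return -ERESTARTSYS;  /* a signal arrived before we got the lock */

	dev->value = 0;               /* critical section */
	mutex_unlock(&dev->io_lock);
	return 0;
}
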
1058 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, in __ww_mutex_lock_slowpath()
1066 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, in __ww_mutex_lock_interruptible_slowpath()
1073 * mutex_trylock - try to acquire the mutex, without waiting
1090 MUTEX_WARN_ON(lock->magic != lock); in mutex_trylock()
1094 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); in mutex_trylock()
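
Usage sketch for mutex_trylock(): an opportunistic, still process-context path that prefers to skip work rather than sleep waiting for the lock (names hypothetical, same my_device as above).

#include <linux/mutex.h>
#include <linux/types.h>

static bool my_device_try_flush(struct my_device *dev)  /* hypothetical */
{
	if (!mutex_trylock(&dev->io_lock))
		return false;         /* contended: skip and try again later */

	dev->value = 0;               /* critical section */
	mutex_unlock(&dev->io_lock);
	return true;
}
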
1106 if (__mutex_trylock_fast(&lock->base)) { in ww_mutex_lock()
1121 if (__mutex_trylock_fast(&lock->base)) { in ww_mutex_lock_interruptible()
1138 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1147 if (atomic_add_unless(cnt, -1, 1)) in atomic_dec_and_mutex_lock()
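
atomic_dec_and_mutex_lock() above returns holding the mutex only when the decrement actually reaches zero; a hedged sketch of the classic last-reference teardown pattern it enables (all names hypothetical):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(registry_lock);          /* protects registry_list */
static LIST_HEAD(registry_list);

struct my_ref_obj {                          /* hypothetical refcounted object */
	atomic_t refcnt;
	struct list_head node;
};

static void my_obj_put(struct my_ref_obj *obj)
{
	/* Returns true, with registry_lock held, only on the 1 -> 0 drop. */
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, &registry_lock))
		return;

	list_del(&obj->node);                /* last reference: unpublish */
	mutex_unlock(&registry_lock);
	kfree(obj);                          /* and free outside the lock */
}
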