Lines Matching full:lock
14 #include <trace/events/lock.h>
27 static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
38 /* Value we add to the lock in order to take the lock: */
41 /* If the lock has this value (used as a mask), taking the lock fails: */
44 /* Mask that indicates lock is held for this type: */
47 /* Waitlist we wakeup when releasing the lock: */
72 static inline void six_set_bitmask(struct six_lock *lock, u32 mask) in six_set_bitmask() argument
74 if ((atomic_read(&lock->state) & mask) != mask) in six_set_bitmask()
75 atomic_or(mask, &lock->state); in six_set_bitmask()
78 static inline void six_clear_bitmask(struct six_lock *lock, u32 mask) in six_clear_bitmask() argument
80 if (atomic_read(&lock->state) & mask) in six_clear_bitmask()
81 atomic_and(~mask, &lock->state); in six_clear_bitmask()
84 static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type, in six_set_owner() argument
91 EBUG_ON(lock->owner); in six_set_owner()
92 lock->owner = owner; in six_set_owner()
94 EBUG_ON(lock->owner != current); in six_set_owner()
98 static inline unsigned pcpu_read_count(struct six_lock *lock) in pcpu_read_count() argument
104 read_count += *per_cpu_ptr(lock->readers, cpu); in pcpu_read_count()
114 * for another thread taking the competing lock type, and we may have to do a
117 static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type, in __do_six_trylock() argument
123 EBUG_ON(type == SIX_LOCK_write && lock->owner != task); in __do_six_trylock()
125 (try != !(atomic_read(&lock->state) & SIX_LOCK_HELD_write))); in __do_six_trylock()
130 * The basic idea behind this algorithm is that you can implement a lock in __do_six_trylock()
134 * has the lock" and another for "thread b has the lock". in __do_six_trylock()
136 * To take the lock, a thread sets its variable indicating that it holds in __do_six_trylock()
137 * the lock, then issues a full memory barrier, then reads from the in __do_six_trylock()
139 * the lock. If we raced, we back off and retry/sleep. in __do_six_trylock()
141 * Failure to take the lock may cause a spurious trylock failure in in __do_six_trylock()
142 * another thread, because we temporarily set the lock to indicate that in __do_six_trylock()
147 * Therefore, if we fail to get the lock, and there were waiters of the in __do_six_trylock()
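As an aside, a minimal sketch of the two-variable scheme the comment above describes; this is illustrative only (demo_lock and its fields are hypothetical), not the six lock fast path itself, where one "variable" is the per-cpu reader counts and the other is SIX_LOCK_HELD_write:

struct demo_lock {
	bool a_held;
	bool b_held;
};

static bool demo_try_take_a(struct demo_lock *d)
{
	WRITE_ONCE(d->a_held, true);	/* declare "a holds the lock" */
	smp_mb();			/* pairs with the barrier on b's side */
	if (READ_ONCE(d->b_held)) {
		/* raced with b: back off, then retry or sleep */
		WRITE_ONCE(d->a_held, false);
		return false;
	}
	return true;
}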
154 if (type == SIX_LOCK_read && lock->readers) { in __do_six_trylock()
156 this_cpu_inc(*lock->readers); /* signal that we own lock */ in __do_six_trylock()
160 old = atomic_read(&lock->state); in __do_six_trylock()
163 this_cpu_sub(*lock->readers, !ret); in __do_six_trylock()
168 if (atomic_read(&lock->state) & SIX_LOCK_WAITING_write) in __do_six_trylock()
171 } else if (type == SIX_LOCK_write && lock->readers) { in __do_six_trylock()
173 atomic_add(SIX_LOCK_HELD_write, &lock->state); in __do_six_trylock()
179 * Paired with the smp_mb() in read lock fast path (per-cpu mode) in __do_six_trylock()
183 ret = !pcpu_read_count(lock); in __do_six_trylock()
186 old = atomic_sub_return(SIX_LOCK_HELD_write, &lock->state); in __do_six_trylock()
191 old = atomic_read(&lock->state); in __do_six_trylock()
198 } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, old + l[type].lock_val)); in __do_six_trylock()
200 EBUG_ON(ret && !(atomic_read(&lock->state) & l[type].held_mask)); in __do_six_trylock()
204 six_set_owner(lock, type, old, task); in __do_six_trylock()
207 (atomic_read(&lock->state) & SIX_LOCK_HELD_write)); in __do_six_trylock()
212 static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type) in __six_lock_wakeup() argument
221 raw_spin_lock(&lock->wait_lock); in __six_lock_wakeup()
223 list_for_each_entry_safe(w, next, &lock->wait_list, list) { in __six_lock_wakeup()
231 ret = __do_six_trylock(lock, lock_type, w->task, false); in __six_lock_wakeup()
254 six_clear_bitmask(lock, SIX_LOCK_WAITING_read << lock_type); in __six_lock_wakeup()
256 raw_spin_unlock(&lock->wait_lock); in __six_lock_wakeup()
265 static void six_lock_wakeup(struct six_lock *lock, u32 state, in six_lock_wakeup() argument
274 __six_lock_wakeup(lock, lock_type); in six_lock_wakeup()
278 static bool do_six_trylock(struct six_lock *lock, enum six_lock_type type, bool try) in do_six_trylock() argument
282 ret = __do_six_trylock(lock, type, current, try); in do_six_trylock()
284 __six_lock_wakeup(lock, -ret - 1); in do_six_trylock()
290 * six_trylock_ip - attempt to take a six lock without blocking
291 * @lock: lock to take
297 bool six_trylock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip) in six_trylock_ip() argument
299 if (!do_six_trylock(lock, type, true)) in six_trylock_ip()
303 six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read, ip); in six_trylock_ip()
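A hedged usage sketch for the trylock path (foo, do_work() and defer_work() are hypothetical; _THIS_IP_ is the usual kernel macro):

	if (six_trylock_ip(&foo->lock, SIX_LOCK_intent, _THIS_IP_)) {
		do_work(foo);			/* hypothetical */
		six_unlock_ip(&foo->lock, SIX_LOCK_intent, _THIS_IP_);
	} else {
		defer_work(foo);		/* hypothetical: lock was contended */
	}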
309 * six_relock_ip - attempt to re-take a lock that was held previously
310 * @lock: lock to take
312 * @seq: lock sequence number obtained from six_lock_seq() while lock was
318 bool six_relock_ip(struct six_lock *lock, enum six_lock_type type, in six_relock_ip() argument
321 if (six_lock_seq(lock) != seq || !six_trylock_ip(lock, type, ip)) in six_relock_ip()
324 if (six_lock_seq(lock) != seq) { in six_relock_ip()
325 six_unlock_ip(lock, type, ip); in six_relock_ip()
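The intended relock pattern, sketched under the assumption that the caller saved the sequence number while the lock was still held (foo and revalidate() are hypothetical):

	u32 seq = six_lock_seq(&foo->lock);	/* capture while still held */
	six_unlock_ip(&foo->lock, SIX_LOCK_read, _THIS_IP_);

	/* ... potentially sleep, allocate, etc. ... */

	if (!six_relock_ip(&foo->lock, SIX_LOCK_read, seq, _THIS_IP_))
		revalidate(foo);	/* hypothetical: lock was modified meanwhile */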
335 static inline bool six_owner_running(struct six_lock *lock) in six_owner_running() argument
339 * acquiring the lock and setting the owner field. If we're an RT task in six_owner_running()
340 * that will live-lock because we won't let the owner complete. in six_owner_running()
343 struct task_struct *owner = READ_ONCE(lock->owner); in six_owner_running()
350 static inline bool six_optimistic_spin(struct six_lock *lock, in six_optimistic_spin() argument
360 if (lock->wait_list.next != &wait->list) in six_optimistic_spin()
363 if (atomic_read(&lock->state) & SIX_LOCK_NOSPIN) in six_optimistic_spin()
369 while (!need_resched() && six_owner_running(lock)) { in six_optimistic_spin()
381 six_set_bitmask(lock, SIX_LOCK_NOSPIN); in six_optimistic_spin()
400 static inline bool six_optimistic_spin(struct six_lock *lock, in six_optimistic_spin() argument
410 static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type, in six_lock_slowpath() argument
418 EBUG_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_write); in six_lock_slowpath()
419 atomic_add(SIX_LOCK_HELD_write, &lock->state); in six_lock_slowpath()
423 trace_contention_begin(lock, 0); in six_lock_slowpath()
424 lock_contended(&lock->dep_map, ip); in six_lock_slowpath()
430 raw_spin_lock(&lock->wait_lock); in six_lock_slowpath()
431 six_set_bitmask(lock, SIX_LOCK_WAITING_read << type); in six_lock_slowpath()
433 * Retry taking the lock after taking waitlist lock, in case we raced in six_lock_slowpath()
436 ret = __do_six_trylock(lock, type, current, false); in six_lock_slowpath()
440 if (!list_empty(&lock->wait_list)) { in six_lock_slowpath()
442 list_last_entry(&lock->wait_list, in six_lock_slowpath()
449 list_add_tail(&wait->list, &lock->wait_list); in six_lock_slowpath()
451 raw_spin_unlock(&lock->wait_lock); in six_lock_slowpath()
459 __six_lock_wakeup(lock, -ret - 1); in six_lock_slowpath()
463 if (six_optimistic_spin(lock, wait, type)) in six_lock_slowpath()
477 ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0; in six_lock_slowpath()
484 * acquired the lock - should_sleep_fn() might have in six_lock_slowpath()
488 raw_spin_lock(&lock->wait_lock); in six_lock_slowpath()
492 raw_spin_unlock(&lock->wait_lock); in six_lock_slowpath()
495 do_six_unlock_type(lock, type); in six_lock_slowpath()
505 six_clear_bitmask(lock, SIX_LOCK_HELD_write); in six_lock_slowpath()
506 six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read); in six_lock_slowpath()
508 trace_contention_end(lock, 0); in six_lock_slowpath()
514 * six_lock_ip_waiter - take a lock, with full waitlist interface
515 * @lock: lock to take
517 * @wait: pointer to wait object, which will be added to lock's waitlist
531 * lock's waiters, and for each waiter recursively walk their held locks.
533 * When this function must block, @wait will be added to @lock's waitlist before
535 * removed from the lock waitlist until the lock has been successfully acquired,
543 int six_lock_ip_waiter(struct six_lock *lock, enum six_lock_type type, in six_lock_ip_waiter() argument
553 six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, ip); in six_lock_ip_waiter()
555 ret = do_six_trylock(lock, type, true) ? 0 in six_lock_ip_waiter()
556 : six_lock_slowpath(lock, type, wait, should_sleep_fn, p, ip); in six_lock_ip_waiter()
559 six_release(&lock->dep_map, ip); in six_lock_ip_waiter()
561 lock_acquired(&lock->dep_map, ip); in six_lock_ip_waiter()
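A sketch of the waitlist interface, assuming a caller-side context struct; my_ctx, ctx and my_should_sleep() are hypothetical, and a negative return from should_sleep_fn aborts the lock operation as described above:

static int my_should_sleep(struct six_lock *lock, void *p)
{
	struct my_ctx *ctx = p;		/* hypothetical context */

	/* return a -ve error to abort instead of sleeping */
	return ctx->cancelled ? -EINTR : 0;
}

	/* at the lock site: */
	struct six_lock_waiter wait;
	int ret = six_lock_ip_waiter(&foo->lock, SIX_LOCK_write, &wait,
				     my_should_sleep, &ctx, _THIS_IP_);
	if (ret)
		return ret;		/* aborted: lock not held */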
568 static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type) in do_six_unlock_type() argument
573 lock->owner = NULL; in do_six_unlock_type()
576 lock->readers) { in do_six_unlock_type()
578 this_cpu_dec(*lock->readers); in do_six_unlock_type()
580 state = atomic_read(&lock->state); in do_six_unlock_type()
585 v += atomic_read(&lock->state) & SIX_LOCK_NOSPIN; in do_six_unlock_type()
587 EBUG_ON(!(atomic_read(&lock->state) & l[type].held_mask)); in do_six_unlock_type()
588 state = atomic_sub_return_release(v, &lock->state); in do_six_unlock_type()
591 six_lock_wakeup(lock, state, l[type].unlock_wakeup); in do_six_unlock_type()
595 * six_unlock_ip - drop a six lock
596 * @lock: lock to unlock
600 * When a lock is held multiple times (because six_lock_increment() was used),
601 * this decrements the 'lock held' counter by one.
604 * six_lock_read(&foo->lock); read count 1
605 * six_lock_increment(&foo->lock, SIX_LOCK_read); read count 2
606 * six_lock_unlock(&foo->lock, SIX_LOCK_read); read count 1
607 * six_lock_unlock(&foo->lock, SIX_LOCK_read); read count 0
609 void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip) in six_unlock_ip() argument
612 !(atomic_read(&lock->state) & SIX_LOCK_HELD_intent)); in six_unlock_ip()
615 lock->owner != current); in six_unlock_ip()
618 six_release(&lock->dep_map, ip); in six_unlock_ip()
620 lock->seq++; in six_unlock_ip()
623 lock->intent_lock_recurse) { in six_unlock_ip()
624 --lock->intent_lock_recurse; in six_unlock_ip()
628 do_six_unlock_type(lock, type); in six_unlock_ip()
633 * six_lock_downgrade - convert an intent lock to a read lock
634 * @lock: lock to downgrade
636 * @lock will have read count incremented and intent count decremented
638 void six_lock_downgrade(struct six_lock *lock) in six_lock_downgrade() argument
640 six_lock_increment(lock, SIX_LOCK_read); in six_lock_downgrade()
641 six_unlock_intent(lock); in six_lock_downgrade()
646 * six_lock_tryupgrade - attempt to convert read lock to an intent lock
647 * @lock: lock to upgrade
649 * On success, @lock will have intent count incremented and read count
654 bool six_lock_tryupgrade(struct six_lock *lock) in six_lock_tryupgrade() argument
656 u32 old = atomic_read(&lock->state), new; in six_lock_tryupgrade()
664 if (!lock->readers) { in six_lock_tryupgrade()
670 } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, new)); in six_lock_tryupgrade()
672 if (lock->readers) in six_lock_tryupgrade()
673 this_cpu_dec(*lock->readers); in six_lock_tryupgrade()
675 six_set_owner(lock, SIX_LOCK_intent, old, current); in six_lock_tryupgrade()
682 * six_trylock_convert - attempt to convert a held lock from one type to another
683 * @lock: lock to upgrade
687 * On success, @lock will have intent count incremented and read count
692 bool six_trylock_convert(struct six_lock *lock, in six_trylock_convert() argument
702 six_lock_downgrade(lock); in six_trylock_convert()
705 return six_lock_tryupgrade(lock); in six_trylock_convert()
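Hedged example of converting a held read lock for modification (foo and modify() are hypothetical); note that on failure the original read lock is still held:

	if (six_trylock_convert(&foo->lock, SIX_LOCK_read, SIX_LOCK_intent)) {
		modify(foo);			/* hypothetical */
		six_unlock_ip(&foo->lock, SIX_LOCK_intent, _THIS_IP_);
	} else {
		/* conversion failed; we still hold the read lock */
		six_unlock_ip(&foo->lock, SIX_LOCK_read, _THIS_IP_);
		/* ... retake from scratch and revalidate ... */
	}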
711 * six_lock_increment - increase held lock count on a lock that is already held
712 * @lock: lock to increment
715 * @lock must already be held, with a lock type that is greater than or equal to
718 * A corresponding six_unlock_type() call will be required for @lock to be fully
721 void six_lock_increment(struct six_lock *lock, enum six_lock_type type) in six_lock_increment() argument
723 six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_); in six_lock_increment()
729 if (lock->readers) { in six_lock_increment()
730 this_cpu_inc(*lock->readers); in six_lock_increment()
732 EBUG_ON(!(atomic_read(&lock->state) & in six_lock_increment()
735 atomic_add(l[type].lock_val, &lock->state); in six_lock_increment()
739 EBUG_ON(!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent)); in six_lock_increment()
740 lock->intent_lock_recurse++; in six_lock_increment()
750 * six_lock_wakeup_all - wake up all waiters on @lock
751 * @lock: lock to wake up waiters for
754 * abort the lock operation.
759 void six_lock_wakeup_all(struct six_lock *lock) in six_lock_wakeup_all() argument
761 u32 state = atomic_read(&lock->state); in six_lock_wakeup_all()
764 six_lock_wakeup(lock, state, SIX_LOCK_read); in six_lock_wakeup_all()
765 six_lock_wakeup(lock, state, SIX_LOCK_intent); in six_lock_wakeup_all()
766 six_lock_wakeup(lock, state, SIX_LOCK_write); in six_lock_wakeup_all()
768 raw_spin_lock(&lock->wait_lock); in six_lock_wakeup_all()
769 list_for_each_entry(w, &lock->wait_list, list) in six_lock_wakeup_all()
771 raw_spin_unlock(&lock->wait_lock); in six_lock_wakeup_all()
776 * six_lock_counts - return held lock counts, for each lock type
777 * @lock: lock to return counters for
779 * Return: the number of times a lock is held for read, intent and write.
781 struct six_lock_count six_lock_counts(struct six_lock *lock) in six_lock_counts() argument
785 ret.n[SIX_LOCK_read] = !lock->readers in six_lock_counts()
786 ? atomic_read(&lock->state) & SIX_LOCK_HELD_read in six_lock_counts()
787 : pcpu_read_count(lock); in six_lock_counts()
788 ret.n[SIX_LOCK_intent] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent) + in six_lock_counts()
789 lock->intent_lock_recurse; in six_lock_counts()
790 ret.n[SIX_LOCK_write] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_write); in six_lock_counts()
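For example, a debugging assertion built on the counters (a sketch; foo is hypothetical):

	struct six_lock_count c = six_lock_counts(&foo->lock);

	WARN_ON(c.n[SIX_LOCK_write] && !c.n[SIX_LOCK_intent]);	/* write implies intent */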
797 * six_lock_readers_add - directly manipulate reader count of a lock
798 * @lock: lock to add/subtract readers for
801 * When an upper layer is implementing lock reentrancy, we may have both read
802 * and intent locks on the same lock.
804 * When we need to take a write lock, the read locks will cause self-deadlock,
810 * failed, count up its own read locks, subtract them, take the write lock, and
813 * As in any other situation when taking a write lock, @lock must be held for
814 * intent one (or more) times, so @lock will never be left unlocked.
816 void six_lock_readers_add(struct six_lock *lock, int nr) in six_lock_readers_add() argument
818 if (lock->readers) { in six_lock_readers_add()
819 this_cpu_add(*lock->readers, nr); in six_lock_readers_add()
821 EBUG_ON((int) (atomic_read(&lock->state) & SIX_LOCK_HELD_read) + nr < 0); in six_lock_readers_add()
823 atomic_add(nr, &lock->state); in six_lock_readers_add()
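A sketch of the reentrancy pattern the comment describes, assuming the upper layer tracks how many read locks it itself holds (my_read_locks, foo and wait are hypothetical; the intent lock must already be held):

	/* hide our own read locks so they can't block our write lock */
	six_lock_readers_add(&foo->lock, -my_read_locks);

	ret = six_lock_ip_waiter(&foo->lock, SIX_LOCK_write, &wait,
				 NULL, NULL, _THIS_IP_);

	/* restore our read lock count now that the write lock is held */
	six_lock_readers_add(&foo->lock, my_read_locks);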
829 * six_lock_exit - release resources held by a lock prior to freeing
830 * @lock: lock to exit
832 * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is
835 void six_lock_exit(struct six_lock *lock) in six_lock_exit() argument
837 WARN_ON(lock->readers && pcpu_read_count(lock)); in six_lock_exit()
838 WARN_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_read); in six_lock_exit()
840 free_percpu(lock->readers); in six_lock_exit()
841 lock->readers = NULL; in six_lock_exit()
845 void __six_lock_init(struct six_lock *lock, const char *name, in __six_lock_init() argument
848 atomic_set(&lock->state, 0); in __six_lock_init()
849 raw_spin_lock_init(&lock->wait_lock); in __six_lock_init()
850 INIT_LIST_HEAD(&lock->wait_list); in __six_lock_init()
852 debug_check_no_locks_freed((void *) lock, sizeof(*lock)); in __six_lock_init()
853 lockdep_init_map(&lock->dep_map, name, key, 0); in __six_lock_init()
866 * failure if they wish by checking lock->readers, but generally in __six_lock_init()
869 lock->readers = alloc_percpu(unsigned); in __six_lock_init()
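Init/teardown in percpu mode might look like this (a sketch; foo is hypothetical, and six_lock_init() is assumed to be the usual wrapper around __six_lock_init() that supplies the lockdep key and name). If alloc_percpu() fails, the lock silently falls back to non-percpu mode, which callers may detect via lock->readers as noted above:

	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;

	six_lock_init(&foo->lock, SIX_LOCK_INIT_PCPU);

	/* ... use the lock ... */

	six_lock_exit(&foo->lock);	/* frees the percpu reader counts */
	kfree(foo);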