Lines matching +full:lock +full:- +full:state (all matches below are from kernel/locking/qspinlock_paravirt.h; each entry is the file line number followed by that line's contents)
1 /* SPDX-License-Identifier: GPL-2.0 */
16 * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
17 * pv_kick(cpu) -- wakes a suspended vcpu
30 * not running. The one lock stealing attempt allowed at slowpath entry
31 * mitigates the slight slowdown for non-overcommitted guests with this
32 * aggressive wait-early mechanism.
53 u8 state;
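
For orientation: pv_wait()/pv_kick() (lines 16-17) suspend and wake a vCPU on the per-node state byte matched at line 53. A rough sketch of the surrounding declarations, reconstructed from memory of the mainline file (field order and comment text are approximate):

struct pv_node {
	struct mcs_spinlock	mcs;
	int			cpu;
	u8			state;
};

/* Values taken by pv_node.state */
enum vcpu_state {
	vcpu_running = 0,
	vcpu_halted,		/* Used only in pv_wait_node */
	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
};
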
57 * Hybrid PV queued/unfair lock
60 * it will be called once when a lock waiter enters the PV slowpath before
64 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
65 * When that bit becomes visible to the incoming waiters, no lock stealing
67 * enter the MCS wait queue. So lock starvation shouldn't happen as long
69 * and hence disabling lock stealing.
71 * When the pending bit isn't set, the lock waiters will stay in the unfair
72 * mode spinning on the lock unless the MCS wait queue is empty. In this
73 * case, the lock waiters will enter the queued mode slowpath trying to
76 * This hybrid PV queued/unfair lock combines the best attributes of a
77 * queued lock (no lock starvation) and an unfair lock (good performance
81 static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
84 * Stay in unfair lock mode as long as queued mode waiters are
88 int val = atomic_read(&lock->val);
92 try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {
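
The four fragments above (file lines 81-92) belong to pv_hybrid_queued_unfair_trylock(). A sketch of how the loop fits together, with the lock-event counter dropped; the mask names (_Q_LOCKED_PENDING_MASK, _Q_TAIL_MASK, _Q_PENDING_MASK) are assumed to match mainline:

static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
	/*
	 * Stay in unfair lock mode as long as queued mode waiters are
	 * present in the MCS wait queue but the pending bit isn't set.
	 */
	for (;;) {
		int val = atomic_read(&lock->val);
		u8 old = 0;

		/* Steal the lock if it is neither held nor pending. */
		if (!(val & _Q_LOCKED_PENDING_MASK) &&
		    try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL))
			return true;

		/* Give up and queue if the queue is empty or pending is set. */
		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
			break;

		cpu_relax();
	}

	return false;
}
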
107 * is actively spinning on the lock and no lock stealing is allowed.
110 static __always_inline void set_pending(struct qspinlock *lock)
112 WRITE_ONCE(lock->pending, 1);
118 * lock just to be sure that it will get it.
120 static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
124 return !READ_ONCE(lock->locked) &&
125 try_cmpxchg_acquire(&lock->locked_pending, &old, _Q_LOCKED_VAL);
128 static __always_inline void set_pending(struct qspinlock *lock)
130 atomic_or(_Q_PENDING_VAL, &lock->val);
133 static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
137 old = atomic_read(&lock->val);
145 } while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new));
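
The two set_pending()/trylock_clear_pending() pairs above are the byte-access variant (_Q_PENDING_BITS == 8) and the full-word atomic fallback. A sketch of both, filled in around the quoted lines; the #if split and the locked_pending halfword layout are assumed to follow mainline:

#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 1);
}

/*
 * Clear pending and set locked in one cmpxchg_acquire() on the combined
 * locked_pending halfword, but only if the locked byte reads as free.
 */
static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
	u16 old = _Q_PENDING_VAL;

	return !READ_ONCE(lock->locked) &&
	       try_cmpxchg_acquire(&lock->locked_pending, &old, _Q_LOCKED_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
	atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
	int old, new;

	old = atomic_read(&lock->val);
	do {
		if (old & _Q_LOCKED_MASK)
			return false;
		/* Try to clear the pending bit and set the locked bit. */
		new = (old & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
	} while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new));

	return true;
}
#endif /* _Q_PENDING_BITS == 8 */
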
152 * Hash table of lock and MCS node addresses for fast lookup
154 * Hashing is done on a per-cacheline basis to minimize the need to access
160 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
168 struct qspinlock *lock;
192 * Allocate space from bootmem which should be page-size aligned
204 for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0; \
206 offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
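
Lines 152-206 describe the lock-to-node hash table used to find the blocked queue head at unlock time. A sketch of the entry layout and the probing macro around the quoted fragments; the sizing constants and the bootmem allocation are recalled from mainline and may differ in detail:

struct pv_hash_entry {
	struct qspinlock *lock;
	struct pv_node   *node;
};

#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Start probing at the first entry of the cache line the hash lands in,
 * then walk the whole table linearly with wrap-around.
 */
#define for_each_hash_entry(he, offset, hash)						\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
	     offset < (1 << pv_lock_hash_bits);						\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
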
208 static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
210 unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
217 if (try_cmpxchg(&he->lock, &old, lock)) {
218 WRITE_ONCE(he->node, node);
220 return &he->lock;
226 * This is guaranteed by ensuring every blocked lock only ever consumes
230 * The single entry is guaranteed by having the lock owner unhash
236 static struct pv_node *pv_unhash(struct qspinlock *lock)
238 unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
243 if (READ_ONCE(he->lock) == lock) {
244 node = READ_ONCE(he->node);
245 WRITE_ONCE(he->lock, NULL);
253 * having the lock owner do the unhash -- IFF the unlock sees the
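
The matches at lines 208-253 are from pv_hash() and pv_unhash(). A sketch of both with hop counting and event statistics omitted; the failure comments paraphrase the quoted ones:

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;

	for_each_hash_entry(he, offset, hash) {
		struct qspinlock *old = NULL;

		if (try_cmpxchg(&he->lock, &old, lock)) {
			WRITE_ONCE(he->node, node);
			return &he->lock;
		}
	}

	/*
	 * Hard assume there is a free entry: every blocked lock only ever
	 * consumes one entry, and the lock owner unhashes it before release,
	 * so the table cannot fill up.
	 */
	BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	struct pv_node *node;

	for_each_hash_entry(he, offset, hash) {
		if (READ_ONCE(he->lock) == lock) {
			node = READ_ONCE(he->node);
			WRITE_ONCE(he->lock, NULL);
			return node;
		}
	}

	/*
	 * Hard assume we will find an entry: only a lock that was hashed
	 * ever reaches the unlock slowpath that calls this.
	 */
	BUG();
}
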
261 * in a running state.
269 return READ_ONCE(prev->state) != vcpu_running;
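
Lines 261-269 are from pv_wait_early(), the check a queued vCPU uses to stop spinning when its predecessor is not running. A sketch, assuming the predecessor state is only sampled every 256 iterations (PV_PREV_CHECK_MASK) as in mainline:

#define PV_PREV_CHECK_MASK	0xff

/*
 * Look at prev->state only once every 256 spins so the previous node's
 * cacheline stays mostly read-shared.
 */
static inline bool pv_wait_early(struct pv_node *prev, int loop)
{
	if ((loop & PV_PREV_CHECK_MASK) != 0)
		return false;

	return READ_ONCE(prev->state) != vcpu_running;
}
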
281 pn->cpu = smp_processor_id();
282 pn->state = vcpu_running;
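
The two stores above are essentially the whole body of pv_init_node(); a sketch (the BUILD_BUG_ON() size check against struct qnode is recalled from mainline and may be phrased differently there):

static void pv_init_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));

	pn->cpu = smp_processor_id();
	pn->state = vcpu_running;
}
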
286 * Wait for node->locked to become true, halt the vcpu after a short spin.
298 for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
299 if (READ_ONCE(node->locked))
309 * Order pn->state vs pn->locked thusly:
311 * [S] pn->state = vcpu_halted        [S] next->locked = 1
313 * [L] pn->locked                   [RmW] pn->state = vcpu_hashed
317 smp_store_mb(pn->state, vcpu_halted);
319 if (!READ_ONCE(node->locked)) {
322 pv_wait(&pn->state, vcpu_halted);
328 * to hash this lock.
330 cmpxchg(&pn->state, vcpu_halted, vcpu_running);
337 * MCS lock will be released soon.
340 !READ_ONCE(node->locked));
344 * By now our node->locked should be 1 and our caller will not actually
345 * spin-wait for it. We do however rely on our caller to do a
346 * load-acquire for us.
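
Lines 286-346 are scattered pieces of pv_wait_node(), the wait loop of a queued (non-head) vCPU. A sketch of the control flow with event counters dropped; declarations and comments outside the quoted lines are approximate:

static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct pv_node *pp = (struct pv_node *)prev;
	bool wait_early;	/* only feeds the statistics omitted here */
	int loop;

	for (;;) {
		/* Spin for a while, bailing out early if prev is preempted. */
		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(node->locked))
				return;
			if (pv_wait_early(pp, loop)) {
				wait_early = true;
				break;
			}
			cpu_relax();
		}

		/*
		 * Publish vcpu_halted before re-checking node->locked;
		 * pairs with the cmpxchg() in pv_kick_node().
		 */
		smp_store_mb(pn->state, vcpu_halted);

		if (!READ_ONCE(node->locked))
			pv_wait(&pn->state, vcpu_halted);

		/*
		 * If pv_kick_node() already moved us to vcpu_hashed, keep that
		 * value so pv_wait_head_or_lock() does not hash the lock again.
		 */
		cmpxchg(&pn->state, vcpu_halted, vcpu_running);

		/*
		 * A wakeup with node->locked still clear is spurious: loop
		 * around and spin again rather than halting immediately.
		 */
	}
}
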
351 * Called after setting next->locked = 1 when we're the lock owner.
353 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
357 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
362 * If the vCPU is indeed halted, advance its state to match that of
364 * observe its next->locked value and advance itself.
368 * The write to next->locked in arch_mcs_spin_unlock_contended()
369 * must be ordered before the read of pn->state in the cmpxchg()
373 * dependency will order the reading of pn->state before any
377 if (!try_cmpxchg_relaxed(&pn->state, &old, vcpu_hashed))
381 * Put the lock into the hash table and set the _Q_SLOW_VAL.
387 WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
388 (void)pv_hash(lock, pn);
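
Lines 351-388 are from pv_kick_node(), which the new lock holder runs on its successor's queue node right after handing over the MCS lock. A sketch; the quoted comment about cmpxchg() failure ordering on architectures other than x86/PPC is compressed into a short note:

static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	u8 old = vcpu_halted;

	/*
	 * If the successor vCPU really is halted, advance it straight to
	 * vcpu_hashed; if the cmpxchg() fails it was still running and will
	 * observe next->locked == 1 on its own.  The store to next->locked
	 * must be ordered before this read of pn->state for correctness.
	 */
	if (!try_cmpxchg_relaxed(&pn->state, &old, vcpu_hashed))
		return;

	/*
	 * Hash the lock and mark it _Q_SLOW_VAL on the halted vCPU's behalf.
	 * The vCPU doing these stores currently holds the lock and will read
	 * _Q_SLOW_VAL and the hash entry itself at unlock time, so plain
	 * (non-atomic) stores are sufficient.
	 */
	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
	(void)pv_hash(lock, pn);
}
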
392 * Wait for l->locked to become clear and acquire the lock;
396 * The current value of the lock will be returned for additional processing.
399 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
407 * If pv_kick_node() already advanced our state, we don't need to
410 if (READ_ONCE(pn->state) == vcpu_hashed)
420 * Set correct vCPU state to be used by queue node wait-early
423 WRITE_ONCE(pn->state, vcpu_running);
426 * Set the pending bit in the active lock spinning loop to
427 * disable lock stealing before attempting to acquire the lock.
429 set_pending(lock);
430 for (loop = SPIN_THRESHOLD; loop; loop--) {
431 if (trylock_clear_pending(lock))
435 clear_pending(lock);
439 lp = pv_hash(lock, pn);
446 * [S] <hash>                      [Rmw] l->locked == _Q_SLOW_VAL
448 * [RmW] l->locked = _Q_SLOW_VAL      [L] <unhash>
452 if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
454 * The lock was free and now we own the lock.
455 * Change the lock value back to _Q_LOCKED_VAL
458 WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
463 WRITE_ONCE(pn->state, vcpu_hashed);
466 pv_wait(&lock->locked, _Q_SLOW_VAL);
469 * Because of lock stealing, the queue head vCPU may not be
470 * able to acquire the lock before it has to wait again.
481 return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
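
Lines 392-481 are from pv_wait_head_or_lock(), the loop run by the vCPU at the head of the MCS queue. A sketch of the overall structure with wait counts and event counters omitted; the once-only hashing bookkeeping through the lp pointer follows the quoted fragments, but details outside them are approximate:

static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct qspinlock **lp = NULL;
	int loop;

	/*
	 * If pv_kick_node() already hashed the lock on our behalf, remember
	 * that so it is not hashed a second time below.
	 */
	if (READ_ONCE(pn->state) == vcpu_hashed)
		lp = (struct qspinlock **)1;

	for (;;) {
		WRITE_ONCE(pn->state, vcpu_running);

		/* Disable lock stealing while actively spinning for the lock. */
		set_pending(lock);
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				goto gotlock;
			cpu_relax();
		}
		clear_pending(lock);

		if (!lp) {		/* ONCE */
			lp = pv_hash(lock, pn);

			/*
			 * Hash before setting _Q_SLOW_VAL so an unlocker that
			 * observes _Q_SLOW_VAL is guaranteed to find the hash
			 * entry; pairs with smp_rmb() in the unlock slowpath.
			 */
			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
				/* Lock was free: take it and undo the hashing. */
				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
				WRITE_ONCE(*lp, NULL);
				goto gotlock;
			}
		}
		WRITE_ONCE(pn->state, vcpu_hashed);
		pv_wait(&lock->locked, _Q_SLOW_VAL);

		/* Lock stealing may force the queue head to wait repeatedly. */
	}

gotlock:
	/* The OR of _Q_LOCKED_VAL tells the caller the lock is now held. */
	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}
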
485 * Include the architecture specific callee-save thunk of the
487 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
499 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
505 "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
506 (unsigned long)lock, atomic_read(&lock->val));
511 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
513 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
523 node = pv_unhash(lock);
527 * release the lock.
529 smp_store_release(&lock->locked, 0);
532 * At this point the memory pointed at by lock can be freed/reused,
539 pv_kick(node->cpu);
543 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
549 * unhash. Otherwise it would be possible to have multiple @lock
552 if (try_cmpxchg_release(&lock->locked, &locked, 0))
555 __pv_queued_spin_unlock_slowpath(lock, locked);
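
Lines 485-555 cover the unlock path: __pv_queued_spin_unlock() releases a plainly locked byte with a single cmpxchg_release(), and falls back to __pv_queued_spin_unlock_slowpath() when the byte holds _Q_SLOW_VAL. A sketch of both, with the event counter and the #ifndef guard for architectures that provide their own callee-save thunk omitted:

__visible __lockfunc void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
	struct pv_node *node;

	if (unlikely(locked != _Q_SLOW_VAL)) {
		WARN(!debug_locks_silent,
		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
		     (unsigned long)lock, atomic_read(&lock->val));
		return;
	}

	/*
	 * A failed cmpxchg gives no ordering by itself, so order the
	 * pv_unhash() lookup after the read that saw _Q_SLOW_VAL.
	 */
	smp_rmb();

	/* Find and unhash the blocked queue head, then release the lock. */
	node = pv_unhash(lock);
	smp_store_release(&lock->locked, 0);

	/*
	 * The lock memory may be freed or reused from here on, but the
	 * pv_node is still valid; kicking an already-running vCPU merely
	 * costs a little extra latency.
	 */
	pv_kick(node->cpu);
}

__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 locked = _Q_LOCKED_VAL;

	/*
	 * Fast path: release a plainly locked byte.  If it holds _Q_SLOW_VAL
	 * we must unhash first, so take the slowpath instead.
	 */
	if (try_cmpxchg_release(&lock->locked, &locked, 0))
		return;

	__pv_queued_spin_unlock_slowpath(lock, locked);
}
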