/linux-6.12.1/include/linux/ |
D | rwlock_api_smp.h |
    void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
    void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
    void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
    void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
    void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
    void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
    void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
    unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
            __acquires(lock);
    unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
    [all …]
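These `_raw_*` declarations sit behind the public reader/writer lock wrappers declared via linux/spinlock.h. As a minimal sketch of how callers typically use that public API (the `example_*` names here are illustrative, not from the kernel tree):

    #include <linux/spinlock.h>

    /* Illustrative only: a counter read often and updated rarely. */
    static DEFINE_RWLOCK(example_table_lock);
    static int example_count;

    static int example_read(void)
    {
            int v;

            read_lock(&example_table_lock);   /* many readers may hold this concurrently */
            v = example_count;
            read_unlock(&example_table_lock);
            return v;
    }

    static void example_update(int v)
    {
            write_lock(&example_table_lock);  /* exclusive: waits for all readers to drain */
            example_count = v;
            write_unlock(&example_table_lock);
    }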
|
D | spinlock_api_up.h |
    #define assert_raw_spin_locked(lock)	do { (void)(lock); } while (0)
     * flags straight, to suppress compiler warnings of unused lock
    #define ___LOCK(lock) \
      do { __acquire(lock); (void)(lock); } while (0)
    #define __LOCK(lock) \
      do { preempt_disable(); ___LOCK(lock); } while (0)
    #define __LOCK_BH(lock) \
      do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
    #define __LOCK_IRQ(lock) \
      do { local_irq_disable(); __LOCK(lock); } while (0)
    [all …]
|
D | rwlock.h |
    extern void __rwlock_init(rwlock_t *lock, const char *name,
    # define rwlock_init(lock) \
    	__rwlock_init((lock), #lock, &__key); \
    # define rwlock_init(lock) \
    	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
    extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
    extern int do_raw_read_trylock(rwlock_t *lock);
    extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
    extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
    extern int do_raw_write_trylock(rwlock_t *lock);
    [all …]
|
D | spinlock.h |
    #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
    extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
    # define raw_spin_lock_init(lock) \
    	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
    # define raw_spin_lock_init(lock) \
    	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
    #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
    #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
    #define raw_spin_is_contended(lock)	(((void)(lock), 0))
     * between program-order earlier lock acquisitions and program-order later
    [all …]
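These macros back the usual spinlock_t API. A minimal usage sketch follows; the device structure and function names are hypothetical and only the locking pattern is the point:

    #include <linux/spinlock.h>

    /* Hypothetical device state shared between process and IRQ context. */
    struct example_dev {
            spinlock_t lock;
            unsigned int pending;
    };

    static void example_dev_init(struct example_dev *dev)
    {
            spin_lock_init(&dev->lock);
            dev->pending = 0;
    }

    /* Reached from both process context and an interrupt handler, so the
     * irqsave variant is used to restore the correct IRQ state either way. */
    static void example_dev_add_work(struct example_dev *dev)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);
            dev->pending++;
            spin_unlock_irqrestore(&dev->lock, flags);
    }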
|
D | spinlock_api_smp.h |
    void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
    void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
            __acquires(lock);
    _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
            __acquires(lock);
    void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
    void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
            __acquires(lock);
    unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
            __acquires(lock);
    [all …]
|
D | spinlock_rt.h |
    extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
    static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
    	rt_mutex_base_init(&(slock)->lock); \
    	rt_mutex_base_init(&(slock)->lock); \
    extern void rt_spin_lock(spinlock_t *lock);
    extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
    extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
    extern void rt_spin_unlock(spinlock_t *lock);
    extern void rt_spin_lock_unlock(spinlock_t *lock);
    extern int rt_spin_trylock_bh(spinlock_t *lock);
    [all …]
|
D | local_lock.h |
     * local_lock_init - Runtime initialize a lock instance
    #define local_lock_init(lock)		__local_lock_init(lock)
     * local_lock - Acquire a per CPU local lock
     * @lock: The lock variable
    #define local_lock(lock)		__local_lock(lock)
     * local_lock_irq - Acquire a per CPU local lock and disable interrupts
     * @lock: The lock variable
    #define local_lock_irq(lock)		__local_lock_irq(lock)
     * local_lock_irqsave - Acquire a per CPU local lock, save and disable
     * @lock: The lock variable
    [all …]
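A rough sketch of the intended use, following Documentation/locking/locktypes.rst: a local_lock_t protects per-CPU data against preemption (or interrupts, with the _irq variants) on the local CPU only. The per-CPU structure and field names below are made up:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    /* Hypothetical per-CPU statistics block. */
    struct example_pcpu_stats {
            local_lock_t lock;
            u64 events;
    };

    static DEFINE_PER_CPU(struct example_pcpu_stats, example_stats) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void example_count_event(void)
    {
            /* Serializes only against other users on this CPU; no cross-CPU
             * cache-line bouncing as with a global spinlock. */
            local_lock(&example_stats.lock);
            this_cpu_inc(example_stats.events);
            local_unlock(&example_stats.lock);
    }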
|
D | local_lock_internal.h |
    #define __local_lock_init(lock)					\
    	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
    	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,	\
    	local_lock_debug_init(lock);				\
    #define __spinlock_nested_bh_init(lock)				\
    	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
    	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,	\
    	local_lock_debug_init(lock);				\
    #define __local_lock(lock)					\
    	local_lock_acquire(this_cpu_ptr(lock));			\
    [all …]
|
D | lockdep.h |
     * Every lock has a list of other locks that were taken after it.
     * bit 0 is reused to indicate if the lock has been accessed in BFS.
     * struct lock_chain - lock dependency chain record
     * @entry: the collided lock chains in lock_chain hash list
    extern void lockdep_reset_lock(struct lockdep_map *lock);
    extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
    lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
    	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
    lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
    	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
    [all …]
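The lockdep_init_map_*() helpers above are what the lock initializers feed their struct lock_class_key into; every lock of a given class shares one dependency record. On the caller side, nested acquisition of two locks of the same class has to be annotated or lockdep reports a possible recursive deadlock. A hedged sketch, with hypothetical names:

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    /* Hypothetical: two objects of the same type locked in parent-then-child
     * order. Both spinlocks share one lockdep class, so the inner acquisition
     * is annotated with a nesting level. */
    struct example_node {
            spinlock_t lock;
            struct example_node *parent;
    };

    static void example_lock_pair(struct example_node *child)
    {
            spin_lock(&child->parent->lock);
            spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
            /* ... move data between parent and child ... */
            spin_unlock(&child->lock);
            spin_unlock(&child->parent->lock);
    }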
|
/linux-6.12.1/kernel/locking/ |
D | spinlock_debug.c |
    void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
    	 * Make sure we are not reinitializing a held lock:
    	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    	lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
    	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    	lock->magic = SPINLOCK_MAGIC;
    	lock->owner = SPINLOCK_OWNER_INIT;
    	lock->owner_cpu = -1;
    void __rwlock_init(rwlock_t *lock, const char *name,
    	 * Make sure we are not reinitializing a held lock:
    [all …]
|
D | rtmutex_api.c |
     * Debug aware fast / slowpath lock,trylock,unlock
    static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
    	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
    	ret = __rt_mutex_lock(&lock->rtmutex, state);
    		mutex_release(&lock->dep_map, _RET_IP_);
     * rt_mutex_lock_nested - lock a rt_mutex
     * @lock: the rt_mutex to be locked
    void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
    	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
    void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
    [all …]
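These are the lockdep-aware wrappers around the core rtmutex. A minimal sketch of the caller-facing rt_mutex API they implement; the lock name is made up:

    #include <linux/rtmutex.h>

    /* Hypothetical resource protected by a priority-inheriting mutex. */
    static DEFINE_RT_MUTEX(example_rt_lock);

    static void example_touch_resource(void)
    {
            /* May sleep; if a higher-priority task blocks here, the current
             * owner is boosted so it can release the lock sooner. */
            rt_mutex_lock(&example_rt_lock);
            /* ... critical section ... */
            rt_mutex_unlock(&example_rt_lock);
    }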
|
D | mutex.c |
    #include <trace/events/lock.h>
    __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
    	atomic_long_set(&lock->owner, 0);
    	raw_spin_lock_init(&lock->wait_lock);
    	INIT_LIST_HEAD(&lock->wait_list);
    	osq_lock_init(&lock->osq);
    	debug_mutex_init(lock, name, key);
     * @owner: contains: 'struct task_struct *' to the current lock owner,
     * Bit1 indicates unlock needs to hand the lock to the top-waiter
    static inline struct task_struct *__mutex_owner(struct mutex *lock)
    [all …]
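For contrast with __mutex_init() above, here is a minimal sketch of the caller-facing mutex API (names are hypothetical):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_config_lock);
    static int example_config_value;

    static int example_set_config(int val)
    {
            int ret;

            /* Sleeping lock: only valid in process context. The interruptible
             * variant lets a pending signal abort the wait. */
            ret = mutex_lock_interruptible(&example_config_lock);
            if (ret)
                    return ret;

            example_config_value = val;
            mutex_unlock(&example_config_lock);
            return 0;
    }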
|
D | rtmutex.c |
    #include <trace/events/lock.h>
    			       struct rt_mutex *lock,
    static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
    static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
    static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
     * lock->owner state tracking:
     * lock->owner holds the task_struct pointer of the owner. Bit 0
     * is used to keep track of the "lock has waiters" state.
     *	NULL		0	lock is free (fast acquire possible)
     *	NULL		1	lock is free and has waiters and the top waiter
    [all …]
|
D | spinlock.c |
     * not re-enabled during lock-acquire (which the preempt-spin-ops do):
     * Some architectures can relax in favour of the CPU owning the lock.
     * This could be a long-held lock. We both prepare to spin for a long
     * towards that other CPU that it should break the lock ASAP.
    void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
    		if (likely(do_raw_##op##_trylock(lock)))		\
    			arch_##op##_relax(&lock->raw_lock);		\
    unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
    		if (likely(do_raw_##op##_trylock(lock)))		\
    			arch_##op##_relax(&lock->raw_lock);		\
    [all …]
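The ## token pasting above comes from the BUILD_LOCK_OPS() macro, which stamps out these trylock-and-relax loops for the spin, read and write variants. Roughly, as a hand-expanded sketch (not the literal preprocessor output, and simplified versus the irqsave variant's flag handling), the spin case becomes:

    /* Hand-expanded sketch of the generated __raw_spin_lock(): keep trying the
     * trylock fast path with preemption disabled; between attempts, re-enable
     * preemption and let the architecture relax in favour of the lock owner. */
    void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
    {
            for (;;) {
                    preempt_disable();
                    if (likely(do_raw_spin_trylock(lock)))
                            break;
                    preempt_enable();

                    arch_spin_relax(&lock->raw_lock);
            }
    }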
|
D | ww_mutex.h |
    __ww_waiter_first(struct mutex *lock)
    	w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
    	if (list_entry_is_head(w, &lock->wait_list, list))
    __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
    	if (list_entry_is_head(w, &lock->wait_list, list))
    __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
    	if (list_entry_is_head(w, &lock->wait_list, list))
    __ww_waiter_last(struct mutex *lock)
    	w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
    	if (list_entry_is_head(w, &lock->wait_list, list))
    [all …]
|
/linux-6.12.1/fs/bcachefs/ |
D | six.c |
    #include <trace/events/lock.h>
    static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
    	/* Value we add to the lock in order to take the lock: */
    	/* If the lock has this value (used as a mask), taking the lock fails: */
    	/* Mask that indicates lock is held for this type: */
    	/* Waitlist we wakeup when releasing the lock: */
    static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
    	if ((atomic_read(&lock->state) & mask) != mask)
    		atomic_or(mask, &lock->state);
    static inline void six_clear_bitmask(struct six_lock *lock, u32 mask)
    [all …]
|
D | six.h |
     * write lock without deadlocking, so an operation that updates multiple nodes
     *   six_lock_read(&foo->lock);
     *   six_unlock_read(&foo->lock);
     * An intent lock must be held before taking a write lock:
     *   six_lock_intent(&foo->lock);
     *   six_lock_write(&foo->lock);
     *   six_unlock_write(&foo->lock);
     *   six_unlock_intent(&foo->lock);
     * There are also interfaces that take the lock type as an enum:
     *   six_lock_type(&foo->lock, SIX_LOCK_read);
    [all …]
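Putting the header comment's fragments together, a typical node update under a six lock looks roughly like this. This is only a sketch assembled from the calling convention shown in the comment above; `foo` is the header's own illustrative name and the surrounding logic is invented:

    /* Read for lookups; intent to serialize against other writers while
     * still admitting readers; write only around the actual modification. */
    static void example_update_node(struct foo *f)
    {
            six_lock_intent(&f->lock);      /* excludes other intent/write holders */

            /* ... prepare the update; readers may still hold read locks ... */

            six_lock_write(&f->lock);       /* now exclude readers too */
            /* ... modify the node ... */
            six_unlock_write(&f->lock);

            six_unlock_intent(&f->lock);
    }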
|
/linux-6.12.1/drivers/md/dm-vdo/ |
D | physical-zone.c |
    /* Each user data_vio needs a PBN read lock and write lock. */
    static inline bool has_lock_type(const struct pbn_lock *lock, enum pbn_lock_type type)
    	return (lock->implementation == &LOCK_IMPLEMENTATIONS[type]);
     * vdo_is_pbn_read_lock() - Check whether a pbn_lock is a read lock.
     * @lock: The lock to check.
     * Return: true if the lock is a read lock.
    bool vdo_is_pbn_read_lock(const struct pbn_lock *lock)
    	return has_lock_type(lock, VIO_READ_LOCK);
    static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type type)
    	lock->implementation = &LOCK_IMPLEMENTATIONS[type];
    [all …]
|
D | dedupe.c |
     * deduplicate against a single block instead of being serialized through a PBN read lock. Only one
     * A hash_lock acts like a state machine perhaps more than as a lock. Other than the starting and
     * containing the lock. An asynchronous operation is almost always performed upon entering a state,
     * In all states except DEDUPING, there is a single data_vio, called the lock agent, performing the
     * asynchronous operations on behalf of the lock. The agent will change during the lifetime of the
     * lock if the lock is shared by more than one data_vio. data_vios waiting to deduplicate are kept
     * on a wait queue. Viewed a different way, the agent holds the lock exclusively until the lock
     * enters the DEDUPING state, at which point it becomes a shared lock that all the waiters (and any
     * new data_vios that arrive) use to share a PBN lock. In state DEDUPING, there is no agent. When
     * the last data_vio in the lock calls back in DEDUPING, it becomes the agent and the lock becomes
    [all …]
|
/linux-6.12.1/fs/ocfs2/dlm/ |
D | dlmast.c |
    			   struct dlm_lock *lock);
    static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
     * lock level will obsolete a pending bast.
     * For example, if dlm_thread queued a bast for an EX lock that
     * lock owner downconverted to NL, the bast is now obsolete.
     * This is needed because the lock and convert paths can queue
    static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
    	assert_spin_locked(&lock->spinlock);
    	if (lock->ml.highest_blocked == LKM_IVMODE)
    	BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
    [all …]
|
D | dlmlock.c |
     * underlying calls for lock creation
    			   struct dlm_lock *lock, int flags);
    static void dlm_lock_detach_lockres(struct dlm_lock *lock);
    /* Tell us whether we can grant a new lock request.
     * returns: 1 if the lock can be granted, 0 otherwise.
    				  struct dlm_lock *lock)
    		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
    		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
    					     lock->ml.type))
    /* performs lock creation at the lockres master site
    [all …]
|
D | dlmconvert.c |
     * underlying calls for lock conversion
     * only one that holds a lock on exit (res->spinlock).
    			   struct dlm_lock *lock, int flags,
    			   struct dlm_lock *lock, int flags, int type);
    			   struct dlm_lock *lock, int flags, int type)
    	status = __dlmconvert_master(dlm, res, lock, flags, type,
    		dlm_queue_ast(dlm, lock);
    /* performs lock conversion at the lockres master site
     * taken: takes and drops lock->spinlock
     * call_ast: whether ast should be called for this lock
    [all …]
|
/linux-6.12.1/drivers/md/persistent-data/ |
D | dm-block-manager.c |
     * trace is also emitted for the previous lock acquisition.
    	spinlock_t lock;
    static unsigned int __find_holder(struct block_lock *lock,
    		if (lock->holders[i] == task)
    /* call this *after* you increment lock->count */
    static void __add_holder(struct block_lock *lock, struct task_struct *task)
    	unsigned int h = __find_holder(lock, NULL);
    	lock->holders[h] = task;
    	t = lock->traces + h;
    /* call this *before* you decrement lock->count */
    [all …]
|
/linux-6.12.1/include/asm-generic/ |
D | qrwlock.h |
     * Queue read/write lock
    #define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
    extern void queued_read_lock_slowpath(struct qrwlock *lock);
    extern void queued_write_lock_slowpath(struct qrwlock *lock);
     * queued_read_trylock - try to acquire read lock of a queued rwlock
     * @lock : Pointer to queued rwlock structure
     * Return: 1 if lock acquired, 0 if failed
    static inline int queued_read_trylock(struct qrwlock *lock)
    	cnts = atomic_read(&lock->cnts);
    		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
    [all …]
|
/linux-6.12.1/rust/kernel/sync/ |
D | lock.rs |
    //! Generic kernel lock and guard.
    //! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
    /// The "backend" of a lock.
    /// It is the actual implementation of the lock, without the need to repeat patterns used in all
    /// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
    ///   is owned, that is, between calls to [`lock`] and [`unlock`].
    ///   lock operation.
    /// [`lock`]: Backend::lock
    /// The state required by the lock.
    /// The state required to be kept between [`lock`] and [`unlock`].
    [all …]
|