/* SPDX-License-Identifier: GPL-2.0 */

 * see Documentation/locking/lockdep-design.rst for more details.

		to->class_cache[i] = NULL;
 * Every lock has a list of other locks that were taken after it.

/* used by BFS to record whether "prev -> this" only has -(*R)-> */

 * The parent field is used to implement breadth-first search, and the
 * bit 0 is reused to indicate if the lock has been accessed in BFS.

 * struct lock_chain - lock dependency chain record
 * @entry: the collided lock chains in lock_chain hash list
 * Initialization, self-test and debugging-output methods:

extern void lockdep_reset_lock(struct lockdep_map *lock);

#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

	current->lockdep_recursion += LOCKDEP_OFF;	\

	current->lockdep_recursion -= LOCKDEP_OFF;	\
extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,

lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);

lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
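/*
 * Illustrative sketch, not part of this header: a typical user embeds a
 * lockdep_map in its own structure and initializes it once with a static
 * key, so every instance initialized at the same site shares one lock
 * class. The "my_dev" names below are hypothetical.
 */
struct my_dev {
	struct lockdep_map dep_map;
};

static void my_dev_init(struct my_dev *dev)
{
	static struct lock_class_key my_dev_key;	/* one class per init site */

	lockdep_init_map(&dev->dep_map, "my_dev", &my_dev_key, 0);
}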
 * Reinitialize a lock key - for cases where there is special locking or
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)						\
	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			      (lock)->dep_map.wait_type_inner,			\
			      (lock)->dep_map.wait_type_outer,			\
			      (lock)->dep_map.lock_type)
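/*
 * Illustrative sketch, not part of this header: splitting one lock
 * instance off into its own class so that legitimate nesting against
 * the default class does not report false deadlocks. The "bdev" names
 * are hypothetical.
 */
static struct lock_class_key bdev_lock_key;

static void bdev_setup(spinlock_t *lock)
{
	spin_lock_init(lock);			/* lands in the generic class */
	lockdep_set_class(lock, &bdev_lock_key);	/* re-class this instance */
}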
 * lockdep_set_novalidate_class: disable checking of lock ordering on a given
 * lock
 * @lock: Lock to mark
 * Lockdep will still record that this lock has been taken, and print held

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
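/*
 * Illustrative sketch, modeled on the driver core's use for dev->mutex
 * (see device_initialize()): that mutex nests by device-tree topology,
 * which class-based validation cannot express, so ordering checks are
 * disabled while the lock itself is still tracked and reported.
 */
static void my_device_setup(struct device *dev)
{
	mutex_init(&dev->mutex);
	lockdep_set_novalidate_class(&dev->mutex);
}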
 * lockdep_set_notrack_class: disable lockdep tracking of a given lock entirely
 * @lock: Lock to mark

#define lockdep_set_notrack_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_track__, #lock)

#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    const struct lock_class_key *key)
{
	return lock->key == key;
}
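/*
 * Illustrative sketch, not part of this header: asserting that a lock
 * still carries the class it was given at setup time. The names below
 * are hypothetical.
 */
static struct lock_class_key my_key;

static void my_sanity_check(spinlock_t *lock)
{
	/* warn if someone re-classed this lock behind our back */
	WARN_ON_ONCE(!lockdep_match_class(lock, &my_key));
}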
 * Acquire a lock.
 * 1: read-acquire (no recursion allowed)
 * 2: read-acquire with same-instance recursion allowed
 * 0: simple checks (freeing, held-at-exit-time, etc.)

extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,

#define LOCK_STATE_UNKNOWN	-1

 * Same "read" as for lock_acquire(), except -1 means any.

extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))
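/*
 * Illustrative sketch, not part of this header: lockdep_is_held() is
 * commonly passed to rcu_dereference_protected() to document and check
 * which lock protects an update-side pointer. The "my_table" types are
 * hypothetical.
 */
struct my_table {
	struct mutex lock;
	struct my_entry __rcu *entry;
};

static struct my_entry *my_table_entry(struct my_table *t)
{
	/* legal without rcu_read_lock() because t->lock must be held */
	return rcu_dereference_protected(t->entry, lockdep_is_held(&t->lock));
}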
extern void lock_set_class(struct lockdep_map *lock, const char *name,

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

	lockdep_assert_once(!current->lockdep_depth)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)		lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)		lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)		lock_unpin_lock(&(l)->dep_map, (c))
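/*
 * Illustrative sketch, not part of this header: pinning catches code
 * that drops a lock behind the holder's back. The scheduler uses this
 * pattern for the runqueue lock; "my_rq" and do_work() are hypothetical.
 */
static void my_critical_section(struct my_rq *rq)
{
	struct pin_cookie cookie;

	raw_spin_lock(&rq->lock);
	cookie = lockdep_pin_lock(&rq->lock);	/* unlocking now would warn */

	do_work(rq);				/* must not release rq->lock */

	lockdep_unpin_lock(&rq->lock, cookie);	/* pair up before unlocking */
	raw_spin_unlock(&rq->lock);
}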
	.name = #_name "-wait-type-override",	\

# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
# define lockdep_init_map(lock, name, key, sub) \
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
#define lockdep_set_class_and_subclass(lock, key, sub) \
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock)	do { } while (0)
#define lockdep_set_notrack_class(lock)		do { } while (0)

 * Dummy forward declarations, allow users to write less ifdef-y code

#define lock_set_cmp_fn(lock, ...)	lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)

#define lock_set_cmp_fn(lock, ...)	do { } while (0)
extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)
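/*
 * Illustrative sketch of how lock implementations use LOCK_CONTENDED(),
 * modeled on rwsem's down_read() (details vary by kernel version): the
 * trylock fast path runs first, and only when it fails is the acquisition
 * recorded as contended before the blocking slow path is entered.
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}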
 * For trivial one-depth nesting of a lock-class, the following
 * of nesting should define their own lock-nesting subclasses.)

 * on the per lock-class debug mode:
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,	\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
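/*
 * Illustrative sketch, not part of this header: might_lock() lets a
 * function declare "I may take this lock" so lockdep validates the
 * dependency even on calls where the fast path avoids the lock. The
 * names below are hypothetical.
 */
static void *my_alloc(struct my_pool *pool)
{
	void *obj;

	might_lock(&pool->lock);	/* checked even if the fast path wins */

	obj = my_pool_fast_alloc(pool);
	if (!obj) {
		mutex_lock(&pool->lock);
		obj = my_pool_slow_alloc(pool);
		mutex_unlock(&pool->lock);
	}
	return obj;
}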
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.

# define might_lock(lock)			do { } while (0)
# define might_lock_read(lock)			do { } while (0)
# define might_lock_nested(lock, subclass)	do { } while (0)

		WARN_ONCE(debug_locks && !current->lockdep_recursion &&		\
			  !(current->hardirq_threaded || current->irq_config),	\