/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
# error "Do not include directly, include linux/local_lock.h"
#endif

#include <linux/percpu-defs.h>
#include <linux/lockdep.h>

#ifndef CONFIG_PREEMPT_RT
10 
/*
 * A local lock protects a per-CPU data structure.  With lockdep enabled
 * it carries a lockdep map plus the owning task for sanity checking;
 * otherwise it is an empty struct and costs nothing.
 */
typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
	struct task_struct	*owner;
#endif
} local_lock_t;
17 
18 #ifdef CONFIG_DEBUG_LOCK_ALLOC
19 # define LOCAL_LOCK_DEBUG_INIT(lockname)		\
20 	.dep_map = {					\
21 		.name = #lockname,			\
22 		.wait_type_inner = LD_WAIT_CONFIG,	\
23 		.lock_type = LD_LOCK_PERCPU,		\
24 	},						\
25 	.owner = NULL,
26 
/*
 * Debug-side acquire: record the acquisition with lockdep and warn if the
 * lock already has an owner (i.e. nested acquisition on this CPU).
 */
static inline void local_lock_acquire(local_lock_t *l)
{
	lock_map_acquire(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}
33 
/*
 * Debug-side release: warn if the releasing task is not the recorded
 * owner, then clear ownership and inform lockdep.
 */
static inline void local_lock_release(local_lock_t *l)
{
	DEBUG_LOCKS_WARN_ON(l->owner != current);
	l->owner = NULL;
	lock_map_release(&l->dep_map);
}
40 
/* Runtime init of the debug state; the lockdep map is set up separately. */
static inline void local_lock_debug_init(local_lock_t *l)
{
	l->owner = NULL;
}
45 #else /* CONFIG_DEBUG_LOCK_ALLOC */
46 # define LOCAL_LOCK_DEBUG_INIT(lockname)
local_lock_acquire(local_lock_t * l)47 static inline void local_lock_acquire(local_lock_t *l) { }
local_lock_release(local_lock_t * l)48 static inline void local_lock_release(local_lock_t *l) { }
local_lock_debug_init(local_lock_t * l)49 static inline void local_lock_debug_init(local_lock_t *l) { }
50 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
51 
52 #define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
53 
/*
 * Runtime initializer: registers the lock with lockdep as a per-CPU
 * lock (LD_WAIT_CONFIG inner wait type) and resets the debug state.
 */
#define __local_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_PERCPU);			\
	local_lock_debug_init(lock);				\
} while (0)
64 
/*
 * Like __local_lock_init() but registers the map as a normal lock
 * (LD_LOCK_NORMAL) for BH-nested spinlock usage.
 */
#define __spinlock_nested_bh_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_NORMAL);			\
	local_lock_debug_init(lock);				\
} while (0)
75 
/*
 * !PREEMPT_RT: a local lock is taken by disabling preemption (or
 * interrupts for the _irq/_irqsave variants) on the current CPU, then
 * running the lockdep acquire hooks on this CPU's instance.
 */
#define __local_lock(lock)					\
	do {							\
		preempt_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_lock_irq(lock)					\
	do {							\
		local_irq_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		local_irq_save(flags);				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)
93 
/*
 * Unlock counterparts: run the lockdep release hooks first, then undo
 * the matching preemption/interrupt disable.
 */
#define __local_unlock(lock)					\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		preempt_enable();				\
	} while (0)

#define __local_unlock_irq(lock)				\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_enable();				\
	} while (0)

#define __local_unlock_irqrestore(lock, flags)			\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_restore(flags);			\
	} while (0)
111 
/*
 * Nested-BH variant: softirq context already provides the required
 * protection, so only assert the context and run the lockdep hooks.
 */
#define __local_lock_nested_bh(lock)				\
	do {							\
		lockdep_assert_in_softirq();			\
		local_lock_acquire(this_cpu_ptr(lock));	\
	} while (0)

#define __local_unlock_nested_bh(lock)				\
	local_lock_release(this_cpu_ptr(lock))
120 
#else /* !CONFIG_PREEMPT_RT */
122 
123 /*
124  * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
125  * critical section while staying preemptible.
126  */
127 typedef spinlock_t local_lock_t;
128 
129 #define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
130 
131 #define __local_lock_init(l)					\
132 	do {							\
133 		local_spin_lock_init((l));			\
134 	} while (0)
135 
/*
 * PREEMPT_RT: pin the task to this CPU, then take the per-CPU spinlock.
 * Interrupts stay enabled, so the _irq/_irqsave variants are the same
 * operation; flags is only zeroed to keep callers' code type-correct.
 */
#define __local_lock(__lock)					\
	do {							\
		migrate_disable();				\
		spin_lock(this_cpu_ptr((__lock)));		\
	} while (0)

#define __local_lock_irq(lock)			__local_lock(lock)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__local_lock(lock);				\
	} while (0)
150 
/*
 * PREEMPT_RT unlock: drop the per-CPU spinlock, then allow migration
 * again.  The _irq/_irqrestore variants never touched the interrupt
 * state, so they are plain unlocks.
 */
#define __local_unlock(__lock)					\
	do {							\
		spin_unlock(this_cpu_ptr((__lock)));		\
		migrate_enable();				\
	} while (0)

#define __local_unlock_irq(lock)		__local_unlock(lock)

#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)
160 
/*
 * Nested-BH variant on PREEMPT_RT: softirqs are preemptible, so the
 * per-CPU spinlock must really be taken; no migrate_disable() is needed
 * because the spinlock itself pins the CPU association.
 */
#define __local_lock_nested_bh(lock)				\
do {								\
	lockdep_assert_in_softirq_func();			\
	spin_lock(this_cpu_ptr(lock));				\
} while (0)

#define __local_unlock_nested_bh(lock)				\
do {								\
	spin_unlock(this_cpu_ptr((lock)));			\
} while (0)
171 
#endif /* CONFIG_PREEMPT_RT */