/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as an array
 * of these. The embedded struct codetag hooks each instance into the codetag
 * framework and identifies the callsite.
 */
struct alloc_tag {
	struct codetag ct;
	struct alloc_tag_counters __percpu *counters;
} __aligned(8);
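
/*
 * Illustrative sketch (not part of this header's API): with profiling
 * enabled, an allocation wrapper built on alloc_hooks() below, e.g.
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *
 * leaves one struct alloc_tag for that callsite in the "alloc_tags" section,
 * so the bytes and calls charged there can be attributed back to the file
 * and line of the call.
 */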

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

#define CODETAG_EMPTY	((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
	return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = CODETAG_EMPTY;
}

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
static inline void set_codetag_empty(union codetag_ref *ref) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

#ifdef CONFIG_MEM_ALLOC_PROFILING

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}

#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
 * Instead we will account all module allocations to a single counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)					\
	static struct alloc_tag _alloc_tag __used __aligned(8)		\
	__section("alloc_tags") = {					\
		.ct = CODE_TAG_INIT,					\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)					\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
	static struct alloc_tag _alloc_tag __used __aligned(8)		\
	__section("alloc_tags") = {					\
		.ct = CODE_TAG_INIT,					\
		.counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */
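
/*
 * Sketch of the consequence of the ARCH_NEEDS_WEAK_PER_CPU fallback above
 * (illustrative, hypothetical callsites): two distinct callsites defined
 * under that configuration,
 *
 *	DEFINE_ALLOC_TAG(tag_a);
 *	DEFINE_ALLOC_TAG(tag_b);
 *
 * both end up with .counters pointing at _shared_alloc_tag, so their
 * allocations are accounted together rather than per callsite.
 */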

DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			 mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}
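
/*
 * Typical guard pattern (illustrative sketch, not a definition from this
 * header; "ref" and "size" are placeholder variables): callers that do extra
 * work only for accounting can check the static key first, e.g.
 *
 *	if (mem_alloc_profiling_enabled())
 *		alloc_tag_add(ref, current->alloc_tag, size);
 *
 * so the fast path stays cheap when profiling is disabled.
 */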

static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}
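
/*
 * Illustrative sketch of consuming the snapshot (not part of this header):
 * a reporting path can fold the per-CPU counters into per-callsite totals,
 * e.g.
 *
 *	struct alloc_tag_counters c = alloc_tag_read(tag);
 *
 *	pr_info("%s:%u %llu bytes / %llu calls\n", tag->ct.filename,
 *		tag->ct.lineno, (unsigned long long)c.bytes,
 *		(unsigned long long)c.calls);
 *
 * The per-CPU counters are summed without synchronization, so the result is
 * an approximate snapshot.
 */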

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct,
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set\n");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* Caller should verify that both ref and tag are valid */
static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return false;

	ref->ct = &tag->ct;
	return true;
}

static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	if (unlikely(!__alloc_tag_ref_set(ref, tag)))
		return false;

	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference for every sub-allocation needs to increment the
	 * call counter, because the counter will be decremented when each
	 * part is freed.
	 */
	this_cpu_inc(tag->counters->calls);
	return true;
}
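
/*
 * Illustrative sketch of the "split" case described above (hypothetical
 * variables, not defined here): when one large allocation is carved into
 * nr_parts pieces that are freed independently, each piece's reference is
 * set so the call counter stays balanced against the later alloc_tag_sub()
 * calls, e.g.
 *
 *	for (i = 0; i < nr_parts; i++)
 *		alloc_tag_ref_set(&part_ref[i], tag);
 */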

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	if (likely(alloc_tag_ref_set(ref, tag)))
		this_cpu_add(tag->counters->bytes, bytes);
}

static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	if (is_codetag_empty(ref)) {
		ref->ct = NULL;
		return;
	}

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}
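
/*
 * Illustrative pairing (sketch, not a definition from this header; "ref" and
 * "size" are placeholders): an allocator charges a tag when memory is handed
 * out and reverses the same amount when it is freed, e.g.
 *
 *	alloc_tag_add(&ref, current->alloc_tag, size);	// on allocation
 *	...
 *	alloc_tag_sub(&ref, size);			// on free
 *
 * Passing a different size on free would leave the byte counter skewed.
 */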

#define alloc_tag_record(p)	((p) = current->alloc_tag)

#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
	typeof(_do_alloc) _res = _do_alloc;				\
	alloc_tag_restore(_tag, _old);					\
	_res;								\
})

#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})
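
/*
 * Typical use of alloc_hooks() (illustrative; the actual wrappers live in
 * the allocator headers): an allocation API is implemented as a _noprof
 * variant and the profiled name is defined as a wrapper, e.g.
 *
 *	#define kmalloc(...)	alloc_hooks(kmalloc_noprof(__VA_ARGS__))
 *
 * so every callsite of the wrapper gets its own struct alloc_tag via the
 * DEFINE_ALLOC_TAG() inside alloc_hooks().
 */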

#endif /* _LINUX_ALLOC_TAG_H */