// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
#include <stdatomic.h>
#include "bpf_arena_common.h"
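
/* Arena map backing every __arena_global variable below. map_extra pins the
 * start of the mmap() region; a lower address is used for arm64 builds,
 * where the usable user virtual address space may be smaller.
 */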
struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 10); /* number of pages */
#ifdef __TARGET_ARCH_arm64
	__ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
#else
	__ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
#endif
} arena SEC(".maps");
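
/* Read by the user-space side of the test: the atomics tests are skipped
 * unless both ENABLE_ATOMICS_TESTS and the addr_space_cast feature are
 * available at build time.
 */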
#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
bool skip_tests __attribute((__section__(".data"))) = false;
#else
bool skip_tests = true;
#endif
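
/* Set by the user-space harness so that only syscalls issued by the test
 * process itself trigger the atomic updates below.
 */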
__u32 pid = 0;

__u64 __arena_global add64_value = 1;
__u64 __arena_global add64_result = 0;
__u32 __arena_global add32_value = 1;
__u32 __arena_global add32_result = 0;
__u64 __arena_global add_stack_value_copy = 0;
__u64 __arena_global add_stack_result = 0;
__u64 __arena_global add_noreturn_value = 1;
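
/* Exercise fetch-and-add on 64-bit and 32-bit arena globals, on a stack
 * variable, and in a form whose return value is ignored.
 */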
SEC("raw_tp/sys_enter")
int add(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 add_stack_value = 1;

	add64_result = __sync_fetch_and_add(&add64_value, 2);
	add32_result = __sync_fetch_and_add(&add32_value, 2);
	add_stack_result = __sync_fetch_and_add(&add_stack_value, 2);
	add_stack_value_copy = add_stack_value;
	__sync_fetch_and_add(&add_noreturn_value, 2);
#endif

	return 0;
}

__s64 __arena_global sub64_value = 1;
__s64 __arena_global sub64_result = 0;
__s32 __arena_global sub32_value = 1;
__s32 __arena_global sub32_result = 0;
__s64 __arena_global sub_stack_value_copy = 0;
__s64 __arena_global sub_stack_result = 0;
__s64 __arena_global sub_noreturn_value = 1;
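
/* Same shape as add(): fetch-and-sub on arena globals, a stack variable,
 * and a no-return-value variant.
 */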
SEC("raw_tp/sys_enter")
int sub(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 sub_stack_value = 1;

	sub64_result = __sync_fetch_and_sub(&sub64_value, 2);
	sub32_result = __sync_fetch_and_sub(&sub32_value, 2);
	sub_stack_result = __sync_fetch_and_sub(&sub_stack_value, 2);
	sub_stack_value_copy = sub_stack_value;
	__sync_fetch_and_sub(&sub_noreturn_value, 2);
#endif

	return 0;
}
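
/* The and/or/xor tests prefer C11 atomics with an explicit relaxed memory
 * order when the compiler advertises __BPF_FEATURE_ATOMIC_MEM_ORDERING,
 * and fall back to the __sync_* builtins otherwise.
 */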
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u64 __arena_global and64_value = (0x110ull << 32);
_Atomic __u32 __arena_global and32_value = 0x110;
#else
__u64 __arena_global and64_value = (0x110ull << 32);
__u32 __arena_global and32_value = 0x110;
#endif

SEC("raw_tp/sys_enter")
int and(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_and(&and64_value, 0x011ull << 32);
	__sync_fetch_and_and(&and32_value, 0x011);
#endif
#endif

	return 0;
}

#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u32 __arena_global or32_value = 0x110;
_Atomic __u64 __arena_global or64_value = (0x110ull << 32);
#else
__u32 __arena_global or32_value = 0x110;
__u64 __arena_global or64_value = (0x110ull << 32);
#endif
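
/* Atomic OR into the or64/or32 arena values initialized above. */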
SEC("raw_tp/sys_enter")
int or(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_or(&or64_value, 0x011ull << 32);
	__sync_fetch_and_or(&or32_value, 0x011);
#endif
#endif

	return 0;
}

#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u64 __arena_global xor64_value = (0x110ull << 32);
_Atomic __u32 __arena_global xor32_value = 0x110;
#else
__u64 __arena_global xor64_value = (0x110ull << 32);
__u32 __arena_global xor32_value = 0x110;
#endif
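
/* Atomic XOR into the xor64/xor32 arena values initialized above. */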
SEC("raw_tp/sys_enter")
int xor(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
	__sync_fetch_and_xor(&xor32_value, 0x011);
#endif
#endif

	return 0;
}

__u32 __arena_global cmpxchg32_value = 1;
__u32 __arena_global cmpxchg32_result_fail = 0;
__u32 __arena_global cmpxchg32_result_succeed = 0;
__u64 __arena_global cmpxchg64_value = 1;
__u64 __arena_global cmpxchg64_result_fail = 0;
__u64 __arena_global cmpxchg64_result_succeed = 0;
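
/* The first compare-and-swap of each width uses a wrong expected value (0)
 * and must fail, returning the current value; the second expects 1 and
 * must succeed, swapping in 2.
 */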
SEC("raw_tp/sys_enter")
int cmpxchg(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
	cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);

	cmpxchg32_result_fail = __sync_val_compare_and_swap(&cmpxchg32_value, 0, 3);
	cmpxchg32_result_succeed = __sync_val_compare_and_swap(&cmpxchg32_value, 1, 2);
#endif

	return 0;
}

__u64 __arena_global xchg64_value = 1;
__u64 __arena_global xchg64_result = 0;
__u32 __arena_global xchg32_value = 1;
__u32 __arena_global xchg32_result = 0;
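
/* Atomic exchange: each result should hold the old value (1) and each
 * *_value should end up as 2.
 */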
SEC("raw_tp/sys_enter")
int xchg(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 val64 = 2;
	__u32 val32 = 2;

	xchg64_result = __sync_lock_test_and_set(&xchg64_value, val64);
	xchg32_result = __sync_lock_test_and_set(&xchg32_value, val32);
#endif

	return 0;
}

__u64 __arena_global uaf_sink;
volatile __u64 __arena_global uaf_recovery_fails;
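
/* Use-after-free recovery test: allocate one arena page, free it, then run
 * every atomic operation on the dangling pointer. Each access is expected
 * to fault and be recovered without killing the program, decrementing
 * uaf_recovery_fails from 24 down to 0.
 */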
SEC("syscall")
int uaf(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \
	!defined(__TARGET_ARCH_x86)
	__u32 __arena *page32;
	__u64 __arena *page64;
	void __arena *page;

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	bpf_arena_free_pages(&arena, page, 1);
	uaf_recovery_fails = 24;

	page32 = (__u32 __arena *)page;
	uaf_sink += __sync_fetch_and_add(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_add_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_sub(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_sub_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_and(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_and_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_or(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_or_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_xor(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_xor_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_val_compare_and_swap(page32, 0, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_lock_test_and_set(page32, 1);
	uaf_recovery_fails -= 1;

	page64 = (__u64 __arena *)page;
	uaf_sink += __sync_fetch_and_add(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_add_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_sub(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_sub_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_and(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_and_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_or(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_or_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_xor(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_xor_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_val_compare_and_swap(page64, 0, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_lock_test_and_set(page64, 1);
	uaf_recovery_fails -= 1;
#endif

	return 0;
}

char _license[] SEC("license") = "GPL";