1 // SPDX-License-Identifier: GPL-2.0
2 #include <vmlinux.h>
3 #include <bpf/bpf_tracing.h>
4 #include <bpf/bpf_helpers.h>
5 #include <bpf/bpf_core_read.h>
6 #include <bpf/bpf_endian.h>
7 #include "bpf_misc.h"
8 #include "bpf_experimental.h"
9 
10 #ifndef ETH_P_IP
11 #define ETH_P_IP 0x0800
12 #endif
13 
/* Tail-call program array; slot 0 is presumably populated by the userspace
 * test harness with exception_tail_call_target — TODO confirm against the
 * prog_tests runner.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
20 
/* Static subprog that unconditionally throws with cookie 32; the trailing
 * return is dead code at runtime.
 */
static __noinline int static_func(u64 i)
{
	bpf_throw(32);
	return i;
}
26 
/* Global subprog calling into an always-throwing static subprog; the code
 * after the call can never execute (see exception_throw_always_2's comment).
 */
__noinline int global2static_simple(u64 i)
{
	static_func(i + 2);
	return i - 1;
}
32 
/* Global subprog: throws with cookie 16 when i == ETH_P_IP, otherwise falls
 * through to static_func(), which always throws with cookie 32.
 */
__noinline int global2static(u64 i)
{
	if (i == ETH_P_IP)
		bpf_throw(16);
	return static_func(i);
}
39 
/* Static subprog calling a throwing global subprog — exercises unwinding
 * across a static -> global call boundary.
 */
static __noinline int static2global(u64 i)
{
	return global2static(i) + i;
}
44 
SEC("tc")
int exception_throw_always_1(struct __sk_buff *ctx)
{
	/* Unconditional throw from the main prog; the normal return path is
	 * never taken.
	 */
	bpf_throw(64);
	return 0;
}
51 
/* In this case, the global func will never be seen executing after call to
 * static subprog, hence verifier will DCE the remaining instructions. Ensure we
 * are resilient to that.
 */
SEC("tc")
int exception_throw_always_2(struct __sk_buff *ctx)
{
	return global2static_simple(ctx->protocol);
}
61 
SEC("tc")
int exception_throw_unwind_1(struct __sk_buff *ctx)
{
	/* With protocol == ETH_P_IP this throws cookie 16 inside
	 * global2static() (two frames deep).
	 */
	return static2global(bpf_ntohs(ctx->protocol));
}
67 
SEC("tc")
int exception_throw_unwind_2(struct __sk_buff *ctx)
{
	/* Offsetting the protocol by one skips the ETH_P_IP branch, so the
	 * throw instead happens in static_func() (cookie 32, deeper unwind).
	 */
	return static2global(bpf_ntohs(ctx->protocol) - 1);
}
73 
SEC("tc")
int exception_throw_default(struct __sk_buff *ctx)
{
	/* Throw with cookie 0 — exercises the default exception callback. */
	bpf_throw(0);
	return 1;
}
80 
SEC("tc")
int exception_throw_default_value(struct __sk_buff *ctx)
{
	/* Same as exception_throw_default but with a non-zero cookie. */
	bpf_throw(5);
	return 1;
}
87 
SEC("tc")
int exception_tail_call_target(struct __sk_buff *ctx)
{
	/* Installed into jmp_table (presumably by the test harness) so that a
	 * tail call lands in a throwing program.
	 */
	bpf_throw(16);
	return 0;
}
94 
/* Tail-calls slot 0 of jmp_table; a successful tail call never returns
 * here, so the value 10 is only seen when the tail call fails.
 */
static __noinline
int exception_tail_call_subprog(struct __sk_buff *ctx)
{
	volatile int ret = 10;

	bpf_tail_call_static(ctx, &jmp_table, 0);
	return ret;
}
103 
SEC("tc")
int exception_tail_call(struct __sk_buff *ctx) {
	volatile int ret = 0;

	/* ret stays relevant only when the tail call in the subprog fails;
	 * otherwise the tail-called program (which throws) decides the result.
	 */
	ret = exception_tail_call_subprog(ctx);
	return ret + 8;
}
111 
/* Dummy global subprog; default implementation returns 0. Presumably serves
 * as an freplace extension target in the test — confirm against the
 * prog_tests runner.
 */
__noinline int exception_ext_global(struct __sk_buff *ctx)
{
	volatile int ret = 0;

	return ret;
}
118 
/* Static wrapper so the extension target is reached through a static
 * subprog call.
 */
static __noinline int exception_ext_static(struct __sk_buff *ctx)
{
	return exception_ext_global(ctx);
}
123 
SEC("tc")
int exception_ext(struct __sk_buff *ctx)
{
	return exception_ext_static(ctx);
}
129 
/* Dummy global subprog invoked from the exception callback; returns 0 by
 * default, but can be extended (see the comment on exception_cb_mod).
 */
__noinline int exception_cb_mod_global(u64 cookie)
{
	volatile int ret = 0;

	return ret;
}
136 
/* Example of how the exception callback supplied during verification can still
 * introduce extensions by calling to dummy global functions, and alter runtime
 * behavior.
 *
 * Right now we don't allow freplace attachment to exception callback itself,
 * but if the need arises this restriction is technically feasible to relax in
 * the future.
 */
__noinline int exception_cb_mod(u64 cookie)
{
	return exception_cb_mod_global(cookie) + cookie + 10;
}
149 
SEC("tc")
__exception_cb(exception_cb_mod)
int exception_ext_mod_cb_runtime(struct __sk_buff *ctx)
{
	/* Throws 25; the registered exception callback (exception_cb_mod)
	 * transforms the cookie into the program's return value.
	 */
	bpf_throw(25);
	return 0;
}
157 
/* Non-throwing static subprog (control case). */
__noinline static int subprog(struct __sk_buff *ctx)
{
	return bpf_ktime_get_ns();
}
162 
/* Static subprog that throws conditionally, based on ctx->tstamp. */
__noinline static int throwing_subprog(struct __sk_buff *ctx)
{
	if (ctx->tstamp)
		bpf_throw(0);
	return bpf_ktime_get_ns();
}
169 
/* Non-throwing global subprog (control case). */
__noinline int global_subprog(struct __sk_buff *ctx)
{
	return bpf_ktime_get_ns();
}
174 
/* Global subprog that throws conditionally, based on ctx->tstamp. */
__noinline int throwing_global_subprog(struct __sk_buff *ctx)
{
	if (ctx->tstamp)
		bpf_throw(0);
	return bpf_ktime_get_ns();
}
181 
SEC("tc")
int exception_throw_subprog(struct __sk_buff *ctx)
{
	/* Dispatch on protocol to cover all four subprog flavors:
	 * static/global x throwing/non-throwing.
	 */
	switch (ctx->protocol) {
	case 1:
		return subprog(ctx);
	case 2:
		return global_subprog(ctx);
	case 3:
		return throwing_subprog(ctx);
	case 4:
		return throwing_global_subprog(ctx);
	default:
		break;
	}
	/* Unrecognized protocol: throw from the main prog itself. */
	bpf_throw(1);
	return 0;
}
200 
/* Asserts c != 0; throws (default cookie 0) when the assertion fails.
 * The volatile local keeps the compiler from folding the check away.
 */
__noinline int assert_nz_gfunc(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert(cookie != 0);
	return 0;
}
208 
/* Asserts c == 0 via bpf_cmp_unlikely; throws on failure. */
__noinline int assert_zero_gfunc(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, ==, 0));
	return 0;
}
216 
/* Asserts c < 0 (signed); throws on failure. */
__noinline int assert_neg_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, <, 0));
	return 0;
}
224 
/* Asserts c > 0 (signed); throws on failure. */
__noinline int assert_pos_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, >, 0));
	return 0;
}
232 
/* Asserts c <= -1 (signed); throws on failure. */
__noinline int assert_negeq_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, <=, -1));
	return 0;
}
240 
/* Asserts c >= 1 (signed); throws on failure. */
__noinline int assert_poseq_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, >=, 1));
	return 0;
}
248 
/* Asserts c != 0; on failure throws with a custom cookie of c + 100. */
__noinline int assert_nz_gfunc_with(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert_with(cookie != 0, cookie + 100);
	return 0;
}
256 
/* Asserts c == 0; on failure throws with a custom cookie of c + 100. */
__noinline int assert_zero_gfunc_with(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, ==, 0), cookie + 100);
	return 0;
}
264 
/* Asserts c < 0 (signed); on failure throws with cookie c + 100. */
__noinline int assert_neg_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, <, 0), cookie + 100);
	return 0;
}
272 
/* Asserts c > 0 (signed); on failure throws with cookie c + 100. */
__noinline int assert_pos_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, >, 0), cookie + 100);
	return 0;
}
280 
/* Asserts c <= -1 (signed); on failure throws with cookie c + 100. */
__noinline int assert_negeq_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, <=, -1), cookie + 100);
	return 0;
}
288 
/* Asserts c >= 1 (signed); on failure throws with cookie c + 100. */
__noinline int assert_poseq_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, >=, 1), cookie + 100);
	return 0;
}
296 
/* Generate a "tc" entry point named exception##tag##name that invokes the
 * given assert subprog with a fixed cookie. The +1 distinguishes a normal
 * return (subprog returns 0 -> prog returns 1) from a thrown cookie.
 */
#define check_assert(name, cookie, tag)				\
SEC("tc")							\
int exception##tag##name(struct __sk_buff *ctx)			\
{								\
	return name(cookie) + 1;				\
}
303 
/* Cookies that satisfy each assertion: no throw, progs return 0 + 1. */
check_assert(assert_nz_gfunc, 5, _);
check_assert(assert_zero_gfunc, 0, _);
check_assert(assert_neg_gfunc, -100, _);
check_assert(assert_pos_gfunc, 100, _);
check_assert(assert_negeq_gfunc, -1, _);
check_assert(assert_poseq_gfunc, 1, _);

check_assert(assert_nz_gfunc_with, 5, _);
check_assert(assert_zero_gfunc_with, 0, _);
check_assert(assert_neg_gfunc_with, -100, _);
check_assert(assert_pos_gfunc_with, 100, _);
check_assert(assert_negeq_gfunc_with, -1, _);
check_assert(assert_poseq_gfunc_with, 1, _);

/* Cookies that violate each assertion: the subprog throws instead of
 * returning (the _with variants throw their custom cookie + 100).
 */
check_assert(assert_nz_gfunc, 0, _bad_);
check_assert(assert_zero_gfunc, 5, _bad_);
check_assert(assert_neg_gfunc, 100, _bad_);
check_assert(assert_pos_gfunc, -100, _bad_);
check_assert(assert_negeq_gfunc, 1, _bad_);
check_assert(assert_poseq_gfunc, -1, _bad_);

check_assert(assert_nz_gfunc_with, 0, _bad_);
check_assert(assert_zero_gfunc_with, 5, _bad_);
check_assert(assert_neg_gfunc_with, 100, _bad_);
check_assert(assert_pos_gfunc_with, -100, _bad_);
check_assert(assert_negeq_gfunc_with, 1, _bad_);
check_assert(assert_poseq_gfunc_with, -1, _bad_);
331 
SEC("tc")
int exception_assert_range(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	/* Full u64 range [0, ~0ULL]: the assertion can never fail. */
	bpf_assert_range(time, 0, ~0ULL);
	return 1;
}
340 
SEC("tc")
int exception_assert_range_with(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	/* Full u64 range, with a custom cookie of 10 in case of failure. */
	bpf_assert_range_with(time, 0, ~0ULL, 10);
	return 1;
}
349 
SEC("tc")
int exception_bad_assert_range(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	/* Negative lower bound on an unsigned value — deliberately bad range;
	 * presumably the test expects this program to be rejected/throw.
	 */
	bpf_assert_range(time, -100, 100);
	return 1;
}
358 
SEC("tc")
int exception_bad_assert_range_with(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	/* Deliberately bad range as above, with custom cookie 10. */
	bpf_assert_range_with(time, -1000, 1000, 10);
	return 1;
}
367 
368 char _license[] SEC("license") = "GPL";
369