// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/compiler.h>
#include <linux/err.h>

#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <linux/btf.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include <test_btf.h>
#include <test_maps.h>

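/* Map-creation options shared by all tests.  btf_key_type_id and
 * btf_value_type_id refer to types [1] (int) and [3] (struct val) in
 * the raw BTF blob built by load_btf() below.
 */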
static struct bpf_map_create_opts map_opts = {
	.sz = sizeof(map_opts),
	.btf_key_type_id = 1,
	.btf_value_type_id = 3,
	.btf_fd = -1,
	.map_flags = BPF_F_NO_PREALLOC,
};

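/* Knobs for the stress tests: number of worker threads, sockets created
 * per thread, and total runtime in seconds.  All of them can be
 * overridden through the environment variables parsed in
 * test_sk_storage_map().
 */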
static unsigned int nr_sk_threads_done;
static unsigned int nr_sk_threads_err;
static unsigned int nr_sk_per_thread = 4096;
static unsigned int nr_sk_threads = 4;
static int sk_storage_map = -1;
static unsigned int stop;
static int runtime_s = 5;

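/* The helpers below coordinate the worker threads with the main thread
 * using plain shared counters: READ_ONCE()/WRITE_ONCE() for the stop
 * flag and the published map fd, and __sync_*_and_fetch() for the
 * done/err counters, so no locks are needed.
 */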
static bool is_stopped(void)
{
	return READ_ONCE(stop);
}

static unsigned int threads_err(void)
{
	return READ_ONCE(nr_sk_threads_err);
}

static void notify_thread_err(void)
{
	__sync_add_and_fetch(&nr_sk_threads_err, 1);
}

static bool wait_for_threads_err(void)
{
	while (!is_stopped() && !threads_err())
		usleep(500);

	return !is_stopped();
}

static unsigned int threads_done(void)
{
	return READ_ONCE(nr_sk_threads_done);
}

static void notify_thread_done(void)
{
	__sync_add_and_fetch(&nr_sk_threads_done, 1);
}

static void notify_thread_redo(void)
{
	__sync_sub_and_fetch(&nr_sk_threads_done, 1);
}

static bool wait_for_threads_done(void)
{
	while (threads_done() != nr_sk_threads && !is_stopped() &&
	       !threads_err())
		usleep(50);

	return !is_stopped() && !threads_err();
}

static bool wait_for_threads_redo(void)
{
	while (threads_done() && !is_stopped() && !threads_err())
		usleep(50);

	return !is_stopped() && !threads_err();
}

static bool wait_for_map(void)
{
	while (READ_ONCE(sk_storage_map) == -1 && !is_stopped())
		usleep(50);

	return !is_stopped();
}

static bool wait_for_map_close(void)
{
	while (READ_ONCE(sk_storage_map) != -1 && !is_stopped())
		;

	return !is_stopped();
}

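/* Build a minimal raw BTF blob describing the map's key/value types:
 * [1] int, [2] struct bpf_spin_lock, and [3] struct val { int cnt;
 * struct bpf_spin_lock l; }.  The name offsets (1, 15, 19, 23) index
 * into btf_str_sec.  Returns a btf fd on success, -1 on error.
 */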
static int load_btf(void)
{
	const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
	__u32 btf_raw_types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* struct bpf_spin_lock */			/* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */				/* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	};
	struct btf_header btf_hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = sizeof(btf_raw_types),
		.str_off = sizeof(btf_raw_types),
		.str_len = sizeof(btf_str_sec),
	};
	__u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
		     sizeof(btf_str_sec)];

	memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
	memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
	memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
	       btf_str_sec, sizeof(btf_str_sec));

	return bpf_btf_load(raw_btf, sizeof(raw_btf), NULL);
}

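/* Create a BPF_MAP_TYPE_SK_STORAGE map (4-byte key: socket fd, 8-byte
 * value: struct val) backed by the BTF loaded above.  The btf fd is only
 * needed at map-creation time, so it is closed right away.
 */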
static int create_sk_storage_map(void)
{
	int btf_fd, map_fd;

	btf_fd = load_btf();
	CHECK(btf_fd == -1, "bpf_btf_load", "btf_fd:%d errno:%d\n",
	      btf_fd, errno);
	map_opts.btf_fd = btf_fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts);
	map_opts.btf_fd = -1;
	close(btf_fd);
	CHECK(map_fd == -1,
	      "bpf_map_create()", "errno:%d\n", errno);

	return map_fd;
}

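/* Worker for the "stress_free" test: repeatedly create nr_sk_per_thread
 * sockets, add an sk_storage entry for each, then wait for the main
 * thread to close the map before closing the sockets and starting over.
 */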
static void *insert_close_thread(void *arg)
{
	struct {
		int cnt;
		int lock;
	} value = { .cnt = 0xeB9F, .lock = 0, };
	int i, map_fd, err, *sk_fds;

	sk_fds = malloc(sizeof(*sk_fds) * nr_sk_per_thread);
	if (!sk_fds) {
		notify_thread_err();
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_sk_per_thread; i++)
		sk_fds[i] = -1;

	while (!is_stopped()) {
		if (!wait_for_map())
			goto close_all;

		map_fd = READ_ONCE(sk_storage_map);
		for (i = 0; i < nr_sk_per_thread && !is_stopped(); i++) {
			sk_fds[i] = socket(AF_INET6, SOCK_STREAM, 0);
			if (sk_fds[i] == -1) {
				err = -errno;
				fprintf(stderr, "socket(): errno:%d\n", errno);
				goto errout;
			}
			err = bpf_map_update_elem(map_fd, &sk_fds[i], &value,
						  BPF_NOEXIST);
			if (err) {
				err = -errno;
				fprintf(stderr,
					"bpf_map_update_elem(): errno:%d\n",
					errno);
				goto errout;
			}
		}

		notify_thread_done();
		wait_for_map_close();

close_all:
		for (i = 0; i < nr_sk_per_thread; i++) {
			close(sk_fds[i]);
			sk_fds[i] = -1;
		}

		notify_thread_redo();
	}

	free(sk_fds);
	return NULL;

errout:
	for (i = 0; i < nr_sk_per_thread && sk_fds[i] != -1; i++)
		close(sk_fds[i]);
	free(sk_fds);
	notify_thread_err();
	return ERR_PTR(err);
}

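/* Main loop of the "stress_free" test: publish a fresh map fd to the
 * workers, wait until every thread has filled its sockets, then close
 * the map to force the kernel to free all sk_storage while the sockets
 * are still open, and repeat until the runtime alarm fires.
 */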
static int do_sk_storage_map_stress_free(void)
{
	int i, map_fd = -1, err = 0, nr_threads_created = 0;
	pthread_t *sk_thread_ids;
	void *thread_ret;

	sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
	if (!sk_thread_ids) {
		fprintf(stderr, "malloc(sk_threads): NULL\n");
		return -ENOMEM;
	}

	for (i = 0; i < nr_sk_threads; i++) {
		err = pthread_create(&sk_thread_ids[i], NULL,
				     insert_close_thread, NULL);
		if (err) {
			/* pthread_create() returns the error number directly */
			err = -err;
			goto done;
		}
		nr_threads_created++;
	}

	while (!is_stopped()) {
		map_fd = create_sk_storage_map();
		WRITE_ONCE(sk_storage_map, map_fd);

		if (!wait_for_threads_done())
			break;

		WRITE_ONCE(sk_storage_map, -1);
		close(map_fd);
		map_fd = -1;

		if (!wait_for_threads_redo())
			break;
	}

done:
	WRITE_ONCE(stop, 1);
	for (i = 0; i < nr_threads_created; i++) {
		pthread_join(sk_thread_ids[i], &thread_ret);
		if (IS_ERR(thread_ret) && !err) {
			err = PTR_ERR(thread_ret);
			fprintf(stderr, "threads#%u: err:%d\n", i, err);
		}
	}
	free(sk_thread_ids);

	if (map_fd != -1)
		close(map_fd);

	return err;
}

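/* Workers for the "stress_change" test: half of the threads keep
 * updating the storage of one shared socket while the other half keep
 * deleting it, racing update against delete on the same element.
 */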
static void *update_thread(void *arg)
{
	struct {
		int cnt;
		int lock;
	} value = { .cnt = 0xeB9F, .lock = 0, };
	int map_fd = READ_ONCE(sk_storage_map);
	int sk_fd = *(int *)arg;
	int err = 0; /* Suppress compiler false alarm */

	while (!is_stopped()) {
		err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
		if (err && errno != EAGAIN) {
			err = -errno;
			fprintf(stderr, "bpf_map_update_elem: %d %d\n",
				err, errno);
			break;
		}
	}

	if (!is_stopped()) {
		notify_thread_err();
		return ERR_PTR(err);
	}

	return NULL;
}

static void *delete_thread(void *arg)
{
	int map_fd = READ_ONCE(sk_storage_map);
	int sk_fd = *(int *)arg;
	int err = 0; /* Suppress compiler false alarm */

	while (!is_stopped()) {
		err = bpf_map_delete_elem(map_fd, &sk_fd);
		if (err && errno != ENOENT) {
			err = -errno;
			fprintf(stderr, "bpf_map_delete_elem: %d %d\n",
				err, errno);
			break;
		}
	}

	if (!is_stopped()) {
		notify_thread_err();
		return ERR_PTR(err);
	}

	return NULL;
}

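/* Set up one socket and one map, then spawn alternating update/delete
 * workers against the same element and let them race until the runtime
 * alarm stops the test or a worker reports an error.
 */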
static int do_sk_storage_map_stress_change(void)
{
	int i, sk_fd, map_fd = -1, err = 0, nr_threads_created = 0;
	pthread_t *sk_thread_ids;
	void *thread_ret;

	sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
	if (!sk_thread_ids) {
		fprintf(stderr, "malloc(sk_threads): NULL\n");
		return -ENOMEM;
	}

	sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (sk_fd == -1) {
		err = -errno;
		goto done;
	}

	map_fd = create_sk_storage_map();
	WRITE_ONCE(sk_storage_map, map_fd);

	for (i = 0; i < nr_sk_threads; i++) {
		if (i & 0x1)
			err = pthread_create(&sk_thread_ids[i], NULL,
					     update_thread, &sk_fd);
		else
			err = pthread_create(&sk_thread_ids[i], NULL,
					     delete_thread, &sk_fd);
		if (err) {
			/* pthread_create() returns the error number directly */
			err = -err;
			goto done;
		}
		nr_threads_created++;
	}

	wait_for_threads_err();

done:
	WRITE_ONCE(stop, 1);
	for (i = 0; i < nr_threads_created; i++) {
		pthread_join(sk_thread_ids[i], &thread_ret);
		if (IS_ERR(thread_ret) && !err) {
			err = PTR_ERR(thread_ret);
			fprintf(stderr, "threads#%u: err:%d\n", i, err);
		}
	}
	free(sk_thread_ids);

	if (sk_fd != -1)
		close(sk_fd);
	close(map_fd);

	return err;
}

static void stop_handler(int signum)
{
	if (signum != SIGALRM)
		printf("stopping...\n");
	WRITE_ONCE(stop, 1);
}

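/* Environment variables recognized by test_sk_storage_map() to tune the
 * stress tests at run time; the values are parsed with atoi().
 */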
#define BPF_SK_STORAGE_MAP_TEST_NR_THREADS "BPF_SK_STORAGE_MAP_TEST_NR_THREADS"
#define BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD "BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD"
#define BPF_SK_STORAGE_MAP_TEST_RUNTIME_S "BPF_SK_STORAGE_MAP_TEST_RUNTIME_S"
#define BPF_SK_STORAGE_MAP_TEST_NAME "BPF_SK_STORAGE_MAP_TEST_NAME"

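/* Wrapper for the stress_free test: install signal handlers, arm the
 * runtime alarm, and bump RLIMIT_NOFILE so every thread can keep
 * nr_sk_per_thread sockets open at once.
 */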
static void test_sk_storage_map_stress_free(void)
{
	struct rlimit rlim_old, rlim_new = {};
	int err;

	getrlimit(RLIMIT_NOFILE, &rlim_old);

	signal(SIGTERM, stop_handler);
	signal(SIGINT, stop_handler);
	if (runtime_s > 0) {
		signal(SIGALRM, stop_handler);
		alarm(runtime_s);
	}

	if (rlim_old.rlim_cur < nr_sk_threads * nr_sk_per_thread) {
		rlim_new.rlim_cur = nr_sk_threads * nr_sk_per_thread + 128;
		rlim_new.rlim_max = rlim_new.rlim_cur + 128;
		err = setrlimit(RLIMIT_NOFILE, &rlim_new);
		CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d\n",
		      (unsigned long) rlim_new.rlim_cur, errno);
	}

	err = do_sk_storage_map_stress_free();

	signal(SIGTERM, SIG_DFL);
	signal(SIGINT, SIG_DFL);
	if (runtime_s > 0) {
		signal(SIGALRM, SIG_DFL);
		alarm(0);
	}

	if (rlim_new.rlim_cur)
		setrlimit(RLIMIT_NOFILE, &rlim_old);

	CHECK(err, "test_sk_storage_map_stress_free", "err:%d\n", err);
}

static void test_sk_storage_map_stress_change(void)
{
	int err;

	signal(SIGTERM, stop_handler);
	signal(SIGINT, stop_handler);
	if (runtime_s > 0) {
		signal(SIGALRM, stop_handler);
		alarm(runtime_s);
	}

	err = do_sk_storage_map_stress_change();

	signal(SIGTERM, SIG_DFL);
	signal(SIGINT, SIG_DFL);
	if (runtime_s > 0) {
		signal(SIGALRM, SIG_DFL);
		alarm(0);
	}

	CHECK(err, "test_sk_storage_map_stress_change", "err:%d\n", err);
}

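/* Functional test: exercise update/lookup/delete with the BPF_NOEXIST,
 * BPF_EXIST and BPF_F_LOCK flag combinations on a single socket, and
 * verify that malformed map attributes are rejected with EINVAL.
 */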
static void test_sk_storage_map_basic(void)
{
	struct {
		int cnt;
		int lock;
	} value = { .cnt = 0xeB9f, .lock = 1, }, lookup_value;
	struct bpf_map_create_opts bad_xattr;
	int btf_fd, map_fd, sk_fd, err;

	btf_fd = load_btf();
	CHECK(btf_fd == -1, "bpf_btf_load", "btf_fd:%d errno:%d\n",
	      btf_fd, errno);
	map_opts.btf_fd = btf_fd;

	sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
	CHECK(sk_fd == -1, "socket()", "sk_fd:%d errno:%d\n",
	      sk_fd, errno);

	map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts);
	CHECK(map_fd == -1, "bpf_map_create(good_xattr)",
	      "map_fd:%d errno:%d\n", map_fd, errno);

	/* Add new elem */
	memcpy(&lookup_value, &value, sizeof(value));
	err = bpf_map_update_elem(map_fd, &sk_fd, &value,
				  BPF_NOEXIST | BPF_F_LOCK);
	CHECK(err, "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
	      "err:%d errno:%d\n", err, errno);
	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
					BPF_F_LOCK);
	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);

	/* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
	value.cnt += 1;
	value.lock = 2;
	err = bpf_map_update_elem(map_fd, &sk_fd, &value,
				  BPF_EXIST | BPF_F_LOCK);
	CHECK(err, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
	      "err:%d errno:%d\n", err, errno);
	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
					BPF_F_LOCK);
	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);

	/* Bump the cnt and update with BPF_EXIST */
	value.cnt += 1;
	value.lock = 2;
	err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_EXIST);
	CHECK(err, "bpf_map_update_elem(BPF_EXIST)",
	      "err:%d errno:%d\n", err, errno);
	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
					BPF_F_LOCK);
	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);

	/* Update with BPF_NOEXIST */
	value.cnt += 1;
	value.lock = 2;
	err = bpf_map_update_elem(map_fd, &sk_fd, &value,
				  BPF_NOEXIST | BPF_F_LOCK);
	CHECK(!err || errno != EEXIST,
	      "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
	      "err:%d errno:%d\n", err, errno);
	err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_NOEXIST);
	CHECK(!err || errno != EEXIST, "bpf_map_update_elem(BPF_NOEXIST)",
	      "err:%d errno:%d\n", err, errno);
	value.cnt -= 1;
	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
					BPF_F_LOCK);
	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);

	/* Bump the cnt again and update with map_flags == 0 */
	value.cnt += 1;
	value.lock = 2;
	err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
	CHECK(err, "bpf_map_update_elem()", "err:%d errno:%d\n",
	      err, errno);
	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
					BPF_F_LOCK);
	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);

	/* Test delete elem */
	err = bpf_map_delete_elem(map_fd, &sk_fd);
	CHECK(err, "bpf_map_delete_elem()", "err:%d errno:%d\n",
	      err, errno);
	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
					BPF_F_LOCK);
	CHECK(!err || errno != ENOENT,
	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
	      "err:%d errno:%d\n", err, errno);
	err = bpf_map_delete_elem(map_fd, &sk_fd);
	CHECK(!err || errno != ENOENT, "bpf_map_delete_elem()",
	      "err:%d errno:%d\n", err, errno);

	/* A zero btf_key_type_id must be rejected */
	memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
	bad_xattr.btf_key_type_id = 0;
	err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
	CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
	      "err:%d errno:%d\n", err, errno);

	/* A non-int btf_key_type_id must be rejected */
	memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
	bad_xattr.btf_key_type_id = 3;
	err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
	CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
	      "err:%d errno:%d\n", err, errno);

	/* A non-zero max_entries must be rejected */
	err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 1, &map_opts);
	CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
	      "err:%d errno:%d\n", err, errno);

	/* A missing BPF_F_NO_PREALLOC must be rejected */
	memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
	bad_xattr.map_flags = 0;
	err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
	CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
	      "err:%d errno:%d\n", err, errno);

	map_opts.btf_fd = -1;
	close(btf_fd);
	close(map_fd);
	close(sk_fd);
}

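/* Entry point: parse the tuning environment variables and run the
 * subtest selected by BPF_SK_STORAGE_MAP_TEST_NAME, or all subtests by
 * default.
 */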
void test_sk_storage_map(void)
{
	const char *test_name, *env_opt;
	bool test_ran = false;

	test_name = getenv(BPF_SK_STORAGE_MAP_TEST_NAME);

	env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_NR_THREADS);
	if (env_opt)
		nr_sk_threads = atoi(env_opt);

	env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD);
	if (env_opt)
		nr_sk_per_thread = atoi(env_opt);

	env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_RUNTIME_S);
	if (env_opt)
		runtime_s = atoi(env_opt);

	if (!test_name || !strcmp(test_name, "basic")) {
		test_sk_storage_map_basic();
		test_ran = true;
	}
	if (!test_name || !strcmp(test_name, "stress_free")) {
		test_sk_storage_map_stress_free();
		test_ran = true;
	}
	if (!test_name || !strcmp(test_name, "stress_change")) {
		test_sk_storage_map_stress_change();
		test_ran = true;
	}

	if (test_ran)
		printf("%s:PASS\n", __func__);
	else
		CHECK(1, "Invalid test_name", "%s\n", test_name);
}