// SPDX-License-Identifier: GPL-2.0
/*
 * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
 * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
 *
 * For corner cases with UBSAN, try testing with:
 *
 * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
 *         --kconfig_add CONFIG_FORTIFY_SOURCE=y \
 *         --kconfig_add CONFIG_UBSAN=y \
 *         --kconfig_add CONFIG_UBSAN_TRAP=y \
 *         --kconfig_add CONFIG_UBSAN_BOUNDS=y \
 *         --kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
 *         --make_options LLVM=1 fortify
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* We don't need to fill dmesg with the fortify WARNs during testing. */
#ifdef DEBUG
# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x)
#else
# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
# define FORTIFY_WARN_KUNIT(x...) do { } while (0)
#endif

/* Redefine fortify_panic() to track failures. */
void fortify_add_kunit_error(int write);
#define fortify_panic(func, write, avail, size, retfail) do { \
        FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size); \
        fortify_add_kunit_error(write); \
        return (retfail); \
} while (0)

/* Redefine fortify_warn_once() to track memcpy() failures. */
#define fortify_warn_once(chk_func, x...) do { \
        bool __result = chk_func; \
        FORTIFY_WARN_KUNIT(__result, x); \
        if (__result) \
                fortify_add_kunit_error(1); \
} while (0)
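
/*
 * Note: the fortify_panic() and fortify_warn_once() overrides above are
 * defined before the header includes below so that the fortified string
 * helpers pulled in via <linux/string.h> use them instead of the defaults.
 */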

#include <kunit/device.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Handle being built without CONFIG_FORTIFY_SOURCE */
#ifndef __compiletime_strlen
# define __compiletime_strlen __builtin_strlen
#endif

static struct kunit_resource read_resource;
static struct kunit_resource write_resource;
static int fortify_read_overflows;
static int fortify_write_overflows;

static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
static char array_unknown[] = "compiler thinks I might change";

void fortify_add_kunit_error(int write)
{
        struct kunit_resource *resource;
        struct kunit *current_test;

        current_test = kunit_get_current_test();
        if (!current_test)
                return;

        resource = kunit_find_named_resource(current_test,
                        write ? "fortify_write_overflows"
                              : "fortify_read_overflows");
        if (!resource)
                return;

        (*(int *)resource->data)++;
        kunit_put_resource(resource);
}

static void fortify_test_known_sizes(struct kunit *test)
{
        KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
        KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
        KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);

        KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
        /* Externally defined and dynamically sized string pointer: */
        KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
}

/* This is volatile so the optimizer can't perform DCE below. */
static volatile int pick;

/* Not inline to keep optimizer from figuring out which string we want. */
static noinline size_t want_minus_one(int pick)
{
        const char *str;

        switch (pick) {
        case 1:
                str = "4444";
                break;
        case 2:
                str = "333";
                break;
        default:
                str = "1";
                break;
        }
        return __compiletime_strlen(str);
}

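/*
 * Since the compiler cannot know which branch want_minus_one() takes,
 * __compiletime_strlen() must give up and report SIZE_MAX ("unknown").
 */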
static void fortify_test_control_flow_split(struct kunit *test)
{
        KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}

#define KUNIT_EXPECT_BOS(test, p, expected, name) \
        KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1), \
                expected, \
                "__alloc_size() not working with __bos on " name "\n")

#if !__has_builtin(__builtin_dynamic_object_size)
#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
        /* Silence "unused variable 'expected'" warning. */ \
        KUNIT_EXPECT_EQ(test, expected, expected)
#else
#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
        KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1), \
                expected, \
                "__alloc_size() not working with __bdos on " name "\n")
#endif

/* If the expected size is a constant value, __bos can see it. */
#define check_const(_expected, alloc, free) do { \
        size_t expected = (_expected); \
        void *p = alloc; \
        KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
        KUNIT_EXPECT_BOS(test, p, expected, #alloc); \
        KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
        free; \
} while (0)

/* If the expected size is NOT a constant value, __bos CANNOT see it. */
#define check_dynamic(_expected, alloc, free) do { \
        size_t expected = (_expected); \
        void *p = alloc; \
        KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
        KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc); \
        KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
        free; \
} while (0)
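
/*
 * Both helpers above bind the allocation to a local named "p", which is
 * what the caller-provided "free" expression is expected to release.
 */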

/* Assortment of constant-value kinda-edge cases. */
#define CONST_TEST_BODY(TEST_alloc) do { \
        /* Special-case vmalloc()-family to skip 0-sized allocs. */ \
        if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0) \
                TEST_alloc(check_const, 0, 0); \
        TEST_alloc(check_const, 1, 1); \
        TEST_alloc(check_const, 128, 128); \
        TEST_alloc(check_const, 1023, 1023); \
        TEST_alloc(check_const, 1025, 1025); \
        TEST_alloc(check_const, 4096, 4096); \
        TEST_alloc(check_const, 4097, 4097); \
} while (0)

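/* Volatile so the compiler cannot treat these sizes as compile-time constants. */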
static volatile size_t zero_size;
static volatile size_t unknown_size = 50;

#if !__has_builtin(__builtin_dynamic_object_size)
#define DYNAMIC_TEST_BODY(TEST_alloc) \
        kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
#else
#define DYNAMIC_TEST_BODY(TEST_alloc) do { \
        size_t size = unknown_size; \
        \
        /* \
         * Expected size is "size" in each test, before it is then \
         * internally incremented in each test. Requires we disable \
         * -Wunsequenced. \
         */ \
        TEST_alloc(check_dynamic, size, size++); \
        /* Make sure incrementing actually happened. */ \
        KUNIT_EXPECT_NE(test, size, unknown_size); \
} while (0)
#endif

#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
{ \
        CONST_TEST_BODY(TEST_##allocator); \
} \
static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
{ \
        DYNAMIC_TEST_BODY(TEST_##allocator); \
}

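/*
 * Each TEST_<allocator> macro below hands "checker" three things: the
 * expected object size, an allocation expression, and a matching free
 * expression (which may reference the checker's local "p").
 */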
#define TEST_kmalloc(checker, expected_size, alloc_size) do { \
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
        void *orig; \
        size_t len; \
        \
        checker(expected_size, kmalloc(alloc_size, gfp), \
                kfree(p)); \
        checker(expected_size, \
                kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
                kfree(p)); \
        checker(expected_size, kzalloc(alloc_size, gfp), \
                kfree(p)); \
        checker(expected_size, \
                kzalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
                kfree(p)); \
        checker(expected_size, kcalloc(1, alloc_size, gfp), \
                kfree(p)); \
        checker(expected_size, kcalloc(alloc_size, 1, gfp), \
                kfree(p)); \
        checker(expected_size, \
                kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE), \
                kfree(p)); \
        checker(expected_size, \
                kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
                kfree(p)); \
        checker(expected_size, kmalloc_array(1, alloc_size, gfp), \
                kfree(p)); \
        checker(expected_size, kmalloc_array(alloc_size, 1, gfp), \
                kfree(p)); \
        checker(expected_size, \
                kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE), \
                kfree(p)); \
        checker(expected_size, \
                kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
                kfree(p)); \
        \
        orig = kmalloc(alloc_size, gfp); \
        KUNIT_EXPECT_TRUE(test, orig != NULL); \
        checker((expected_size) * 2, \
                krealloc(orig, (alloc_size) * 2, gfp), \
                kfree(p)); \
        orig = kmalloc(alloc_size, gfp); \
        KUNIT_EXPECT_TRUE(test, orig != NULL); \
        checker((expected_size) * 2, \
                krealloc_array(orig, 1, (alloc_size) * 2, gfp), \
                kfree(p)); \
        orig = kmalloc(alloc_size, gfp); \
        KUNIT_EXPECT_TRUE(test, orig != NULL); \
        checker((expected_size) * 2, \
                krealloc_array(orig, (alloc_size) * 2, 1, gfp), \
                kfree(p)); \
        \
        len = 11; \
        /* Using memdup() with fixed size, so force unknown length. */ \
        if (!__builtin_constant_p(expected_size)) \
                len += zero_size; \
        checker(len, kmemdup("hello there", len, gfp), kfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)

/* Sizes are in pages, not bytes. */
#define TEST_vmalloc(checker, expected_pages, alloc_pages) do { \
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
        checker((expected_pages) * PAGE_SIZE, \
                vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
        checker((expected_pages) * PAGE_SIZE, \
                vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
        checker((expected_pages) * PAGE_SIZE, \
                __vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)

/* Sizes are in pages (and open-coded for side-effects), not bytes. */
#define TEST_kvmalloc(checker, expected_pages, alloc_pages) do { \
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
        size_t prev_size; \
        void *orig; \
        \
        checker((expected_pages) * PAGE_SIZE, \
                kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
                kvfree(p)); \
        checker((expected_pages) * PAGE_SIZE, \
                kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
                kvfree(p)); \
        checker((expected_pages) * PAGE_SIZE, \
                kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
                kvfree(p)); \
        checker((expected_pages) * PAGE_SIZE, \
                kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
                kvfree(p)); \
        checker((expected_pages) * PAGE_SIZE, \
                kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
                kvfree(p)); \
        checker((expected_pages) * PAGE_SIZE, \
                kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
                kvfree(p)); \
        checker((expected_pages) * PAGE_SIZE, \
                kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
                kvfree(p)); \
        checker((expected_pages) * PAGE_SIZE, \
                kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
                kvfree(p)); \
        \
        prev_size = (expected_pages) * PAGE_SIZE; \
        orig = kvmalloc(prev_size, gfp); \
        KUNIT_EXPECT_TRUE(test, orig != NULL); \
        checker(((expected_pages) * PAGE_SIZE) * 2, \
                kvrealloc(orig, ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
                kvfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)

#define TEST_devm_kmalloc(checker, expected_size, alloc_size) do { \
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
        const char dev_name[] = "fortify-test"; \
        struct device *dev; \
        void *orig; \
        size_t len; \
        \
        /* Create dummy device for devm_kmalloc()-family tests. */ \
        dev = kunit_device_register(test, dev_name); \
        KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
                "Cannot register test device\n"); \
        \
        checker(expected_size, devm_kmalloc(dev, alloc_size, gfp), \
                devm_kfree(dev, p)); \
        checker(expected_size, devm_kzalloc(dev, alloc_size, gfp), \
                devm_kfree(dev, p)); \
        checker(expected_size, \
                devm_kmalloc_array(dev, 1, alloc_size, gfp), \
                devm_kfree(dev, p)); \
        checker(expected_size, \
                devm_kmalloc_array(dev, alloc_size, 1, gfp), \
                devm_kfree(dev, p)); \
        checker(expected_size, \
                devm_kcalloc(dev, 1, alloc_size, gfp), \
                devm_kfree(dev, p)); \
        checker(expected_size, \
                devm_kcalloc(dev, alloc_size, 1, gfp), \
                devm_kfree(dev, p)); \
        \
        orig = devm_kmalloc(dev, alloc_size, gfp); \
        KUNIT_EXPECT_TRUE(test, orig != NULL); \
        checker((expected_size) * 2, \
                devm_krealloc(dev, orig, (alloc_size) * 2, gfp), \
                devm_kfree(dev, p)); \
        \
        len = 4; \
        /* Using memdup() with fixed size, so force unknown length. */ \
        if (!__builtin_constant_p(expected_size)) \
                len += zero_size; \
        checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
                devm_kfree(dev, p)); \
        \
        kunit_device_unregister(test, dev); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)

static const char * const test_strs[] = {
        "",
        "Hello there",
        "A longer string, just for variety",
};

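/*
 * The lengths below come from strlen() at runtime, so only __bdos (not
 * __bos) can see the resulting allocation sizes; hence check_dynamic.
 */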
#define TEST_realloc(checker) do { \
        gfp_t gfp = GFP_KERNEL; \
        size_t len; \
        int i; \
        \
        for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \
                len = strlen(test_strs[i]); \
                KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \
                checker(len, kmemdup_array(test_strs[i], 1, len, gfp), \
                        kfree(p)); \
                checker(len, kmemdup(test_strs[i], len, gfp), \
                        kfree(p)); \
        } \
} while (0)
static void fortify_test_realloc_size(struct kunit *test)
{
        TEST_realloc(check_dynamic);
}

/*
 * We can't have an array at the end of a structure or else
 * builds without -fstrict-flex-arrays=3 will report it as
 * having an unknown length. Additionally, add bytes before
 * and after the string to catch over/underflows if tests
 * fail.
 */
struct fortify_padding {
        unsigned long bytes_before;
        char buf[32];
        unsigned long bytes_after;
};
/* Force compiler into not being able to resolve size at compile-time. */
static volatile int unconst;

static void fortify_test_strlen(struct kunit *test)
{
        struct fortify_padding pad = { };
        int i, end = sizeof(pad.buf) - 1;

        /* Fill 31 bytes with valid characters. */
        for (i = 0; i < sizeof(pad.buf) - 1; i++)
                pad.buf[i] = i + '0';
        /* Trailing bytes are still %NUL. */
        KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* String is terminated, so strlen() is valid. */
        KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

        /* Make string unterminated, and recount. */
        pad.buf[end] = 'A';
        end = sizeof(pad.buf);
        KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
}

static void fortify_test_strnlen(struct kunit *test)
{
        struct fortify_padding pad = { };
        int i, end = sizeof(pad.buf) - 1;

        /* Fill 31 bytes with valid characters. */
        for (i = 0; i < sizeof(pad.buf) - 1; i++)
                pad.buf[i] = i + '0';
        /* Trailing bytes are still %NUL. */
        KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* String is terminated, so strnlen() is valid. */
        KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        /* A truncated strnlen() will be safe, too. */
        KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
                        sizeof(pad.buf) / 2);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

        /* Make string unterminated, and recount. */
        pad.buf[end] = 'A';
        end = sizeof(pad.buf);
        /* Reading beyond with strnlen() will fail. */
        KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);

        /* Early-truncated is safe still, though. */
        KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);

        end = sizeof(pad.buf) / 2;
        KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_strcpy(struct kunit *test)
{
        struct fortify_padding pad = { };
        char src[sizeof(pad.buf) + 1] = { };
        int i;

        /* Fill 31 bytes with valid characters. */
        for (i = 0; i < sizeof(src) - 2; i++)
                src[i] = i + '0';

        /* Destination is %NUL-filled to start with. */
        KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* Legitimate strcpy() 1 less than max size. */
        KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
                                == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Only last byte should be %NUL */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

        src[sizeof(src) - 2] = 'A';
        /* But now we trip the overflow checking. */
        KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
                                == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
        /* Trailing %NUL -- thanks to FORTIFY. */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        /* And we will not have gone beyond. */
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        src[sizeof(src) - 1] = 'A';
        /* And for sure now, two bytes past. */
        KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
                                == pad.buf);
        /*
         * Which trips both the strlen() on the unterminated src,
         * and the resulting copy attempt.
         */
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
        /* Trailing %NUL -- thanks to FORTIFY. */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        /* And we will not have gone beyond. */
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strncpy(struct kunit *test)
{
        struct fortify_padding pad = { };
        char src[] = "Copy me fully into a small buffer and I will overflow!";

        /* Destination is %NUL-filled to start with. */
        KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* Legitimate strncpy() 1 less than max size. */
        KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
                                        sizeof(pad.buf) + unconst - 1)
                                == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Only last byte should be %NUL */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

        /* Legitimate (though unterminated) max-size strncpy. */
        KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
                                        sizeof(pad.buf) + unconst)
                                == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* No trailing %NUL -- thanks strncpy API. */
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        /* But we will not have gone beyond. */
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* Now verify that FORTIFY is working... */
        KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
                                        sizeof(pad.buf) + unconst + 1)
                                == pad.buf);
        /* Should catch the overflow. */
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        /* And we will not have gone beyond. */
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* And further... */
        KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
                                        sizeof(pad.buf) + unconst + 2)
                                == pad.buf);
        /* Should catch the overflow. */
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        /* And we will not have gone beyond. */
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strscpy(struct kunit *test)
{
        struct fortify_padding pad = { };
        char src[] = "Copy me fully into a small buffer and I will overflow!";

        /* Destination is %NUL-filled to start with. */
        KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* Legitimate strscpy() 1 less than max size. */
        KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
                                      sizeof(pad.buf) + unconst - 1),
                        -E2BIG);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Keeping space for %NUL, last two bytes should be %NUL */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

        /* Legitimate max-size strscpy. */
        KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
                                      sizeof(pad.buf) + unconst),
                        -E2BIG);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* A trailing %NUL will exist. */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

        /* Now verify that FORTIFY is working... */
        KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
                                      sizeof(pad.buf) + unconst + 1),
                        -E2BIG);
        /* Should catch the overflow. */
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        /* And we will not have gone beyond. */
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* And much further... */
        KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
                                      sizeof(src) * 2 + unconst),
                        -E2BIG);
        /* Should catch the overflow. */
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        /* And we will not have gone beyond. */
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strcat(struct kunit *test)
{
        struct fortify_padding pad = { };
        char src[sizeof(pad.buf) / 2] = { };
        char one[] = "A";
        char two[] = "BC";
        int i;

        /* Fill 15 bytes with valid characters. */
        for (i = 0; i < sizeof(src) - 1; i++)
                src[i] = i + 'A';

        /* Destination is %NUL-filled to start with. */
        KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* Legitimate strcat() using less than half max size. */
        KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Legitimate strcat() now 2 bytes shy of end. */
        KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Last two bytes should be %NUL */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

        /* Add one more character to the end. */
        KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Last byte should be %NUL */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

        /* And this one char will overflow. */
        KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
        /* Last byte should be %NUL thanks to FORTIFY. */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* And adding two will overflow more. */
        KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
        /* Last byte should be %NUL thanks to FORTIFY. */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strncat(struct kunit *test)
{
        struct fortify_padding pad = { };
        char src[sizeof(pad.buf)] = { };
        int i, partial;

        /* Fill 15 bytes with valid characters. */
        partial = sizeof(src) / 2 - 1;
        for (i = 0; i < partial; i++)
                src[i] = i + 'A';

        /* Destination is %NUL-filled to start with. */
        KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* Legitimate strncat() using less than half max size. */
        KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Legitimate strncat() now 2 bytes shy of end. */
        KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Last two bytes should be %NUL */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

        /* Add one more character to the end. */
        KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Last byte should be %NUL */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

        /* And this one char will overflow. */
        KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
        /* Last byte should be %NUL thanks to FORTIFY. */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* And adding two will overflow more. */
        KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
        /* Last byte should be %NUL thanks to FORTIFY. */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* Force an unterminated destination, and overflow. */
        pad.buf[sizeof(pad.buf) - 1] = 'A';
        KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
        /* This will have tripped both strlen() and strncat(). */
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        /* But we should not go beyond the end. */
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strlcat(struct kunit *test)
{
        struct fortify_padding pad = { };
        char src[sizeof(pad.buf)] = { };
        int i, partial;
        int len = sizeof(pad.buf) + unconst;

        /* Fill 15 bytes with valid characters. */
        partial = sizeof(src) / 2 - 1;
        for (i = 0; i < partial; i++)
                src[i] = i + 'A';

        /* Destination is %NUL-filled to start with. */
        KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* Legitimate strlcat() using less than half max size. */
        KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Legitimate strlcat() now 2 bytes shy of end. */
        KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Last two bytes should be %NUL */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

        /* Add one more character to the end. */
        KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
        /* Last byte should be %NUL */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

        /* And this one char will overflow. */
        KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
        /* Last byte should be %NUL thanks to FORTIFY. */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* And adding two will overflow more. */
        KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
        /* Last byte should be %NUL thanks to FORTIFY. */
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* Force an unterminated destination, and overflow. */
        pad.buf[sizeof(pad.buf) - 1] = 'A';
        KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
        /* This will have tripped both strlen() and strlcat(). */
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
        KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
        /* But we should not go beyond the end. */
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

        /* Force an unterminated source, and overflow. */
        memset(src, 'B', sizeof(src));
        pad.buf[sizeof(pad.buf) - 1] = '\0';
        KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
        /* This will have tripped both strlen() and strlcat(). */
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
        KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
        /* But we should not go beyond the end. */
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

/* Check for 0-sized arrays... */
struct fortify_zero_sized {
        unsigned long bytes_before;
        char buf[0];
        unsigned long bytes_after;
};

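/*
 * Shared test body for the memcpy()-family functions checked below;
 * instantiated for both memcpy() and memmove().
 */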
#define __fortify_test(memfunc) \
static void fortify_test_##memfunc(struct kunit *test) \
{ \
        struct fortify_zero_sized zero = { }; \
        struct fortify_padding pad = { }; \
        char srcA[sizeof(pad.buf) + 2]; \
        char srcB[sizeof(pad.buf) + 2]; \
        size_t len = sizeof(pad.buf) + unconst; \
        \
        memset(srcA, 'A', sizeof(srcA)); \
        KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \
        memset(srcB, 'B', sizeof(srcB)); \
        KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \
        \
        memfunc(pad.buf, srcA, 0 + unconst); \
        KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
        memfunc(pad.buf + 1, srcB, 1 + unconst); \
        KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
        KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
        KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
        memfunc(pad.buf, srcA, 1 + unconst); \
        KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \
        KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
        memfunc(pad.buf, srcA, len - 1); \
        KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
        KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
        memfunc(pad.buf, srcA, len); \
        KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
        KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
        memfunc(pad.buf, srcA, len + 1); \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
        memfunc(pad.buf + 1, srcB, len); \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \
        \
        /* Reset error counter. */ \
        fortify_write_overflows = 0; \
        /* Copy nothing into nothing: no errors. */ \
        memfunc(zero.buf, srcB, 0 + unconst); \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
        memfunc(zero.buf, srcB, 1 + unconst); \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
}
__fortify_test(memcpy)
__fortify_test(memmove)

static void fortify_test_memscan(struct kunit *test)
{
        char haystack[] = "Where oh where is my memory range?";
        char *mem = haystack + strlen("Where oh where is ");
        char needle = 'm';
        size_t len = sizeof(haystack) + unconst;

        KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
                            mem);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        /* Catch too-large range. */
        KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
                            NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
                            NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_memchr(struct kunit *test)
{
        char haystack[] = "Where oh where is my memory range?";
        char *mem = haystack + strlen("Where oh where is ");
        char needle = 'm';
        size_t len = sizeof(haystack) + unconst;

        KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
                            mem);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        /* Catch too-large range. */
        KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
                            NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
                            NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_memchr_inv(struct kunit *test)
{
        char haystack[] = "Where oh where is my memory range?";
        char *mem = haystack + 1;
        char needle = 'W';
        size_t len = sizeof(haystack) + unconst;

        /* Normal search is okay. */
        KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
                            mem);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        /* Catch too-large range. */
        KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
                            NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
                            NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_memcmp(struct kunit *test)
{
        char one[] = "My mind is going ...";
        char two[] = "My mind is going ... I can feel it.";
        size_t one_len = sizeof(one) + unconst - 1;
        size_t two_len = sizeof(two) + unconst - 1;

        /* We match the first string (ignoring the %NUL). */
        KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        /* Still in bounds, but no longer matching. */
        KUNIT_ASSERT_LT(test, memcmp(one, two, one_len + 1), 0);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

        /* Catch too-large ranges. */
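        /*
         * On overflow, the fortified memcmp() is expected to return INT_MIN,
         * the retfail value handed to the fortify_panic() override above.
         */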
        KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);

        KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_kmemdup(struct kunit *test)
{
        char src[] = "I got Doom running on it!";
        char *copy;
        size_t len = sizeof(src) + unconst;

        /* Copy is within bounds. */
        copy = kmemdup(src, len, GFP_KERNEL);
        KUNIT_EXPECT_NOT_NULL(test, copy);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        kfree(copy);

        /* Without %NUL. */
        copy = kmemdup(src, len - 1, GFP_KERNEL);
        KUNIT_EXPECT_NOT_NULL(test, copy);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        kfree(copy);

        /* Tiny bounds. */
        copy = kmemdup(src, 1, GFP_KERNEL);
        KUNIT_EXPECT_NOT_NULL(test, copy);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        kfree(copy);

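        /*
         * Each out-of-bounds kmemdup() below is expected to return
         * ZERO_SIZE_PTR, the retfail value handed to the fortify_panic()
         * override above, rather than performing the copy.
         */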
        /* Out of bounds by 1 byte. */
        copy = kmemdup(src, len + 1, GFP_KERNEL);
        KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        kfree(copy);

        /* Way out of bounds. */
        copy = kmemdup(src, len * 2, GFP_KERNEL);
        KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
        kfree(copy);

        /* Starting offset causing out of bounds. */
        copy = kmemdup(src + 1, len, GFP_KERNEL);
        KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
        kfree(copy);
}

static int fortify_test_init(struct kunit *test)
{
        if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
                kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");

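        /*
         * Register the named per-test counters that fortify_add_kunit_error()
         * looks up and increments on each tracked overflow.
         */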
        fortify_read_overflows = 0;
        kunit_add_named_resource(test, NULL, NULL, &read_resource,
                                 "fortify_read_overflows",
                                 &fortify_read_overflows);
        fortify_write_overflows = 0;
        kunit_add_named_resource(test, NULL, NULL, &write_resource,
                                 "fortify_write_overflows",
                                 &fortify_write_overflows);
        return 0;
}

static struct kunit_case fortify_test_cases[] = {
        KUNIT_CASE(fortify_test_known_sizes),
        KUNIT_CASE(fortify_test_control_flow_split),
        KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
        KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
        KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
        KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
        KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
        KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
        KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
        KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
        KUNIT_CASE(fortify_test_realloc_size),
        KUNIT_CASE(fortify_test_strlen),
        KUNIT_CASE(fortify_test_strnlen),
        KUNIT_CASE(fortify_test_strcpy),
        KUNIT_CASE(fortify_test_strncpy),
        KUNIT_CASE(fortify_test_strscpy),
        KUNIT_CASE(fortify_test_strcat),
        KUNIT_CASE(fortify_test_strncat),
        KUNIT_CASE(fortify_test_strlcat),
        /* skip memset: performs bounds checking on whole structs */
        KUNIT_CASE(fortify_test_memcpy),
        KUNIT_CASE(fortify_test_memmove),
        KUNIT_CASE(fortify_test_memscan),
        KUNIT_CASE(fortify_test_memchr),
        KUNIT_CASE(fortify_test_memchr_inv),
        KUNIT_CASE(fortify_test_memcmp),
        KUNIT_CASE(fortify_test_kmemdup),
        {}
};

static struct kunit_suite fortify_test_suite = {
        .name = "fortify",
        .init = fortify_test_init,
        .test_cases = fortify_test_cases,
};

kunit_test_suite(fortify_test_suite);

MODULE_DESCRIPTION("Runtime test cases for CONFIG_FORTIFY_SOURCE");
MODULE_LICENSE("GPL");