// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
/*
 * kmod stress test driver
 *
 * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*
 * This driver provides an interface to trigger and test the kernel's
 * module loader through a series of configurations and a few triggers.
 * To test this driver use the following script as root:
 *
 * tools/testing/selftests/kmod/kmod.sh --help
 */
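
/*
 * Each registered test device is a misc device named test_kmod<N> which
 * exposes the sysfs attributes defined below (config, trigger_config,
 * reset, config_test_driver, config_test_fs, config_num_threads,
 * config_test_case and test_result), typically found under
 * /sys/devices/virtual/misc/test_kmod<N>/.
 */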

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/printk.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/device.h>

#define TEST_START_NUM_THREADS	50
#define TEST_START_DRIVER	"test_module"
#define TEST_START_TEST_FS	"xfs"
#define TEST_START_TEST_CASE	TEST_KMOD_DRIVER


static bool force_init_test = false;
module_param(force_init_test, bool_enable_only, 0644);
MODULE_PARM_DESC(force_init_test,
		 "Force kicking a test immediately after driver loads");

/*
 * For device allocation / registration
 */
static DEFINE_MUTEX(reg_dev_mutex);
static LIST_HEAD(reg_test_devs);

/*
 * num_test_devs actually represents the ID of the *next*
 * test device we will allow to be created.
 */
static int num_test_devs;

/**
 * enum kmod_test_case - linker table test case
 * @TEST_KMOD_DRIVER: stress tests request_module()
 * @TEST_KMOD_FS_TYPE: stress tests get_fs_type()
 *
 * If you add a test case, please be sure to review whether you need to
 * set @need_mod_put for your test case.
 */
enum kmod_test_case {
	/* private: */
	__TEST_KMOD_INVALID = 0,
	/* public: */

	TEST_KMOD_DRIVER,
	TEST_KMOD_FS_TYPE,

	/* private: */
	__TEST_KMOD_MAX,
};

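/**
 * struct test_config - configuration used for a test run
 *
 * @test_driver: the name of the module to load with request_module() for
 *	@TEST_KMOD_DRIVER, defaults to TEST_START_DRIVER
 * @test_fs: the name of the filesystem to look up with get_fs_type() for
 *	@TEST_KMOD_FS_TYPE, defaults to TEST_START_TEST_FS
 * @num_threads: number of threads to spawn on a trigger
 * @test_case: the test case to run, one of &enum kmod_test_case
 * @test_result: the last collected test result, 0 on success or the first
 *	error found while tallying up the threads' results
 */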
struct test_config {
	char *test_driver;
	char *test_fs;
	unsigned int num_threads;
	enum kmod_test_case test_case;
	int test_result;
};

struct kmod_test_device;

/**
 * struct kmod_test_device_info - thread info
 *
 * @ret_sync: return value of the synchronous request_module() call used for
 *	@TEST_KMOD_DRIVER
 * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
 * @task_sync: kthread's task_struct or %NULL if not running
 * @thread_idx: thread ID
 * @test_dev: test device the test is being performed under
 * @need_mod_put: Some tests (get_fs_type() is one) require putting the module
 *	(module_put(fs_sync->owner)) when done, otherwise you will not be able
 *	to unload the respective modules and re-test. We use this to keep
 *	track of when we need this and to help out in case we need to
 *	error out and deal with module_put() on error.
 */
struct kmod_test_device_info {
	int ret_sync;
	struct file_system_type *fs_sync;
	struct task_struct *task_sync;
	unsigned int thread_idx;
	struct kmod_test_device *test_dev;
	bool need_mod_put;
};

/**
 * struct kmod_test_device - test device to help test kmod
 *
 * @dev_idx: unique ID for test device
 * @config: configuration for the test
 * @misc_dev: we use a misc device under the hood
 * @dev: pointer to misc_dev's own struct device
 * @config_mutex: protects configuration of test
 * @trigger_mutex: the test trigger can only be fired once at a time
 * @thread_mutex: protects the @done count and the @info for each thread
 * @done: number of threads which have completed or failed
 * @test_is_oom: when we run out of memory, use this to halt moving forward
 * @kthreads_done: completion used to signal when all work is done
 * @list: node used to add this device to the reg_test_devs list
 * @info: array of info for each thread
 */
struct kmod_test_device {
	int dev_idx;
	struct test_config config;
	struct miscdevice misc_dev;
	struct device *dev;
	struct mutex config_mutex;
	struct mutex trigger_mutex;
	struct mutex thread_mutex;

	unsigned int done;

	bool test_is_oom;
	struct completion kthreads_done;
	struct list_head list;

	struct kmod_test_device_info *info;
};

static const char *test_case_str(enum kmod_test_case test_case)
{
	switch (test_case) {
	case TEST_KMOD_DRIVER:
		return "TEST_KMOD_DRIVER";
	case TEST_KMOD_FS_TYPE:
		return "TEST_KMOD_FS_TYPE";
	default:
		return "invalid";
	}
}

static struct miscdevice *dev_to_misc_dev(struct device *dev)
{
	return dev_get_drvdata(dev);
}

static struct kmod_test_device *misc_dev_to_test_dev(struct miscdevice *misc_dev)
{
	return container_of(misc_dev, struct kmod_test_device, misc_dev);
}

static struct kmod_test_device *dev_to_test_dev(struct device *dev)
{
	struct miscdevice *misc_dev;

	misc_dev = dev_to_misc_dev(dev);

	return misc_dev_to_test_dev(misc_dev);
}

/* Must run with thread_mutex held */
static void kmod_test_done_check(struct kmod_test_device *test_dev,
				 unsigned int idx)
{
	struct test_config *config = &test_dev->config;

	test_dev->done++;
	dev_dbg(test_dev->dev, "Done thread count: %u\n", test_dev->done);

	if (test_dev->done == config->num_threads) {
		dev_info(test_dev->dev, "Done: %u threads have all run now\n",
			 test_dev->done);
		dev_info(test_dev->dev, "Last thread to run: %u\n", idx);
		complete(&test_dev->kthreads_done);
	}
}

static void test_kmod_put_module(struct kmod_test_device_info *info)
{
	struct kmod_test_device *test_dev = info->test_dev;
	struct test_config *config = &test_dev->config;

	if (!info->need_mod_put)
		return;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		break;
	case TEST_KMOD_FS_TYPE:
		if (info->fs_sync && info->fs_sync->owner)
			module_put(info->fs_sync->owner);
		break;
	default:
		BUG();
	}

	/* the module reference (if any) has been dropped, don't put it again */
	info->need_mod_put = false;
}

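/*
 * Thread worker for a single test request; kicked off via kthread_run()
 * from try_one_request(), one instance per configured thread.
 */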
static int run_request(void *data)
{
	struct kmod_test_device_info *info = data;
	struct kmod_test_device *test_dev = info->test_dev;
	struct test_config *config = &test_dev->config;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		info->ret_sync = request_module("%s", config->test_driver);
		break;
	case TEST_KMOD_FS_TYPE:
		info->fs_sync = get_fs_type(config->test_fs);
		info->need_mod_put = true;
		break;
	default:
		/* __trigger_config_run() already checked for test sanity */
		BUG();
		return -EINVAL;
	}

	dev_dbg(test_dev->dev, "Ran thread %u\n", info->thread_idx);

	test_kmod_put_module(info);

	mutex_lock(&test_dev->thread_mutex);
	info->task_sync = NULL;
	kmod_test_done_check(test_dev, info->thread_idx);
	mutex_unlock(&test_dev->thread_mutex);

	return 0;
}

static int tally_work_test(struct kmod_test_device_info *info)
{
	struct kmod_test_device *test_dev = info->test_dev;
	struct test_config *config = &test_dev->config;
	int err_ret = 0;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		/*
		 * Only capture errors, if one is found that's
		 * enough, for now.
		 */
		if (info->ret_sync != 0)
			err_ret = info->ret_sync;
		dev_info(test_dev->dev,
			 "Sync thread %d return status: %d\n",
			 info->thread_idx, info->ret_sync);
		break;
	case TEST_KMOD_FS_TYPE:
		/* For now we make this simple */
		if (!info->fs_sync)
			err_ret = -EINVAL;
		dev_info(test_dev->dev, "Sync thread %u fs: %s\n",
			 info->thread_idx, info->fs_sync ? config->test_fs :
			 "NULL");
		break;
	default:
		BUG();
	}

	return err_ret;
}

/*
 * XXX: add result option to display if all errors did not match.
 * For now we just keep any error code if one was found.
 *
 * If this ran it means *all* tasks were created fine and we
 * are now just collecting results.
 *
 * Only propagate errors, do not override with a subsequent success case.
 */
static void tally_up_work(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	struct kmod_test_device_info *info;
	unsigned int idx;
	int err_ret = 0;
	int ret = 0;

	mutex_lock(&test_dev->thread_mutex);

	dev_info(test_dev->dev, "Results:\n");

	for (idx = 0; idx < config->num_threads; idx++) {
		info = &test_dev->info[idx];
		ret = tally_work_test(info);
		if (ret)
			err_ret = ret;
	}

	/*
	 * Note: request_module() returns 256 for a module not found even
	 * though modprobe itself returns 1.
	 */
	config->test_result = err_ret;

	mutex_unlock(&test_dev->thread_mutex);
}

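/*
 * Spawns the kthread for one thread slot. On failure the slot's result is
 * set to -ENOMEM and test_is_oom is flagged so no further threads are kicked.
 */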
static int try_one_request(struct kmod_test_device *test_dev, unsigned int idx)
{
	struct kmod_test_device_info *info = &test_dev->info[idx];
	int fail_ret = -ENOMEM;

	mutex_lock(&test_dev->thread_mutex);

	info->thread_idx = idx;
	info->test_dev = test_dev;
	info->task_sync = kthread_run(run_request, info, "%s-%u",
				      KBUILD_MODNAME, idx);

	if (!info->task_sync || IS_ERR(info->task_sync)) {
		test_dev->test_is_oom = true;
		dev_err(test_dev->dev, "Setting up thread %u failed\n", idx);
		info->task_sync = NULL;
		goto err_out;
	} else
		dev_dbg(test_dev->dev, "Kicked off thread %u\n", idx);

	mutex_unlock(&test_dev->thread_mutex);

	return 0;

err_out:
	info->ret_sync = fail_ret;
	mutex_unlock(&test_dev->thread_mutex);

	return fail_ret;
}

static void test_dev_kmod_stop_tests(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	struct kmod_test_device_info *info;
	unsigned int i;

	dev_info(test_dev->dev, "Ending request_module() tests\n");

	mutex_lock(&test_dev->thread_mutex);

	for (i = 0; i < config->num_threads; i++) {
		info = &test_dev->info[i];
		if (info->task_sync && !IS_ERR(info->task_sync)) {
			dev_info(test_dev->dev,
				 "Stopping still-running thread %i\n", i);
			kthread_stop(info->task_sync);
		}

		/*
		 * info->task_sync is well protected, it can only be
		 * NULL or a pointer to a struct. If it's NULL we either
		 * never ran, or we did and we completed the work. Completed
		 * tasks *always* put the module for us. This is a sanity
		 * check -- just in case.
		 */
		if (info->task_sync && info->need_mod_put)
			test_kmod_put_module(info);
	}

	mutex_unlock(&test_dev->thread_mutex);
}

/*
 * Only wait *iff* we did not run into any errors during all of our thread
 * set up. If we run into any issues we stop threads and just bail out with
 * an error to the trigger. This also means we don't need any tally work
 * for any threads which fail.
 */
static int try_requests(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	unsigned int idx;
	int ret;
	bool any_error = false;

	for (idx = 0; idx < config->num_threads; idx++) {
		if (test_dev->test_is_oom) {
			any_error = true;
			break;
		}

		ret = try_one_request(test_dev, idx);
		if (ret) {
			any_error = true;
			break;
		}
	}

	if (!any_error) {
		test_dev->test_is_oom = false;
		dev_info(test_dev->dev,
			 "No errors were found while initializing threads\n");
		wait_for_completion(&test_dev->kthreads_done);
		tally_up_work(test_dev);
	} else {
		test_dev->test_is_oom = true;
		dev_info(test_dev->dev,
			 "At least one thread failed to start, stop all work\n");
		test_dev_kmod_stop_tests(test_dev);
		return -ENOMEM;
	}

	return 0;
}

static int run_test_driver(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	dev_info(test_dev->dev, "Test case: %s (%u)\n",
		 test_case_str(config->test_case),
		 config->test_case);
	dev_info(test_dev->dev, "Test driver to load: %s\n",
		 config->test_driver);
	dev_info(test_dev->dev, "Number of threads to run: %u\n",
		 config->num_threads);
	dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
		 config->num_threads - 1);

	return try_requests(test_dev);
}

static int run_test_fs_type(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	dev_info(test_dev->dev, "Test case: %s (%u)\n",
		 test_case_str(config->test_case),
		 config->test_case);
	dev_info(test_dev->dev, "Test filesystem to load: %s\n",
		 config->test_fs);
	dev_info(test_dev->dev, "Number of threads to run: %u\n",
		 config->num_threads);
	dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
		 config->num_threads - 1);

	return try_requests(test_dev);
}

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;
	int len = 0;

	mutex_lock(&test_dev->config_mutex);

	len += snprintf(buf, PAGE_SIZE,
			"Custom trigger configuration for: %s\n",
			dev_name(dev));

	len += snprintf(buf+len, PAGE_SIZE - len,
			"Number of threads:\t%u\n",
			config->num_threads);

	len += snprintf(buf+len, PAGE_SIZE - len,
			"Test_case:\t%s (%u)\n",
			test_case_str(config->test_case),
			config->test_case);

	if (config->test_driver)
		len += snprintf(buf+len, PAGE_SIZE - len,
				"driver:\t%s\n",
				config->test_driver);
	else
		len += snprintf(buf+len, PAGE_SIZE - len,
				"driver:\tEMPTY\n");

	if (config->test_fs)
		len += snprintf(buf+len, PAGE_SIZE - len,
				"fs:\t%s\n",
				config->test_fs);
	else
		len += snprintf(buf+len, PAGE_SIZE - len,
				"fs:\tEMPTY\n");

	mutex_unlock(&test_dev->config_mutex);

	return len;
}
static DEVICE_ATTR_RO(config);

/*
 * This ensures we don't allow kicking threads through if our configuration
 * is faulty.
 */
static int __trigger_config_run(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	test_dev->done = 0;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		return run_test_driver(test_dev);
	case TEST_KMOD_FS_TYPE:
		return run_test_fs_type(test_dev);
	default:
		dev_warn(test_dev->dev,
			 "Invalid test case requested: %u\n",
			 config->test_case);
		return -EINVAL;
	}
}

static int trigger_config_run(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	int ret;

	mutex_lock(&test_dev->trigger_mutex);
	mutex_lock(&test_dev->config_mutex);

	ret = __trigger_config_run(test_dev);
	if (ret < 0)
		goto out;
	dev_info(test_dev->dev, "General test result: %d\n",
		 config->test_result);

	/*
	 * We must return 0 after a trigger event unless something went
	 * wrong with the setup of the test. If the test setup went fine
	 * then userspace must just check the result of config->test_result.
	 * One issue with relying on the return value of a call in the kernel
	 * is that if the kernel returns a positive value, using this trigger
	 * will not return that value to userspace, it would be lost.
	 *
	 * By not relying on capturing the return value of the tests we run
	 * through the trigger, it also allows us to run tests with set -e
	 * and only fail when something went wrong with the driver upon
	 * trigger requests.
	 */
	ret = 0;

out:
	mutex_unlock(&test_dev->config_mutex);
	mutex_unlock(&test_dev->trigger_mutex);

	return ret;
}

static ssize_t
trigger_config_store(struct device *dev,
		     struct device_attribute *attr,
		     const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	int ret;

	if (test_dev->test_is_oom)
		return -ENOMEM;

	/*
	 * For all intents and purposes we don't care what userspace
	 * sent with this trigger, we care only that we were triggered.
	 * We treat the return value only for capturing issues with
	 * the test setup. At this point all the test variables should
	 * have been allocated so typically this should never fail.
	 */
	ret = trigger_config_run(test_dev);
	if (unlikely(ret < 0))
		goto out;

	/*
	 * Note: any return > 0 will be treated as success
	 * and the error value will not be available to userspace.
	 * Do not rely on trying to send a test's return value to
	 * userspace as positive return errors will be lost.
	 */
	if (WARN_ON(ret > 0))
		return -EINVAL;

	ret = count;
out:
	return ret;
}
static DEVICE_ATTR_WO(trigger_config);
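
/*
 * Example usage from userspace, assuming the first registered device and
 * the usual sysfs location for misc devices (path not verified here):
 *
 *	echo -n "xfs" > /sys/devices/virtual/misc/test_kmod0/config_test_fs
 *	echo -n "2"   > /sys/devices/virtual/misc/test_kmod0/config_test_case
 *	echo -n "1"   > /sys/devices/virtual/misc/test_kmod0/trigger_config
 *	cat /sys/devices/virtual/misc/test_kmod0/test_result
 *
 * The kmod.sh selftest referenced at the top of this file wraps all of this.
 */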

/*
 * XXX: move to kstrncpy() once merged.
 *
 * Users should use kfree_const() when freeing these.
 */
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
	*dst = kstrndup(name, count, gfp);
	if (!*dst)
		return -ENOSPC;
	return count;
}

static int config_copy_test_driver_name(struct test_config *config,
				    const char *name,
				    size_t count)
{
	return __kstrncpy(&config->test_driver, name, count, GFP_KERNEL);
}

static int config_copy_test_fs(struct test_config *config, const char *name,
			       size_t count)
{
	return __kstrncpy(&config->test_fs, name, count, GFP_KERNEL);
}

static void __kmod_config_free(struct test_config *config)
{
	if (!config)
		return;

	kfree_const(config->test_driver);
	config->test_driver = NULL;

	kfree_const(config->test_fs);
	config->test_fs = NULL;
}

static void kmod_config_free(struct kmod_test_device *test_dev)
{
	struct test_config *config;

	if (!test_dev)
		return;

	config = &test_dev->config;

	mutex_lock(&test_dev->config_mutex);
	__kmod_config_free(config);
	mutex_unlock(&test_dev->config_mutex);
}

static ssize_t config_test_driver_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;
	int copied;

	mutex_lock(&test_dev->config_mutex);

	kfree_const(config->test_driver);
	config->test_driver = NULL;

	copied = config_copy_test_driver_name(config, buf, count);
	mutex_unlock(&test_dev->config_mutex);

	return copied;
}

/*
 * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
 */
static ssize_t config_test_show_str(struct mutex *config_mutex,
				    char *dst,
				    char *src)
{
	int len;

	mutex_lock(config_mutex);
	len = snprintf(dst, PAGE_SIZE, "%s\n", src);
	mutex_unlock(config_mutex);

	return len;
}

static ssize_t config_test_driver_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return config_test_show_str(&test_dev->config_mutex, buf,
				    config->test_driver);
}
static DEVICE_ATTR_RW(config_test_driver);

static ssize_t config_test_fs_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;
	int copied;

	mutex_lock(&test_dev->config_mutex);

	kfree_const(config->test_fs);
	config->test_fs = NULL;

	copied = config_copy_test_fs(config, buf, count);
	mutex_unlock(&test_dev->config_mutex);

	return copied;
}

static ssize_t config_test_fs_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return config_test_show_str(&test_dev->config_mutex, buf,
				    config->test_fs);
}
static DEVICE_ATTR_RW(config_test_fs);

static int trigger_config_run_type(struct kmod_test_device *test_dev,
				   enum kmod_test_case test_case,
				   const char *test_str)
{
	int copied = 0;
	struct test_config *config = &test_dev->config;

	mutex_lock(&test_dev->config_mutex);

	switch (test_case) {
	case TEST_KMOD_DRIVER:
		kfree_const(config->test_driver);
		config->test_driver = NULL;
		copied = config_copy_test_driver_name(config, test_str,
						      strlen(test_str));
		break;
	case TEST_KMOD_FS_TYPE:
		kfree_const(config->test_fs);
		config->test_fs = NULL;
		copied = config_copy_test_fs(config, test_str,
					     strlen(test_str));
		break;
	default:
		mutex_unlock(&test_dev->config_mutex);
		return -EINVAL;
	}

	config->test_case = test_case;

	mutex_unlock(&test_dev->config_mutex);

	if (copied <= 0 || copied != strlen(test_str)) {
		test_dev->test_is_oom = true;
		return -ENOMEM;
	}

	test_dev->test_is_oom = false;

	return trigger_config_run(test_dev);
}

static void free_test_dev_info(struct kmod_test_device *test_dev)
{
	vfree(test_dev->info);
	test_dev->info = NULL;
}

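/*
 * Reallocates the per-thread info array so it matches the configured number
 * of threads; used on config init and whenever num_threads is updated.
 */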
static int kmod_config_sync_info(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	free_test_dev_info(test_dev);
	test_dev->info =
		vzalloc(array_size(sizeof(struct kmod_test_device_info),
				   config->num_threads));
	if (!test_dev->info)
		return -ENOMEM;

	return 0;
}

/*
 * Old kernels may not have this; keep that in mind if you want to port
 * this code to test it on older kernels.
 */
#ifdef get_kmod_umh_limit
static unsigned int kmod_init_test_thread_limit(void)
{
	return get_kmod_umh_limit();
}
#else
static unsigned int kmod_init_test_thread_limit(void)
{
	return TEST_START_NUM_THREADS;
}
#endif

static int __kmod_config_init(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	int ret = -ENOMEM, copied;

	__kmod_config_free(config);

	copied = config_copy_test_driver_name(config, TEST_START_DRIVER,
					      strlen(TEST_START_DRIVER));
	if (copied != strlen(TEST_START_DRIVER))
		goto err_out;

	copied = config_copy_test_fs(config, TEST_START_TEST_FS,
				     strlen(TEST_START_TEST_FS));
	if (copied != strlen(TEST_START_TEST_FS))
		goto err_out;

	config->num_threads = kmod_init_test_thread_limit();
	config->test_result = 0;
	config->test_case = TEST_START_TEST_CASE;

	ret = kmod_config_sync_info(test_dev);
	if (ret)
		goto err_out;

	test_dev->test_is_oom = false;

	return 0;

err_out:
	test_dev->test_is_oom = true;
	WARN_ON(test_dev->test_is_oom);

	__kmod_config_free(config);

	return ret;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	int ret;

	mutex_lock(&test_dev->trigger_mutex);
	mutex_lock(&test_dev->config_mutex);

	ret = __kmod_config_init(test_dev);
	if (ret < 0) {
		ret = -ENOMEM;
		dev_err(dev, "could not alloc settings for config trigger: %d\n",
		       ret);
		goto out;
	}

	dev_info(dev, "reset\n");
	ret = count;

out:
	mutex_unlock(&test_dev->config_mutex);
	mutex_unlock(&test_dev->trigger_mutex);

	return ret;
}
static DEVICE_ATTR_WO(reset);

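/*
 * Updates an unsigned int config value from a sysfs write and calls
 * @test_sync to re-sync dependent state; on sync failure the old value is
 * restored and -EINVAL is returned.
 */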
static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
					    const char *buf, size_t size,
					    unsigned int *config,
					    int (*test_sync)(struct kmod_test_device *test_dev))
{
	int ret;
	unsigned int val;
	unsigned int old_val;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&test_dev->config_mutex);

	old_val = *config;
	*(unsigned int *)config = val;

	ret = test_sync(test_dev);
	if (ret) {
		*(unsigned int *)config = old_val;

		ret = test_sync(test_dev);
		WARN_ON(ret);

		mutex_unlock(&test_dev->config_mutex);
		return -EINVAL;
	}

	mutex_unlock(&test_dev->config_mutex);
	/* Always return full write size even if we didn't consume all */
	return size;
}

static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
					     const char *buf, size_t size,
					     unsigned int *config,
					     unsigned int min,
					     unsigned int max)
{
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val < min || val > max)
		return -EINVAL;

	mutex_lock(&test_dev->config_mutex);
	*config = val;
	mutex_unlock(&test_dev->config_mutex);

	/* Always return full write size even if we didn't consume all */
	return size;
}

static int test_dev_config_update_int(struct kmod_test_device *test_dev,
				      const char *buf, size_t size,
				      int *config)
{
	int val;
	int ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&test_dev->config_mutex);
	*config = val;
	mutex_unlock(&test_dev->config_mutex);
	/* Always return full write size even if we didn't consume all */
	return size;
}

static ssize_t test_dev_config_show_int(struct kmod_test_device *test_dev,
					char *buf,
					int config)
{
	int val;

	mutex_lock(&test_dev->config_mutex);
	val = config;
	mutex_unlock(&test_dev->config_mutex);

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t test_dev_config_show_uint(struct kmod_test_device *test_dev,
					 char *buf,
					 unsigned int config)
{
	unsigned int val;

	mutex_lock(&test_dev->config_mutex);
	val = config;
	mutex_unlock(&test_dev->config_mutex);

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t test_result_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_update_int(test_dev, buf, count,
					  &config->test_result);
}

static ssize_t config_num_threads_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_update_uint_sync(test_dev, buf, count,
						&config->num_threads,
						kmod_config_sync_info);
}

static ssize_t config_num_threads_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_show_int(test_dev, buf, config->num_threads);
}
static DEVICE_ATTR_RW(config_num_threads);

static ssize_t config_test_case_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_update_uint_range(test_dev, buf, count,
						 &config->test_case,
						 __TEST_KMOD_INVALID + 1,
						 __TEST_KMOD_MAX - 1);
}

static ssize_t config_test_case_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_show_uint(test_dev, buf, config->test_case);
}
static DEVICE_ATTR_RW(config_test_case);

static ssize_t test_result_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_show_int(test_dev, buf, config->test_result);
}
static DEVICE_ATTR_RW(test_result);

#define TEST_KMOD_DEV_ATTR(name)		&dev_attr_##name.attr

static struct attribute *test_dev_attrs[] = {
	TEST_KMOD_DEV_ATTR(trigger_config),
	TEST_KMOD_DEV_ATTR(config),
	TEST_KMOD_DEV_ATTR(reset),

	TEST_KMOD_DEV_ATTR(config_test_driver),
	TEST_KMOD_DEV_ATTR(config_test_fs),
	TEST_KMOD_DEV_ATTR(config_num_threads),
	TEST_KMOD_DEV_ATTR(config_test_case),
	TEST_KMOD_DEV_ATTR(test_result),

	NULL,
};

ATTRIBUTE_GROUPS(test_dev);
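
/*
 * ATTRIBUTE_GROUPS() above generates test_dev_groups, which is hooked up to
 * misc_dev->groups in alloc_test_dev_kmod() so the sysfs files are created
 * automatically when the misc device is registered.
 */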

static int kmod_config_init(struct kmod_test_device *test_dev)
{
	int ret;

	mutex_lock(&test_dev->config_mutex);
	ret = __kmod_config_init(test_dev);
	mutex_unlock(&test_dev->config_mutex);

	return ret;
}

static struct kmod_test_device *alloc_test_dev_kmod(int idx)
{
	int ret;
	struct kmod_test_device *test_dev;
	struct miscdevice *misc_dev;

	test_dev = vzalloc(sizeof(struct kmod_test_device));
	if (!test_dev)
		goto err_out;

	mutex_init(&test_dev->config_mutex);
	mutex_init(&test_dev->trigger_mutex);
	mutex_init(&test_dev->thread_mutex);

	init_completion(&test_dev->kthreads_done);

	ret = kmod_config_init(test_dev);
	if (ret < 0) {
		pr_err("Cannot alloc kmod_config_init()\n");
		goto err_out_free;
	}

	test_dev->dev_idx = idx;
	misc_dev = &test_dev->misc_dev;

	misc_dev->minor = MISC_DYNAMIC_MINOR;
	misc_dev->name = kasprintf(GFP_KERNEL, "test_kmod%d", idx);
	if (!misc_dev->name) {
		pr_err("Cannot alloc misc_dev->name\n");
		goto err_out_free_config;
	}
	misc_dev->groups = test_dev_groups;

	return test_dev;

err_out_free_config:
	free_test_dev_info(test_dev);
	kmod_config_free(test_dev);
err_out_free:
	vfree(test_dev);
	test_dev = NULL;
err_out:
	return NULL;
}

static void free_test_dev_kmod(struct kmod_test_device *test_dev)
{
	if (test_dev) {
		kfree_const(test_dev->misc_dev.name);
		test_dev->misc_dev.name = NULL;
		free_test_dev_info(test_dev);
		kmod_config_free(test_dev);
		vfree(test_dev);
		test_dev = NULL;
	}
}

static struct kmod_test_device *register_test_dev_kmod(void)
{
	struct kmod_test_device *test_dev = NULL;
	int ret;

	mutex_lock(&reg_dev_mutex);

	/* int should suffice for number of devices, test for wrap */
	if (num_test_devs + 1 == INT_MAX) {
		pr_err("reached limit of number of test devices\n");
		goto out;
	}

	test_dev = alloc_test_dev_kmod(num_test_devs);
	if (!test_dev)
		goto out;

	ret = misc_register(&test_dev->misc_dev);
	if (ret) {
		pr_err("could not register misc device: %d\n", ret);
		free_test_dev_kmod(test_dev);
		test_dev = NULL;
		goto out;
	}

	test_dev->dev = test_dev->misc_dev.this_device;
	list_add_tail(&test_dev->list, &reg_test_devs);
	dev_info(test_dev->dev, "interface ready\n");

	num_test_devs++;

out:
	mutex_unlock(&reg_dev_mutex);

	return test_dev;
}

static int __init test_kmod_init(void)
{
	struct kmod_test_device *test_dev;
	int ret;

	test_dev = register_test_dev_kmod();
	if (!test_dev) {
		pr_err("Cannot add first test kmod device\n");
		return -ENODEV;
	}

	/*
	 * With some work we might be able to gracefully enable
	 * testing with this driver built-in, but for now this seems
	 * rather risky. For those willing to try, have at it and
	 * enable the below. Good luck! If that works, try
	 * lowering the init level for more fun.
	 */
	if (force_init_test) {
		ret = trigger_config_run_type(test_dev,
					      TEST_KMOD_DRIVER, "tun");
		if (WARN_ON(ret))
			return ret;
		ret = trigger_config_run_type(test_dev,
					      TEST_KMOD_FS_TYPE, "btrfs");
		if (WARN_ON(ret))
			return ret;
	}

	return 0;
}
late_initcall(test_kmod_init);

static
void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
{
	mutex_lock(&test_dev->trigger_mutex);
	mutex_lock(&test_dev->config_mutex);

	test_dev_kmod_stop_tests(test_dev);

	dev_info(test_dev->dev, "removing interface\n");
	misc_deregister(&test_dev->misc_dev);

	mutex_unlock(&test_dev->config_mutex);
	mutex_unlock(&test_dev->trigger_mutex);

	free_test_dev_kmod(test_dev);
}

static void __exit test_kmod_exit(void)
{
	struct kmod_test_device *test_dev, *tmp;

	mutex_lock(&reg_dev_mutex);
	list_for_each_entry_safe(test_dev, tmp, &reg_test_devs, list) {
		list_del(&test_dev->list);
		unregister_test_dev_kmod(test_dev);
	}
	mutex_unlock(&reg_dev_mutex);
}
module_exit(test_kmod_exit);

MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
MODULE_DESCRIPTION("kmod stress test driver");
MODULE_LICENSE("GPL");