// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core hardware tag-based KASAN code.
 *
 * Copyright (c) 2020 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"

enum kasan_arg {
	KASAN_ARG_DEFAULT,
	KASAN_ARG_OFF,
	KASAN_ARG_ON,
};

enum kasan_arg_mode {
	KASAN_ARG_MODE_DEFAULT,
	KASAN_ARG_MODE_SYNC,
	KASAN_ARG_MODE_ASYNC,
	KASAN_ARG_MODE_ASYMM,
};

enum kasan_arg_vmalloc {
	KASAN_ARG_VMALLOC_DEFAULT,
	KASAN_ARG_VMALLOC_OFF,
	KASAN_ARG_VMALLOC_ON,
};

static enum kasan_arg kasan_arg __ro_after_init;
static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata;

/*
 * Whether KASAN is enabled at all.
 * The value remains false until KASAN is initialized by kasan_init_hw_tags().
 */
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL(kasan_flag_enabled);

/*
 * Whether the selected mode is synchronous, asynchronous, or asymmetric.
 * Defaults to KASAN_MODE_SYNC.
 */
enum kasan_mode kasan_mode __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_mode);

/* Whether to enable vmalloc tagging. */
#ifdef CONFIG_KASAN_VMALLOC
DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
#else
DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc);
#endif
EXPORT_SYMBOL_GPL(kasan_flag_vmalloc);

#define PAGE_ALLOC_SAMPLE_DEFAULT	1
#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT	3

/*
 * Sampling interval of page_alloc allocation (un)poisoning.
 * Defaults to no sampling.
 */
unsigned long kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;

/*
 * Minimum order of page_alloc allocations to be affected by sampling.
 * The default value is chosen to match both
 * PAGE_ALLOC_COSTLY_ORDER and SKB_FRAG_PAGE_ORDER.
 */
unsigned int kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;

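/*
 * Per-CPU countdown of page_alloc allocations left to skip before the next
 * sampled (poisoned/unpoisoned) one; consumed by kasan_sample_page_alloc().
 */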
DEFINE_PER_CPU(long, kasan_page_alloc_skip);

/* kasan=off/on */
static int __init early_kasan_flag(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "off"))
		kasan_arg = KASAN_ARG_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg = KASAN_ARG_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan", early_kasan_flag);

/* kasan.mode=sync/async/asymm */
static int __init early_kasan_mode(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "sync"))
		kasan_arg_mode = KASAN_ARG_MODE_SYNC;
	else if (!strcmp(arg, "async"))
		kasan_arg_mode = KASAN_ARG_MODE_ASYNC;
	else if (!strcmp(arg, "asymm"))
		kasan_arg_mode = KASAN_ARG_MODE_ASYMM;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.mode", early_kasan_mode);

/* kasan.vmalloc=off/on */
static int __init early_kasan_flag_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		return 0;

	if (!strcmp(arg, "off"))
		kasan_arg_vmalloc = KASAN_ARG_VMALLOC_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg_vmalloc = KASAN_ARG_VMALLOC_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.vmalloc", early_kasan_flag_vmalloc);

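/* Name of the selected mode, used in the boot-time status message below. */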
static inline const char *kasan_mode_info(void)
{
	if (kasan_mode == KASAN_MODE_ASYNC)
		return "async";
	else if (kasan_mode == KASAN_MODE_ASYMM)
		return "asymm";
	else
		return "sync";
}

/* kasan.page_alloc.sample=<sampling interval> */
static int __init early_kasan_flag_page_alloc_sample(char *arg)
{
	int rv;

	if (!arg)
		return -EINVAL;

	rv = kstrtoul(arg, 0, &kasan_page_alloc_sample);
	if (rv)
		return rv;

	if (!kasan_page_alloc_sample || kasan_page_alloc_sample > LONG_MAX) {
		kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
		return -EINVAL;
	}

	return 0;
}
early_param("kasan.page_alloc.sample", early_kasan_flag_page_alloc_sample);

/* kasan.page_alloc.sample.order=<minimum page order> */
static int __init early_kasan_flag_page_alloc_sample_order(char *arg)
{
	int rv;

	if (!arg)
		return -EINVAL;

	rv = kstrtouint(arg, 0, &kasan_page_alloc_sample_order);
	if (rv)
		return rv;

	if (kasan_page_alloc_sample_order > INT_MAX) {
		kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
		return -EINVAL;
	}

	return 0;
}
early_param("kasan.page_alloc.sample.order", early_kasan_flag_page_alloc_sample_order);

/*
 * kasan_init_hw_tags_cpu() is called for each CPU.
 * Not marked as __init as a CPU can be hot-plugged after boot.
 */
void kasan_init_hw_tags_cpu(void)
{
	/*
	 * There's no need to check that the hardware is MTE-capable here,
	 * as this function is only called for MTE-capable hardware.
	 */

	/*
	 * If KASAN is disabled via command line, don't initialize it.
	 * When this function is called, kasan_flag_enabled is not yet
	 * set by kasan_init_hw_tags(). Thus, check kasan_arg instead.
	 */
	if (kasan_arg == KASAN_ARG_OFF)
		return;

	/*
	 * Enable async or asymm modes only when explicitly requested
	 * through the command line.
	 */
	kasan_enable_hw_tags();
}

/* kasan_init_hw_tags() is called once on boot CPU. */
void __init kasan_init_hw_tags(void)
{
	/* If hardware doesn't support MTE, don't initialize KASAN. */
	if (!system_supports_mte())
		return;

	/* If KASAN is disabled via command line, don't initialize it. */
	if (kasan_arg == KASAN_ARG_OFF)
		return;

	switch (kasan_arg_mode) {
	case KASAN_ARG_MODE_DEFAULT:
		/* Default is specified by kasan_mode definition. */
		break;
	case KASAN_ARG_MODE_SYNC:
		kasan_mode = KASAN_MODE_SYNC;
		break;
	case KASAN_ARG_MODE_ASYNC:
		kasan_mode = KASAN_MODE_ASYNC;
		break;
	case KASAN_ARG_MODE_ASYMM:
		kasan_mode = KASAN_MODE_ASYMM;
		break;
	}

	switch (kasan_arg_vmalloc) {
	case KASAN_ARG_VMALLOC_DEFAULT:
		/* Default is specified by kasan_flag_vmalloc definition. */
		break;
	case KASAN_ARG_VMALLOC_OFF:
		static_branch_disable(&kasan_flag_vmalloc);
		break;
	case KASAN_ARG_VMALLOC_ON:
		static_branch_enable(&kasan_flag_vmalloc);
		break;
	}

	kasan_init_tags();

	/* KASAN is now initialized, enable it. */
	static_branch_enable(&kasan_flag_enabled);

	pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s)\n",
		kasan_mode_info(),
		kasan_vmalloc_enabled() ? "on" : "off",
		kasan_stack_collection_enabled() ? "on" : "off");
}

#ifdef CONFIG_KASAN_VMALLOC

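/* Set the KASAN tag of every page backing the VM_ALLOC mapping at addr. */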
static void unpoison_vmalloc_pages(const void *addr, u8 tag)
{
	struct vm_struct *area;
	int i;

	/*
	 * As hardware tag-based KASAN only tags VM_ALLOC vmalloc allocations
	 * (see the comment in __kasan_unpoison_vmalloc), all of the pages
	 * should belong to a single area.
	 */
	area = find_vm_area((void *)addr);
	if (WARN_ON(!area))
		return;

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page = area->pages[i];

		page_kasan_tag_set(page, tag);
	}
}

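/* Zero-initialize the pages backing the [start, start + size) vmalloc region. */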
static void init_vmalloc_pages(const void *start, unsigned long size)
{
	const void *addr;

	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(addr);

		clear_highpage_kasan_tagged(page);
	}
}

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
				kasan_vmalloc_flags_t flags)
{
	u8 tag;
	unsigned long redzone_start, redzone_size;

	if (!kasan_vmalloc_enabled()) {
		if (flags & KASAN_VMALLOC_INIT)
			init_vmalloc_pages(start, size);
		return (void *)start;
	}

	/*
	 * Don't tag non-VM_ALLOC mappings, as:
	 *
	 * 1. Unlike the software KASAN modes, hardware tag-based KASAN only
	 *    supports tagging physical memory. Therefore, it can only tag a
	 *    single mapping of normal physical pages.
	 * 2. Hardware tag-based KASAN can only tag memory mapped with special
	 *    mapping protection bits, see arch_vmap_pgprot_tagged().
	 *    As non-VM_ALLOC mappings can be mapped outside of vmalloc code,
	 *    providing these bits would require tracking all non-VM_ALLOC
	 *    mappers.
	 *
	 * Thus, for VM_ALLOC mappings, hardware tag-based KASAN only tags
	 * the first virtual mapping, which is created by vmalloc().
	 * Tagging the page_alloc memory backing that vmalloc() allocation is
	 * skipped, see ___GFP_SKIP_KASAN.
	 *
	 * For non-VM_ALLOC allocations, page_alloc memory is tagged as usual.
	 */
	if (!(flags & KASAN_VMALLOC_VM_ALLOC)) {
		WARN_ON(flags & KASAN_VMALLOC_INIT);
		return (void *)start;
	}

	/*
	 * Don't tag executable memory.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (!(flags & KASAN_VMALLOC_PROT_NORMAL)) {
		WARN_ON(flags & KASAN_VMALLOC_INIT);
		return (void *)start;
	}

	tag = kasan_random_tag();
	start = set_tag(start, tag);

	/* Unpoison and initialize memory up to size. */
	kasan_unpoison(start, size, flags & KASAN_VMALLOC_INIT);

	/*
	 * Explicitly poison and initialize the in-page vmalloc() redzone.
	 * Unlike software KASAN modes, hardware tag-based KASAN doesn't
	 * unpoison memory when populating shadow for vmalloc() space.
	 */
	redzone_start = round_up((unsigned long)start + size,
				 KASAN_GRANULE_SIZE);
	redzone_size = round_up(redzone_start, PAGE_SIZE) - redzone_start;
	kasan_poison((void *)redzone_start, redzone_size, KASAN_TAG_INVALID,
		     flags & KASAN_VMALLOC_INIT);

	/*
	 * Set per-page tag flags to allow accessing physical memory for the
	 * vmalloc() mapping through page_address(vmalloc_to_page()).
	 */
	unpoison_vmalloc_pages(start, tag);

	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	/*
	 * No tagging here.
	 * The physical pages backing the vmalloc() allocation are poisoned
	 * through the usual page_alloc paths.
	 */
}

#endif

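/*
 * Enable hardware (MTE) tag checking in the mode requested via kasan.mode=,
 * defaulting to synchronous checks.
 */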
void kasan_enable_hw_tags(void)
{
	if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
		hw_enable_tag_checks_async();
	else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM)
		hw_enable_tag_checks_asymm();
	else
		hw_enable_tag_checks_sync();
}

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

EXPORT_SYMBOL_GPL(kasan_enable_hw_tags);

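/* Force an asynchronous tag fault; used by the KASAN KUnit tests. */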
void kasan_force_async_fault(void)
{
	hw_force_async_tag_fault();
}
EXPORT_SYMBOL_GPL(kasan_force_async_fault);

#endif