// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

struct slab *kasan_addr_to_slab(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_slab(addr);
	return NULL;
}

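/*
 * Capture the current task's stack trace and store it in the stack depot.
 * The returned depot handle can later be attached to allocation or free
 * tracking information via kasan_set_track().
 */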
depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}

void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
	u32 cpu = raw_smp_processor_id();
	u64 ts_nsec = local_clock();

	track->cpu = cpu;
	track->timestamp = ts_nsec >> 9;
#endif /* CONFIG_KASAN_EXTRA_INFO */
	track->pid = current->pid;
	track->stack = stack;
}

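/* Capture the current stack trace and record it in @track. */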
void kasan_save_track(struct kasan_track *track, gfp_t flags)
{
	depot_stack_handle_t stack;

	stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
	kasan_set_track(track, stack);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

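/*
 * Unpoison a contiguous memory range. KFENCE-managed addresses are left
 * untouched, since KFENCE tracks the state of its objects on its own.
 */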
void __kasan_unpoison_range(const void *address, size_t size)
{
	if (is_kfence_address(address))
		return;

	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

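/*
 * Tag and unpoison the memory backing a page allocation.
 *
 * Returns false when nothing was done: for highmem pages and for
 * allocations skipped by page allocation sampling (see
 * kasan_sample_page_alloc()). Otherwise, one tag (random in the tag-based
 * modes) is assigned to every page of the allocation and the memory is
 * unpoisoned.
 */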
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return false;

	if (!kasan_sample_page_alloc(order))
		return false;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);

	return true;
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
					const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/*
	 * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
	 * assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	/* Initialize per-object metadata if it is present. */
	if (kasan_requires_meta())
		kasan_init_object_meta(cache, object);

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

/* Returns true when freeing the object is not safe. */
static bool check_slab_allocation(struct kmem_cache *cache, void *object,
				  unsigned long ip)
{
	void *tagged_object = object;

	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	return false;
}

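/*
 * Poison a slab object on free and, when stack collection is enabled,
 * record the free stack trace. Objects that may legally still be accessed
 * (still_accessible, e.g. for SLAB_TYPESAFE_BY_RCU caches) are left
 * unpoisoned.
 */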
static inline void poison_slab_object(struct kmem_cache *cache, void *object,
				      bool init, bool still_accessible)
{
	void *tagged_object = object;

	object = kasan_reset_tag(object);

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(still_accessible))
		return;

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);
}

bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
				unsigned long ip)
{
	if (!kasan_arch_is_ready() || is_kfence_address(object))
		return false;
	return check_slab_allocation(cache, object, ip);
}

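/*
 * Rough free-path ordering as seen from the slab allocator (a sketch of
 * the callers' logic, not verbatim slab code):
 *
 *	if (kasan_slab_pre_free(cache, object))
 *		return;    (invalid or double free: skip the free)
 *	...
 *	if (kasan_slab_free(cache, object, init, still_accessible))
 *		return;    (quarantined: keep the object off the freelist)
 *	(otherwise the object goes onto the freelist as usual)
 */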
bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
		       bool still_accessible)
{
	if (!kasan_arch_is_ready() || is_kfence_address(object))
		return false;

	poison_slab_object(cache, object, init, still_accessible);

	/*
	 * If the object is put into quarantine, do not let slab put the object
	 * onto the freelist for now. The object's metadata is kept until the
	 * object gets evicted from quarantine.
	 */
	if (kasan_quarantine_put(cache, object))
		return true;

	/*
	 * Note: Keep per-object metadata to allow KASAN to print stack traces
	 * for use-after-free-before-realloc bugs.
	 */

	/* Let slab put the object onto the freelist. */
	return false;
}

static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
	if (!kasan_arch_is_ready())
		return false;

	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	check_page_allocation(ptr, ip);

	/* The object will be poisoned by kasan_poison_pages(). */
}

static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
					gfp_t flags, bool init)
{
	/*
	 * Unpoison the whole object. For kmalloc() allocations,
	 * poison_kmalloc_redzone() will do precise poisoning.
	 */
	kasan_unpoison(object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, object, flags);
}

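/*
 * Hook for the slab allocation path: assign a pointer tag (tag-based
 * modes), unpoison the object, and save alloc tracking info for
 * non-kmalloc() caches. kmalloc() caches get their precise redzone and
 * alloc info later, in __kasan_kmalloc().
 */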
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(cache, tagged_object, flags, init);

	return tagged_object;
}

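/*
 * In-object layout after kmalloc(size) from a cache with object_size bytes
 * per object (illustrative):
 *
 *	object            object + size              object + object_size
 *	|<---- usable ---->|<-- redzone (KASAN_SLAB_REDZONE) ----------->|
 *
 * The aligned part of the redzone starts at round_up(object + size,
 * KASAN_GRANULE_SIZE); in the generic mode the unaligned tail of the last
 * usable granule is additionally poisoned byte-precisely by
 * kasan_poison_last_granule().
 */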
static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			   KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, (void *)object, flags);
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/* The object has already been unpoisoned by kasan_slab_alloc(). */
	poison_kmalloc_redzone(cache, object, size, flags);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}
EXPORT_SYMBOL(__kasan_kmalloc);

static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);
}

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/* The object has already been unpoisoned by kasan_unpoison_pages(). */
	poison_kmalloc_large_redzone(ptr, size, flags);

	/* Keep the tag that was set by alloc_pages(). */
	return (void *)ptr;
}

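/*
 * Typically reached via kasan_krealloc() when krealloc() reuses the
 * existing allocation: unpoison the object for its new size and re-apply
 * the kmalloc redzone. Handles both slab objects and large
 * (page_alloc-backed) kmalloc allocations; ZERO_SIZE_PTR and KFENCE
 * objects are returned untouched.
 */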
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		poison_kmalloc_large_redzone(object, size, flags);
	else
		poison_kmalloc_redzone(slab->slab_cache, object, size, flags);

	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip)
{
	unsigned long *ptr;

	if (unlikely(PageHighMem(page)))
		return true;

	/* Bail out if allocation was excluded due to sampling. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    page_kasan_tag(page) == KASAN_TAG_KERNEL)
		return true;

	ptr = page_address(page);

	if (check_page_allocation(ptr, ip))
		return false;

	kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);

	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip)
{
	__kasan_unpoison_pages(page, order, false);
}

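/*
 * Poison a slab or large-kmalloc object while it is parked in a mempool's
 * reserve. The expected pairing (a sketch, not verbatim mempool code):
 *
 *	kasan_mempool_poison_object(element);          (element enters the pool)
 *	...
 *	kasan_mempool_unpoison_object(element, size);  (element is handed out)
 *
 * Returns false when the pointer fails the free-time sanity checks (an
 * invalid or double free was reported); such an element should not be
 * reused.
 */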
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio = virt_to_folio(ptr);
	struct slab *slab;

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc. Thus, the folio might not be a slab.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;
	}

	if (is_kfence_address(ptr) || !kasan_arch_is_ready())
		return true;

	slab = folio_slab(folio);

	if (check_slab_allocation(slab->slab_cache, ptr, ip))
		return false;

	poison_slab_object(slab->slab_cache, ptr, false, false);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
{
	struct slab *slab;
	gfp_t flags = 0; /* Might be executing under a lock. */

	slab = virt_to_slab(ptr);

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc.
	 */
	if (unlikely(!slab)) {
		kasan_unpoison(ptr, size, false);
		poison_kmalloc_large_redzone(ptr, size, flags);
		return;
	}

	if (is_kfence_address(ptr))
		return;

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(slab->slab_cache, ptr, flags, false);

	/* Poison the redzone and save alloc info for kmalloc() allocations. */
	if (is_kmalloc_cache(slab->slab_cache))
		poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}

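/*
 * Check whether a single byte is valid to access; if not, report the bad
 * access at @ip and return false.
 */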
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report(address, 1, false, ip);
		return false;
	}
	return true;
}