/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
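
/*
 * Illustrative sketch (not part of this header): each shadow byte covers
 * 2^KASAN_SHADOW_SCALE_SHIFT bytes of memory (8 bytes for the Generic mode),
 * so the inverse of the mapping above would look roughly like:
 *
 *	static inline void *kasan_shadow_to_mem(const void *shadow_addr)
 *	{
 *		return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
 *			<< KASAN_SHADOW_SCALE_SHIFT);
 *	}
 */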

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
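
/*
 * Hedged usage sketch (hypothetical caller): code that deliberately reads
 * memory that may be poisoned, e.g. a debugging dumper, can suppress KASAN
 * reports for the current task around the access:
 *
 *	kasan_disable_current();
 *	... touch possibly-poisoned memory ...
 *	kasan_enable_current();
 */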

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
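
/*
 * Hedged sketch of the intended pairing (hypothetical slab-internal caller
 * that needs to write bookkeeping data into a fresh object):
 *
 *	kasan_unpoison_new_object(cache, object);
 *	... initialize allocator metadata stored inside the object ...
 *	kasan_poison_new_object(cache, object);
 */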

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is set
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init,
						bool still_accessible)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible);
	return false;
}
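
/*
 * Hedged sketch of a slab free path (hypothetical, modelled on how the slab
 * allocator is described to use these hooks above; not the actual SLUB code):
 *
 *	if (kasan_slab_pre_free(s, object))
 *		return;			// double-free/invalid-free, bail out
 *	if (kasan_slab_free(s, object, slab_want_init_on_free(s), false))
 *		return;			// KASAN quarantined the object
 *	... return the object to the freelist ...
 *
 * slab_want_init_on_free() is an allocator-internal helper used here only to
 * illustrate the @init argument.
 */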

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}
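
/*
 * Hedged sketch of the allocation side (hypothetical, not the actual slab
 * code): kasan_slab_alloc() unpoisons (and, for the tag-based modes, tags) a
 * freshly allocated object, and kmalloc caches additionally call
 * kasan_kmalloc() so that only the requested size stays accessible:
 *
 *	object = kasan_slab_alloc(s, object, flags,
 *				  slab_want_init_on_alloc(flags, s));
 *	if (is_kmalloc_cache)
 *		object = kasan_kmalloc(s, object, orig_size, flags);
 *
 * is_kmalloc_cache, orig_size, and slab_want_init_on_alloc() stand in for
 * allocator-internal details and are used here only for illustration.
 */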

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
						       unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							 unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}
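
/*
 * Hedged sketch (hypothetical page-caching subsystem, per the kernel-doc
 * above): poison pages when stashing them, unpoison before reuse.
 *
 *	// stashing a freed allocation instead of returning it to page_alloc:
 *	if (!kasan_mempool_poison_pages(page, order))
 *		return;			// buggy free detected, do not cache
 *	... add page to the internal pool ...
 *
 *	// handing a cached allocation back out:
 *	kasan_mempool_unpoison_pages(page, order);
 */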

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							  size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}
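
/*
 * Hedged sketch (hypothetical mempool-style cache of slab objects, per the
 * kernel-doc above):
 *
 *	// caching a freed element instead of kfree()-ing it:
 *	if (!kasan_mempool_poison_object(element))
 *		return;			// double-free/invalid-free detected
 *	... add element to the internal pool ...
 *
 *	// reusing a cached element:
 *	kasan_mempool_unpoison_object(element, element_size);
 *
 * element and element_size are placeholders for the caller's own state.
 */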

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
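
/*
 * Hedged usage sketch (hypothetical caller): probe whether an address is
 * accessible before touching memory whose state is unknown, letting KASAN
 * report a bug if it is not:
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;		// ptr points to poisoned/freed memory
 *	... safe to dereference ptr ...
 */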

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}
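
/*
 * Hedged sketch (hypothetical vmalloc-side caller, not the actual vmalloc
 * code): unpoison a mapped area on allocation and poison it again on free,
 * passing flags that describe the mapping; the returned pointer may carry a
 * new tag in the tag-based modes:
 *
 *	addr = kasan_unpoison_vmalloc(addr, size,
 *				      KASAN_VMALLOC_VM_ALLOC |
 *				      KASAN_VMALLOC_PROT_NORMAL);
 *	...
 *	kasan_poison_vmalloc(addr, size);
 *
 * The flag combination above is illustrative only; the meaning of each
 * kasan_vmalloc_flags_t bit is defined by the KASAN implementation.
 */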

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* _LINUX_KASAN_H */