Lines matching "object" in the KASAN slab allocation/free hooks
156 void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object) in __kasan_unpoison_new_object() argument
158 kasan_unpoison(object, cache->object_size, false); in __kasan_unpoison_new_object()
161 void __kasan_poison_new_object(struct kmem_cache *cache, void *object) in __kasan_poison_new_object() argument
163 kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE), in __kasan_poison_new_object()
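
Both hooks above work on whole KASAN granules: __kasan_poison_new_object() rounds cache->object_size up to KASAN_GRANULE_SIZE before poisoning. Below is a minimal userspace sketch of just that rounding, assuming an 8-byte granule and a simplified round_up() stand-in for the kernel macro.

```c
#include <stdio.h>
#include <stddef.h>

#define KASAN_GRANULE_SIZE 8				/* assumed granule size */
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))	/* simplified stand-in */

int main(void)
{
	size_t object_size = 52;	/* hypothetical cache->object_size */

	/* Poisoning covers whole granules, so the size is rounded up. */
	printf("object_size=%zu, poisoned bytes=%zu\n",
	       object_size,
	       (size_t)round_up(object_size, KASAN_GRANULE_SIZE));
	return 0;
}
```
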
168 * This function assigns a tag to an object considering the following:
170 * object somewhere (e.g. in the object itself). We preassign a tag for
171 * each object in caches with constructors during slab creation and reuse
172 * the same tag each time a particular object is allocated.
178 const void *object, bool init) in assign_tag() argument
185 * set, assign a tag when the object is being allocated (init == false). in assign_tag()
195 return init ? kasan_random_tag() : get_tag(object); in assign_tag()
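
The visible branch of assign_tag() covers caches with a constructor or SLAB_TYPESAFE_BY_RCU: a random tag is generated once when the slab object is set up (init == true), and later allocations reuse the tag already carried by the pointer (init == false). Here is a rough sketch of that decision and the tag plumbing it relies on, assuming the software tag-based convention of keeping the tag in the pointer's top byte; set_tag(), get_tag() and random_tag() below are illustrative stand-ins, not the kernel helpers.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumption: the tag lives in the pointer's top byte, as in the software
 * tag-based mode. Addresses are modeled as plain 64-bit values here. */
#define TAG_SHIFT 56

static uint64_t set_tag(uint64_t addr, uint8_t tag)
{
	return (addr & ~((uint64_t)0xff << TAG_SHIFT)) |
	       ((uint64_t)tag << TAG_SHIFT);
}

static uint8_t get_tag(uint64_t addr)
{
	return (uint8_t)(addr >> TAG_SHIFT);
}

static uint8_t random_tag(void)
{
	return (uint8_t)(rand() & 0xff);
}

/*
 * Mirrors the visible branch of assign_tag(): a fresh random tag at
 * slab-init time (init == true), and the tag already embedded in the
 * pointer on later allocations (init == false).
 */
static uint8_t assign_tag_sketch(uint64_t object, int init)
{
	return init ? random_tag() : get_tag(object);
}

int main(void)
{
	uint64_t object = 0xffff000012345000ull;

	/* Slab init: preassign a tag and embed it in the pointer. */
	object = set_tag(object, assign_tag_sketch(object, 1));

	/* Allocation: reuse the preassigned tag. */
	printf("reused tag = 0x%02x\n", assign_tag_sketch(object, 0));
	return 0;
}
```
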
199 const void *object) in __kasan_init_slab_obj() argument
201 /* Initialize per-object metadata if it is present. */ in __kasan_init_slab_obj()
203 kasan_init_object_meta(cache, object); in __kasan_init_slab_obj()
206 object = set_tag(object, assign_tag(cache, object, true)); in __kasan_init_slab_obj()
208 return (void *)object; in __kasan_init_slab_obj()
211 /* Returns true when freeing the object is not safe. */
212 static bool check_slab_allocation(struct kmem_cache *cache, void *object, in check_slab_allocation() argument
215 void *tagged_object = object; in check_slab_allocation()
217 object = kasan_reset_tag(object); in check_slab_allocation()
219 if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) { in check_slab_allocation()
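
check_slab_allocation() treats a free as unsafe when the passed pointer is not the start of an object: nearest_obj() resolves which object the pointer falls inside, and any mismatch means an invalid free. A self-contained sketch of that comparison follows, with a hypothetical nearest_obj_sketch() and a fixed object size standing in for the real cache geometry.

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for nearest_obj(): snap a pointer back to the start
 * of the object it falls inside, given the slab base and object size.
 */
static void *nearest_obj_sketch(void *slab_base, size_t obj_size, void *ptr)
{
	size_t off = (size_t)((char *)ptr - (char *)slab_base);

	return (char *)slab_base + (off / obj_size) * obj_size;
}

int main(void)
{
	static char slab[4 * 64];		/* pretend slab of 64-byte objects */
	void *good = slab + 2 * 64;		/* points at an object start */
	void *bad  = slab + 2 * 64 + 8;		/* points into the middle */

	printf("good: %s\n", nearest_obj_sketch(slab, 64, good) == good ?
	       "free allowed" : "invalid-free report");
	printf("bad:  %s\n", nearest_obj_sketch(slab, 64, bad) == bad ?
	       "free allowed" : "invalid-free report");
	return 0;
}
```
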
232 static inline void poison_slab_object(struct kmem_cache *cache, void *object, in poison_slab_object() argument
235 void *tagged_object = object; in poison_slab_object()
237 object = kasan_reset_tag(object); in poison_slab_object()
243 kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE), in poison_slab_object()
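
poison_slab_object() marks the freed object's memory inaccessible, again rounded up to whole granules. The toy byte-per-granule shadow model below illustrates that bookkeeping; the 0xFB "freed" marker and pool layout are illustrative values, not the kernel's kasan_poison()/kasan_unpoison().

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define GRANULE 8
#define POOL_SIZE 256
#define FREED_MARK 0xFB			/* illustrative "freed object" value */

static uint8_t shadow[POOL_SIZE / GRANULE];	/* one shadow byte per granule */

static void poison_range(size_t off, size_t size, uint8_t value)
{
	memset(&shadow[off / GRANULE], value, size / GRANULE);
}

static void unpoison_range(size_t off, size_t size)
{
	memset(&shadow[off / GRANULE], 0, size / GRANULE);	/* 0 = accessible */
}

int main(void)
{
	/* "Allocate" a 64-byte object at offset 64, then "free" it. */
	unpoison_range(64, 64);
	poison_range(64, 64, FREED_MARK);

	/* Granules 8..15 (bytes 64..127) now carry the freed marker. */
	printf("shadow[8] = 0x%02x\n", shadow[8]);
	return 0;
}
```
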
250 bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object, in __kasan_slab_pre_free() argument
253 if (!kasan_arch_is_ready() || is_kfence_address(object)) in __kasan_slab_pre_free()
255 return check_slab_allocation(cache, object, ip); in __kasan_slab_pre_free()
258 bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init, in __kasan_slab_free() argument
261 if (!kasan_arch_is_ready() || is_kfence_address(object)) in __kasan_slab_free()
264 poison_slab_object(cache, object, init, still_accessible); in __kasan_slab_free()
267 * If the object is put into quarantine, do not let slab put the object in __kasan_slab_free()
268 * onto the freelist for now. The object's metadata is kept until the in __kasan_slab_free()
269 * object gets evicted from quarantine. in __kasan_slab_free()
271 if (kasan_quarantine_put(cache, object)) in __kasan_slab_free()
275 * Note: Keep per-object metadata to allow KASAN to print stack traces for in __kasan_slab_free()
279 /* Let slab put the object onto the freelist. */ in __kasan_slab_free()
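
Per the comment above, a freed object may be parked in quarantine instead of returning to the slab freelist, so use-after-free accesses keep hitting poisoned memory for a while. Below is a minimal FIFO quarantine sketch with an arbitrary slot cap; the kernel's kasan_quarantine_put() is size-limited and evicted in batches, which this does not model.

```c
#include <stdio.h>
#include <stdlib.h>

#define QUARANTINE_SLOTS 4		/* arbitrary small cap for illustration */

static void *quarantine[QUARANTINE_SLOTS];
static int q_head, q_count;

/* Defer the real free; when full, evict (actually free) the oldest entry. */
static void quarantine_put(void *obj)
{
	if (q_count == QUARANTINE_SLOTS) {
		free(quarantine[q_head]);
		q_head = (q_head + 1) % QUARANTINE_SLOTS;
		q_count--;
	}
	quarantine[(q_head + q_count) % QUARANTINE_SLOTS] = obj;
	q_count++;
}

int main(void)
{
	/* Six frees into a four-slot quarantine: the two oldest get evicted. */
	for (int i = 0; i < 6; i++)
		quarantine_put(malloc(32));

	printf("%d objects still quarantined\n", q_count);

	/* Drain the rest so the sketch frees everything it allocated. */
	while (q_count) {
		free(quarantine[q_head]);
		q_head = (q_head + 1) % QUARANTINE_SLOTS;
		q_count--;
	}
	return 0;
}
```
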
305 /* The object will be poisoned by kasan_poison_pages(). */ in __kasan_kfree_large()
308 static inline void unpoison_slab_object(struct kmem_cache *cache, void *object, in unpoison_slab_object() argument
312 * Unpoison the whole object. For kmalloc() allocations, in unpoison_slab_object()
315 kasan_unpoison(object, cache->object_size, init); in unpoison_slab_object()
319 kasan_save_alloc_info(cache, object, flags); in unpoison_slab_object()
323 void *object, gfp_t flags, bool init) in __kasan_slab_alloc() argument
331 if (unlikely(object == NULL)) in __kasan_slab_alloc()
334 if (is_kfence_address(object)) in __kasan_slab_alloc()
335 return (void *)object; in __kasan_slab_alloc()
341 tag = assign_tag(cache, object, false); in __kasan_slab_alloc()
342 tagged_object = set_tag(object, tag); in __kasan_slab_alloc()
344 /* Unpoison the object and save alloc info for non-kmalloc() allocations. */ in __kasan_slab_alloc()
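
Ordering matters in the tag-based modes: __kasan_slab_alloc() retags the pointer first and then unpoisons through the tagged pointer, so the tag recorded for the memory matches the tag of the pointer handed back to the caller. The toy model below shows that match check, reusing the top-byte convention from the earlier sketch; tag_store[] is a stand-in for memory tags or tag-based shadow, not a kernel structure.

```c
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define GRANULE 16
#define POOL 256
#define TAG_SHIFT 56

static uint8_t tag_store[POOL / GRANULE];	/* stand-in for per-granule memory tags */

static uint8_t ptr_tag(uint64_t p) { return (uint8_t)(p >> TAG_SHIFT); }

static uint64_t tag_ptr(uint64_t p, uint8_t t)
{
	return (p & ~((uint64_t)0xff << TAG_SHIFT)) | ((uint64_t)t << TAG_SHIFT);
}

/* "Unpoison": record the pointer's tag for every granule of the object. */
static void unpoison(uint64_t tagged, size_t off, size_t size)
{
	for (size_t i = off / GRANULE; i < (off + size) / GRANULE; i++)
		tag_store[i] = ptr_tag(tagged);
}

/* An access check: the pointer's tag must match the memory's tag. */
static bool access_ok(uint64_t tagged, size_t off)
{
	return ptr_tag(tagged) == tag_store[off / GRANULE];
}

int main(void)
{
	uint64_t obj = tag_ptr(0x1000, 0x2a);	/* allocation handed out with tag 0x2a */

	unpoison(obj, 0, 64);
	printf("matching pointer ok: %d\n", access_ok(obj, 32));
	printf("stale pointer ok:    %d\n", access_ok(tag_ptr(0x1000, 0x17), 32));
	return 0;
}
```
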
351 const void *object, size_t size, gfp_t flags) in poison_kmalloc_redzone() argument
358 * Partially poison the last object granule to cover the unaligned in poison_kmalloc_redzone()
362 kasan_poison_last_granule((void *)object, size); in poison_kmalloc_redzone()
365 redzone_start = round_up((unsigned long)(object + size), in poison_kmalloc_redzone()
367 redzone_end = round_up((unsigned long)(object + cache->object_size), in poison_kmalloc_redzone()
377 kasan_save_alloc_info(cache, (void *)object, flags); in poison_kmalloc_redzone()
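
poison_kmalloc_redzone() derives the redzone from the requested size: the partially used last granule gets precise poisoning, and the span from the granule-aligned end of the request to the granule-aligned end of cache->object_size becomes the redzone. The same arithmetic as a standalone sketch, with made-up sizes and the simplified round_up() from the earlier sketch:

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define KASAN_GRANULE_SIZE 8
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	uintptr_t object = 0x1000;	/* pretend object address */
	size_t size = 20;		/* requested kmalloc() size */
	size_t object_size = 64;	/* pretend cache->object_size */

	uintptr_t redzone_start = round_up(object + size, KASAN_GRANULE_SIZE);
	uintptr_t redzone_end = round_up(object + object_size, KASAN_GRANULE_SIZE);

	/* Bytes 20..23 of the last usable granule get precise (partial)
	 * poisoning; bytes 24..63 become the kmalloc redzone. */
	printf("partial granule tail at +%zu, redzone [+%zu, +%zu)\n",
	       size, (size_t)(redzone_start - object),
	       (size_t)(redzone_end - object));
	return 0;
}
```
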
381 void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object, in __kasan_kmalloc() argument
387 if (unlikely(object == NULL)) in __kasan_kmalloc()
390 if (is_kfence_address(object)) in __kasan_kmalloc()
391 return (void *)object; in __kasan_kmalloc()
393 /* The object has already been unpoisoned by kasan_slab_alloc(). */ in __kasan_kmalloc()
394 poison_kmalloc_redzone(cache, object, size, flags); in __kasan_kmalloc()
397 return (void *)object; in __kasan_kmalloc()
409 * Partially poison the last object granule to cover the unaligned in poison_kmalloc_large_redzone()
431 /* The object has already been unpoisoned by kasan_unpoison_pages(). */ in __kasan_kmalloc_large()
438 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags) in __kasan_krealloc() argument
445 if (unlikely(object == ZERO_SIZE_PTR)) in __kasan_krealloc()
446 return (void *)object; in __kasan_krealloc()
448 if (is_kfence_address(object)) in __kasan_krealloc()
449 return (void *)object; in __kasan_krealloc()
452 * Unpoison the object's data. in __kasan_krealloc()
456 kasan_unpoison(object, size, false); in __kasan_krealloc()
458 slab = virt_to_slab(object); in __kasan_krealloc()
462 poison_kmalloc_large_redzone(object, size, flags); in __kasan_krealloc()
464 poison_kmalloc_redzone(slab->slab_cache, object, size, flags); in __kasan_krealloc()
466 return (void *)object; in __kasan_krealloc()
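
__kasan_krealloc() unpoisons the object up to the new size and then re-applies a redzone, picking the page_alloc variant when virt_to_slab() finds no backing slab and the slab variant otherwise. A compact sketch of just that branch, with hypothetical stand-ins for the two poisoning helpers:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two re-poisoning paths in the listing. */
static void poison_large_redzone_sketch(size_t size)
{
	printf("page_alloc-backed: redzone after %zu bytes\n", size);
}

static void poison_slab_redzone_sketch(size_t size)
{
	printf("slab-backed: redzone after %zu bytes\n", size);
}

/*
 * Mirrors the tail of __kasan_krealloc(): after unpoisoning up to the new
 * size, the redzone is re-applied based on whether the pointer is backed
 * by a slab.
 */
static void repoison_after_krealloc(bool backed_by_slab, size_t new_size)
{
	if (!backed_by_slab)
		poison_large_redzone_sketch(new_size);
	else
		poison_slab_redzone_sketch(new_size);
}

int main(void)
{
	repoison_after_krealloc(true, 48);	/* small kmalloc cache */
	repoison_after_krealloc(false, 5000);	/* large, page_alloc-backed */
	return 0;
}
```
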
546 /* Unpoison the object and save alloc info for non-kmalloc() allocations. */ in __kasan_mempool_unpoison_object()