Lines Matching full:object

17  *   del_state modifications and accesses to the object trees
20 * kmemleak_object) for the allocated memory blocks. The object trees are
23 * the object_list and the object tree root in the create_object() function
38 * Note that the kmemleak_object.use_count is incremented when an object is
46 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
48 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
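To make the documented ordering concrete, here is a minimal sketch (not from kmemleak.c; both parameters and the helper name are hypothetical) of taking a second object's lock while already holding the lock of the object being scanned, using the lockdep nesting annotation named above:

static void lock_two_objects_sketch(struct kmemleak_object *scanned,
				    struct kmemleak_object *other)
{
	unsigned long flags;

	/* outer lock: the object currently being scanned */
	raw_spin_lock_irqsave(&scanned->lock, flags);
	/*
	 * inner lock: a different object, one nesting level deep so
	 * lockdep does not flag the recursive lock-class acquisition
	 */
	raw_spin_lock_nested(&other->lock, SINGLE_DEPTH_NESTING);
	/* ... inspect "other" here ... */
	raw_spin_unlock(&other->lock);
	raw_spin_unlock_irqrestore(&scanned->lock, flags);
}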
110 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
130 * object->lock. Insertions or deletions from object_list, gray_list or
137 unsigned int flags; /* object status flags */
142 /* object usage count; object freed when use_count == 0 */
151 /* the total number of pointers found pointing to this object */
156 /* memory ranges to be scanned inside an object (empty for all) */
165 /* flag set after the first reporting of an unreferenced object */
167 /* flag set to not scan the object */
169 /* flag set to fully scan the object when scan_area allocation failed */
171 /* flag set for object allocated with physical address */
199 /* search tree for object boundaries */
201 /* search tree for object (with OBJECT_PHYS flag) boundaries */
203 /* search tree for object (with OBJECT_PERCPU flag) boundaries */
293 * with the object->lock held.
296 struct kmemleak_object *object) in hex_dump_object() argument
298 const u8 *ptr = (const u8 *)object->pointer; in hex_dump_object()
301 if (WARN_ON_ONCE(object->flags & OBJECT_PHYS)) in hex_dump_object()
304 if (object->flags & OBJECT_PERCPU) in hex_dump_object()
305 ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer); in hex_dump_object()
308 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); in hex_dump_object()
310 if (object->flags & OBJECT_PERCPU) in hex_dump_object()
322 * Object colors, encoded with count and min_count:
323 * - white - orphan object, not enough references to it (count < min_count)
328 * Newly created objects don't have any color assigned (object->count == -1)
331 static bool color_white(const struct kmemleak_object *object) in color_white() argument
333 return object->count != KMEMLEAK_BLACK && in color_white()
334 object->count < object->min_count; in color_white()
337 static bool color_gray(const struct kmemleak_object *object) in color_gray() argument
339 return object->min_count != KMEMLEAK_BLACK && in color_gray()
340 object->count >= object->min_count; in color_gray()
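As a worked illustration of the encoding (a hypothetical helper, not part of the file): white and gray are derived from comparing count against min_count, while black is the special min_count value installed by paint_ptr():

static const char *object_color_sketch(const struct kmemleak_object *object)
{
	if (color_gray(object))
		return "gray";	/* enough references (or min_count == 0) */
	if (color_white(object))
		return "white";	/* count < min_count: leak candidate */
	return "black";		/* min_count == KMEMLEAK_BLACK, or uncolored */
}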
348 static bool unreferenced_object(struct kmemleak_object *object) in unreferenced_object() argument
350 return (color_white(object) && object->flags & OBJECT_ALLOCATED) && in unreferenced_object()
351 time_before_eq(object->jiffies + jiffies_min_age, in unreferenced_object()
357 * print_unreferenced function must be called with the object->lock held.
360 struct kmemleak_object *object) in print_unreferenced() argument
366 nr_entries = stack_depot_fetch(object->trace_handle, &entries); in print_unreferenced()
367 warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", in print_unreferenced()
368 object->pointer, object->size); in print_unreferenced()
370 object->comm, object->pid, object->jiffies); in print_unreferenced()
371 hex_dump_object(seq, object); in print_unreferenced()
372 warn_or_seq_printf(seq, " backtrace (crc %x):\n", object->checksum); in print_unreferenced()
383 * the object->lock held.
385 static void dump_object_info(struct kmemleak_object *object) in dump_object_info() argument
387 pr_notice("Object 0x%08lx (size %zu):\n", in dump_object_info()
388 object->pointer, object->size); in dump_object_info()
390 object->comm, object->pid, object->jiffies); in dump_object_info()
391 pr_notice(" min_count = %d\n", object->min_count); in dump_object_info()
392 pr_notice(" count = %d\n", object->count); in dump_object_info()
393 pr_notice(" flags = 0x%x\n", object->flags); in dump_object_info()
394 pr_notice(" checksum = %u\n", object->checksum); in dump_object_info()
396 if (object->trace_handle) in dump_object_info()
397 stack_depot_print(object->trace_handle); in dump_object_info()
410 * Look up a memory block's metadata (kmemleak_object) in the object search
422 struct kmemleak_object *object; in __lookup_object() local
425 object = rb_entry(rb, struct kmemleak_object, rb_node); in __lookup_object()
426 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); in __lookup_object()
429 rb = object->rb_node.rb_left; in __lookup_object()
430 else if (untagged_objp + object->size <= untagged_ptr) in __lookup_object()
431 rb = object->rb_node.rb_right; in __lookup_object()
433 return object; in __lookup_object()
435 kmemleak_warn("Found object by alias at 0x%08lx\n", in __lookup_object()
437 dump_object_info(object); in __lookup_object()
444 /* Look up a kmemleak object which was allocated with a virtual address. */
451 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
452 * that once an object's use_count has reached 0, the RCU freeing has already
453 * been registered and the object should no longer be used. This function must be
456 static int get_object(struct kmemleak_object *object) in get_object() argument
458 return atomic_inc_not_zero(&object->use_count); in get_object()
467 struct kmemleak_object *object; in mem_pool_alloc() local
471 object = kmem_cache_alloc_noprof(object_cache, in mem_pool_alloc()
473 if (object) in mem_pool_alloc()
474 return object; in mem_pool_alloc()
479 object = list_first_entry_or_null(&mem_pool_free_list, in mem_pool_alloc()
480 typeof(*object), object_list); in mem_pool_alloc()
481 if (object) in mem_pool_alloc()
482 list_del(&object->object_list); in mem_pool_alloc()
484 object = &mem_pool[--mem_pool_free_count]; in mem_pool_alloc()
489 return object; in mem_pool_alloc()
493 * Return the object to either the slab allocator or the memory pool.
495 static void mem_pool_free(struct kmemleak_object *object) in mem_pool_free() argument
499 if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) { in mem_pool_free()
500 kmem_cache_free(object_cache, object); in mem_pool_free()
504 /* add the object to the memory pool free list */ in mem_pool_free()
506 list_add(&object->object_list, &mem_pool_free_list); in mem_pool_free()
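mem_pool_alloc() above implements a slab-first, static-pool-fallback strategy so that metadata can still be allocated before the slab allocator is up (and under memory pressure). A simplified sketch of that pattern follows; the pool size and all sketch_* names are illustrative, while object_cache and kmemleak_lock are the file's own symbols:

#define POOL_SIZE 64	/* illustrative; the real pool size is configurable */

static struct kmemleak_object sketch_pool[POOL_SIZE];
static int sketch_pool_free = POOL_SIZE;
static LIST_HEAD(sketch_pool_free_list);

static struct kmemleak_object *pool_alloc_sketch(gfp_t gfp)
{
	struct kmemleak_object *obj = NULL;
	unsigned long flags;

	/* fast path: the slab cache, once it exists */
	if (object_cache)
		obj = kmem_cache_alloc(object_cache, gfp);
	if (obj)
		return obj;

	/* slow path: reuse a freed pool slot, or carve out a fresh one */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	obj = list_first_entry_or_null(&sketch_pool_free_list,
				       typeof(*obj), object_list);
	if (obj)
		list_del(&obj->object_list);
	else if (sketch_pool_free > 0)
		obj = &sketch_pool[--sketch_pool_free];
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return obj;
}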
517 struct kmemleak_object *object = in free_object_rcu() local
522 * code accessing this object, hence no need for locking. in free_object_rcu()
524 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { in free_object_rcu()
528 mem_pool_free(object); in free_object_rcu()
532 * Decrement the object use_count. Once the count is 0, free the object using
538 static void put_object(struct kmemleak_object *object) in put_object() argument
540 if (!atomic_dec_and_test(&object->use_count)) in put_object()
544 WARN_ON(object->flags & OBJECT_ALLOCATED); in put_object()
549 * came from the memory pool. Free the object directly. in put_object()
552 call_rcu(&object->rcu, free_object_rcu); in put_object()
554 free_object_rcu(&object->rcu); in put_object()
558 * Look up an object in the object search tree and increase its use_count.
564 struct kmemleak_object *object; in __find_and_get_object() local
568 object = __lookup_object(ptr, alias, objflags); in __find_and_get_object()
571 /* check whether the object is still available */ in __find_and_get_object()
572 if (object && !get_object(object)) in __find_and_get_object()
573 object = NULL; in __find_and_get_object()
576 return object; in __find_and_get_object()
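A typical caller pairs the lookup with put_object(), which drops the reference and triggers the RCU freeing once use_count reaches zero. A minimal usage sketch (the caller and its locked body are hypothetical):

static void touch_object_sketch(unsigned long ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	object = find_and_get_object(ptr, 0);	/* takes a reference */
	if (!object)
		return;

	raw_spin_lock_irqsave(&object->lock, flags);
	/* ... read or update object fields here ... */
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);			/* drops the reference */
}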
579 /* Look up and get an object which was allocated with a virtual address. */
586 * Remove an object from its object tree and object_list. Must be called with
589 static void __remove_object(struct kmemleak_object *object) in __remove_object() argument
591 rb_erase(&object->rb_node, object_tree(object->flags)); in __remove_object()
592 if (!(object->del_state & DELSTATE_NO_DELETE)) in __remove_object()
593 list_del_rcu(&object->object_list); in __remove_object()
594 object->del_state |= DELSTATE_REMOVED; in __remove_object()
601 struct kmemleak_object *object; in __find_and_remove_object() local
603 object = __lookup_object(ptr, alias, objflags); in __find_and_remove_object()
604 if (object) in __find_and_remove_object()
605 __remove_object(object); in __find_and_remove_object()
607 return object; in __find_and_remove_object()
611 * Look up an object in the object search tree and remove it from both object
612 * tree root and object_list. The returned object's use_count should be at
619 struct kmemleak_object *object; in find_and_remove_object() local
622 object = __find_and_remove_object(ptr, alias, objflags); in find_and_remove_object()
625 return object; in find_and_remove_object()
649 struct kmemleak_object *object; in __alloc_object() local
651 object = mem_pool_alloc(gfp); in __alloc_object()
652 if (!object) { in __alloc_object()
658 INIT_LIST_HEAD(&object->object_list); in __alloc_object()
659 INIT_LIST_HEAD(&object->gray_list); in __alloc_object()
660 INIT_HLIST_HEAD(&object->area_list); in __alloc_object()
661 raw_spin_lock_init(&object->lock); in __alloc_object()
662 atomic_set(&object->use_count, 1); in __alloc_object()
663 object->excess_ref = 0; in __alloc_object()
664 object->count = 0; /* white color initially */ in __alloc_object()
665 object->checksum = 0; in __alloc_object()
666 object->del_state = 0; in __alloc_object()
670 object->pid = 0; in __alloc_object()
671 strscpy(object->comm, "hardirq"); in __alloc_object()
673 object->pid = 0; in __alloc_object()
674 strscpy(object->comm, "softirq"); in __alloc_object()
676 object->pid = current->pid; in __alloc_object()
683 strscpy(object->comm, current->comm); in __alloc_object()
687 object->trace_handle = set_track_prepare(); in __alloc_object()
689 return object; in __alloc_object()
692 static int __link_object(struct kmemleak_object *object, unsigned long ptr, in __link_object() argument
701 object->flags = OBJECT_ALLOCATED | objflags; in __link_object()
702 object->pointer = ptr; in __link_object()
703 object->size = kfence_ksize((void *)ptr) ?: size; in __link_object()
704 object->min_count = min_count; in __link_object()
705 object->jiffies = jiffies; in __link_object()
709 * Only update min_addr and max_addr with object storing virtual in __link_object()
731 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n", in __link_object()
741 rb_link_node(&object->rb_node, rb_parent, link); in __link_object()
742 rb_insert_color(&object->rb_node, object_tree(objflags)); in __link_object()
743 list_add_tail_rcu(&object->object_list, &object_list); in __link_object()
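The slot search that precedes the rb_link_node()/rb_insert_color() pair is the standard rbtree insertion walk. A simplified sketch under stated assumptions (the real __link_object() also strips KASAN tags from pointers and dumps the conflicting object before calling kmemleak_stop() on overlap):

static int link_sketch(struct kmemleak_object *object, unsigned long ptr,
		       size_t size, unsigned int objflags)
{
	struct rb_node **link = &object_tree(objflags)->rb_node;
	struct rb_node *rb_parent = NULL;

	while (*link) {
		struct kmemleak_object *parent;

		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &rb_parent->rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &rb_parent->rb_right;
		else
			return -EEXIST;	/* overlaps an existing block */
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, object_tree(objflags));
	return 0;
}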
750 * memory block and add it to the object_list and object tree.
755 struct kmemleak_object *object; in __create_object() local
759 object = __alloc_object(gfp); in __create_object()
760 if (!object) in __create_object()
764 ret = __link_object(object, ptr, size, min_count, objflags); in __create_object()
767 mem_pool_free(object); in __create_object()
770 /* Create a kmemleak object for memory allocated with a virtual address. */
777 /* Create a kmemleak object for memory allocated with a physical address. */
784 /* Create kmemleak object corresponding to a per-CPU allocation. */
792 * Mark the object as not allocated and schedule RCU freeing via put_object().
794 static void __delete_object(struct kmemleak_object *object) in __delete_object() argument
798 WARN_ON(!(object->flags & OBJECT_ALLOCATED)); in __delete_object()
799 WARN_ON(atomic_read(&object->use_count) < 1); in __delete_object()
805 raw_spin_lock_irqsave(&object->lock, flags); in __delete_object()
806 object->flags &= ~OBJECT_ALLOCATED; in __delete_object()
807 raw_spin_unlock_irqrestore(&object->lock, flags); in __delete_object()
808 put_object(object); in __delete_object()
817 struct kmemleak_object *object; in delete_object_full() local
819 object = find_and_remove_object(ptr, 0, objflags); in delete_object_full()
820 if (!object) { in delete_object_full()
822 kmemleak_warn("Freeing unknown object at 0x%08lx\n", in delete_object_full()
827 __delete_object(object); in delete_object_full()
838 struct kmemleak_object *object, *object_l, *object_r; in delete_object_part() local
850 object = __find_and_remove_object(ptr, 1, objflags); in delete_object_part()
851 if (!object) { in delete_object_part()
853 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n", in delete_object_part()
864 start = object->pointer; in delete_object_part()
865 end = object->pointer + object->size; in delete_object_part()
868 object->min_count, objflags)) in delete_object_part()
872 object->min_count, objflags)) in delete_object_part()
877 if (object) in delete_object_part()
878 __delete_object(object); in delete_object_part()
887 static void __paint_it(struct kmemleak_object *object, int color) in __paint_it() argument
889 object->min_count = color; in __paint_it()
891 object->flags |= OBJECT_NO_SCAN; in __paint_it()
894 static void paint_it(struct kmemleak_object *object, int color) in paint_it() argument
898 raw_spin_lock_irqsave(&object->lock, flags); in paint_it()
899 __paint_it(object, color); in paint_it()
900 raw_spin_unlock_irqrestore(&object->lock, flags); in paint_it()
905 struct kmemleak_object *object; in paint_ptr() local
907 object = __find_and_get_object(ptr, 0, objflags); in paint_ptr()
908 if (!object) { in paint_ptr()
909 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n", in paint_ptr()
915 paint_it(object, color); in paint_ptr()
916 put_object(object); in paint_ptr()
920 * Mark an object permanently as gray-colored so that it can no longer be
929 * Mark the object as black-colored so that it is ignored from scans and
938 * Add a scanning area to the object. If at least one such area is added,
944 struct kmemleak_object *object; in add_scan_area() local
949 object = find_and_get_object(ptr, 1); in add_scan_area()
950 if (!object) { in add_scan_area()
951 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n", in add_scan_area()
957 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); in add_scan_area()
963 raw_spin_lock_irqsave(&object->lock, flags); in add_scan_area()
965 pr_warn_once("Cannot allocate a scan area, scanning the full object\n"); in add_scan_area()
966 /* mark the object for full scan to avoid false positives */ in add_scan_area()
967 object->flags |= OBJECT_FULL_SCAN; in add_scan_area()
971 size = untagged_objp + object->size - untagged_ptr; in add_scan_area()
972 } else if (untagged_ptr + size > untagged_objp + object->size) { in add_scan_area()
973 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); in add_scan_area()
974 dump_object_info(object); in add_scan_area()
983 hlist_add_head(&area->node, &object->area_list); in add_scan_area()
985 raw_spin_unlock_irqrestore(&object->lock, flags); in add_scan_area()
986 put_object(object); in add_scan_area()
990 * Any surplus references (object already gray) to 'ptr' are passed to
992 * vm_struct may be used as an alternative reference to the vmalloc'ed object
998 struct kmemleak_object *object; in object_set_excess_ref() local
1000 object = find_and_get_object(ptr, 0); in object_set_excess_ref()
1001 if (!object) { in object_set_excess_ref()
1002 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n", in object_set_excess_ref()
1007 raw_spin_lock_irqsave(&object->lock, flags); in object_set_excess_ref()
1008 object->excess_ref = excess_ref; in object_set_excess_ref()
1009 raw_spin_unlock_irqrestore(&object->lock, flags); in object_set_excess_ref()
1010 put_object(object); in object_set_excess_ref()
1014 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
1015 * pointer. Such an object will not be scanned by kmemleak but references to it
1021 struct kmemleak_object *object; in object_no_scan() local
1023 object = find_and_get_object(ptr, 0); in object_no_scan()
1024 if (!object) { in object_no_scan()
1025 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr); in object_no_scan()
1029 raw_spin_lock_irqsave(&object->lock, flags); in object_no_scan()
1030 object->flags |= OBJECT_NO_SCAN; in object_no_scan()
1031 raw_spin_unlock_irqrestore(&object->lock, flags); in object_no_scan()
1032 put_object(object); in object_no_scan()
1036 * kmemleak_alloc - register a newly allocated object
1037 * @ptr: pointer to beginning of the object
1038 * @size: size of the object
1039 * @min_count: minimum number of references to this object. If during memory
1041 * the object is reported as a memory leak. If @min_count is 0,
1042 * the object is never reported as a leak. If @min_count is -1,
1043 * the object is ignored (not scanned and not reported as a leak)
1046 * This function is called from the kernel allocators when a new object
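A hedged usage sketch of this entry point: my_alloc()/my_free() and the my_backend_*() helpers are hypothetical, while kmemleak_alloc() and kmemleak_free() are the real registration calls a kernel allocator makes around its own bookkeeping:

void *my_alloc(size_t size, gfp_t gfp)		/* hypothetical allocator */
{
	void *ptr = my_backend_alloc(size, gfp);	/* hypothetical backend */

	if (ptr)
		kmemleak_alloc(ptr, size, 1, gfp);	/* min_count = 1 */
	return ptr;
}

void my_free(void *ptr)				/* hypothetical */
{
	kmemleak_free(ptr);	/* unregister before releasing the memory */
	my_backend_free(ptr);
}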
1060 * kmemleak_alloc_percpu - register a newly allocated __percpu object
1061 * @ptr: __percpu pointer to beginning of the object
1062 * @size: size of the object
1065 * This function is called from the kernel percpu allocator when a new object
1079 * kmemleak_vmalloc - register a newly vmalloc'ed object
1081 * @size: size of the object
1085 * object (memory block) is allocated.
1104 * kmemleak_free - unregister a previously registered object
1105 * @ptr: pointer to beginning of the object
1107 * This function is called from the kernel allocators when an object (memory
1120 * kmemleak_free_part - partially unregister a previously registered object
1121 * @ptr: pointer to the beginning of, or inside, the object. This also
1138 * kmemleak_free_percpu - unregister a previously registered __percpu object
1139 * @ptr: __percpu pointer to beginning of the object
1141 * This function is called from the kernel percpu allocator when an object
1154 * kmemleak_update_trace - update object allocation stack trace
1155 * @ptr: pointer to beginning of the object
1157 * Override the object allocation stack trace for cases where the actual
1162 struct kmemleak_object *object; in kmemleak_update_trace() local
1171 object = find_and_get_object((unsigned long)ptr, 1); in kmemleak_update_trace()
1172 if (!object) { in kmemleak_update_trace()
1174 kmemleak_warn("Updating stack trace for unknown object at %p\n", in kmemleak_update_trace()
1181 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_update_trace()
1182 object->trace_handle = trace_handle; in kmemleak_update_trace()
1183 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_update_trace()
1185 put_object(object); in kmemleak_update_trace()
1190 * kmemleak_not_leak - mark an allocated object as false positive
1191 * @ptr: pointer to beginning of the object
1193 * Calling this function on an object will cause the memory block to no longer
1206 * kmemleak_ignore - ignore an allocated object
1207 * @ptr: pointer to beginning of the object
1209 * Calling this function on an object will cause the memory block to be
1224 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1225 * @ptr: pointer to the beginning of, or inside, the object. This also
1230 * This function is used when it is known that only certain parts of an object
1244 * kmemleak_no_scan - do not scan an allocated object
1245 * @ptr: pointer to beginning of the object
1248 * in situations where it is known that the given object does not contain any
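Putting the four annotation entry points side by side, here is a hypothetical driver snippet; each call is an independent alternative depending on what is known about the block, not a sequence to apply together:

static void annotate_sketch(void)
{
	void *obj = kmalloc(128, GFP_KERNEL);	/* hypothetical allocation */

	if (!obj)
		return;

	/* pick ONE of the following for a given object: */
	kmemleak_not_leak(obj);	/* referenced in a way kmemleak cannot see */
	kmemleak_ignore(obj);	/* never scan and never report this block */
	kmemleak_scan_area(obj, 32, GFP_KERNEL); /* scan only the first 32 bytes */
	kmemleak_no_scan(obj);	/* keep tracking it, but don't scan its contents */
}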
1264 * @phys: physical address of the object
1265 * @size: size of the object
1274 * Create object with OBJECT_PHYS flag and in kmemleak_alloc_phys()
1284 * @phys: physical address of the beginning of, or inside, an object. This
1300 * @phys: physical address of the object
1312 * Update an object's checksum and return true if it was modified.
1314 static bool update_checksum(struct kmemleak_object *object) in update_checksum() argument
1316 u32 old_csum = object->checksum; in update_checksum()
1318 if (WARN_ON_ONCE(object->flags & OBJECT_PHYS)) in update_checksum()
1323 if (object->flags & OBJECT_PERCPU) { in update_checksum()
1326 object->checksum = 0; in update_checksum()
1328 void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu); in update_checksum()
1330 object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size); in update_checksum()
1333 object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size); in update_checksum()
1338 return object->checksum != old_csum; in update_checksum()
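The underlying change-detection idea, reduced to a plain buffer, is below (a hypothetical helper assuming <linux/crc32.h>; the real function additionally walks the per-CPU copies and strips KASAN tags as shown above). An unchanged checksum across two scans is what lets kmemleak treat a white object as genuinely unreferenced:

static bool buffer_changed_sketch(const void *buf, size_t len, u32 *last_csum)
{
	u32 csum = crc32(0, buf, len);	/* seed 0, as in update_checksum() */
	bool changed = (csum != *last_csum);

	*last_csum = csum;
	return changed;
}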
1342 * Update an object's references. object->lock must be held by the caller.
1344 static void update_refs(struct kmemleak_object *object) in update_refs() argument
1346 if (!color_white(object)) { in update_refs()
1352 * Increase the object's reference count (number of pointers to the in update_refs()
1354 * object's color will become gray and it will be added to the in update_refs()
1357 object->count++; in update_refs()
1358 if (color_gray(object)) { in update_refs()
1360 WARN_ON(!get_object(object)); in update_refs()
1361 list_add_tail(&object->gray_list, &gray_list); in update_refs()
1368 struct kmemleak_object *object; in pointer_update_refs() local
1383 * object->use_count cannot be dropped to 0 while the object in pointer_update_refs()
1387 object = __lookup_object(pointer, 1, objflags); in pointer_update_refs()
1388 if (!object) in pointer_update_refs()
1390 if (object == scanned) in pointer_update_refs()
1395 * Avoid the lockdep recursive warning on object->lock being in pointer_update_refs()
1399 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in pointer_update_refs()
1400 /* only pass surplus references (object already gray) */ in pointer_update_refs()
1401 if (color_gray(object)) { in pointer_update_refs()
1402 excess_ref = object->excess_ref; in pointer_update_refs()
1403 /* no need for update_refs() if object already gray */ in pointer_update_refs()
1406 update_refs(object); in pointer_update_refs()
1408 raw_spin_unlock(&object->lock); in pointer_update_refs()
1411 object = lookup_object(excess_ref, 0); in pointer_update_refs()
1412 if (!object) in pointer_update_refs()
1414 if (object == scanned) in pointer_update_refs()
1417 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in pointer_update_refs()
1418 update_refs(object); in pointer_update_refs()
1419 raw_spin_unlock(&object->lock); in pointer_update_refs()
1492 * that object->use_count >= 1.
1494 static void scan_object(struct kmemleak_object *object) in scan_object() argument
1500 * Once the object->lock is acquired, the corresponding memory block in scan_object()
1503 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1504 if (object->flags & OBJECT_NO_SCAN) in scan_object()
1506 if (!(object->flags & OBJECT_ALLOCATED)) in scan_object()
1507 /* already freed object */ in scan_object()
1510 if (object->flags & OBJECT_PERCPU) { in scan_object()
1514 void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu); in scan_object()
1515 void *end = start + object->size; in scan_object()
1517 scan_block(start, end, object); in scan_object()
1519 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1521 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1522 if (!(object->flags & OBJECT_ALLOCATED)) in scan_object()
1525 } else if (hlist_empty(&object->area_list) || in scan_object()
1526 object->flags & OBJECT_FULL_SCAN) { in scan_object()
1527 void *start = object->flags & OBJECT_PHYS ? in scan_object()
1528 __va((phys_addr_t)object->pointer) : in scan_object()
1529 (void *)object->pointer; in scan_object()
1530 void *end = start + object->size; in scan_object()
1535 scan_block(start, next, object); in scan_object()
1541 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1543 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1544 } while (object->flags & OBJECT_ALLOCATED); in scan_object()
1546 hlist_for_each_entry(area, &object->area_list, node) in scan_object()
1549 object); in scan_object()
1552 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1561 struct kmemleak_object *object, *tmp; in scan_gray_list() local
1568 object = list_entry(gray_list.next, typeof(*object), gray_list); in scan_gray_list()
1569 while (&object->gray_list != &gray_list) { in scan_gray_list()
1574 scan_object(object); in scan_gray_list()
1576 tmp = list_entry(object->gray_list.next, typeof(*object), in scan_gray_list()
1579 /* remove the object from the list and release it */ in scan_gray_list()
1580 list_del(&object->gray_list); in scan_gray_list()
1581 put_object(object); in scan_gray_list()
1583 object = tmp; in scan_gray_list()
1589 * Conditionally call cond_resched() in an object iteration loop while making sure
1590 * that the given object won't go away without the RCU read lock by performing a
1593 static void kmemleak_cond_resched(struct kmemleak_object *object) in kmemleak_cond_resched() argument
1595 if (!get_object(object)) in kmemleak_cond_resched()
1596 return; /* Try next object */ in kmemleak_cond_resched()
1599 if (object->del_state & DELSTATE_REMOVED) in kmemleak_cond_resched()
1600 goto unlock_put; /* Object removed */ in kmemleak_cond_resched()
1601 object->del_state |= DELSTATE_NO_DELETE; in kmemleak_cond_resched()
1609 if (object->del_state & DELSTATE_REMOVED) in kmemleak_cond_resched()
1610 list_del_rcu(&object->object_list); in kmemleak_cond_resched()
1611 object->del_state &= ~DELSTATE_NO_DELETE; in kmemleak_cond_resched()
1614 put_object(object); in kmemleak_cond_resched()
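Between the matched lines above, the function drops and re-takes the RCU read lock around the reschedule. A paraphrased sketch of the overall shape (the del_state bookkeeping is summarised in comments; the reference taken by get_object() plus the DELSTATE_NO_DELETE mark keep both the object and the list cursor valid):

static void cond_resched_pattern_sketch(struct kmemleak_object *object)
{
	if (!get_object(object))	/* object already on its way out */
		return;

	/* mark the object so a concurrent delete defers the list removal */

	rcu_read_unlock();	/* safe: use_count pins the object */
	cond_resched();
	rcu_read_lock();

	/* finish any deferred removal, clear the mark */
	put_object(object);	/* drop the pinning reference */
}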
1624 struct kmemleak_object *object; in kmemleak_scan() local
1633 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1634 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1638 * 1 reference to any object at this point. in kmemleak_scan()
1640 if (atomic_read(&object->use_count) > 1) { in kmemleak_scan()
1641 pr_debug("object->use_count = %d\n", in kmemleak_scan()
1642 atomic_read(&object->use_count)); in kmemleak_scan()
1643 dump_object_info(object); in kmemleak_scan()
1648 if ((object->flags & OBJECT_PHYS) && in kmemleak_scan()
1649 !(object->flags & OBJECT_NO_SCAN)) { in kmemleak_scan()
1650 unsigned long phys = object->pointer; in kmemleak_scan()
1653 PHYS_PFN(phys + object->size) >= max_low_pfn) in kmemleak_scan()
1654 __paint_it(object, KMEMLEAK_BLACK); in kmemleak_scan()
1657 /* reset the reference count (whiten the object) */ in kmemleak_scan()
1658 object->count = 0; in kmemleak_scan()
1659 if (color_gray(object) && get_object(object)) in kmemleak_scan()
1660 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1662 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1665 kmemleak_cond_resched(object); in kmemleak_scan()
1733 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1735 kmemleak_cond_resched(object); in kmemleak_scan()
1742 if (!color_white(object)) in kmemleak_scan()
1744 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1745 if (color_white(object) && (object->flags & OBJECT_ALLOCATED) in kmemleak_scan()
1746 && update_checksum(object) && get_object(object)) { in kmemleak_scan()
1748 object->count = object->min_count; in kmemleak_scan()
1749 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1751 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1770 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1772 kmemleak_cond_resched(object); in kmemleak_scan()
1779 if (!color_white(object)) in kmemleak_scan()
1781 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1782 if (unreferenced_object(object) && in kmemleak_scan()
1783 !(object->flags & OBJECT_REPORTED)) { in kmemleak_scan()
1784 object->flags |= OBJECT_REPORTED; in kmemleak_scan()
1787 print_unreferenced(NULL, object); in kmemleak_scan()
1791 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1869 * Iterate over the object_list and return the first valid object at or after
1875 struct kmemleak_object *object; in kmemleak_seq_start() local
1884 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_seq_start()
1887 if (get_object(object)) in kmemleak_seq_start()
1890 object = NULL; in kmemleak_seq_start()
1892 return object; in kmemleak_seq_start()
1896 * Return the next object in the object_list. The function decrements the
1897 * use_count of the previous object and increases that of the next one.
1919 * Decrement the use_count of the last object required, if any.
1936 * Print the information for an unreferenced object to the seq file.
1940 struct kmemleak_object *object = v; in kmemleak_seq_show() local
1943 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_seq_show()
1944 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) in kmemleak_seq_show()
1945 print_unreferenced(seq, object); in kmemleak_seq_show()
1946 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_seq_show()
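The four handlers above form the usual seq_file quartet and are wired together in the standard way (the surrounding open/read boilerplate does not appear in the matches):

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};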
1965 struct kmemleak_object *object; in dump_str_object_info() local
1970 object = find_and_get_object(addr, 0); in dump_str_object_info()
1971 if (!object) { in dump_str_object_info()
1972 pr_info("Unknown object at 0x%08lx\n", addr); in dump_str_object_info()
1976 raw_spin_lock_irqsave(&object->lock, flags); in dump_str_object_info()
1977 dump_object_info(object); in dump_str_object_info()
1978 raw_spin_unlock_irqrestore(&object->lock, flags); in dump_str_object_info()
1980 put_object(object); in dump_str_object_info()
1992 struct kmemleak_object *object; in kmemleak_clear() local
1995 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_clear()
1996 raw_spin_lock_irq(&object->lock); in kmemleak_clear()
1997 if ((object->flags & OBJECT_REPORTED) && in kmemleak_clear()
1998 unreferenced_object(object)) in kmemleak_clear()
1999 __paint_it(object, KMEMLEAK_GREY); in kmemleak_clear()
2000 raw_spin_unlock_irq(&object->lock); in kmemleak_clear()
2023 * dump=... - dump information about the object found at the given address
2109 struct kmemleak_object *object, *tmp; in __kmemleak_do_cleanup() local
2115 list_for_each_entry_safe(object, tmp, &object_list, object_list) { in __kmemleak_do_cleanup()
2116 __remove_object(object); in __kmemleak_do_cleanup()
2117 __delete_object(object); in __kmemleak_do_cleanup()
2133 * longer track object freeing. Ordering of the scan thread stopping and in kmemleak_do_cleanup()