Lines Matching full:object
17 * del_state modifications and accesses to the object trees
20 * kmemleak_object) for the allocated memory blocks. The object trees are
23 * the object_list and the object tree root in the create_object() function
38 * Note that the kmemleak_object.use_count is incremented when an object is
46 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
48 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
110 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
130 * object->lock. Insertions into or deletions from object_list, gray_list or
137 unsigned int flags; /* object status flags */
142 /* object usage count; object freed when use_count == 0 */
151 /* the total number of pointers found pointing to this object */
156 /* memory ranges to be scanned inside an object (empty for all) */
165 /* flag set after the first reporting of an unreferenced object */
167 /* flag set to not scan the object */
169 /* flag set to fully scan the object when scan_area allocation failed */
171 /* flag set for objects allocated with a physical address */
199 /* search tree for object boundaries */
201 /* search tree for boundaries of objects with the OBJECT_PHYS flag */
203 /* search tree for boundaries of objects with the OBJECT_PERCPU flag */
293 * with the object->lock held.
296 struct kmemleak_object *object) in hex_dump_object() argument
298 const u8 *ptr = (const u8 *)object->pointer; in hex_dump_object()
301 if (WARN_ON_ONCE(object->flags & OBJECT_PHYS)) in hex_dump_object()
304 if (object->flags & OBJECT_PERCPU) in hex_dump_object()
305 ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer); in hex_dump_object()
308 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); in hex_dump_object()
310 if (object->flags & OBJECT_PERCPU) in hex_dump_object()
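The bounded dump above keeps leak reports short. A minimal userspace sketch of the same idea, assuming illustrative HEX_ROW_SIZE/HEX_MAX_LINES values (the real defines live elsewhere in the file):

    #include <stddef.h>
    #include <stdio.h>

    #define HEX_ROW_SIZE  16    /* illustrative values */
    #define HEX_MAX_LINES 2

    static void hex_dump(const unsigned char *ptr, size_t size)
    {
        /* cap the dump so a huge object cannot flood the log */
        size_t len = size < HEX_MAX_LINES * HEX_ROW_SIZE ?
                     size : HEX_MAX_LINES * HEX_ROW_SIZE;

        for (size_t i = 0; i < len; i++) {
            printf(" %02x", ptr[i]);
            if ((i + 1) % HEX_ROW_SIZE == 0 || i + 1 == len)
                putchar('\n');
        }
    }

    int main(void)
    {
        unsigned char buf[100] = { 0xde, 0xad, 0xbe, 0xef };

        hex_dump(buf, sizeof(buf)); /* prints 32 of the 100 bytes */
        return 0;
    }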
322 * Object colors, encoded with count and min_count:
323 * - white - orphan object, not enough references to it (count < min_count)
328 * Newly created objects don't have any color assigned (object->count == -1)
331 static bool color_white(const struct kmemleak_object *object) in color_white() argument
333 return object->count != KMEMLEAK_BLACK && in color_white()
334 object->count < object->min_count; in color_white()
337 static bool color_gray(const struct kmemleak_object *object) in color_gray() argument
339 return object->min_count != KMEMLEAK_BLACK && in color_gray()
340 object->count >= object->min_count; in color_gray()
348 static bool unreferenced_object(struct kmemleak_object *object) in unreferenced_object() argument
350 return (color_white(object) && object->flags & OBJECT_ALLOCATED) && in unreferenced_object()
351 time_before_eq(object->jiffies + jiffies_min_age, in unreferenced_object()
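The predicates above implement the color model. A standalone C sketch with a simplified struct standing in for kmemleak_object; KMEMLEAK_BLACK follows the kernel's -1 convention, and the additional age check against MSECS_MIN_AGE performed by unreferenced_object() is left out:

    #include <stdbool.h>
    #include <stdio.h>

    #define KMEMLEAK_BLACK -1   /* min_count meaning "never report" */

    struct obj {
        int count;      /* references found by the last scan */
        int min_count;  /* expected minimum number of references */
    };

    /* white: fewer references than expected - a leak candidate */
    static bool color_white(const struct obj *o)
    {
        return o->count != KMEMLEAK_BLACK && o->count < o->min_count;
    }

    /* gray: enough references - its contents are scanned for pointers */
    static bool color_gray(const struct obj *o)
    {
        return o->min_count != KMEMLEAK_BLACK && o->count >= o->min_count;
    }

    int main(void)
    {
        struct obj orphan = { .count = 0, .min_count = 1 };
        struct obj live   = { .count = 3, .min_count = 1 };
        struct obj black  = { .count = 0, .min_count = KMEMLEAK_BLACK };

        printf("orphan: white=%d gray=%d\n", color_white(&orphan), color_gray(&orphan));
        printf("live:   white=%d gray=%d\n", color_white(&live), color_gray(&live));
        printf("black:  white=%d gray=%d\n", color_white(&black), color_gray(&black));
        return 0;
    }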
355 static const char *__object_type_str(struct kmemleak_object *object) in __object_type_str() argument
357 if (object->flags & OBJECT_PHYS) in __object_type_str()
359 if (object->flags & OBJECT_PERCPU) in __object_type_str()
366 * print_unreferenced function must be called with the object->lock held.
369 struct kmemleak_object *object) in print_unreferenced() argument
375 nr_entries = stack_depot_fetch(object->trace_handle, &entries); in print_unreferenced()
376 warn_or_seq_printf(seq, "unreferenced object%s 0x%08lx (size %zu):\n", in print_unreferenced()
377 __object_type_str(object), in print_unreferenced()
378 object->pointer, object->size); in print_unreferenced()
380 object->comm, object->pid, object->jiffies); in print_unreferenced()
381 hex_dump_object(seq, object); in print_unreferenced()
382 warn_or_seq_printf(seq, " backtrace (crc %x):\n", object->checksum); in print_unreferenced()
393 * the object->lock held.
395 static void dump_object_info(struct kmemleak_object *object) in dump_object_info() argument
397 pr_notice("Object%s 0x%08lx (size %zu):\n", in dump_object_info()
398 __object_type_str(object), object->pointer, object->size); in dump_object_info()
400 object->comm, object->pid, object->jiffies); in dump_object_info()
401 pr_notice(" min_count = %d\n", object->min_count); in dump_object_info()
402 pr_notice(" count = %d\n", object->count); in dump_object_info()
403 pr_notice(" flags = 0x%x\n", object->flags); in dump_object_info()
404 pr_notice(" checksum = %u\n", object->checksum); in dump_object_info()
406 if (object->trace_handle) in dump_object_info()
407 stack_depot_print(object->trace_handle); in dump_object_info()
420 * Look up a memory block's metadata (kmemleak_object) in the object search
432 struct kmemleak_object *object; in __lookup_object() local
435 object = rb_entry(rb, struct kmemleak_object, rb_node); in __lookup_object()
436 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); in __lookup_object()
439 rb = object->rb_node.rb_left; in __lookup_object()
440 else if (untagged_objp + object->size <= untagged_ptr) in __lookup_object()
441 rb = object->rb_node.rb_right; in __lookup_object()
443 return object; in __lookup_object()
445 kmemleak_warn("Found object by alias at 0x%08lx\n", in __lookup_object()
447 dump_object_info(object); in __lookup_object()
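The lookup walks a tree of [pointer, pointer + size) boundaries. A userspace sketch of the descent and the alias check, with a plain binary tree standing in for the kernel's rb-tree and the KASAN tag stripping omitted:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node {
        unsigned long start;    /* block boundaries: [start, start + size) */
        size_t size;
        struct node *left, *right;
    };

    static struct node *lookup(struct node *root, unsigned long ptr, bool alias)
    {
        while (root) {
            if (ptr < root->start)
                root = root->left;
            else if (root->start + root->size <= ptr)
                root = root->right;
            else if (root->start == ptr || alias)
                return root;    /* exact hit, or aliases accepted */
            else {
                fprintf(stderr, "found object by alias at 0x%lx\n", ptr);
                return NULL;    /* pointer into the middle of a block */
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct node a = { .start = 0x1000, .size = 0x100 };
        struct node root = { .start = 0x3000, .size = 0x40, .left = &a };

        lookup(&root, 0x1000, false);   /* finds a */
        lookup(&root, 0x1010, false);   /* alias rejected, warns */
        lookup(&root, 0x1010, true);    /* finds a */
        return 0;
    }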
454 /* Look up a kmemleak object which was allocated with a virtual address. */
461 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
462 * that once an object's use_count has reached 0, RCU freeing has already been
463 * registered and the object should no longer be used. This function must be
466 static int get_object(struct kmemleak_object *object) in get_object() argument
468 return atomic_inc_not_zero(&object->use_count); in get_object()
477 struct kmemleak_object *object; in mem_pool_alloc() local
481 object = kmem_cache_alloc_noprof(object_cache, in mem_pool_alloc()
483 if (object) in mem_pool_alloc()
484 return object; in mem_pool_alloc()
489 object = list_first_entry_or_null(&mem_pool_free_list, in mem_pool_alloc()
490 typeof(*object), object_list); in mem_pool_alloc()
491 if (object) in mem_pool_alloc()
492 list_del(&object->object_list); in mem_pool_alloc()
494 object = &mem_pool[--mem_pool_free_count]; in mem_pool_alloc()
499 return object; in mem_pool_alloc()
503 * Return the object to either the slab allocator or the memory pool.
505 static void mem_pool_free(struct kmemleak_object *object) in mem_pool_free() argument
509 if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) { in mem_pool_free()
510 kmem_cache_free(object_cache, object); in mem_pool_free()
514 /* add the object to the memory pool free list */ in mem_pool_free()
516 list_add(&object->object_list, &mem_pool_free_list); in mem_pool_free()
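mem_pool_alloc()/mem_pool_free() above fall back from the regular allocator to an emergency pool so kmemleak keeps working when allocations fail. A sketch of the same three-tier scheme, with malloc() standing in for the slab cache and illustrative names and sizes:

    #include <stdlib.h>

    #define POOL_SIZE 64            /* illustrative emergency-pool size */

    struct entry {
        struct entry *next;         /* free-list linkage when recycled */
        /* ... metadata payload ... */
    };

    static struct entry pool[POOL_SIZE];
    static struct entry *free_list;
    static int pool_remaining = POOL_SIZE;

    static struct entry *pool_alloc(void)
    {
        struct entry *e = malloc(sizeof(*e));   /* 1. regular allocator */

        if (e)
            return e;
        if (free_list) {                        /* 2. recycled pool entries */
            e = free_list;
            free_list = e->next;
            return e;
        }
        if (pool_remaining > 0)                 /* 3. untouched pool slots */
            return &pool[--pool_remaining];
        return NULL;                            /* emergency pool exhausted */
    }

    static void pool_free(struct entry *e)
    {
        /* entries carved from the static pool go back on its free list */
        if (e >= pool && e < pool + POOL_SIZE) {
            e->next = free_list;
            free_list = e;
        } else {
            free(e);
        }
    }

    int main(void)
    {
        pool_free(pool_alloc());
        return 0;
    }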
527 struct kmemleak_object *object = in free_object_rcu() local
532 * code accessing this object, hence no need for locking. in free_object_rcu()
534 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { in free_object_rcu()
538 mem_pool_free(object); in free_object_rcu()
542 * Decrement the object use_count. Once the count is 0, free the object using
548 static void put_object(struct kmemleak_object *object) in put_object() argument
550 if (!atomic_dec_and_test(&object->use_count)) in put_object()
554 WARN_ON(object->flags & OBJECT_ALLOCATED); in put_object()
559 * came from the memory pool. Free the object directly. in put_object()
562 call_rcu(&object->rcu, free_object_rcu); in put_object()
564 free_object_rcu(&object->rcu); in put_object()
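A C11-atomics sketch of the use_count lifecycle described above: get succeeds only while the count is non-zero (the effect of atomic_inc_not_zero()), and whoever drops the count to zero frees the object; the RCU grace period used by the kernel is elided:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct obj {
        atomic_int use_count;
    };

    /* succeed only while the count is non-zero, like atomic_inc_not_zero() */
    static bool get_obj(struct obj *o)
    {
        int c = atomic_load(&o->use_count);

        while (c != 0)
            if (atomic_compare_exchange_weak(&o->use_count, &c, c + 1))
                return true;
        return false;       /* already on its way to being freed */
    }

    /* the caller that drops the count to zero frees the object */
    static void put_obj(struct obj *o)
    {
        if (atomic_fetch_sub(&o->use_count, 1) == 1)
            free(o);
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->use_count, 1);  /* creation reference */
        if (get_obj(o))
            put_obj(o);                 /* drop the extra reference */
        put_obj(o);                     /* count hits zero, object freed */
        return 0;
    }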
568 * Look up an object in the object search tree and increase its use_count.
574 struct kmemleak_object *object; in __find_and_get_object() local
578 object = __lookup_object(ptr, alias, objflags); in __find_and_get_object()
581 /* check whether the object is still available */ in __find_and_get_object()
582 if (object && !get_object(object)) in __find_and_get_object()
583 object = NULL; in __find_and_get_object()
586 return object; in __find_and_get_object()
589 /* Look up and get an object which was allocated with a virtual address. */
596 * Remove an object from its object tree and object_list. Must be called with
599 static void __remove_object(struct kmemleak_object *object) in __remove_object() argument
601 rb_erase(&object->rb_node, object_tree(object->flags)); in __remove_object()
602 if (!(object->del_state & DELSTATE_NO_DELETE)) in __remove_object()
603 list_del_rcu(&object->object_list); in __remove_object()
604 object->del_state |= DELSTATE_REMOVED; in __remove_object()
611 struct kmemleak_object *object; in __find_and_remove_object() local
613 object = __lookup_object(ptr, alias, objflags); in __find_and_remove_object()
614 if (object) in __find_and_remove_object()
615 __remove_object(object); in __find_and_remove_object()
617 return object; in __find_and_remove_object()
621 * Look up an object in the object search tree and remove it from both the object
622 * tree root and the object_list. The returned object's use_count should be at
629 struct kmemleak_object *object; in find_and_remove_object() local
632 object = __find_and_remove_object(ptr, alias, objflags); in find_and_remove_object()
635 return object; in find_and_remove_object()
659 struct kmemleak_object *object; in __alloc_object() local
661 object = mem_pool_alloc(gfp); in __alloc_object()
662 if (!object) { in __alloc_object()
668 INIT_LIST_HEAD(&object->object_list); in __alloc_object()
669 INIT_LIST_HEAD(&object->gray_list); in __alloc_object()
670 INIT_HLIST_HEAD(&object->area_list); in __alloc_object()
671 raw_spin_lock_init(&object->lock); in __alloc_object()
672 atomic_set(&object->use_count, 1); in __alloc_object()
673 object->excess_ref = 0; in __alloc_object()
674 object->count = 0; /* white color initially */ in __alloc_object()
675 object->checksum = 0; in __alloc_object()
676 object->del_state = 0; in __alloc_object()
680 object->pid = 0; in __alloc_object()
681 strscpy(object->comm, "hardirq"); in __alloc_object()
683 object->pid = 0; in __alloc_object()
684 strscpy(object->comm, "softirq"); in __alloc_object()
686 object->pid = current->pid; in __alloc_object()
693 strscpy(object->comm, current->comm); in __alloc_object()
697 object->trace_handle = set_track_prepare(); in __alloc_object()
699 return object; in __alloc_object()
702 static int __link_object(struct kmemleak_object *object, unsigned long ptr, in __link_object() argument
711 object->flags = OBJECT_ALLOCATED | objflags; in __link_object()
712 object->pointer = ptr; in __link_object()
713 object->size = kfence_ksize((void *)ptr) ?: size; in __link_object()
714 object->min_count = min_count; in __link_object()
715 object->jiffies = jiffies; in __link_object()
719 * Only update min_addr and max_addr for objects storing virtual
741 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n", in __link_object()
751 rb_link_node(&object->rb_node, rb_parent, link); in __link_object()
752 rb_insert_color(&object->rb_node, object_tree(objflags)); in __link_object()
753 list_add_tail_rcu(&object->object_list, &object_list); in __link_object()
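__link_object() above descends the boundary tree and refuses overlapping blocks. A sketch of that insertion logic with a plain, unbalanced tree (the rebalancing done by rb_insert_color() is omitted):

    #include <stddef.h>
    #include <stdio.h>

    struct node {
        unsigned long start;
        size_t size;
        struct node *left, *right;
    };

    static int insert(struct node **root, struct node *n)
    {
        struct node **link = root;

        while (*link) {
            struct node *parent = *link;

            if (n->start + n->size <= parent->start)
                link = &parent->left;
            else if (parent->start + parent->size <= n->start)
                link = &parent->right;
            else {
                fprintf(stderr, "cannot insert 0x%lx: overlaps existing\n",
                        n->start);
                return -1;
            }
        }
        *link = n;      /* the real code then rebalances the rb-tree */
        return 0;
    }

    int main(void)
    {
        struct node *root = NULL;
        struct node a = { .start = 0x1000, .size = 0x100 };
        struct node b = { .start = 0x1080, .size = 0x10 };  /* overlaps a */

        insert(&root, &a);
        return insert(&root, &b) ? 0 : 1;   /* rejected with a warning */
    }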
760 * memory block and add it to the object_list and object tree.
765 struct kmemleak_object *object; in __create_object() local
769 object = __alloc_object(gfp); in __create_object()
770 if (!object) in __create_object()
774 ret = __link_object(object, ptr, size, min_count, objflags); in __create_object()
777 mem_pool_free(object); in __create_object()
780 /* Create a kmemleak object for memory allocated with a virtual address. */
787 /* Create a kmemleak object for memory allocated with a physical address. */
794 /* Create a kmemleak object corresponding to a per-CPU allocation. */
802 * Mark the object as not allocated and schedule RCU freeing via put_object().
804 static void __delete_object(struct kmemleak_object *object) in __delete_object() argument
808 WARN_ON(!(object->flags & OBJECT_ALLOCATED)); in __delete_object()
809 WARN_ON(atomic_read(&object->use_count) < 1); in __delete_object()
815 raw_spin_lock_irqsave(&object->lock, flags); in __delete_object()
816 object->flags &= ~OBJECT_ALLOCATED; in __delete_object()
817 raw_spin_unlock_irqrestore(&object->lock, flags); in __delete_object()
818 put_object(object); in __delete_object()
827 struct kmemleak_object *object; in delete_object_full() local
829 object = find_and_remove_object(ptr, 0, objflags); in delete_object_full()
830 if (!object) { in delete_object_full()
832 kmemleak_warn("Freeing unknown object at 0x%08lx\n", in delete_object_full()
837 __delete_object(object); in delete_object_full()
848 struct kmemleak_object *object, *object_l, *object_r; in delete_object_part() local
860 object = __find_and_remove_object(ptr, 1, objflags); in delete_object_part()
861 if (!object) { in delete_object_part()
863 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n", in delete_object_part()
874 start = object->pointer; in delete_object_part()
875 end = object->pointer + object->size; in delete_object_part()
878 object->min_count, objflags)) in delete_object_part()
882 object->min_count, objflags)) in delete_object_part()
887 if (object) in delete_object_part()
888 __delete_object(object); in delete_object_part()
897 static void __paint_it(struct kmemleak_object *object, int color) in __paint_it() argument
899 object->min_count = color; in __paint_it()
901 object->flags |= OBJECT_NO_SCAN; in __paint_it()
904 static void paint_it(struct kmemleak_object *object, int color) in paint_it() argument
908 raw_spin_lock_irqsave(&object->lock, flags); in paint_it()
909 __paint_it(object, color); in paint_it()
910 raw_spin_unlock_irqrestore(&object->lock, flags); in paint_it()
915 struct kmemleak_object *object; in paint_ptr() local
917 object = __find_and_get_object(ptr, 0, objflags); in paint_ptr()
918 if (!object) { in paint_ptr()
919 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n", in paint_ptr()
925 paint_it(object, color); in paint_ptr()
926 put_object(object); in paint_ptr()
930 * Mark an object permanently as gray-colored so that it can no longer be
939 * Mark the object as black-colored so that it is ignored during scans and
948 * Reset the checksum of an object. The immediate effect is that it will not
954 struct kmemleak_object *object; in reset_checksum() local
956 object = find_and_get_object(ptr, 0); in reset_checksum()
957 if (!object) { in reset_checksum()
958 kmemleak_warn("Not resetting the checksum of an unknown object at 0x%08lx\n", in reset_checksum()
963 raw_spin_lock_irqsave(&object->lock, flags); in reset_checksum()
964 object->checksum = 0; in reset_checksum()
965 raw_spin_unlock_irqrestore(&object->lock, flags); in reset_checksum()
966 put_object(object); in reset_checksum()
970 * Add a scanning area to the object. If at least one such area is added,
976 struct kmemleak_object *object; in add_scan_area() local
981 object = find_and_get_object(ptr, 1); in add_scan_area()
982 if (!object) { in add_scan_area()
983 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n", in add_scan_area()
989 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); in add_scan_area()
995 raw_spin_lock_irqsave(&object->lock, flags); in add_scan_area()
997 pr_warn_once("Cannot allocate a scan area, scanning the full object\n"); in add_scan_area()
998 /* mark the object for full scan to avoid false positives */ in add_scan_area()
999 object->flags |= OBJECT_FULL_SCAN; in add_scan_area()
1003 size = untagged_objp + object->size - untagged_ptr; in add_scan_area()
1004 } else if (untagged_ptr + size > untagged_objp + object->size) { in add_scan_area()
1005 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); in add_scan_area()
1006 dump_object_info(object); in add_scan_area()
1015 hlist_add_head(&area->node, &object->area_list); in add_scan_area()
1017 raw_spin_unlock_irqrestore(&object->lock, flags); in add_scan_area()
1018 put_object(object); in add_scan_area()
1022 * Any surplus references (object already gray) to 'ptr' are passed to
1024 * vm_struct may be used as an alternative reference to the vmalloc'ed object
1030 struct kmemleak_object *object; in object_set_excess_ref() local
1032 object = find_and_get_object(ptr, 0); in object_set_excess_ref()
1033 if (!object) { in object_set_excess_ref()
1034 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n", in object_set_excess_ref()
1039 raw_spin_lock_irqsave(&object->lock, flags); in object_set_excess_ref()
1040 object->excess_ref = excess_ref; in object_set_excess_ref()
1041 raw_spin_unlock_irqrestore(&object->lock, flags); in object_set_excess_ref()
1042 put_object(object); in object_set_excess_ref()
1046 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
1047 * pointer. Such an object will not be scanned by kmemleak, but references to it
1053 struct kmemleak_object *object; in object_no_scan() local
1055 object = find_and_get_object(ptr, 0); in object_no_scan()
1056 if (!object) { in object_no_scan()
1057 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr); in object_no_scan()
1061 raw_spin_lock_irqsave(&object->lock, flags); in object_no_scan()
1062 object->flags |= OBJECT_NO_SCAN; in object_no_scan()
1063 raw_spin_unlock_irqrestore(&object->lock, flags); in object_no_scan()
1064 put_object(object); in object_no_scan()
1068 * kmemleak_alloc - register a newly allocated object
1069 * @ptr: pointer to beginning of the object
1070 * @size: size of the object
1071 * @min_count: minimum number of references to this object. If during memory
1073 * the object is reported as a memory leak. If @min_count is 0,
1074 * the object is never reported as a leak. If @min_count is -1,
1075 * the object is ignored (not scanned and not reported as a leak)
1078 * This function is called from the kernel allocators when a new object
1092 * kmemleak_alloc_percpu - register a newly allocated __percpu object
1093 * @ptr: __percpu pointer to beginning of the object
1094 * @size: size of the object
1097 * This function is called from the kernel percpu allocator when a new object
1111 * kmemleak_vmalloc - register a newly vmalloc'ed object
1113 * @size: size of the object
1117 * object (memory block) is allocated.
1136 * kmemleak_free - unregister a previously registered object
1137 * @ptr: pointer to beginning of the object
1139 * This function is called from the kernel allocators when an object (memory
1152 * kmemleak_free_part - partially unregister a previously registered object
1153 * @ptr: pointer to the beginning or inside the object. This also
1170 * kmemleak_free_percpu - unregister a previously registered __percpu object
1171 * @ptr: __percpu pointer to beginning of the object
1173 * This function is called from the kernel percpu allocator when an object
1186 * kmemleak_update_trace - update object allocation stack trace
1187 * @ptr: pointer to beginning of the object
1189 * Override the object allocation stack trace for cases where the actual
1194 struct kmemleak_object *object; in kmemleak_update_trace() local
1203 object = find_and_get_object((unsigned long)ptr, 1); in kmemleak_update_trace()
1204 if (!object) { in kmemleak_update_trace()
1206 kmemleak_warn("Updating stack trace for unknown object at %p\n", in kmemleak_update_trace()
1213 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_update_trace()
1214 object->trace_handle = trace_handle; in kmemleak_update_trace()
1215 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_update_trace()
1217 put_object(object); in kmemleak_update_trace()
1222 * kmemleak_not_leak - mark an allocated object as false positive
1223 * @ptr: pointer to beginning of the object
1225 * Calling this function on an object will cause the memory block to no longer
1238 * kmemleak_transient_leak - mark an allocated object as transient false positive
1239 * @ptr: pointer to beginning of the object
1241 * Calling this function on an object will cause the memory block temporarily
1242 * not to be reported as a leak. This may happen, for example, if the object
1255 * kmemleak_ignore - ignore an allocated object
1256 * @ptr: pointer to beginning of the object
1258 * Calling this function on an object will cause the memory block to be
1273 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1274 * @ptr: pointer to the beginning or inside the object. This also
1279 * This function is used when it is known that only certain parts of an object
1293 * kmemleak_no_scan - do not scan an allocated object
1294 * @ptr: pointer to beginning of the object
1297 * in situations where it is known that the given object does not contain any
1313 * @phys: physical address of the object
1314 * @size: size of the object
1323 * Create object with OBJECT_PHYS flag and in kmemleak_alloc_phys()
1333 * @phys: physical address of the beginning or inside an object. This
1349 * @phys: physical address of the object
1361 * Update an object's checksum and return true if the object's contents changed.
1363 static bool update_checksum(struct kmemleak_object *object) in update_checksum() argument
1365 u32 old_csum = object->checksum; in update_checksum()
1367 if (WARN_ON_ONCE(object->flags & OBJECT_PHYS)) in update_checksum()
1372 if (object->flags & OBJECT_PERCPU) { in update_checksum()
1375 object->checksum = 0; in update_checksum()
1377 void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu); in update_checksum()
1379 object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size); in update_checksum()
1382 object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size); in update_checksum()
1387 return object->checksum != old_csum; in update_checksum()
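The checksum is how kmemleak tells a settled object from one still being written to: a white object whose CRC keeps changing gets another scan cycle instead of being reported. A userspace sketch using zlib's crc32() in place of the kernel's (build with -lz); the OBJECT_PHYS and per-CPU special cases above are skipped:

    #include <stdbool.h>
    #include <zlib.h>

    struct obj {
        unsigned char data[32];
        unsigned long checksum;
    };

    /* returns true if the object's contents changed since the last pass */
    static bool update_checksum(struct obj *o)
    {
        unsigned long old = o->checksum;

        o->checksum = crc32(0L, o->data, sizeof(o->data));
        return o->checksum != old;
    }

    int main(void)
    {
        struct obj o = { .checksum = 0 };

        update_checksum(&o);        /* first pass seeds the checksum */
        o.data[0] = 1;              /* object is still being written to */
        return update_checksum(&o); /* true: defer reporting this object */
    }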
1391 * Update an object's references. object->lock must be held by the caller.
1393 static void update_refs(struct kmemleak_object *object) in update_refs() argument
1395 if (!color_white(object)) { in update_refs()
1401 * Increase the object's reference count (number of pointers to the in update_refs()
1403 * object's color will become gray and it will be added to the in update_refs()
1406 object->count++; in update_refs()
1407 if (color_gray(object)) { in update_refs()
1409 WARN_ON(!get_object(object)); in update_refs()
1410 list_add_tail(&object->gray_list, &gray_list); in update_refs()
1417 struct kmemleak_object *object; in pointer_update_refs() local
1432 * object->use_count cannot be dropped to 0 while the object in pointer_update_refs()
1436 object = __lookup_object(pointer, 1, objflags); in pointer_update_refs()
1437 if (!object) in pointer_update_refs()
1439 if (object == scanned) in pointer_update_refs()
1444 * Avoid the lockdep recursive warning on object->lock being in pointer_update_refs()
1448 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in pointer_update_refs()
1449 /* only pass surplus references (object already gray) */ in pointer_update_refs()
1450 if (color_gray(object)) { in pointer_update_refs()
1451 excess_ref = object->excess_ref; in pointer_update_refs()
1452 /* no need for update_refs() if object already gray */ in pointer_update_refs()
1455 update_refs(object); in pointer_update_refs()
1457 raw_spin_unlock(&object->lock); in pointer_update_refs()
1460 object = lookup_object(excess_ref, 0); in pointer_update_refs()
1461 if (!object) in pointer_update_refs()
1463 if (object == scanned) in pointer_update_refs()
1466 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in pointer_update_refs()
1467 update_refs(object); in pointer_update_refs()
1468 raw_spin_unlock(&object->lock); in pointer_update_refs()
1541 * that object->use_count >= 1.
1543 static void scan_object(struct kmemleak_object *object) in scan_object() argument
1549 * Once the object->lock is acquired, the corresponding memory block in scan_object()
1552 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1553 if (object->flags & OBJECT_NO_SCAN) in scan_object()
1555 if (!(object->flags & OBJECT_ALLOCATED)) in scan_object()
1556 /* already freed object */ in scan_object()
1559 if (object->flags & OBJECT_PERCPU) { in scan_object()
1563 void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu); in scan_object()
1564 void *end = start + object->size; in scan_object()
1566 scan_block(start, end, object); in scan_object()
1568 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1570 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1571 if (!(object->flags & OBJECT_ALLOCATED)) in scan_object()
1574 } else if (hlist_empty(&object->area_list) || in scan_object()
1575 object->flags & OBJECT_FULL_SCAN) { in scan_object()
1576 void *start = object->flags & OBJECT_PHYS ? in scan_object()
1577 __va((phys_addr_t)object->pointer) : in scan_object()
1578 (void *)object->pointer; in scan_object()
1579 void *end = start + object->size; in scan_object()
1584 scan_block(start, next, object); in scan_object()
1590 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1592 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1593 } while (object->flags & OBJECT_ALLOCATED); in scan_object()
1595 hlist_for_each_entry(area, &object->area_list, node) in scan_object()
1598 object); in scan_object()
1601 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
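scan_object() above boils down to reading a block one pointer-sized word at a time and crediting any object whose address range contains the value read. A self-contained sketch of that inner step, with a linear search standing in for the boundary-tree lookup and none of the locking:

    #include <stddef.h>
    #include <stdint.h>

    struct obj {
        uintptr_t start;    /* [start, start + size) */
        size_t size;
        int count;          /* references found so far */
    };

    static void scan_block(const void *ptr, const void *end,
                           struct obj *objs, size_t nr_objs)
    {
        for (const uintptr_t *p = ptr; (const void *)(p + 1) <= end; p++) {
            uintptr_t value = *p;

            /* linear search; the kernel looks the value up in a tree */
            for (size_t i = 0; i < nr_objs; i++)
                if (value >= objs[i].start &&
                    value < objs[i].start + objs[i].size)
                    objs[i].count++;
        }
    }

    int main(void)
    {
        static int block;                       /* a tracked "allocation" */
        void *slot = &block;                    /* a root pointing at it */
        struct obj objs[1] = { { (uintptr_t)&block, sizeof(block), 0 } };

        scan_block(&slot, &slot + 1, objs, 1);
        return objs[0].count == 1 ? 0 : 1;      /* one reference found */
    }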
1610 struct kmemleak_object *object, *tmp; in scan_gray_list() local
1617 object = list_entry(gray_list.next, typeof(*object), gray_list); in scan_gray_list()
1618 while (&object->gray_list != &gray_list) { in scan_gray_list()
1623 scan_object(object); in scan_gray_list()
1625 tmp = list_entry(object->gray_list.next, typeof(*object), in scan_gray_list()
1628 /* remove the object from the list and release it */ in scan_gray_list()
1629 list_del(&object->gray_list); in scan_gray_list()
1630 put_object(object); in scan_gray_list()
1632 object = tmp; in scan_gray_list()
1638 * Conditionally call cond_resched() in an object iteration loop while making sure
1639 * that the given object won't go away when the RCU read lock is dropped, by performing a
1642 static void kmemleak_cond_resched(struct kmemleak_object *object) in kmemleak_cond_resched() argument
1644 if (!get_object(object)) in kmemleak_cond_resched()
1645 return; /* Try next object */ in kmemleak_cond_resched()
1648 if (object->del_state & DELSTATE_REMOVED) in kmemleak_cond_resched()
1649 goto unlock_put; /* Object removed */ in kmemleak_cond_resched()
1650 object->del_state |= DELSTATE_NO_DELETE; in kmemleak_cond_resched()
1658 if (object->del_state & DELSTATE_REMOVED) in kmemleak_cond_resched()
1659 list_del_rcu(&object->object_list); in kmemleak_cond_resched()
1660 object->del_state &= ~DELSTATE_NO_DELETE; in kmemleak_cond_resched()
1663 put_object(object); in kmemleak_cond_resched()
1673 struct kmemleak_object *object; in kmemleak_scan() local
1682 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1683 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1687 * 1 reference to any object at this point. in kmemleak_scan()
1689 if (atomic_read(&object->use_count) > 1) { in kmemleak_scan()
1690 pr_debug("object->use_count = %d\n", in kmemleak_scan()
1691 atomic_read(&object->use_count)); in kmemleak_scan()
1692 dump_object_info(object); in kmemleak_scan()
1697 if ((object->flags & OBJECT_PHYS) && in kmemleak_scan()
1698 !(object->flags & OBJECT_NO_SCAN)) { in kmemleak_scan()
1699 unsigned long phys = object->pointer; in kmemleak_scan()
1702 PHYS_PFN(phys + object->size) > max_low_pfn) in kmemleak_scan()
1703 __paint_it(object, KMEMLEAK_BLACK); in kmemleak_scan()
1706 /* reset the reference count (whiten the object) */ in kmemleak_scan()
1707 object->count = 0; in kmemleak_scan()
1708 if (color_gray(object) && get_object(object)) in kmemleak_scan()
1709 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1711 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1714 kmemleak_cond_resched(object); in kmemleak_scan()
1782 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1784 kmemleak_cond_resched(object); in kmemleak_scan()
1791 if (!color_white(object)) in kmemleak_scan()
1793 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1794 if (color_white(object) && (object->flags & OBJECT_ALLOCATED) in kmemleak_scan()
1795 && update_checksum(object) && get_object(object)) { in kmemleak_scan()
1797 object->count = object->min_count; in kmemleak_scan()
1798 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1800 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1819 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1821 kmemleak_cond_resched(object); in kmemleak_scan()
1828 if (!color_white(object)) in kmemleak_scan()
1830 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1831 if (unreferenced_object(object) && in kmemleak_scan()
1832 !(object->flags & OBJECT_REPORTED)) { in kmemleak_scan()
1833 object->flags |= OBJECT_REPORTED; in kmemleak_scan()
1836 print_unreferenced(NULL, object); in kmemleak_scan()
1840 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1918 * Iterate over the object_list and return the first valid object at or after
1924 struct kmemleak_object *object; in kmemleak_seq_start() local
1933 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_seq_start()
1936 if (get_object(object)) in kmemleak_seq_start()
1939 object = NULL; in kmemleak_seq_start()
1941 return object; in kmemleak_seq_start()
1945 * Return the next object in the object_list. The function decrements the
1946 * use_count of the previous object and increases that of the next one.
1968 * Decrement the use_count of the last object returned, if any.
1985 * Print the information for an unreferenced object to the seq file.
1989 struct kmemleak_object *object = v; in kmemleak_seq_show() local
1992 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_seq_show()
1993 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) in kmemleak_seq_show()
1994 print_unreferenced(seq, object); in kmemleak_seq_show()
1995 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_seq_show()
2014 struct kmemleak_object *object; in __dump_str_object_info() local
2016 object = __find_and_get_object(addr, 1, objflags); in __dump_str_object_info()
2017 if (!object) in __dump_str_object_info()
2020 raw_spin_lock_irqsave(&object->lock, flags); in __dump_str_object_info()
2021 dump_object_info(object); in __dump_str_object_info()
2022 raw_spin_unlock_irqrestore(&object->lock, flags); in __dump_str_object_info()
2024 put_object(object); in __dump_str_object_info()
2042 pr_info("Unknown object at 0x%08lx\n", addr); in dump_str_object_info()
2057 struct kmemleak_object *object; in kmemleak_clear() local
2060 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_clear()
2061 raw_spin_lock_irq(&object->lock); in kmemleak_clear()
2062 if ((object->flags & OBJECT_REPORTED) && in kmemleak_clear()
2063 unreferenced_object(object)) in kmemleak_clear()
2064 __paint_it(object, KMEMLEAK_GREY); in kmemleak_clear()
2065 raw_spin_unlock_irq(&object->lock); in kmemleak_clear()
2088 * dump=... - dump information about the object found at the given address
2174 struct kmemleak_object *object, *tmp; in __kmemleak_do_cleanup() local
2180 list_for_each_entry_safe(object, tmp, &object_list, object_list) { in __kmemleak_do_cleanup()
2181 __remove_object(object); in __kmemleak_do_cleanup()
2182 __delete_object(object); in __kmemleak_do_cleanup()
2198 * longer track object freeing. Ordering of the scan thread stopping and in kmemleak_do_cleanup()