Lines Matching full:object
17 * del_state modifications and accesses to the object trees
20 * kmemleak_object) for the allocated memory blocks. The object trees are
23 * the object_list and the object tree root in the create_object() function
38 * Note that the kmemleak_object.use_count is incremented when an object is
46 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
48 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
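This hierarchy is what lets scan_block() (lines 1383-1436 below) hold two object locks at once: the caller already owns scanned->lock under scan_mutex, and each candidate's lock is taken nested. A minimal sketch of that pattern, assembled from the fragments in this listing:

    raw_spin_lock_irqsave(&kmemleak_lock, flags);   /* protects the object trees */
    object = __lookup_object(pointer, 1, 0);        /* candidate reference */
    if (object && object != scanned) {
            /*
             * A second lock of the same class: SINGLE_DEPTH_NESTING
             * tells lockdep this nesting is intentional.
             */
            raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
            update_refs(object);
            raw_spin_unlock(&object->lock);
    }
    raw_spin_unlock_irqrestore(&kmemleak_lock, flags);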
110 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
136 * object->lock. Insertions or deletions from object_list, gray_list or
143 unsigned int flags; /* object status flags */
148 /* object usage count; object freed when use_count == 0 */
157 /* the total number of pointers found pointing to this object */
161 /* memory ranges to be scanned inside an object (empty for all) */
171 /* flag set after the first reporting of an unreferenced object */
173 /* flag set to not scan the object */
175 /* flag set to fully scan the object when scan_area allocation failed */
177 /* flag set for object allocated with physical address */
205 /* search tree for object boundaries */
207 /* search tree for object (with OBJECT_PHYS flag) boundaries */
209 /* search tree for object (with OBJECT_PERCPU flag) boundaries */
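The three roots are chosen by the object_tree() selector used at lines 585 and 732 below. Its body is not among the matches, so the following is a plausible reconstruction, assuming the conventional root names object_tree_root, object_phys_tree_root and object_percpu_tree_root:

    static struct rb_root *object_tree(unsigned int objflags)
    {
            if (objflags & OBJECT_PHYS)         /* physical-address objects */
                    return &object_phys_tree_root;
            if (objflags & OBJECT_PERCPU)       /* per-CPU objects */
                    return &object_percpu_tree_root;
            return &object_tree_root;           /* ordinary virtual objects */
    }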
295 * with the object->lock held.
298 struct kmemleak_object *object) in hex_dump_object() argument
300 const u8 *ptr = (const u8 *)object->pointer; in hex_dump_object()
303 if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU))) in hex_dump_object()
307 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); in hex_dump_object()
317 * Object colors, encoded with count and min_count:
318 * - white - orphan object, not enough references to it (count < min_count)
323 * Newly created objects don't have any color assigned (object->count == -1)
326 static bool color_white(const struct kmemleak_object *object) in color_white() argument
328 return object->count != KMEMLEAK_BLACK && in color_white()
329 object->count < object->min_count; in color_white()
332 static bool color_gray(const struct kmemleak_object *object) in color_gray() argument
334 return object->min_count != KMEMLEAK_BLACK && in color_gray()
335 object->count >= object->min_count; in color_gray()
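A worked example of the encoding, assuming the usual values KMEMLEAK_GREY == 0 and KMEMLEAK_BLACK == -1 (the definitions are not among the matches):

    struct kmemleak_object o = { .min_count = 1, .count = 0 };

    /* color_white(&o) is true: 0 < 1, a candidate leak              */
    o.count = 1;
    /* color_gray(&o) is now true: referenced, followed by the scan  */

    o.min_count = KMEMLEAK_GREY;   /* 0: count >= 0 always, gray forever   */
    o.min_count = KMEMLEAK_BLACK;  /* -1: never white, never gray, ignored */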
343 static bool unreferenced_object(struct kmemleak_object *object) in unreferenced_object() argument
345 return (color_white(object) && object->flags & OBJECT_ALLOCATED) && in unreferenced_object()
346 time_before_eq(object->jiffies + jiffies_min_age, in unreferenced_object()
352 * print_unreferenced function must be called with the object->lock held.
355 struct kmemleak_object *object) in print_unreferenced() argument
361 nr_entries = stack_depot_fetch(object->trace_handle, &entries); in print_unreferenced()
362 warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", in print_unreferenced()
363 object->pointer, object->size); in print_unreferenced()
365 object->comm, object->pid, object->jiffies); in print_unreferenced()
366 hex_dump_object(seq, object); in print_unreferenced()
367 warn_or_seq_printf(seq, " backtrace (crc %x):\n", object->checksum); in print_unreferenced()
378 * the object->lock held.
380 static void dump_object_info(struct kmemleak_object *object) in dump_object_info() argument
382 pr_notice("Object 0x%08lx (size %zu):\n", in dump_object_info()
383 object->pointer, object->size); in dump_object_info()
385 object->comm, object->pid, object->jiffies); in dump_object_info()
386 pr_notice(" min_count = %d\n", object->min_count); in dump_object_info()
387 pr_notice(" count = %d\n", object->count); in dump_object_info()
388 pr_notice(" flags = 0x%x\n", object->flags); in dump_object_info()
389 pr_notice(" checksum = %u\n", object->checksum); in dump_object_info()
391 if (object->trace_handle) in dump_object_info()
392 stack_depot_print(object->trace_handle); in dump_object_info()
405 * Look up the metadata (kmemleak_object) of a memory block in the object search
417 struct kmemleak_object *object; in __lookup_object() local
420 object = rb_entry(rb, struct kmemleak_object, rb_node); in __lookup_object()
421 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); in __lookup_object()
424 rb = object->rb_node.rb_left; in __lookup_object()
425 else if (untagged_objp + object->size <= untagged_ptr) in __lookup_object()
426 rb = object->rb_node.rb_right; in __lookup_object()
428 return object; in __lookup_object()
430 kmemleak_warn("Found object by alias at 0x%08lx\n", in __lookup_object()
432 dump_object_info(object); in __lookup_object()
439 /* Look up a kmemleak object which was allocated with a virtual address. */
446 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
447 * that once an object's use_count has reached 0, RCU freeing has already been
448 * registered and the object must no longer be used. This function must be
451 static int get_object(struct kmemleak_object *object) in get_object() argument
453 return atomic_inc_not_zero(&object->use_count); in get_object()
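Putting get_object()/put_object() together with __find_and_get_object() (lines 552-570 below), the caller-side pattern is roughly this sketch:

    rcu_read_lock();
    raw_spin_lock_irqsave(&kmemleak_lock, flags);
    object = __lookup_object(ptr, alias, objflags);
    raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

    /* use_count == 0 means RCU freeing is already queued */
    if (object && !get_object(object))
            object = NULL;
    rcu_read_unlock();

    if (object) {
            /* ... inspect the object, usually under object->lock ... */
            put_object(object);     /* last put triggers free_object_rcu() */
    }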
462 struct kmemleak_object *object; in mem_pool_alloc() local
466 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); in mem_pool_alloc()
467 if (object) in mem_pool_alloc()
468 return object; in mem_pool_alloc()
473 object = list_first_entry_or_null(&mem_pool_free_list, in mem_pool_alloc()
474 typeof(*object), object_list); in mem_pool_alloc()
475 if (object) in mem_pool_alloc()
476 list_del(&object->object_list); in mem_pool_alloc()
478 object = &mem_pool[--mem_pool_free_count]; in mem_pool_alloc()
483 return object; in mem_pool_alloc()
487 * Return the object to either the slab allocator or the memory pool.
489 static void mem_pool_free(struct kmemleak_object *object) in mem_pool_free() argument
493 if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) { in mem_pool_free()
494 kmem_cache_free(object_cache, object); in mem_pool_free()
498 /* add the object to the memory pool free list */ in mem_pool_free()
500 list_add(&object->object_list, &mem_pool_free_list); in mem_pool_free()
511 struct kmemleak_object *object = in free_object_rcu() local
516 * code accessing this object, hence no need for locking. in free_object_rcu()
518 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { in free_object_rcu()
522 mem_pool_free(object); in free_object_rcu()
526 * Decrement the object use_count. Once the count is 0, free the object using
532 static void put_object(struct kmemleak_object *object) in put_object() argument
534 if (!atomic_dec_and_test(&object->use_count)) in put_object()
538 WARN_ON(object->flags & OBJECT_ALLOCATED); in put_object()
543 * came from the memory pool. Free the object directly. in put_object()
546 call_rcu(&object->rcu, free_object_rcu); in put_object()
548 free_object_rcu(&object->rcu); in put_object()
552 * Look up an object in the object search tree and increase its use_count.
558 struct kmemleak_object *object; in __find_and_get_object() local
562 object = __lookup_object(ptr, alias, objflags); in __find_and_get_object()
565 /* check whether the object is still available */ in __find_and_get_object()
566 if (object && !get_object(object)) in __find_and_get_object()
567 object = NULL; in __find_and_get_object()
570 return object; in __find_and_get_object()
573 /* Look up and get an object which was allocated with a virtual address. */
580 * Remove an object from its object tree and object_list. Must be called with
583 static void __remove_object(struct kmemleak_object *object) in __remove_object() argument
585 rb_erase(&object->rb_node, object_tree(object->flags)); in __remove_object()
586 if (!(object->del_state & DELSTATE_NO_DELETE)) in __remove_object()
587 list_del_rcu(&object->object_list); in __remove_object()
588 object->del_state |= DELSTATE_REMOVED; in __remove_object()
595 struct kmemleak_object *object; in __find_and_remove_object() local
597 object = __lookup_object(ptr, alias, objflags); in __find_and_remove_object()
598 if (object) in __find_and_remove_object()
599 __remove_object(object); in __find_and_remove_object()
601 return object; in __find_and_remove_object()
605 * Look up an object in the object search tree and remove it from both the
606 * object tree and the object_list. The returned object's use_count should be at
613 struct kmemleak_object *object; in find_and_remove_object() local
616 object = __find_and_remove_object(ptr, alias, objflags); in find_and_remove_object()
619 return object; in find_and_remove_object()
643 struct kmemleak_object *object; in __alloc_object() local
645 object = mem_pool_alloc(gfp); in __alloc_object()
646 if (!object) { in __alloc_object()
652 INIT_LIST_HEAD(&object->object_list); in __alloc_object()
653 INIT_LIST_HEAD(&object->gray_list); in __alloc_object()
654 INIT_HLIST_HEAD(&object->area_list); in __alloc_object()
655 raw_spin_lock_init(&object->lock); in __alloc_object()
656 atomic_set(&object->use_count, 1); in __alloc_object()
657 object->excess_ref = 0; in __alloc_object()
658 object->count = 0; /* white color initially */ in __alloc_object()
659 object->checksum = 0; in __alloc_object()
660 object->del_state = 0; in __alloc_object()
664 object->pid = 0; in __alloc_object()
665 strncpy(object->comm, "hardirq", sizeof(object->comm)); in __alloc_object()
667 object->pid = 0; in __alloc_object()
668 strncpy(object->comm, "softirq", sizeof(object->comm)); in __alloc_object()
670 object->pid = current->pid; in __alloc_object()
677 strncpy(object->comm, current->comm, sizeof(object->comm)); in __alloc_object()
681 object->trace_handle = set_track_prepare(); in __alloc_object()
683 return object; in __alloc_object()
686 static int __link_object(struct kmemleak_object *object, unsigned long ptr, in __link_object() argument
695 object->flags = OBJECT_ALLOCATED | objflags; in __link_object()
696 object->pointer = ptr; in __link_object()
697 object->size = kfence_ksize((void *)ptr) ?: size; in __link_object()
698 object->min_count = min_count; in __link_object()
699 object->jiffies = jiffies; in __link_object()
703 * Only update min_addr and max_addr with object in __link_object()
721 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n", in __link_object()
731 rb_link_node(&object->rb_node, rb_parent, link); in __link_object()
732 rb_insert_color(&object->rb_node, object_tree(objflags)); in __link_object()
733 list_add_tail_rcu(&object->object_list, &object_list); in __link_object()
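The descent computing link and rb_parent is elided from the matches; by symmetry with __lookup_object() (lines 417-432 above) it plausibly reads:

    link = &object_tree(objflags)->rb_node;
    rb_parent = NULL;
    while (*link) {
            rb_parent = *link;
            parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
            untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
            if (untagged_ptr + size <= untagged_objp)
                    link = &parent->rb_node.rb_left;
            else if (untagged_objp + parent->size <= untagged_ptr)
                    link = &parent->rb_node.rb_right;
            else
                    return -EEXIST; /* overlap: the kmemleak_stop() at line 721 */
    }
    rb_link_node(&object->rb_node, rb_parent, link);
    rb_insert_color(&object->rb_node, object_tree(objflags));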
740 * memory block and add it to the object_list and object tree.
745 struct kmemleak_object *object; in __create_object() local
749 object = __alloc_object(gfp); in __create_object()
750 if (!object) in __create_object()
754 ret = __link_object(object, ptr, size, min_count, objflags); in __create_object()
757 mem_pool_free(object); in __create_object()
760 /* Create a kmemleak object for memory allocated with a virtual address. */
767 /* Create a kmemleak object for memory allocated with a physical address. */
774 /* Create kmemleak object corresponding to a per-CPU allocation. */
782 * Mark the object as not allocated and schedule RCU freeing via put_object().
784 static void __delete_object(struct kmemleak_object *object) in __delete_object() argument
788 WARN_ON(!(object->flags & OBJECT_ALLOCATED)); in __delete_object()
789 WARN_ON(atomic_read(&object->use_count) < 1); in __delete_object()
795 raw_spin_lock_irqsave(&object->lock, flags); in __delete_object()
796 object->flags &= ~OBJECT_ALLOCATED; in __delete_object()
797 raw_spin_unlock_irqrestore(&object->lock, flags); in __delete_object()
798 put_object(object); in __delete_object()
807 struct kmemleak_object *object; in delete_object_full() local
809 object = find_and_remove_object(ptr, 0, objflags); in delete_object_full()
810 if (!object) { in delete_object_full()
812 kmemleak_warn("Freeing unknown object at 0x%08lx\n", in delete_object_full()
817 __delete_object(object); in delete_object_full()
828 struct kmemleak_object *object, *object_l, *object_r; in delete_object_part() local
840 object = __find_and_remove_object(ptr, 1, objflags); in delete_object_part()
841 if (!object) { in delete_object_part()
843 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n", in delete_object_part()
854 start = object->pointer; in delete_object_part()
855 end = object->pointer + object->size; in delete_object_part()
858 object->min_count, objflags)) in delete_object_part()
862 object->min_count, objflags)) in delete_object_part()
867 if (object) in delete_object_part()
868 __delete_object(object); in delete_object_part()
877 static void __paint_it(struct kmemleak_object *object, int color) in __paint_it() argument
879 object->min_count = color; in __paint_it()
881 object->flags |= OBJECT_NO_SCAN; in __paint_it()
884 static void paint_it(struct kmemleak_object *object, int color) in paint_it() argument
888 raw_spin_lock_irqsave(&object->lock, flags); in paint_it()
889 __paint_it(object, color); in paint_it()
890 raw_spin_unlock_irqrestore(&object->lock, flags); in paint_it()
895 struct kmemleak_object *object; in paint_ptr() local
897 object = __find_and_get_object(ptr, 0, objflags); in paint_ptr()
898 if (!object) { in paint_ptr()
899 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n", in paint_ptr()
905 paint_it(object, color); in paint_ptr()
906 put_object(object); in paint_ptr()
910 * Mark an object permanently as gray-colored so that it can no longer be
919 * Mark the object as black-colored so that it is excluded from scans and
928 * Add a scanning area to the object. If at least one such area is added,
934 struct kmemleak_object *object; in add_scan_area() local
939 object = find_and_get_object(ptr, 1); in add_scan_area()
940 if (!object) { in add_scan_area()
941 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n", in add_scan_area()
947 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); in add_scan_area()
952 raw_spin_lock_irqsave(&object->lock, flags); in add_scan_area()
954 pr_warn_once("Cannot allocate a scan area, scanning the full object\n"); in add_scan_area()
955 /* mark the object for full scan to avoid false positives */ in add_scan_area()
956 object->flags |= OBJECT_FULL_SCAN; in add_scan_area()
960 size = untagged_objp + object->size - untagged_ptr; in add_scan_area()
961 } else if (untagged_ptr + size > untagged_objp + object->size) { in add_scan_area()
962 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); in add_scan_area()
963 dump_object_info(object); in add_scan_area()
972 hlist_add_head(&area->node, &object->area_list); in add_scan_area()
974 raw_spin_unlock_irqrestore(&object->lock, flags); in add_scan_area()
975 put_object(object); in add_scan_area()
979 * Any surplus references (object already gray) to 'ptr' are passed to
981 * vm_struct may be used as an alternative reference to the vmalloc'ed object
987 struct kmemleak_object *object; in object_set_excess_ref() local
989 object = find_and_get_object(ptr, 0); in object_set_excess_ref()
990 if (!object) { in object_set_excess_ref()
991 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n", in object_set_excess_ref()
996 raw_spin_lock_irqsave(&object->lock, flags); in object_set_excess_ref()
997 object->excess_ref = excess_ref; in object_set_excess_ref()
998 raw_spin_unlock_irqrestore(&object->lock, flags); in object_set_excess_ref()
999 put_object(object); in object_set_excess_ref()
1003 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
1004 * pointer. Such an object will not be scanned by kmemleak but references to it
1010 struct kmemleak_object *object; in object_no_scan() local
1012 object = find_and_get_object(ptr, 0); in object_no_scan()
1013 if (!object) { in object_no_scan()
1014 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr); in object_no_scan()
1018 raw_spin_lock_irqsave(&object->lock, flags); in object_no_scan()
1019 object->flags |= OBJECT_NO_SCAN; in object_no_scan()
1020 raw_spin_unlock_irqrestore(&object->lock, flags); in object_no_scan()
1021 put_object(object); in object_no_scan()
1025 * kmemleak_alloc - register a newly allocated object
1026 * @ptr: pointer to beginning of the object
1027 * @size: size of the object
1028 * @min_count: minimum number of references to this object. If during memory
1030 * the object is reported as a memory leak. If @min_count is 0,
1031 * the object is never reported as a leak. If @min_count is -1,
1032 * the object is ignored (not scanned and not reported as a leak)
1035 * This function is called from the kernel allocators when a new object
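A hedged usage sketch for a custom allocator (the my_pool_* helpers are hypothetical; the kmemleak_alloc() signature is the real one from <linux/kmemleak.h>):

    #include <linux/kmemleak.h>
    #include <linux/gfp.h>

    void *my_pool_alloc(size_t size)                /* hypothetical */
    {
            void *ptr = my_pool_carve(size);        /* hypothetical */

            if (ptr)
                    /* min_count = 1: report as a leak if unreferenced */
                    kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
            return ptr;
    }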
1049 * kmemleak_alloc_percpu - register a newly allocated __percpu object
1050 * @ptr: __percpu pointer to beginning of the object
1051 * @size: size of the object
1054 * This function is called from the kernel percpu allocator when a new object
1072 * kmemleak_vmalloc - register a newly vmalloc'ed object
1074 * @size: size of the object
1078 * object (memory block) is allocated.
1097 * kmemleak_free - unregister a previously registered object
1098 * @ptr: pointer to beginning of the object
1100 * This function is called from the kernel allocators when an object (memory
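The matching unregistration, continuing the hypothetical allocator above; the object must be dropped before the block is recycled so a concurrent scan never sees stale metadata:

    void my_pool_free(void *ptr)                    /* hypothetical */
    {
            kmemleak_free(ptr);             /* delete the kmemleak object */
            my_pool_return(ptr);            /* hypothetical: recycle block */
    }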
1113 * kmemleak_free_part - partially unregister a previously registered object
1114 * @ptr: pointer to the beginning or inside the object. This also
1131 * kmemleak_free_percpu - unregister a previously registered __percpu object
1132 * @ptr: __percpu pointer to beginning of the object
1134 * This function is called from the kernel percpu allocator when an object
1147 * kmemleak_update_trace - update object allocation stack trace
1148 * @ptr: pointer to beginning of the object
1150 * Override the object allocation stack trace for cases where the actual
1155 struct kmemleak_object *object; in kmemleak_update_trace() local
1164 object = find_and_get_object((unsigned long)ptr, 1); in kmemleak_update_trace()
1165 if (!object) { in kmemleak_update_trace()
1167 kmemleak_warn("Updating stack trace for unknown object at %p\n", in kmemleak_update_trace()
1174 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_update_trace()
1175 object->trace_handle = trace_handle; in kmemleak_update_trace()
1176 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_update_trace()
1178 put_object(object); in kmemleak_update_trace()
1183 * kmemleak_not_leak - mark an allocated object as false positive
1184 * @ptr: pointer to beginning of the object
1186 * Calling this function on an object will cause the memory block to no longer
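Typical use is an allocation whose only reference lives where the scanner never looks, for instance handed to hardware; a sketch:

    obj = kmalloc(sizeof(*obj), GFP_KERNEL);
    if (!obj)
            return -ENOMEM;
    /*
     * The only pointer to obj is written to a device register, so no
     * in-memory reference exists and kmemleak would report a false
     * positive without this annotation.
     */
    kmemleak_not_leak(obj);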
1199 * kmemleak_ignore - ignore an allocated object
1200 * @ptr: pointer to beginning of the object
1202 * Calling this function on an object will cause the memory block to be
1217 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1218 * @ptr: pointer to beginning or inside the object. This also
1223 * This function is used when it is known that only certain parts of an object
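For a large object with a single embedded pointer, restricting the scan saves time and stops raw payload bytes being misread as references; a sketch with a hypothetical layout:

    struct big {
            char buf[4096];         /* raw payload, never holds pointers  */
            struct big *next;       /* the only reference worth following */
    };

    struct big *b = kmalloc(sizeof(*b), GFP_KERNEL);

    /* from now on only &b->next is scanned inside this object */
    kmemleak_scan_area(&b->next, sizeof(b->next), GFP_KERNEL);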
1237 * kmemleak_no_scan - do not scan an allocated object
1238 * @ptr: pointer to beginning of the object
1241 * in situations where it is known that the given object does not contain any
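Data-only buffers are the usual target: scanning them can only produce accidental bit patterns that look like pointers and hide real leaks (false negatives). A sketch, IMG_SIZE being hypothetical:

    u8 *frame = kmalloc(IMG_SIZE, GFP_KERNEL);
    /* pure pixel data: nothing in here is a kernel pointer */
    kmemleak_no_scan(frame);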
1257 * @phys: physical address of the object
1258 * @size: size of the object
1267 * Create object with OBJECT_PHYS flag and in kmemleak_alloc_phys()
1277 * @phys: physical address of the beginning or inside an object. This
1293 * @phys: physical address of the object
1305 * Update an object's checksum and return true if it was modified.
1307 static bool update_checksum(struct kmemleak_object *object) in update_checksum() argument
1309 u32 old_csum = object->checksum; in update_checksum()
1311 if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU))) in update_checksum()
1316 object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size); in update_checksum()
1320 return object->checksum != old_csum; in update_checksum()
1324 * Update an object's references. object->lock must be held by the caller.
1326 static void update_refs(struct kmemleak_object *object) in update_refs() argument
1328 if (!color_white(object)) { in update_refs()
1334 * Increase the object's reference count (number of pointers to the in update_refs()
1336 * object's color will become gray and it will be added to the in update_refs()
1339 object->count++; in update_refs()
1340 if (color_gray(object)) { in update_refs()
1342 WARN_ON(!get_object(object)); in update_refs()
1343 list_add_tail(&object->gray_list, &gray_list); in update_refs()
1383 struct kmemleak_object *object; in scan_block() local
1400 * object->use_count cannot be dropped to 0 while the object in scan_block()
1404 object = lookup_object(pointer, 1); in scan_block()
1405 if (!object) in scan_block()
1407 if (object == scanned) in scan_block()
1412 * Avoid the lockdep recursive warning on object->lock being in scan_block()
1416 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in scan_block()
1417 /* only pass surplus references (object already gray) */ in scan_block()
1418 if (color_gray(object)) { in scan_block()
1419 excess_ref = object->excess_ref; in scan_block()
1420 /* no need for update_refs() if object already gray */ in scan_block()
1423 update_refs(object); in scan_block()
1425 raw_spin_unlock(&object->lock); in scan_block()
1428 object = lookup_object(excess_ref, 0); in scan_block()
1429 if (!object) in scan_block()
1431 if (object == scanned) in scan_block()
1434 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in scan_block()
1435 update_refs(object); in scan_block()
1436 raw_spin_unlock(&object->lock); in scan_block()
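The matches above show only the per-candidate handling; the enclosing loop presumably walks the block one word at a time under kmemleak_lock (a reconstruction, alignment and KASAN tag details simplified):

    unsigned long *ptr;
    unsigned long flags;

    raw_spin_lock_irqsave(&kmemleak_lock, flags);
    for (ptr = start; ptr < end; ptr++) {
            unsigned long pointer = *ptr;
            unsigned long untagged_ptr =
                    (unsigned long)kasan_reset_tag((void *)pointer);

            /* quick reject: outside the span covered by any object */
            if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
                    continue;
            /* ... lookup_object()/update_refs() as in lines 1404-1436 ... */
    }
    raw_spin_unlock_irqrestore(&kmemleak_lock, flags);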
1461 * that object->use_count >= 1.
1463 static void scan_object(struct kmemleak_object *object) in scan_object() argument
1469 * Once the object->lock is acquired, the corresponding memory block in scan_object()
1472 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1473 if (object->flags & OBJECT_NO_SCAN) in scan_object()
1475 if (!(object->flags & OBJECT_ALLOCATED)) in scan_object()
1476 /* already freed object */ in scan_object()
1479 if (object->flags & OBJECT_PERCPU) { in scan_object()
1483 void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu); in scan_object()
1484 void *end = start + object->size; in scan_object()
1486 scan_block(start, end, object); in scan_object()
1488 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1490 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1491 if (!(object->flags & OBJECT_ALLOCATED)) in scan_object()
1494 } else if (hlist_empty(&object->area_list) || in scan_object()
1495 object->flags & OBJECT_FULL_SCAN) { in scan_object()
1496 void *start = object->flags & OBJECT_PHYS ? in scan_object()
1497 __va((phys_addr_t)object->pointer) : in scan_object()
1498 (void *)object->pointer; in scan_object()
1499 void *end = start + object->size; in scan_object()
1504 scan_block(start, next, object); in scan_object()
1510 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1512 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1513 } while (object->flags & OBJECT_ALLOCATED); in scan_object()
1515 hlist_for_each_entry(area, &object->area_list, node) in scan_object()
1518 object); in scan_object()
1521 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1530 struct kmemleak_object *object, *tmp; in scan_gray_list() local
1537 object = list_entry(gray_list.next, typeof(*object), gray_list); in scan_gray_list()
1538 while (&object->gray_list != &gray_list) { in scan_gray_list()
1543 scan_object(object); in scan_gray_list()
1545 tmp = list_entry(object->gray_list.next, typeof(*object), in scan_gray_list()
1548 /* remove the object from the list and release it */ in scan_gray_list()
1549 list_del(&object->gray_list); in scan_gray_list()
1550 put_object(object); in scan_gray_list()
1552 object = tmp; in scan_gray_list()
1558 * Conditionally call cond_resched() in an object iteration loop while making sure
1559 * that the given object won't go away without the RCU read lock by performing a
1562 static void kmemleak_cond_resched(struct kmemleak_object *object) in kmemleak_cond_resched() argument
1564 if (!get_object(object)) in kmemleak_cond_resched()
1565 return; /* Try next object */ in kmemleak_cond_resched()
1568 if (object->del_state & DELSTATE_REMOVED) in kmemleak_cond_resched()
1569 goto unlock_put; /* Object removed */ in kmemleak_cond_resched()
1570 object->del_state |= DELSTATE_NO_DELETE; in kmemleak_cond_resched()
1578 if (object->del_state & DELSTATE_REMOVED) in kmemleak_cond_resched()
1579 list_del_rcu(&object->object_list); in kmemleak_cond_resched()
1580 object->del_state &= ~DELSTATE_NO_DELETE; in kmemleak_cond_resched()
1583 put_object(object); in kmemleak_cond_resched()
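Joining the fragments, the complete helper plausibly reads as follows; the rcu_read_unlock()/cond_resched()/rcu_read_lock() middle section is not among the matches, so treat this as a reconstruction:

    static void kmemleak_cond_resched(struct kmemleak_object *object)
    {
            if (!get_object(object))
                    return;                 /* try the next object */

            raw_spin_lock_irq(&kmemleak_lock);
            if (object->del_state & DELSTATE_REMOVED)
                    goto unlock_put;        /* already gone from the tree */
            object->del_state |= DELSTATE_NO_DELETE;
            raw_spin_unlock_irq(&kmemleak_lock);

            rcu_read_unlock();
            cond_resched();                 /* safe: object pinned above */
            rcu_read_lock();

            raw_spin_lock_irq(&kmemleak_lock);
            if (object->del_state & DELSTATE_REMOVED)
                    list_del_rcu(&object->object_list);
            object->del_state &= ~DELSTATE_NO_DELETE;
    unlock_put:
            raw_spin_unlock_irq(&kmemleak_lock);
            put_object(object);
    }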
1593 struct kmemleak_object *object; in kmemleak_scan() local
1602 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1603 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1607 * 1 reference to any object at this point. in kmemleak_scan()
1609 if (atomic_read(&object->use_count) > 1) { in kmemleak_scan()
1610 pr_debug("object->use_count = %d\n", in kmemleak_scan()
1611 atomic_read(&object->use_count)); in kmemleak_scan()
1612 dump_object_info(object); in kmemleak_scan()
1617 if ((object->flags & OBJECT_PHYS) && in kmemleak_scan()
1618 !(object->flags & OBJECT_NO_SCAN)) { in kmemleak_scan()
1619 unsigned long phys = object->pointer; in kmemleak_scan()
1622 PHYS_PFN(phys + object->size) >= max_low_pfn) in kmemleak_scan()
1623 __paint_it(object, KMEMLEAK_BLACK); in kmemleak_scan()
1626 /* reset the reference count (whiten the object) */ in kmemleak_scan()
1627 object->count = 0; in kmemleak_scan()
1628 if (color_gray(object) && get_object(object)) in kmemleak_scan()
1629 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1631 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1634 kmemleak_cond_resched(object); in kmemleak_scan()
1702 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1704 kmemleak_cond_resched(object); in kmemleak_scan()
1711 if (!color_white(object)) in kmemleak_scan()
1713 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1714 if (color_white(object) && (object->flags & OBJECT_ALLOCATED) in kmemleak_scan()
1715 && update_checksum(object) && get_object(object)) { in kmemleak_scan()
1717 object->count = object->min_count; in kmemleak_scan()
1718 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1720 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1739 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1741 kmemleak_cond_resched(object); in kmemleak_scan()
1748 if (!color_white(object)) in kmemleak_scan()
1750 raw_spin_lock_irq(&object->lock); in kmemleak_scan()
1751 if (unreferenced_object(object) && in kmemleak_scan()
1752 !(object->flags & OBJECT_REPORTED)) { in kmemleak_scan()
1753 object->flags |= OBJECT_REPORTED; in kmemleak_scan()
1756 print_unreferenced(NULL, object); in kmemleak_scan()
1760 raw_spin_unlock_irq(&object->lock); in kmemleak_scan()
1838 * Iterate over the object_list and return the first valid object at or after
1844 struct kmemleak_object *object; in kmemleak_seq_start() local
1853 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_seq_start()
1856 if (get_object(object)) in kmemleak_seq_start()
1859 object = NULL; in kmemleak_seq_start()
1861 return object; in kmemleak_seq_start()
1865 * Return the next object in the object_list. The function decrements the
1866 * use_count of the previous object and increases that of the next one.
1888 * Decrement the use_count of the last object required, if any.
1905 * Print the information for an unreferenced object to the seq file.
1909 struct kmemleak_object *object = v; in kmemleak_seq_show() local
1912 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_seq_show()
1913 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) in kmemleak_seq_show()
1914 print_unreferenced(seq, object); in kmemleak_seq_show()
1915 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_seq_show()
1934 struct kmemleak_object *object; in dump_str_object_info() local
1939 object = find_and_get_object(addr, 0); in dump_str_object_info()
1940 if (!object) { in dump_str_object_info()
1941 pr_info("Unknown object at 0x%08lx\n", addr); in dump_str_object_info()
1945 raw_spin_lock_irqsave(&object->lock, flags); in dump_str_object_info()
1946 dump_object_info(object); in dump_str_object_info()
1947 raw_spin_unlock_irqrestore(&object->lock, flags); in dump_str_object_info()
1949 put_object(object); in dump_str_object_info()
1961 struct kmemleak_object *object; in kmemleak_clear() local
1964 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_clear()
1965 raw_spin_lock_irq(&object->lock); in kmemleak_clear()
1966 if ((object->flags & OBJECT_REPORTED) && in kmemleak_clear()
1967 unreferenced_object(object)) in kmemleak_clear()
1968 __paint_it(object, KMEMLEAK_GREY); in kmemleak_clear()
1969 raw_spin_unlock_irq(&object->lock); in kmemleak_clear()
1992 * dump=... - dump information about the object found at the given address
2078 struct kmemleak_object *object, *tmp; in __kmemleak_do_cleanup() local
2084 list_for_each_entry_safe(object, tmp, &object_list, object_list) { in __kmemleak_do_cleanup()
2085 __remove_object(object); in __kmemleak_do_cleanup()
2086 __delete_object(object); in __kmemleak_do_cleanup()
2102 * longer track object freeing. Ordering of the scan thread stopping and in kmemleak_do_cleanup()