Lines matching full:object

37 * Note that the kmemleak_object.use_count is incremented when an object is
45 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
47 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
107 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
132 * object->lock. Insertions or deletions from object_list, gray_list or
139 unsigned int flags; /* object status flags */
144 /* object usage count; object freed when use_count == 0 */
152 /* the total number of pointers found pointing to this object */
156 /* memory ranges to be scanned inside an object (empty for all) */
167 /* flag set after the first reporting of an unreferenced object */
169 /* flag set to not scan the object */
171 /* flag set to fully scan the object when scan_area allocation failed */
192 /* search tree for object boundaries */
278 * with the object->lock held.
281 struct kmemleak_object *object) in hex_dump_object() argument
283 const u8 *ptr = (const u8 *)object->pointer; in hex_dump_object()
287 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); in hex_dump_object()
297 * Object colors, encoded with count and min_count:
298 * - white - orphan object, not enough references to it (count < min_count)
303 * Newly created objects don't have any color assigned (object->count == -1)
306 static bool color_white(const struct kmemleak_object *object) in color_white() argument
308 return object->count != KMEMLEAK_BLACK && in color_white()
309 object->count < object->min_count; in color_white()
312 static bool color_gray(const struct kmemleak_object *object) in color_gray() argument
314 return object->min_count != KMEMLEAK_BLACK && in color_gray()
315 object->count >= object->min_count; in color_gray()
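
The two predicates above fully determine the color scheme. As a minimal, userspace-runnable sketch of the same logic (assuming KMEMLEAK_GREY == 0 and KMEMLEAK_BLACK == -1, the values defined in mm/kmemleak.c):

	#include <stdio.h>

	#define KMEMLEAK_GREY	0
	#define KMEMLEAK_BLACK	-1

	struct obj { int count; int min_count; };

	static int is_white(const struct obj *o)
	{
		/* mirrors color_white(): a leak candidate */
		return o->count != KMEMLEAK_BLACK && o->count < o->min_count;
	}

	static int is_gray(const struct obj *o)
	{
		/* mirrors color_gray(): enough references, will be scanned */
		return o->min_count != KMEMLEAK_BLACK && o->count >= o->min_count;
	}

	int main(void)
	{
		struct obj leak = { .count = 0, .min_count = 1 };
		struct obj live = { .count = 2, .min_count = 1 };
		struct obj ignored = { .count = 0, .min_count = KMEMLEAK_BLACK };

		/* prints: leak is white, live is gray, ignored (black) is neither */
		printf("leak:    white=%d gray=%d\n", is_white(&leak), is_gray(&leak));
		printf("live:    white=%d gray=%d\n", is_white(&live), is_gray(&live));
		printf("ignored: white=%d gray=%d\n", is_white(&ignored), is_gray(&ignored));
		return 0;
	}
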
323 static bool unreferenced_object(struct kmemleak_object *object) in unreferenced_object() argument
325 return (color_white(object) && object->flags & OBJECT_ALLOCATED) && in unreferenced_object()
326 time_before_eq(object->jiffies + jiffies_min_age, in unreferenced_object()
332 * print_unreferenced function must be called with the object->lock held.
335 struct kmemleak_object *object) in print_unreferenced() argument
338 unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies); in print_unreferenced()
340 warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", in print_unreferenced()
341 object->pointer, object->size); in print_unreferenced()
343 object->comm, object->pid, object->jiffies, in print_unreferenced()
345 hex_dump_object(seq, object); in print_unreferenced()
348 for (i = 0; i < object->trace_len; i++) { in print_unreferenced()
349 void *ptr = (void *)object->trace[i]; in print_unreferenced()
357 * the object->lock held.
359 static void dump_object_info(struct kmemleak_object *object) in dump_object_info() argument
361 pr_notice("Object 0x%08lx (size %zu):\n", in dump_object_info()
362 object->pointer, object->size); in dump_object_info()
364 object->comm, object->pid, object->jiffies); in dump_object_info()
365 pr_notice(" min_count = %d\n", object->min_count); in dump_object_info()
366 pr_notice(" count = %d\n", object->count); in dump_object_info()
367 pr_notice(" flags = 0x%x\n", object->flags); in dump_object_info()
368 pr_notice(" checksum = %u\n", object->checksum); in dump_object_info()
370 stack_trace_print(object->trace, object->trace_len, 4); in dump_object_info()
374 * Look up a memory block's metadata (kmemleak_object) in the object search
384 struct kmemleak_object *object = in lookup_object() local
386 if (ptr < object->pointer) in lookup_object()
387 rb = object->rb_node.rb_left; in lookup_object()
388 else if (object->pointer + object->size <= ptr) in lookup_object()
389 rb = object->rb_node.rb_right; in lookup_object()
390 else if (object->pointer == ptr || alias) in lookup_object()
391 return object; in lookup_object()
393 kmemleak_warn("Found object by alias at 0x%08lx\n", in lookup_object()
395 dump_object_info(object); in lookup_object()
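
Read contiguously, the tree walk these fragments come from looks roughly as follows (a hedged reconstruction; the listing omits the loop header and the final return):

	static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
	{
		struct rb_node *rb = object_tree_root.rb_node;

		while (rb) {
			struct kmemleak_object *object =
				rb_entry(rb, struct kmemleak_object, rb_node);

			if (ptr < object->pointer)
				rb = object->rb_node.rb_left;
			else if (object->pointer + object->size <= ptr)
				rb = object->rb_node.rb_right;
			else if (object->pointer == ptr || alias)
				return object;
			else {
				/* ptr falls inside the object but aliases are not accepted */
				kmemleak_warn("Found object by alias at 0x%08lx\n", ptr);
				dump_object_info(object);
				break;
			}
		}
		return NULL;
	}
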
403 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
404 * that once an object's use_count has reached 0, its RCU freeing has already
405 * been registered and the object should no longer be used. This function must be
408 static int get_object(struct kmemleak_object *object) in get_object() argument
410 return atomic_inc_not_zero(&object->use_count); in get_object()
419 struct kmemleak_object *object; in mem_pool_alloc() local
423 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); in mem_pool_alloc()
424 if (object) in mem_pool_alloc()
425 return object; in mem_pool_alloc()
430 object = list_first_entry_or_null(&mem_pool_free_list, in mem_pool_alloc()
431 typeof(*object), object_list); in mem_pool_alloc()
432 if (object) in mem_pool_alloc()
433 list_del(&object->object_list); in mem_pool_alloc()
435 object = &mem_pool[--mem_pool_free_count]; in mem_pool_alloc()
440 return object; in mem_pool_alloc()
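
Putting the fragments together: the allocator tries the slab cache first, then objects previously returned to the static pool, then fresh slots carved out of the pool array. A hedged reconstruction (list and pool manipulation is protected by kmemleak_lock in the original):

	static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
	{
		unsigned long flags;
		struct kmemleak_object *object;

		/* try the slab allocator first, with kmemleak-safe GFP flags */
		if (object_cache) {
			object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
			if (object)
				return object;
		}

		/* slab allocation failed, fall back to the memory pool */
		raw_spin_lock_irqsave(&kmemleak_lock, flags);
		object = list_first_entry_or_null(&mem_pool_free_list,
						  typeof(*object), object_list);
		if (object)
			list_del(&object->object_list);
		else if (mem_pool_free_count)
			object = &mem_pool[--mem_pool_free_count];
		else
			pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
		raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

		return object;
	}
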
444 * Return the object to either the slab allocator or the memory pool.
446 static void mem_pool_free(struct kmemleak_object *object) in mem_pool_free() argument
450 if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) { in mem_pool_free()
451 kmem_cache_free(object_cache, object); in mem_pool_free()
455 /* add the object to the memory pool free list */ in mem_pool_free()
457 list_add(&object->object_list, &mem_pool_free_list); in mem_pool_free()
468 struct kmemleak_object *object = in free_object_rcu() local
473 * code accessing this object, hence no need for locking. in free_object_rcu()
475 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { in free_object_rcu()
479 mem_pool_free(object); in free_object_rcu()
483 * Decrement the object use_count. Once the count is 0, free the object using
489 static void put_object(struct kmemleak_object *object) in put_object() argument
491 if (!atomic_dec_and_test(&object->use_count)) in put_object()
495 WARN_ON(object->flags & OBJECT_ALLOCATED); in put_object()
500 * came from the memory pool. Free the object directly. in put_object()
503 call_rcu(&object->rcu, free_object_rcu); in put_object()
505 free_object_rcu(&object->rcu); in put_object()
509 * Look up an object in the object search tree and increase its use_count.
514 struct kmemleak_object *object; in find_and_get_object() local
518 object = lookup_object(ptr, alias); in find_and_get_object()
521 /* check whether the object is still available */ in find_and_get_object()
522 if (object && !get_object(object)) in find_and_get_object()
523 object = NULL; in find_and_get_object()
526 return object; in find_and_get_object()
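
Together, get_object()/put_object() and find_and_get_object() give the canonical access pattern used throughout this file; a simplified sketch:

	struct kmemleak_object *object;
	unsigned long flags;

	object = find_and_get_object(ptr, 0);	/* use_count incremented if still live */
	if (!object)
		return;				/* already freed or never tracked */

	raw_spin_lock_irqsave(&object->lock, flags);
	/* ... read or modify object fields ... */
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);	/* at use_count == 0 the object is freed via RCU */
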
530 * Remove an object from the object_tree_root and object_list. Must be called
533 static void __remove_object(struct kmemleak_object *object) in __remove_object() argument
535 rb_erase(&object->rb_node, &object_tree_root); in __remove_object()
536 list_del_rcu(&object->object_list); in __remove_object()
540 * Look up an object in the object search tree and remove it from both
541 * object_tree_root and object_list. The returned object's use_count should be
547 struct kmemleak_object *object; in find_and_remove_object() local
550 object = lookup_object(ptr, alias); in find_and_remove_object()
551 if (object) in find_and_remove_object()
552 __remove_object(object); in find_and_remove_object()
555 return object; in find_and_remove_object()
574 struct kmemleak_object *object, *parent; in create_object() local
578 object = mem_pool_alloc(gfp); in create_object()
579 if (!object) { in create_object()
585 INIT_LIST_HEAD(&object->object_list); in create_object()
586 INIT_LIST_HEAD(&object->gray_list); in create_object()
587 INIT_HLIST_HEAD(&object->area_list); in create_object()
588 raw_spin_lock_init(&object->lock); in create_object()
589 atomic_set(&object->use_count, 1); in create_object()
590 object->flags = OBJECT_ALLOCATED; in create_object()
591 object->pointer = ptr; in create_object()
592 object->size = size; in create_object()
593 object->excess_ref = 0; in create_object()
594 object->min_count = min_count; in create_object()
595 object->count = 0; /* white color initially */ in create_object()
596 object->jiffies = jiffies; in create_object()
597 object->checksum = 0; in create_object()
601 object->pid = 0; in create_object()
602 strncpy(object->comm, "hardirq", sizeof(object->comm)); in create_object()
604 object->pid = 0; in create_object()
605 strncpy(object->comm, "softirq", sizeof(object->comm)); in create_object()
607 object->pid = current->pid; in create_object()
614 strncpy(object->comm, current->comm, sizeof(object->comm)); in create_object()
618 object->trace_len = __save_stack_trace(object->trace); in create_object()
635 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n", in create_object()
642 kmem_cache_free(object_cache, object); in create_object()
643 object = NULL; in create_object()
647 rb_link_node(&object->rb_node, rb_parent, link); in create_object()
648 rb_insert_color(&object->rb_node, &object_tree_root); in create_object()
650 list_add_tail_rcu(&object->object_list, &object_list); in create_object()
653 return object; in create_object()
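
The insertion walk preceding the rb_link_node()/rb_insert_color() pair descends the tree looking for a free slot and refuses overlapping ranges; a hedged reconstruction of the omitted lines:

	struct rb_node **link = &object_tree_root.rb_node, *rb_parent = NULL;

	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			/* the new range overlaps an existing object: give up */
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/* ... dump_object_info(parent), free the new object, bail out ... */
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);
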
657 * Mark the object as not allocated and schedule RCU freeing via put_object().
659 static void __delete_object(struct kmemleak_object *object) in __delete_object() argument
663 WARN_ON(!(object->flags & OBJECT_ALLOCATED)); in __delete_object()
664 WARN_ON(atomic_read(&object->use_count) < 1); in __delete_object()
670 raw_spin_lock_irqsave(&object->lock, flags); in __delete_object()
671 object->flags &= ~OBJECT_ALLOCATED; in __delete_object()
672 raw_spin_unlock_irqrestore(&object->lock, flags); in __delete_object()
673 put_object(object); in __delete_object()
682 struct kmemleak_object *object; in delete_object_full() local
684 object = find_and_remove_object(ptr, 0); in delete_object_full()
685 if (!object) { in delete_object_full()
687 kmemleak_warn("Freeing unknown object at 0x%08lx\n", in delete_object_full()
692 __delete_object(object); in delete_object_full()
702 struct kmemleak_object *object; in delete_object_part() local
705 object = find_and_remove_object(ptr, 1); in delete_object_part()
706 if (!object) { in delete_object_part()
708 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n", in delete_object_part()
719 start = object->pointer; in delete_object_part()
720 end = object->pointer + object->size; in delete_object_part()
722 create_object(start, ptr - start, object->min_count, in delete_object_part()
725 create_object(ptr + size, end - ptr - size, object->min_count, in delete_object_part()
728 __delete_object(object); in delete_object_part()
731 static void __paint_it(struct kmemleak_object *object, int color) in __paint_it() argument
733 object->min_count = color; in __paint_it()
735 object->flags |= OBJECT_NO_SCAN; in __paint_it()
738 static void paint_it(struct kmemleak_object *object, int color) in paint_it() argument
742 raw_spin_lock_irqsave(&object->lock, flags); in paint_it()
743 __paint_it(object, color); in paint_it()
744 raw_spin_unlock_irqrestore(&object->lock, flags); in paint_it()
749 struct kmemleak_object *object; in paint_ptr() local
751 object = find_and_get_object(ptr, 0); in paint_ptr()
752 if (!object) { in paint_ptr()
753 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n", in paint_ptr()
759 paint_it(object, color); in paint_ptr()
760 put_object(object); in paint_ptr()
764 * Mark an object permanently as gray-colored so that it can no longer be
773 * Mark the object as black-colored so that it is ignored from scans and
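
Both comments above describe thin wrappers around paint_ptr(); in mm/kmemleak.c they are essentially:

	static void make_gray_object(unsigned long ptr)
	{
		paint_ptr(ptr, KMEMLEAK_GREY);
	}

	/* __paint_it() additionally sets OBJECT_NO_SCAN for black objects */
	static void make_black_object(unsigned long ptr)
	{
		paint_ptr(ptr, KMEMLEAK_BLACK);
	}
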
782 * Add a scanning area to the object. If at least one such area is added,
788 struct kmemleak_object *object; in add_scan_area() local
791 object = find_and_get_object(ptr, 1); in add_scan_area()
792 if (!object) { in add_scan_area()
793 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n", in add_scan_area()
801 raw_spin_lock_irqsave(&object->lock, flags); in add_scan_area()
803 pr_warn_once("Cannot allocate a scan area, scanning the full object\n"); in add_scan_area()
804 /* mark the object for full scan to avoid false positives */ in add_scan_area()
805 object->flags |= OBJECT_FULL_SCAN; in add_scan_area()
809 size = object->pointer + object->size - ptr; in add_scan_area()
810 } else if (ptr + size > object->pointer + object->size) { in add_scan_area()
811 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); in add_scan_area()
812 dump_object_info(object); in add_scan_area()
821 hlist_add_head(&area->node, &object->area_list); in add_scan_area()
823 raw_spin_unlock_irqrestore(&object->lock, flags); in add_scan_area()
824 put_object(object); in add_scan_area()
828 * Any surplus references (object already gray) to 'ptr' are passed to
830 * vm_struct may be used as an alternative reference to the vmalloc'ed object
836 struct kmemleak_object *object; in object_set_excess_ref() local
838 object = find_and_get_object(ptr, 0); in object_set_excess_ref()
839 if (!object) { in object_set_excess_ref()
840 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n", in object_set_excess_ref()
845 raw_spin_lock_irqsave(&object->lock, flags); in object_set_excess_ref()
846 object->excess_ref = excess_ref; in object_set_excess_ref()
847 raw_spin_unlock_irqrestore(&object->lock, flags); in object_set_excess_ref()
848 put_object(object); in object_set_excess_ref()
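
For illustration, kmemleak_vmalloc() is the main user of this mechanism: the vm_struct may be the only live pointer to a vmalloc'ed block, so a reference to the vm_struct is accepted in place of one to the block itself (hedged sketch of the relevant calls):

	/* inside kmemleak_vmalloc(area, size, gfp): */
	create_object((unsigned long)area->addr, size, 2, gfp);
	object_set_excess_ref((unsigned long)area, (unsigned long)area->addr);
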
852 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
853 * pointer. Such an object will not be scanned by kmemleak but references to it
859 struct kmemleak_object *object; in object_no_scan() local
861 object = find_and_get_object(ptr, 0); in object_no_scan()
862 if (!object) { in object_no_scan()
863 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr); in object_no_scan()
867 raw_spin_lock_irqsave(&object->lock, flags); in object_no_scan()
868 object->flags |= OBJECT_NO_SCAN; in object_no_scan()
869 raw_spin_unlock_irqrestore(&object->lock, flags); in object_no_scan()
870 put_object(object); in object_no_scan()
874 * kmemleak_alloc - register a newly allocated object
875 * @ptr: pointer to beginning of the object
876 * @size: size of the object
877 * @min_count: minimum number of references to this object. If during memory
879 * the object is reported as a memory leak. If @min_count is 0,
880 * the object is never reported as a leak. If @min_count is -1,
881 * the object is ignored (not scanned and not reported as a leak)
884 * This function is called from the kernel allocators when a new object
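
A hypothetical out-of-line caller, for memory that kmemleak cannot track by itself (the gen_pool names are illustrative, not from this file):

	unsigned long addr = gen_pool_alloc(pool, size);	/* illustrative allocator */

	if (addr)
		/* min_count = 1: report a leak if no references are found */
		kmemleak_alloc((void *)addr, size, 1, GFP_KERNEL);

	/* ... use the buffer ... */

	kmemleak_free((void *)addr);	/* unregister before freeing */
	gen_pool_free(pool, addr, size);
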
898 * kmemleak_alloc_percpu - register a newly allocated __percpu object
899 * @ptr: __percpu pointer to beginning of the object
900 * @size: size of the object
903 * This function is called from the kernel percpu allocator when a new object
925 * kmemleak_vmalloc - register a newly vmalloc'ed object
927 * @size: size of the object
931 * object (memory block) is allocated.
950 * kmemleak_free - unregister a previously registered object
951 * @ptr: pointer to beginning of the object
953 * This function is called from the kernel allocators when an object (memory
966 * kmemleak_free_part - partially unregister a previously registered object
967 * @ptr: pointer to the beginning or inside the object. This also
984 * kmemleak_free_percpu - unregister a previously registered __percpu object
985 * @ptr: __percpu pointer to beginning of the object
987 * This function is called from the kernel percpu allocator when an object
1004 * kmemleak_update_trace - update object allocation stack trace
1005 * @ptr: pointer to beginning of the object
1007 * Override the object allocation stack trace for cases where the actual
1012 struct kmemleak_object *object; in kmemleak_update_trace() local
1020 object = find_and_get_object((unsigned long)ptr, 1); in kmemleak_update_trace()
1021 if (!object) { in kmemleak_update_trace()
1023 kmemleak_warn("Updating stack trace for unknown object at %p\n", in kmemleak_update_trace()
1029 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_update_trace()
1030 object->trace_len = __save_stack_trace(object->trace); in kmemleak_update_trace()
1031 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_update_trace()
1033 put_object(object); in kmemleak_update_trace()
1038 * kmemleak_not_leak - mark an allocated object as false positive
1039 * @ptr: pointer to beginning of the object
1041 * Calling this function on an object will cause the memory block to no longer
1054 * kmemleak_ignore - ignore an allocated object
1055 * @ptr: pointer to beginning of the object
1057 * Calling this function on an object will cause the memory block to be
1072 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1073 * @ptr: pointer to beginning or inside the object. This also
1078 * This function is used when it is known that only certain parts of an object
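
A hypothetical caller, assuming only one field inside a large buffer can ever hold references (struct and names are illustrative):

	struct big_buf {
		char payload[4096];	/* opaque data, never holds pointers */
		struct big_buf *next;	/* the only reference-carrying field */
	};

	struct big_buf *b = kmalloc(sizeof(*b), GFP_KERNEL);

	if (b)
		/* scan only 'next': random payload bytes can no longer be
		 * mistaken for references that keep leaked objects alive */
		kmemleak_scan_area(&b->next, sizeof(b->next), GFP_KERNEL);
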
1092 * kmemleak_no_scan - do not scan an allocated object
1093 * @ptr: pointer to beginning of the object
1096 * in situations where it is known that the given object does not contain any
1112 * @phys: physical address of the object
1113 * @size: size of the object
1114 * @min_count: minimum number of references to this object.
1129 * @phys: physical address of the beginning or inside an object. This
1143 * @phys: physical address of the object
1155 * @phys: physical address of the object
1165 * Update an object's checksum and return true if it was modified.
1167 static bool update_checksum(struct kmemleak_object *object) in update_checksum() argument
1169 u32 old_csum = object->checksum; in update_checksum()
1173 object->checksum = crc32(0, (void *)object->pointer, object->size); in update_checksum()
1177 return object->checksum != old_csum; in update_checksum()
1181 * Update an object's references. object->lock must be held by the caller.
1183 static void update_refs(struct kmemleak_object *object) in update_refs() argument
1185 if (!color_white(object)) { in update_refs()
1191 * Increase the object's reference count (number of pointers to the in update_refs()
1193 * object's color will become gray and it will be added to the in update_refs()
1196 object->count++; in update_refs()
1197 if (color_gray(object)) { in update_refs()
1199 WARN_ON(!get_object(object)); in update_refs()
1200 list_add_tail(&object->gray_list, &gray_list); in update_refs()
1240 struct kmemleak_object *object; in scan_block() local
1257 * object->use_count cannot be dropped to 0 while the object in scan_block()
1261 object = lookup_object(pointer, 1); in scan_block()
1262 if (!object) in scan_block()
1264 if (object == scanned) in scan_block()
1269 * Avoid the lockdep recursive warning on object->lock being in scan_block()
1273 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in scan_block()
1274 /* only pass surplus references (object already gray) */ in scan_block()
1275 if (color_gray(object)) { in scan_block()
1276 excess_ref = object->excess_ref; in scan_block()
1277 /* no need for update_refs() if object already gray */ in scan_block()
1280 update_refs(object); in scan_block()
1282 raw_spin_unlock(&object->lock); in scan_block()
1285 object = lookup_object(excess_ref, 0); in scan_block()
1286 if (!object) in scan_block()
1288 if (object == scanned) in scan_block()
1291 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in scan_block()
1292 update_refs(object); in scan_block()
1293 raw_spin_unlock(&object->lock); in scan_block()
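
The enclosing loop in scan_block() walks the block one pointer-sized word at a time under kmemleak_lock, treating every word as a candidate pointer (hedged sketch; KASAN annotations omitted):

	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);	/* last full word */
	unsigned long flags;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		unsigned long pointer = *ptr;

		if (pointer < min_addr || pointer >= max_addr)
			continue;	/* cannot fall inside any tracked object */

		/* ... lookup_object()/update_refs() as in the fragments above ... */
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
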
1318 * that object->use_count >= 1.
1320 static void scan_object(struct kmemleak_object *object) in scan_object() argument
1326 * Once the object->lock is acquired, the corresponding memory block in scan_object()
1329 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1330 if (object->flags & OBJECT_NO_SCAN) in scan_object()
1332 if (!(object->flags & OBJECT_ALLOCATED)) in scan_object()
1333 /* already freed object */ in scan_object()
1335 if (hlist_empty(&object->area_list) || in scan_object()
1336 object->flags & OBJECT_FULL_SCAN) { in scan_object()
1337 void *start = (void *)object->pointer; in scan_object()
1338 void *end = (void *)(object->pointer + object->size); in scan_object()
1343 scan_block(start, next, object); in scan_object()
1349 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1351 raw_spin_lock_irqsave(&object->lock, flags); in scan_object()
1352 } while (object->flags & OBJECT_ALLOCATED); in scan_object()
1354 hlist_for_each_entry(area, &object->area_list, node) in scan_object()
1357 object); in scan_object()
1359 raw_spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1368 struct kmemleak_object *object, *tmp; in scan_gray_list() local
1375 object = list_entry(gray_list.next, typeof(*object), gray_list); in scan_gray_list()
1376 while (&object->gray_list != &gray_list) { in scan_gray_list()
1381 scan_object(object); in scan_gray_list()
1383 tmp = list_entry(object->gray_list.next, typeof(*object), in scan_gray_list()
1386 /* remove the object from the list and release it */ in scan_gray_list()
1387 list_del(&object->gray_list); in scan_gray_list()
1388 put_object(object); in scan_gray_list()
1390 object = tmp; in scan_gray_list()
1403 struct kmemleak_object *object; in kmemleak_scan() local
1411 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1412 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_scan()
1416 * 1 reference to any object at this point. in kmemleak_scan()
1418 if (atomic_read(&object->use_count) > 1) { in kmemleak_scan()
1419 pr_debug("object->use_count = %d\n", in kmemleak_scan()
1420 atomic_read(&object->use_count)); in kmemleak_scan()
1421 dump_object_info(object); in kmemleak_scan()
1424 /* reset the reference count (whiten the object) */ in kmemleak_scan()
1425 object->count = 0; in kmemleak_scan()
1426 if (color_gray(object) && get_object(object)) in kmemleak_scan()
1427 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1429 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_scan()
1496 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1497 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_scan()
1498 if (color_white(object) && (object->flags & OBJECT_ALLOCATED) in kmemleak_scan()
1499 && update_checksum(object) && get_object(object)) { in kmemleak_scan()
1501 object->count = object->min_count; in kmemleak_scan()
1502 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1504 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_scan()
1523 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1524 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_scan()
1525 if (unreferenced_object(object) && in kmemleak_scan()
1526 !(object->flags & OBJECT_REPORTED)) { in kmemleak_scan()
1527 object->flags |= OBJECT_REPORTED; in kmemleak_scan()
1530 print_unreferenced(NULL, object); in kmemleak_scan()
1534 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_scan()
1612 * Iterate over the object_list and return the first valid object at or after
1618 struct kmemleak_object *object; in kmemleak_seq_start() local
1627 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_seq_start()
1630 if (get_object(object)) in kmemleak_seq_start()
1633 object = NULL; in kmemleak_seq_start()
1635 return object; in kmemleak_seq_start()
1639 * Return the next object in the object_list. The function decrements the
1640 * use_count of the previous object and increases that of the next one.
1662 * Decrement the use_count of the last object required, if any.
1679 * Print the information for an unreferenced object to the seq file.
1683 struct kmemleak_object *object = v; in kmemleak_seq_show() local
1686 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_seq_show()
1687 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) in kmemleak_seq_show()
1688 print_unreferenced(seq, object); in kmemleak_seq_show()
1689 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_seq_show()
1708 struct kmemleak_object *object; in dump_str_object_info() local
1713 object = find_and_get_object(addr, 0); in dump_str_object_info()
1714 if (!object) { in dump_str_object_info()
1715 pr_info("Unknown object at 0x%08lx\n", addr); in dump_str_object_info()
1719 raw_spin_lock_irqsave(&object->lock, flags); in dump_str_object_info()
1720 dump_object_info(object); in dump_str_object_info()
1721 raw_spin_unlock_irqrestore(&object->lock, flags); in dump_str_object_info()
1723 put_object(object); in dump_str_object_info()
1735 struct kmemleak_object *object; in kmemleak_clear() local
1739 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_clear()
1740 raw_spin_lock_irqsave(&object->lock, flags); in kmemleak_clear()
1741 if ((object->flags & OBJECT_REPORTED) && in kmemleak_clear()
1742 unreferenced_object(object)) in kmemleak_clear()
1743 __paint_it(object, KMEMLEAK_GREY); in kmemleak_clear()
1744 raw_spin_unlock_irqrestore(&object->lock, flags); in kmemleak_clear()
1767 * dump=... - dump information about the object found at the given address
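
For example, writing dump=0xffff8880349f0000 (address illustrative) to /sys/kernel/debug/kmemleak prints that object's metadata via dump_str_object_info() above, while writing scan triggers an immediate kmemleak_scan() and clear paints all currently reported objects gray via kmemleak_clear().
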
1847 struct kmemleak_object *object, *tmp; in __kmemleak_do_cleanup() local
1853 list_for_each_entry_safe(object, tmp, &object_list, object_list) { in __kmemleak_do_cleanup()
1854 __remove_object(object); in __kmemleak_do_cleanup()
1855 __delete_object(object); in __kmemleak_do_cleanup()
1871 * longer track object freeing. Ordering of the scan thread stopping and in kmemleak_do_cleanup()