// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
 *   del_state modifications and accesses to the object trees
 *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
 *   object_list is the main list holding the metadata (struct
 *   kmemleak_object) for the allocated memory blocks. The object trees are
 *   red black trees used to look-up metadata based on a pointer to the
 *   corresponding memory block. The kmemleak_object structures are added to
 *   the object_list and the object tree root in the create_object() function
 *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
 *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
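
/*
 * Illustrative sketch of the ordering above (not a function defined in this
 * file): code that needs another object's lock takes it only while already
 * inside the hierarchy, e.g.:
 *
 *	mutex_lock(&scan_mutex);
 *	raw_spin_lock_irqsave(&object->lock, flags);
 *	raw_spin_lock(&kmemleak_lock);
 *	raw_spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&other_object->lock);
 *	raw_spin_unlock(&kmemleak_lock);
 *	raw_spin_unlock_irqrestore(&object->lock, flags);
 *	mutex_unlock(&scan_mutex);
 */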

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned int del_state;		/* deletion state */
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	depot_stack_handle_t trace_handle;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS		(1 << 4)
/* flag set for per-CPU pointers */
#define OBJECT_PERCPU		(1 << 5)

/* set when __remove_object() called */
#define DELSTATE_REMOVED	(1 << 0)
/* set to temporarily prevent deletion from object_list */
#define DELSTATE_NO_DELETE	(1 << 1)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PERCPU flag) boundaries */
static struct rb_root object_percpu_tree_root = RB_ROOT;
/* protects the accesses to object_list and the object trees declared above */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_late_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* minimum and maximum address that may be valid per-CPU pointers */
static unsigned long min_percpu_addr = ULONG_MAX;
static unsigned long max_percpu_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return;

	if (object->flags & OBJECT_PERCPU)
		ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	if (object->flags & OBJECT_PERCPU)
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes on cpu %d):\n",
				   len, raw_smp_processor_id());
	else
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
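
/*
 * Worked example of the encoding above (illustrative values): an object
 * created with min_count = 1 and count = 0 is white, i.e. a leak candidate.
 * Once a scan finds a pointer to it, count becomes 1 >= min_count and the
 * object turns gray. Painting an object with KMEMLEAK_GREY sets
 * min_count = 0, so it is gray regardless of count, while KMEMLEAK_BLACK
 * sets min_count = -1 and the object is neither scanned nor reported.
 */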

/*
 * Objects are considered unreferenced only if their color is white, they
 * have not been deleted and have a minimum age to avoid false positives
 * caused by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

static const char *__object_type_str(struct kmemleak_object *object)
{
	if (object->flags & OBJECT_PHYS)
		return " (phys)";
	if (object->flags & OBJECT_PERCPU)
		return " (percpu)";
	return "";
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
	warn_or_seq_printf(seq, "unreferenced object%s 0x%08lx (size %zu):\n",
			   __object_type_str(object),
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
			   object->comm, object->pid, object->jiffies);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);

	for (i = 0; i < nr_entries; i++) {
		void *ptr = (void *)entries[i];
		warn_or_seq_printf(seq, "    %pS\n", ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object%s 0x%08lx (size %zu):\n",
		  __object_type_str(object), object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	if (object->trace_handle)
		stack_depot_print(object->trace_handle);
}

static struct rb_root *object_tree(unsigned long objflags)
{
	if (objflags & OBJECT_PHYS)
		return &object_phys_tree_root;
	if (objflags & OBJECT_PERCPU)
		return &object_percpu_tree_root;
	return &object_tree_root;
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       unsigned int objflags)
{
	struct rb_node *rb = object_tree(objflags)->rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/* Look up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	return __lookup_object(ptr, alias, 0);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}
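
/*
 * A minimal sketch of the reference counting pattern (see
 * __find_and_get_object() below for the real thing):
 *
 *	rcu_read_lock();
 *	object = __lookup_object(ptr, alias, objflags);	// under kmemleak_lock
 *	if (object && !get_object(object))		// use_count already 0,
 *		object = NULL;				// RCU free in flight
 *	rcu_read_unlock();
 *	...
 *	if (object)
 *		put_object(object);		// may schedule the RCU free
 */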

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc_noprof(object_cache,
						 gfp_nested_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	return __find_and_get_object(ptr, alias, 0);
}

/*
 * Remove an object from its object tree and object_list. Must be called with
 * the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, object_tree(object->flags));
	if (!(object->del_state & DELSTATE_NO_DELETE))
		list_del_rcu(&object->object_list);
	object->del_state |= DELSTATE_REMOVED;
}

static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
							int alias,
							unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __lookup_object(ptr, alias, objflags);
	if (object)
		__remove_object(object);

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both object
 * tree root and object_list. The returned object's use_count should be at
 * least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t trace_handle;
	unsigned long entries[MAX_TRACE];
	unsigned int nr_entries;

	/*
	 * Use object_cache to determine whether kmemleak_init() has
	 * been invoked. stack_depot_early_init() is called before
	 * kmemleak_init() in mm_core_init().
	 */
	if (!object_cache)
		return 0;
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return trace_handle;
}

static struct kmemleak_object *__alloc_object(gfp_t gfp)
{
	struct kmemleak_object *object;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->excess_ref = 0;
	object->count = 0;			/* white color initially */
	object->checksum = 0;
	object->del_state = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strscpy(object->comm, "hardirq");
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strscpy(object->comm, "softirq");
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strscpy(object->comm, current->comm);
	}

	/* kernel backtrace */
	object->trace_handle = set_track_prepare();

	return object;
}

static int __link_object(struct kmemleak_object *object, unsigned long ptr,
			 size_t size, int min_count, unsigned int objflags)
{
	struct kmemleak_object *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object->flags = OBJECT_ALLOCATED | objflags;
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->min_count = min_count;
	object->jiffies = jiffies;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	/*
	 * Only update min_addr and max_addr for objects storing a virtual
	 * address, and update min_percpu_addr and max_percpu_addr for
	 * per-CPU objects.
	 */
	if (objflags & OBJECT_PERCPU) {
		min_percpu_addr = min(min_percpu_addr, untagged_ptr);
		max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
	} else if (!(objflags & OBJECT_PHYS)) {
		min_addr = min(min_addr, untagged_ptr);
		max_addr = max(max_addr, untagged_ptr + size);
	}
	link = &object_tree(objflags)->rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			return -EEXIST;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, object_tree(objflags));
	list_add_tail_rcu(&object->object_list, &object_list);

	return 0;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object tree.
 */
static void __create_object(unsigned long ptr, size_t size,
			    int min_count, gfp_t gfp, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int ret;

	object = __alloc_object(gfp);
	if (!object)
		return;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	ret = __link_object(object, ptr, size, min_count, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (ret)
		mem_pool_free(object);
}

/* Create a kmemleak object for a block allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
			  int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, 0);
}

/* Create a kmemleak object for a block allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
			       int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
}

/* Create a kmemleak object corresponding to a per-CPU allocation. */
static void create_object_percpu(unsigned long ptr, size_t size,
				 int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0, objflags);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size,
			       unsigned int objflags)
{
	struct kmemleak_object *object, *object_l, *object_r;
	unsigned long start, end, flags;

	object_l = __alloc_object(GFP_KERNEL);
	if (!object_l)
		return;

	object_r = __alloc_object(GFP_KERNEL);
	if (!object_r)
		goto out;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, 1, objflags);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		goto unlock;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if ((ptr > start) &&
	    !__link_object(object_l, start, ptr - start,
			   object->min_count, objflags))
		object_l = NULL;
	if ((ptr + size < end) &&
	    !__link_object(object_r, ptr + size, end - ptr - size,
			   object->min_count, objflags))
		object_r = NULL;

unlock:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (object)
		__delete_object(object);

out:
	if (object_l)
		mem_pool_free(object_l);
	if (object_r)
		mem_pool_free(object_r);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __find_and_get_object(ptr, 0, objflags);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, 0);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, unsigned int objflags)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}

/*
 * Reset the checksum of an object. The immediate effect is that it will not
 * be reported as a leak during the next scan until its checksum is updated.
 */
static void reset_checksum(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not resetting the checksum of an unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->checksum = 0;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc_noprof(scan_area_cache,
					       gfp_nested_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
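
/*
 * Illustrative sketch of how a custom allocator would pair the callbacks
 * (my_pool_alloc/my_pool_free are hypothetical names):
 *
 *	void *my_pool_alloc(size_t size)
 *	{
 *		void *obj = ...;	// carve memory out of a private pool
 *
 *		// min_count = 1: report the block if no references are found
 *		kmemleak_alloc(obj, size, 1, GFP_KERNEL);
 *		return obj;
 *	}
 *
 *	void my_pool_free(void *obj)
 *	{
 *		kmemleak_free(obj);
 *		...			// return the memory to the pool
 *	}
 */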

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);

	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
		create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
		delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	depot_stack_handle_t trace_handle;
	unsigned long flags;

	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	trace_handle = set_track_prepare();
	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_handle = trace_handle;
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and to always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_transient_leak - mark an allocated object as transient false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to not be
 * reported as a leak temporarily. This may happen, for example, if the object
 * is part of a singly linked list and the ->next reference to it is changed.
 */
void __ref kmemleak_transient_leak(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		reset_checksum((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_transient_leak);
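
/*
 * Illustrative sketch (hypothetical list code): if the only pointer to
 * 'node' moves while a scan may be running, resetting its checksum defers
 * any leak report until the object is found unchanged in a later scan:
 *
 *	prev->next = node->next;	// unlink: the old reference is gone
 *	kmemleak_transient_leak(node);	// don't report it on the next scan
 *	...				// 'node' is re-inserted elsewhere
 */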

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
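
/*
 * A minimal sketch (hypothetical structure): if only 'deps' holds pointers
 * to other allocations, limiting the scan avoids interpreting the raw
 * 'payload' bytes as references that could keep leaked objects alive:
 *
 *	struct my_desc {
 *		char payload[512];	// raw data, no kernel pointers
 *		struct list_head deps;	// the only embedded references
 *	};
 *
 *	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
 *	kmemleak_scan_area(&desc->deps, sizeof(desc->deps), GFP_KERNEL);
 */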

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);

	if (kmemleak_enabled)
		/*
		 * Create object with OBJECT_PHYS flag and
		 * assume min_count 0.
		 */
		create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return false;

	kasan_disable_current();
	kcsan_disable_current();
	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		object->checksum = 0;
		for_each_possible_cpu(cpu) {
			void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);

			object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
		}
	} else {
		object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	}
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

static void pointer_update_refs(struct kmemleak_object *scanned,
				unsigned long pointer, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long untagged_ptr;
	unsigned long excess_ref;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
	if (objflags & OBJECT_PERCPU) {
		if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
			return;
	} else {
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			return;
	}

	/*
	 * No need for get_object() here since we hold kmemleak_lock.
	 * object->use_count cannot be dropped to 0 while the object
	 * is still present in object_tree_root and object_list
	 * (with updates protected by kmemleak_lock).
	 */
	object = __lookup_object(pointer, 1, objflags);
	if (!object)
		return;
	if (object == scanned)
		/* self referenced, ignore */
		return;

	/*
	 * Avoid the lockdep recursive warning on object->lock being
	 * previously acquired in scan_object(). These locks are
	 * enclosed by scan_mutex.
	 */
	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
	/* only pass surplus references (object already gray) */
	if (color_gray(object)) {
		excess_ref = object->excess_ref;
		/* no need for update_refs() if object already gray */
	} else {
		excess_ref = 0;
		update_refs(object);
	}
	raw_spin_unlock(&object->lock);

	if (excess_ref) {
		object = lookup_object(excess_ref, 0);
		if (!object)
			return;
		if (object == scanned)
			/* circular reference, ignore */
			return;
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		update_refs(object);
		raw_spin_unlock(&object->lock);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		unsigned long pointer;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		pointer_update_refs(scanned, pointer, 0);
		pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;

	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
			void *end = start + object->size;

			scan_block(start, end, object);

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
			if (!(object->flags & OBJECT_ALLOCATED))
				break;
		}
	} else if (hlist_empty(&object->area_list) ||
		   object->flags & OBJECT_FULL_SCAN) {
		void *start = object->flags & OBJECT_PHYS ?
				__va((phys_addr_t)object->pointer) :
				(void *)object->pointer;
		void *end = start + object->size;
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else {
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
	}
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}
1603
1604 /*
1605 * Scan the objects already referenced (gray objects). Scanning them may turn
1606 * more objects gray; if there are no memory leaks, all objects end up scanned.
1607 */
1608 static void scan_gray_list(void)
1609 {
1610 struct kmemleak_object *object, *tmp;
1611
1612 /*
1613 * The list traversal is safe for both tail additions and removals
1614 * from inside the loop. The kmemleak objects cannot be freed from
1615 * outside the loop because their use_count was incremented.
1616 */
1617 object = list_entry(gray_list.next, typeof(*object), gray_list);
1618 while (&object->gray_list != &gray_list) {
1619 cond_resched();
1620
1621 /* may add new objects to the list */
1622 if (!scan_should_stop())
1623 scan_object(object);
1624
1625 tmp = list_entry(object->gray_list.next, typeof(*object),
1626 gray_list);
1627
1628 /* remove the object from the list and release it */
1629 list_del(&object->gray_list);
1630 put_object(object);
1631
1632 object = tmp;
1633 }
1634 WARN_ON(!list_empty(&gray_list));
1635 }
1636
1637 /*
1638 * Conditionally call cond_resched() in an object iteration loop while making
1639 * sure that the given object won't go away without the RCU read lock, by
1640 * performing a get_object() first.
1641 */
1642 static void kmemleak_cond_resched(struct kmemleak_object *object)
1643 {
1644 if (!get_object(object))
1645 return; /* Try next object */
1646
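/*
 * DELSTATE_NO_DELETE makes a concurrent deletion defer unlinking the
 * object from object_list while the RCU read lock is dropped below;
 * if the object was flagged DELSTATE_REMOVED in the meantime, the
 * deferred list removal is finished here after re-taking the lock.
 */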
1647 raw_spin_lock_irq(&kmemleak_lock);
1648 if (object->del_state & DELSTATE_REMOVED)
1649 goto unlock_put; /* Object removed */
1650 object->del_state |= DELSTATE_NO_DELETE;
1651 raw_spin_unlock_irq(&kmemleak_lock);
1652
1653 rcu_read_unlock();
1654 cond_resched();
1655 rcu_read_lock();
1656
1657 raw_spin_lock_irq(&kmemleak_lock);
1658 if (object->del_state & DELSTATE_REMOVED)
1659 list_del_rcu(&object->object_list);
1660 object->del_state &= ~DELSTATE_NO_DELETE;
1661 unlock_put:
1662 raw_spin_unlock_irq(&kmemleak_lock);
1663 put_object(object);
1664 }
1665
1666 /*
1667 * Scan data sections and all the referenced memory blocks allocated via the
1668 * kernel's standard allocators. This function must be called with the
1669 * scan_mutex held.
1670 */
1671 static void kmemleak_scan(void)
1672 {
1673 struct kmemleak_object *object;
1674 struct zone *zone;
1675 int __maybe_unused i;
1676 int new_leaks = 0;
1677
1678 jiffies_last_scan = jiffies;
1679
1680 /* prepare the kmemleak_object's */
1681 rcu_read_lock();
1682 list_for_each_entry_rcu(object, &object_list, object_list) {
1683 raw_spin_lock_irq(&object->lock);
1684 #ifdef DEBUG
1685 /*
1686 * With a few exceptions there should be a maximum of
1687 * 1 reference to any object at this point.
1688 */
1689 if (atomic_read(&object->use_count) > 1) {
1690 pr_debug("object->use_count = %d\n",
1691 atomic_read(&object->use_count));
1692 dump_object_info(object);
1693 }
1694 #endif
1695
1696 /* ignore objects outside lowmem (paint them black) */
1697 if ((object->flags & OBJECT_PHYS) &&
1698 !(object->flags & OBJECT_NO_SCAN)) {
1699 unsigned long phys = object->pointer;
1700
1701 if (PHYS_PFN(phys) < min_low_pfn ||
1702 PHYS_PFN(phys + object->size) > max_low_pfn)
1703 __paint_it(object, KMEMLEAK_BLACK);
1704 }
1705
1706 /* reset the reference count (whiten the object) */
1707 object->count = 0;
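/*
 * After whitening, only objects with a zero min_count (never reported
 * as leaks, e.g. those painted gray) remain gray; they seed the gray
 * list as the roots of the scan.
 */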
1708 if (color_gray(object) && get_object(object))
1709 list_add_tail(&object->gray_list, &gray_list);
1710
1711 raw_spin_unlock_irq(&object->lock);
1712
1713 if (need_resched())
1714 kmemleak_cond_resched(object);
1715 }
1716 rcu_read_unlock();
1717
1718 #ifdef CONFIG_SMP
1719 /* per-cpu sections scanning */
1720 for_each_possible_cpu(i)
1721 scan_large_block(__per_cpu_start + per_cpu_offset(i),
1722 __per_cpu_end + per_cpu_offset(i));
1723 #endif
1724
1725 /*
1726 * Struct page scanning for each populated zone.
1727 */
1728 get_online_mems();
1729 for_each_populated_zone(zone) {
1730 unsigned long start_pfn = zone->zone_start_pfn;
1731 unsigned long end_pfn = zone_end_pfn(zone);
1732 unsigned long pfn;
1733
1734 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1735 struct page *page = pfn_to_online_page(pfn);
1736
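/* Reschedule about once every 64 pages to keep latency down. */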
1737 if (!(pfn & 63))
1738 cond_resched();
1739
1740 if (!page)
1741 continue;
1742
1743 /* only scan pages belonging to this zone */
1744 if (page_zone(page) != zone)
1745 continue;
1746 /* only scan if page is in use */
1747 if (page_count(page) == 0)
1748 continue;
1749 scan_block(page, page + 1, NULL);
1750 }
1751 }
1752 put_online_mems();
1753
1754 /*
1755 * Scanning the task stacks (may introduce false negatives: stale values
* left on a stack can masquerade as live references and hide real leaks).
1756 */
1757 if (kmemleak_stack_scan) {
1758 struct task_struct *p, *g;
1759
1760 rcu_read_lock();
1761 for_each_process_thread(g, p) {
1762 void *stack = try_get_task_stack(p);
1763 if (stack) {
1764 scan_block(stack, stack + THREAD_SIZE, NULL);
1765 put_task_stack(p);
1766 }
1767 }
1768 rcu_read_unlock();
1769 }
1770
1771 /*
1772 * Scan the objects already referenced from the sections scanned
1773 * above.
1774 */
1775 scan_gray_list();
1776
1777 /*
1778 * Check for new or unreferenced objects modified since the previous
1779 * scan and color them gray until the next scan.
1780 */
1781 rcu_read_lock();
1782 list_for_each_entry_rcu(object, &object_list, object_list) {
1783 if (need_resched())
1784 kmemleak_cond_resched(object);
1785
1786 /*
1787 * This is racy but we can save the overhead of lock/unlock
1788 * calls. The missed objects, if any, should be caught in
1789 * the next scan.
1790 */
1791 if (!color_white(object))
1792 continue;
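/*
 * update_checksum() returns true only if the object's content changed
 * since the last scan, so unmodified white objects are not re-queued
 * for scanning.
 */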
1793 raw_spin_lock_irq(&object->lock);
1794 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1795 && update_checksum(object) && get_object(object)) {
1796 /* color it gray temporarily */
1797 object->count = object->min_count;
1798 list_add_tail(&object->gray_list, &gray_list);
1799 }
1800 raw_spin_unlock_irq(&object->lock);
1801 }
1802 rcu_read_unlock();
1803
1804 /*
1805 * Re-scan the gray list for modified unreferenced objects.
1806 */
1807 scan_gray_list();
1808
1809 /*
1810 * If scanning was stopped do not report any new unreferenced objects.
1811 */
1812 if (scan_should_stop())
1813 return;
1814
1815 /*
1816 * Scanning result reporting.
1817 */
1818 rcu_read_lock();
1819 list_for_each_entry_rcu(object, &object_list, object_list) {
1820 if (need_resched())
1821 kmemleak_cond_resched(object);
1822
1823 /*
1824 * This is racy but we can save the overhead of lock/unlock
1825 * calls. The missed objects, if any, should be caught in
1826 * the next scan.
1827 */
1828 if (!color_white(object))
1829 continue;
1830 raw_spin_lock_irq(&object->lock);
1831 if (unreferenced_object(object) &&
1832 !(object->flags & OBJECT_REPORTED)) {
1833 object->flags |= OBJECT_REPORTED;
1834
1835 if (kmemleak_verbose)
1836 print_unreferenced(NULL, object);
1837
1838 new_leaks++;
1839 }
1840 raw_spin_unlock_irq(&object->lock);
1841 }
1842 rcu_read_unlock();
1843
1844 if (new_leaks) {
1845 kmemleak_found_leaks = true;
1846
1847 pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1848 new_leaks);
1849 }
1850
1851 }
1852
1853 /*
1854 * Thread function performing automatic memory scanning. Unreferenced objects
1855 * at the end of a memory scan are reported but only the first time.
1856 */
1857 static int kmemleak_scan_thread(void *arg)
1858 {
1859 static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1860
1861 pr_info("Automatic memory scanning thread started\n");
1862 set_user_nice(current, 10);
1863
1864 /*
1865 * Wait before the first scan to allow the system to fully initialize.
1866 */
1867 if (first_run) {
1868 signed long timeout = secs_to_jiffies(SECS_FIRST_SCAN);
1869 first_run = 0;
1870 while (timeout && !kthread_should_stop())
1871 timeout = schedule_timeout_interruptible(timeout);
1872 }
1873
1874 while (!kthread_should_stop()) {
1875 signed long timeout = READ_ONCE(jiffies_scan_wait);
1876
1877 mutex_lock(&scan_mutex);
1878 kmemleak_scan();
1879 mutex_unlock(&scan_mutex);
1880
1881 /* wait before the next scan */
1882 while (timeout && !kthread_should_stop())
1883 timeout = schedule_timeout_interruptible(timeout);
1884 }
1885
1886 pr_info("Automatic memory scanning thread ended\n");
1887
1888 return 0;
1889 }
1890
1891 /*
1892 * Start the automatic memory scanning thread. This function must be called
1893 * with the scan_mutex held.
1894 */
1895 static void start_scan_thread(void)
1896 {
1897 if (scan_thread)
1898 return;
1899 scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1900 if (IS_ERR(scan_thread)) {
1901 pr_warn("Failed to create the scan thread\n");
1902 scan_thread = NULL;
1903 }
1904 }
1905
1906 /*
1907 * Stop the automatic memory scanning thread.
1908 */
1909 static void stop_scan_thread(void)
1910 {
1911 if (scan_thread) {
1912 kthread_stop(scan_thread);
1913 scan_thread = NULL;
1914 }
1915 }
1916
1917 /*
1918 * Iterate over the object_list and return the first valid object at or after
1919 * the required position with its use_count incremented. No memory scan is
1920 * triggered here; reading the file only reports the results of the last scan.
1921 */
1922 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1923 {
1924 struct kmemleak_object *object;
1925 loff_t n = *pos;
1926 int err;
1927
1928 err = mutex_lock_interruptible(&scan_mutex);
1929 if (err < 0)
1930 return ERR_PTR(err);
1931
1932 rcu_read_lock();
1933 list_for_each_entry_rcu(object, &object_list, object_list) {
1934 if (n-- > 0)
1935 continue;
1936 if (get_object(object))
1937 goto out;
1938 }
1939 object = NULL;
1940 out:
1941 return object;
1942 }
1943
1944 /*
1945 * Return the next object in the object_list. The function decrements the
1946 * use_count of the previous object and increases that of the next one.
1947 */
1948 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1949 {
1950 struct kmemleak_object *prev_obj = v;
1951 struct kmemleak_object *next_obj = NULL;
1952 struct kmemleak_object *obj = prev_obj;
1953
1954 ++(*pos);
1955
1956 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1957 if (get_object(obj)) {
1958 next_obj = obj;
1959 break;
1960 }
1961 }
1962
1963 put_object(prev_obj);
1964 return next_obj;
1965 }
1966
1967 /*
1968 * Decrement the use_count of the last object required, if any.
1969 */
1970 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1971 {
1972 if (!IS_ERR(v)) {
1973 /*
1974 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1975 * waiting was interrupted, so only release it if !IS_ERR.
1976 */
1977 rcu_read_unlock();
1978 mutex_unlock(&scan_mutex);
1979 if (v)
1980 put_object(v);
1981 }
1982 }
1983
1984 /*
1985 * Print the information for an unreferenced object to the seq file.
1986 */
1987 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1988 {
1989 struct kmemleak_object *object = v;
1990 unsigned long flags;
1991
1992 raw_spin_lock_irqsave(&object->lock, flags);
1993 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1994 print_unreferenced(seq, object);
1995 raw_spin_unlock_irqrestore(&object->lock, flags);
1996 return 0;
1997 }
1998
1999 static const struct seq_operations kmemleak_seq_ops = {
2000 .start = kmemleak_seq_start,
2001 .next = kmemleak_seq_next,
2002 .stop = kmemleak_seq_stop,
2003 .show = kmemleak_seq_show,
2004 };
2005
2006 static int kmemleak_open(struct inode *inode, struct file *file)
2007 {
2008 return seq_open(file, &kmemleak_seq_ops);
2009 }
2010
2011 static bool __dump_str_object_info(unsigned long addr, unsigned int objflags)
2012 {
2013 unsigned long flags;
2014 struct kmemleak_object *object;
2015
2016 object = __find_and_get_object(addr, 1, objflags);
2017 if (!object)
2018 return false;
2019
2020 raw_spin_lock_irqsave(&object->lock, flags);
2021 dump_object_info(object);
2022 raw_spin_unlock_irqrestore(&object->lock, flags);
2023
2024 put_object(object);
2025
2026 return true;
2027 }
2028
2029 static int dump_str_object_info(const char *str)
2030 {
2031 unsigned long addr;
2032 bool found = false;
2033
2034 if (kstrtoul(str, 0, &addr))
2035 return -EINVAL;
2036
2037 found |= __dump_str_object_info(addr, 0);
2038 found |= __dump_str_object_info(addr, OBJECT_PHYS);
2039 found |= __dump_str_object_info(addr, OBJECT_PERCPU);
2040
2041 if (!found) {
2042 pr_info("Unknown object at 0x%08lx\n", addr);
2043 return -EINVAL;
2044 }
2045
2046 return 0;
2047 }
2048
2049 /*
2050 * We use grey instead of black to ensure we can do future scans on the same
2051 * objects. If we did not scan them again, these black objects could hold
2052 * the only references to objects allocated in the future and we'd end up
2053 * with false positives.
2054 */
2055 static void kmemleak_clear(void)
2056 {
2057 struct kmemleak_object *object;
2058
2059 rcu_read_lock();
2060 list_for_each_entry_rcu(object, &object_list, object_list) {
2061 raw_spin_lock_irq(&object->lock);
2062 if ((object->flags & OBJECT_REPORTED) &&
2063 unreferenced_object(object))
2064 __paint_it(object, KMEMLEAK_GREY);
2065 raw_spin_unlock_irq(&object->lock);
2066 }
2067 rcu_read_unlock();
2068
2069 kmemleak_found_leaks = false;
2070 }
2071
2072 static void __kmemleak_do_cleanup(void);
2073
2074 /*
2075 * File write operation to configure kmemleak at run-time. The following
2076 * commands can be written to the /sys/kernel/debug/kmemleak file:
2077 * off - disable kmemleak (irreversible)
2078 * stack=on - enable the task stacks scanning
2079 * stack=off - disable the task stacks scanning
2080 * scan=on - start the automatic memory scanning thread
2081 * scan=off - stop the automatic memory scanning thread
2082 * scan=... - set the automatic memory scanning period in seconds (0 to
2083 * disable it)
2084 * scan - trigger a memory scan
2085 * clear - mark all currently reported unreferenced kmemleak objects as
2086 * grey to ignore printing them, or free all kmemleak objects
2087 * if kmemleak has been disabled.
2088 * dump=... - dump information about the object found at the given address
2089 */
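/*
 * Example usage from the shell (the address passed to "dump=" below is
 * purely illustrative):
 *
 *   echo scan=60 > /sys/kernel/debug/kmemleak
 *   echo dump=0xffff000012345678 > /sys/kernel/debug/kmemleak
 */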
2090 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
2091 size_t size, loff_t *ppos)
2092 {
2093 char buf[64];
2094 int buf_size;
2095 int ret;
2096
2097 buf_size = min(size, (sizeof(buf) - 1));
2098 if (strncpy_from_user(buf, user_buf, buf_size) < 0)
2099 return -EFAULT;
2100 buf[buf_size] = 0;
2101
2102 ret = mutex_lock_interruptible(&scan_mutex);
2103 if (ret < 0)
2104 return ret;
2105
2106 if (strncmp(buf, "clear", 5) == 0) {
2107 if (kmemleak_enabled)
2108 kmemleak_clear();
2109 else
2110 __kmemleak_do_cleanup();
2111 goto out;
2112 }
2113
2114 if (!kmemleak_enabled) {
2115 ret = -EPERM;
2116 goto out;
2117 }
2118
2119 if (strncmp(buf, "off", 3) == 0)
2120 kmemleak_disable();
2121 else if (strncmp(buf, "stack=on", 8) == 0)
2122 kmemleak_stack_scan = 1;
2123 else if (strncmp(buf, "stack=off", 9) == 0)
2124 kmemleak_stack_scan = 0;
2125 else if (strncmp(buf, "scan=on", 7) == 0)
2126 start_scan_thread();
2127 else if (strncmp(buf, "scan=off", 8) == 0)
2128 stop_scan_thread();
2129 else if (strncmp(buf, "scan=", 5) == 0) {
2130 unsigned secs;
2131 unsigned long msecs;
2132
2133 ret = kstrtouint(buf + 5, 0, &secs);
2134 if (ret < 0)
2135 goto out;
2136
2137 msecs = secs * MSEC_PER_SEC;
2138 if (msecs > UINT_MAX)
2139 msecs = UINT_MAX;
2140
2141 stop_scan_thread();
2142 if (msecs) {
2143 WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
2144 start_scan_thread();
2145 }
2146 } else if (strncmp(buf, "scan", 4) == 0)
2147 kmemleak_scan();
2148 else if (strncmp(buf, "dump=", 5) == 0)
2149 ret = dump_str_object_info(buf + 5);
2150 else
2151 ret = -EINVAL;
2152
2153 out:
2154 mutex_unlock(&scan_mutex);
2155 if (ret < 0)
2156 return ret;
2157
2158 /* ignore the rest of the buffer, only one command at a time */
2159 *ppos += size;
2160 return size;
2161 }
2162
2163 static const struct file_operations kmemleak_fops = {
2164 .owner = THIS_MODULE,
2165 .open = kmemleak_open,
2166 .read = seq_read,
2167 .write = kmemleak_write,
2168 .llseek = seq_lseek,
2169 .release = seq_release,
2170 };
2171
2172 static void __kmemleak_do_cleanup(void)
2173 {
2174 struct kmemleak_object *object, *tmp;
2175
2176 /*
2177 * Kmemleak has already been disabled, no need for RCU list traversal
2178 * or kmemleak_lock held.
2179 */
2180 list_for_each_entry_safe(object, tmp, &object_list, object_list) {
2181 __remove_object(object);
2182 __delete_object(object);
2183 }
2184 }
2185
2186 /*
2187 * Stop the memory scanning thread and free the kmemleak internal objects,
2188 * but only if no memory leaks were found (otherwise, kmemleak may still
2189 * hold useful information on memory leaks).
2190 */
2191 static void kmemleak_do_cleanup(struct work_struct *work)
2192 {
2193 stop_scan_thread();
2194
2195 mutex_lock(&scan_mutex);
2196 /*
2197 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
2198 * longer track object freeing. Ordering of the scan thread stopping and
2199 * the memory accesses below is guaranteed by the kthread_stop()
2200 * function.
2201 */
2202 kmemleak_free_enabled = 0;
2203 mutex_unlock(&scan_mutex);
2204
2205 if (!kmemleak_found_leaks)
2206 __kmemleak_do_cleanup();
2207 else
2208 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
2209 }
2210
2211 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
2212
2213 /*
2214 * Disable kmemleak. No memory allocation/freeing will be traced once this
2215 * function is called. Disabling kmemleak is an irreversible operation.
2216 */
2217 static void kmemleak_disable(void)
2218 {
2219 /* atomically check whether it was already invoked */
2220 if (cmpxchg(&kmemleak_error, 0, 1))
2221 return;
2222
2223 /* stop any memory operation tracing */
2224 kmemleak_enabled = 0;
2225
2226 /* check whether it is too early for a kernel thread */
2227 if (kmemleak_late_initialized)
2228 schedule_work(&cleanup_work);
2229 else
2230 kmemleak_free_enabled = 0;
2231
2232 pr_info("Kernel memory leak detector disabled\n");
2233 }
2234
2235 /*
2236 * Allow boot-time kmemleak disabling (enabled by default).
2237 */
2238 static int __init kmemleak_boot_config(char *str)
2239 {
2240 if (!str)
2241 return -EINVAL;
2242 if (strcmp(str, "off") == 0)
2243 kmemleak_disable();
2244 else if (strcmp(str, "on") == 0) {
2245 kmemleak_skip_disable = 1;
2246 stack_depot_request_early_init();
2247 } else
2249 return -EINVAL;
2250 return 0;
2251 }
2252 early_param("kmemleak", kmemleak_boot_config);
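/*
 * For example, booting with "kmemleak=off" disables the detector, while
 * "kmemleak=on" keeps it enabled on kernels built with
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y.
 */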
2253
2254 /*
2255 * Kmemleak initialization.
2256 */
2257 void __init kmemleak_init(void)
2258 {
2259 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2260 if (!kmemleak_skip_disable) {
2261 kmemleak_disable();
2262 return;
2263 }
2264 #endif
2265
2266 if (kmemleak_error)
2267 return;
2268
2269 jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2270 jiffies_scan_wait = secs_to_jiffies(SECS_SCAN_WAIT);
2271
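/*
 * SLAB_NOLEAKTRACE keeps kmemleak from tracking its own metadata
 * allocations, which could otherwise recurse into the kmemleak
 * callbacks.
 */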
2272 object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2273 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2274
2275 /* register the data/bss sections */
2276 create_object((unsigned long)_sdata, _edata - _sdata,
2277 KMEMLEAK_GREY, GFP_ATOMIC);
2278 create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2279 KMEMLEAK_GREY, GFP_ATOMIC);
2280 /* only register .data..ro_after_init if not within .data */
2281 if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
2282 create_object((unsigned long)__start_ro_after_init,
2283 __end_ro_after_init - __start_ro_after_init,
2284 KMEMLEAK_GREY, GFP_ATOMIC);
2285 }
2286
2287 /*
2288 * Late initialization function.
2289 */
2290 static int __init kmemleak_late_init(void)
2291 {
2292 kmemleak_late_initialized = 1;
2293
2294 debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2295
2296 if (kmemleak_error) {
2297 /*
2298 * Some error occurred and kmemleak was disabled. There is a
2299 * small chance that kmemleak_disable() was called immediately
2300 * after setting kmemleak_late_initialized and we may end up with
2301 * two clean-up threads, albeit serialized by scan_mutex.
2302 */
2303 schedule_work(&cleanup_work);
2304 return -ENOMEM;
2305 }
2306
2307 if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2308 mutex_lock(&scan_mutex);
2309 start_scan_thread();
2310 mutex_unlock(&scan_mutex);
2311 }
2312
2313 pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
2314 mem_pool_free_count);
2315
2316 return 0;
2317 }
2318 late_initcall(kmemleak_late_init);
2319