// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
 *   del_state modifications and accesses to the object trees
 *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
 *   object_list is the main list holding the metadata (struct
 *   kmemleak_object) for the allocated memory blocks. The object trees are
 *   red black trees used to look-up metadata based on a pointer to the
 *   corresponding memory block. The kmemleak_object structures are added to
 *   the object_list and the object tree root in the create_object() function
 *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
 *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
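
/*
 * Illustrative sketch (not functional kernel code) of the use_count rules
 * described above. get_object() may only be called under rcu_read_lock()
 * and each successful call is paired with a put_object() once the object
 * is no longer needed:
 *
 *	rcu_read_lock();
 *	if (get_object(object)) {
 *		...				use_count incremented
 *		put_object(object);		RCU freeing once it drops to 0
 *	}
 *	rcu_read_unlock();
 */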

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned int del_state;		/* deletion state */
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	depot_stack_handle_t trace_handle;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS		(1 << 4)
/* flag set for per-CPU pointers */
#define OBJECT_PERCPU		(1 << 5)

/* set when __remove_object() is called */
#define DELSTATE_REMOVED	(1 << 0)
/* set to temporarily prevent deletion from object_list */
#define DELSTATE_NO_DELETE	(1 << 1)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PERCPU flag) boundaries */
static struct rb_root object_percpu_tree_root = RB_ROOT;
/* protects the access to object_list and to the three object trees above */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled __read_mostly = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled __read_mostly = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_late_initialized;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* minimum and maximum address that may be valid per-CPU pointers */
static unsigned long min_percpu_addr = ULONG_MAX;
static unsigned long max_percpu_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return;

	if (object->flags & OBJECT_PERCPU)
		ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	if (object->flags & OBJECT_PERCPU)
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes on cpu %d):\n",
				   len, raw_smp_processor_id());
	else
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
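
/*
 * Worked examples of the encoding above (illustration only): an object with
 * min_count == 1 and count == 0 is white (a potential leak); once a pointer
 * to it is found during scanning, count becomes 1 and the object turns gray.
 * An object created with min_count == 0 is gray from the start, and an
 * object painted with min_count == KMEMLEAK_BLACK (-1) is neither scanned
 * nor reported.
 */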

/*
 * Objects are considered unreferenced only if their color is white, they
 * have not been deleted and have a minimum age to avoid false positives
 * caused by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

static const char *__object_type_str(struct kmemleak_object *object)
{
	if (object->flags & OBJECT_PHYS)
		return " (phys)";
	if (object->flags & OBJECT_PERCPU)
		return " (percpu)";
	return "";
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
	warn_or_seq_printf(seq, "unreferenced object%s 0x%08lx (size %zu):\n",
			   __object_type_str(object),
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
			   object->comm, object->pid, object->jiffies);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);

	for (i = 0; i < nr_entries; i++) {
		void *ptr = (void *)entries[i];
		warn_or_seq_printf(seq, "    %pS\n", ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object%s 0x%08lx (size %zu):\n",
		  __object_type_str(object), object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	if (object->trace_handle)
		stack_depot_print(object->trace_handle);
}

static struct rb_root *object_tree(unsigned long objflags)
{
	if (objflags & OBJECT_PHYS)
		return &object_phys_tree_root;
	if (objflags & OBJECT_PERCPU)
		return &object_percpu_tree_root;
	return &object_tree_root;
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       unsigned int objflags)
{
	struct rb_node *rb = object_tree(objflags)->rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			/*
			 * Defer the printk() while the kmemleak_lock is held
			 * to avoid a deadlock.
			 */
			printk_deferred_enter();
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			printk_deferred_exit();
			break;
		}
	}
	return NULL;
}

/* Look-up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	return __lookup_object(ptr, alias, 0);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	bool warn = false;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc_noprof(object_cache,
						 gfp_nested_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		warn = true;
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (warn)
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	return __find_and_get_object(ptr, alias, 0);
}

/*
 * Remove an object from its object tree and object_list. Must be called with
 * the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, object_tree(object->flags));
	if (!(object->del_state & DELSTATE_NO_DELETE))
		list_del_rcu(&object->object_list);
	object->del_state |= DELSTATE_REMOVED;
}

static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
							int alias,
							unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __lookup_object(ptr, alias, objflags);
	if (object)
		__remove_object(object);

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both object
 * tree root and object_list. The returned object's use_count should be at
 * least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t trace_handle;
	unsigned long entries[MAX_TRACE];
	unsigned int nr_entries;

	/*
	 * Use object_cache to determine whether kmemleak_init() has
	 * been invoked. stack_depot_early_init() is called before
	 * kmemleak_init() in mm_core_init().
	 */
	if (!object_cache)
		return 0;
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return trace_handle;
}

static struct kmemleak_object *__alloc_object(gfp_t gfp)
{
	struct kmemleak_object *object;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->excess_ref = 0;
	object->count = 0;			/* white color initially */
	object->checksum = 0;
	object->del_state = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strscpy(object->comm, "hardirq");
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strscpy(object->comm, "softirq");
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strscpy(object->comm, current->comm);
	}

	/* kernel backtrace */
	object->trace_handle = set_track_prepare();

	return object;
}

static int __link_object(struct kmemleak_object *object, unsigned long ptr,
			 size_t size, int min_count, unsigned int objflags)
{
	struct kmemleak_object *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object->flags = OBJECT_ALLOCATED | objflags;
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->min_count = min_count;
	object->jiffies = jiffies;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	/*
	 * Only update min_addr and max_addr for objects storing a virtual
	 * address; update min_percpu_addr and max_percpu_addr for per-CPU
	 * objects.
	 */
	if (objflags & OBJECT_PERCPU) {
		min_percpu_addr = min(min_percpu_addr, untagged_ptr);
		max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
	} else if (!(objflags & OBJECT_PHYS)) {
		min_addr = min(min_addr, untagged_ptr);
		max_addr = max(max_addr, untagged_ptr + size);
	}
	link = &object_tree(objflags)->rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			/*
			 * Defer the printk() while the kmemleak_lock is held
			 * to avoid a deadlock.
			 */
			printk_deferred_enter();
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			printk_deferred_exit();
			return -EEXIST;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, object_tree(objflags));
	list_add_tail_rcu(&object->object_list, &object_list);

	return 0;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object tree.
 */
static void __create_object(unsigned long ptr, size_t size,
			    int min_count, gfp_t gfp, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int ret;

	object = __alloc_object(gfp);
	if (!object)
		return;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	ret = __link_object(object, ptr, size, min_count, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (ret)
		mem_pool_free(object);
}

/* Create a kmemleak object which was allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
			  int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, 0);
}

/* Create a kmemleak object which was allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
			       int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
}

/* Create a kmemleak object corresponding to a per-CPU allocation. */
static void create_object_percpu(unsigned long ptr, size_t size,
				 int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0, objflags);
	if (!object)
		/*
		 * kmalloc_nolock() -> kfree() calls kmemleak_free()
		 * without kmemleak_alloc().
		 */
		return;
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size,
			       unsigned int objflags)
{
	struct kmemleak_object *object, *object_l, *object_r;
	unsigned long start, end, flags;

	object_l = __alloc_object(GFP_KERNEL);
	if (!object_l)
		return;

	object_r = __alloc_object(GFP_KERNEL);
	if (!object_r)
		goto out;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, 1, objflags);
	if (!object)
		goto unlock;

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if ((ptr > start) &&
	    !__link_object(object_l, start, ptr - start,
			   object->min_count, objflags))
		object_l = NULL;
	if ((ptr + size < end) &&
	    !__link_object(object_r, ptr + size, end - ptr - size,
			   object->min_count, objflags))
		object_r = NULL;

unlock:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (object) {
		__delete_object(object);
	} else {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
	}

out:
	if (object_l)
		mem_pool_free(object_l);
	if (object_r)
		mem_pool_free(object_r);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __find_and_get_object(ptr, 0, objflags);
	if (!object)
		/*
		 * kmalloc_nolock() -> kfree_rcu() calls kmemleak_ignore()
		 * without kmemleak_alloc().
		 */
		return;
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, 0);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, unsigned int objflags)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}

/*
 * Reset the checksum of an object. The immediate effect is that it will not
 * be reported as a leak during the next scan until its checksum is updated.
 */
static void reset_checksum(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not resetting the checksum of an unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->checksum = 0;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc_noprof(scan_area_cache,
					       gfp_nested_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
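
/*
 * Example (illustrative sketch, in line with Documentation/dev-tools/
 * kmemleak.rst; my_pool_alloc()/my_pool_free() are made-up helpers): a
 * custom allocator that wants its blocks tracked pairs the callbacks the
 * same way kmalloc()/kfree() do:
 *
 *	ptr = my_pool_alloc(size);
 *	if (ptr)
 *		kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(ptr);
 *	my_pool_free(ptr);
 */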

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);

	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
		create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
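
/*
 * Worked example of the min_count == 2 above (illustration only): right
 * after vmalloc() returns, the scanner can find two references to the
 * block, the pointer held by the caller and area->addr stored inside the
 * vm_struct, so only a count below 2 would make the object white.
 */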

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
		delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	depot_stack_handle_t trace_handle;
	unsigned long flags;

	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	trace_handle = set_track_prepare();
	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_handle = trace_handle;
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
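
/*
 * Example (illustrative; hw_set_descriptor() is a made-up helper): an
 * object whose only remaining reference lives in a device register cannot
 * be seen by the memory scanner and would be reported as a false positive
 * unless painted gray:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	hw_set_descriptor(dev, obj);
 *	kmemleak_not_leak(obj);
 */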

/**
 * kmemleak_transient_leak - mark an allocated object as transient false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to not be
 * reported as a leak temporarily. This may happen, for example, if the object
 * is part of a singly linked list and the ->next reference to it is changed.
 */
void __ref kmemleak_transient_leak(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		reset_checksum((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_transient_leak);
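
/*
 * Example (illustrative): deleting the first node of a lockless list
 * rewrites the only ->next reference to it; if a scan runs before the
 * caller stores the node elsewhere, resetting the checksum defers the
 * report by one scan cycle (assuming the llist_node sits at the start of
 * the tracked object):
 *
 *	node = llist_del_first(&head);
 *	kmemleak_transient_leak(node);
 */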

/**
 * kmemleak_ignore_percpu - similar to kmemleak_ignore but taking a percpu
 *			    address argument
 * @ptr:	percpu address of the object
 */
void __ref kmemleak_ignore_percpu(const void __percpu *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
		make_black_object((unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_ignore_percpu);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
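
/*
 * Example (illustrative): if only the first two pointer-sized words of an
 * object can ever hold references to other allocations, scanning can be
 * restricted to them:
 *
 *	kmemleak_scan_area(obj, 2 * sizeof(void *), GFP_KERNEL);
 */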

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);

	if (kmemleak_enabled)
		/*
		 * Create object with OBJECT_PHYS flag and
		 * assume min_count 0.
		 */
		create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return false;

	kasan_disable_current();
	kcsan_disable_current();
	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		object->checksum = 0;
		for_each_possible_cpu(cpu) {
			void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);

			object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
		}
	} else {
		object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	}
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

static void pointer_update_refs(struct kmemleak_object *scanned,
				unsigned long pointer, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long untagged_ptr;
	unsigned long excess_ref;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
	if (objflags & OBJECT_PERCPU) {
		if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
			return;
	} else {
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			return;
	}

	/*
	 * No need for get_object() here since we hold kmemleak_lock.
	 * object->use_count cannot be dropped to 0 while the object
	 * is still present in object_tree_root and object_list
	 * (with updates protected by kmemleak_lock).
	 */
	object = __lookup_object(pointer, 1, objflags);
	if (!object)
		return;
	if (object == scanned)
		/* self referenced, ignore */
		return;

	/*
	 * Avoid the lockdep recursive warning on object->lock being
	 * previously acquired in scan_object(). These locks are
	 * enclosed by scan_mutex.
	 */
	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
	/* only pass surplus references (object already gray) */
	if (color_gray(object)) {
		excess_ref = object->excess_ref;
		/* no need for update_refs() if object already gray */
	} else {
		excess_ref = 0;
		update_refs(object);
	}
	raw_spin_unlock(&object->lock);

	if (excess_ref) {
		object = lookup_object(excess_ref, 0);
		if (!object)
			return;
		if (object == scanned)
			/* circular reference, ignore */
			return;
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		update_refs(object);
		raw_spin_unlock(&object->lock);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->flags & PF_KTHREAD)
		return kthread_should_stop();

	return signal_pending(current);
}
1513
1514 /*
1515 * Scan a memory block (exclusive range) for valid pointers and add those
1516 * found to the gray list.
1517 */
scan_block(void * _start,void * _end,struct kmemleak_object * scanned)1518 static void scan_block(void *_start, void *_end,
1519 struct kmemleak_object *scanned)
1520 {
1521 unsigned long *ptr;
1522 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1523 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1524 unsigned long flags;
1525
1526 raw_spin_lock_irqsave(&kmemleak_lock, flags);
1527 for (ptr = start; ptr < end; ptr++) {
1528 unsigned long pointer;
1529
1530 if (scan_should_stop())
1531 break;
1532
1533 kasan_disable_current();
1534 pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1535 kasan_enable_current();
1536
1537 pointer_update_refs(scanned, pointer, 0);
1538 pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
1539 }
1540 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1541 }
1542
1543 /*
1544 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1545 */
1546 #ifdef CONFIG_SMP
scan_large_block(void * start,void * end)1547 static void scan_large_block(void *start, void *end)
1548 {
1549 void *next;
1550
1551 while (start < end) {
1552 next = min(start + MAX_SCAN_SIZE, end);
1553 scan_block(start, next, NULL);
1554 start = next;
1555 cond_resched();
1556 }
1557 }
1558 #endif

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;

	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
			void *end = start + object->size;

			scan_block(start, end, object);

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
			if (!(object->flags & OBJECT_ALLOCATED))
				break;
		}
	} else if (hlist_empty(&object->area_list) ||
		   object->flags & OBJECT_FULL_SCAN) {
		void *start = object->flags & OBJECT_PHYS ?
				__va((phys_addr_t)object->pointer) :
				(void *)object->pointer;
		void *end = start + object->size;
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else {
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
	}
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}
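
/*
 * Illustrative sketch (not part of this file) of how the cases above are
 * selected through the kmemleak API in <linux/kmemleak.h>; "obj" and its
 * "ptr" member are hypothetical:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_no_scan(obj);		// sets OBJECT_NO_SCAN: obj is
 *					// tracked but never scanned
 * or, alternatively:
 *	kmemleak_scan_area(&obj->ptr, sizeof(obj->ptr), GFP_KERNEL);
 *					// adds an entry to area_list: only
 *					// this range of obj is scanned
 *
 * Objects with neither annotation take the full-block scanning path, split
 * into MAX_SCAN_SIZE chunks.
 */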

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced as scanning proceeds and, if there are no memory leaks, all
 * the objects end up scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Conditionally call cond_resched() in an object iteration loop while making
 * sure that the given object won't go away without the RCU read lock by
 * performing a get_object() if necessary. The DELSTATE_NO_DELETE flag tells
 * a concurrent delete_object() to leave the object on the object_list while
 * the RCU read lock is dropped; any removal that happened in the meantime is
 * replayed here once the lock is re-acquired.
 */
static void kmemleak_cond_resched(struct kmemleak_object *object)
{
	if (!get_object(object))
		return;	/* Try next object */

	raw_spin_lock_irq(&kmemleak_lock);
	if (object->del_state & DELSTATE_REMOVED)
		goto unlock_put;	/* Object removed */
	object->del_state |= DELSTATE_NO_DELETE;
	raw_spin_unlock_irq(&kmemleak_lock);

	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();

	raw_spin_lock_irq(&kmemleak_lock);
	if (object->del_state & DELSTATE_REMOVED)
		list_del_rcu(&object->object_list);
	object->del_state &= ~DELSTATE_NO_DELETE;
unlock_put:
	raw_spin_unlock_irq(&kmemleak_lock);
	put_object(object);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak objects */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif

		/* ignore objects outside lowmem (paint them black) */
		if ((object->flags & OBJECT_PHYS) &&
		    !(object->flags & OBJECT_NO_SCAN)) {
			unsigned long phys = object->pointer;

			if (PHYS_PFN(phys) < min_low_pfn ||
			    PHYS_PFN(phys + object->size) > max_low_pfn)
				__paint_it(object, KMEMLEAK_BLACK);
		}

		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irq(&object->lock);

		if (need_resched())
			kmemleak_cond_resched(object);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!(pfn & 63))
				cond_resched();

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks may introduce false negatives: a stale
	 * pointer left on a stack can keep an otherwise unreferenced object
	 * looking referenced.
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (need_resched())
			kmemleak_cond_resched(object);

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped, do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (need_resched())
			kmemleak_cond_resched(object);

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}
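
/*
 * Summary of the color model used by the scan above (restating definitions
 * from earlier in this file, not new mechanism): an object is "white" while
 * object->count < object->min_count, i.e. fewer pointers to it were found
 * than expected; it becomes "gray" once count >= min_count and is then
 * scanned itself via the gray_list. Painting an object black sets min_count
 * to KMEMLEAK_BLACK so that it is neither added to the gray_list nor
 * reported. Whatever is still white once the gray list is exhausted is
 * reported as a suspected leak.
 */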

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = secs_to_jiffies(SECS_FIRST_SCAN);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increments that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};
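
/*
 * For reference, the seq_file contract (Documentation/filesystems/
 * seq_file.rst): each read() call runs .start, then .show/.next pairs, then
 * .stop. Across these hooks exactly one object stays pinned through its
 * use_count, and the scan_mutex plus the RCU read lock are held from .start
 * to .stop so that walking the object_list remains safe.
 */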

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static bool __dump_str_object_info(unsigned long addr, unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = __find_and_get_object(addr, 1, objflags);
	if (!object)
		return false;

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);

	return true;
}

static int dump_str_object_info(const char *str)
{
	unsigned long addr;
	bool found = false;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;

	found |= __dump_str_object_info(addr, 0);
	found |= __dump_str_object_info(addr, OBJECT_PHYS);
	found |= __dump_str_object_info(addr, OBJECT_PERCPU);

	if (!found) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	return 0;
}
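
/*
 * Example (user-space usage; the address is purely illustrative): dump the
 * metadata of an object that appeared in a leak report:
 *
 *	# echo dump=0xffff8880091abc00 > /sys/kernel/debug/kmemleak
 *
 * The address is tried as a virtual, physical and per-cpu object in turn,
 * so the same command covers all three object types.
 */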

/*
 * We use grey instead of black so that the same objects are still scanned
 * in the future. If they were painted black and thus excluded from
 * scanning, any references they hold to newly allocated objects would be
 * missed and we would end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}
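
/*
 * Typical workflow around "clear" (see Documentation/dev-tools/kmemleak.rst):
 * acknowledge the current reports so that only leaks introduced afterwards
 * show up in subsequent scans:
 *
 *	# echo clear > /sys/kernel/debug/kmemleak
 *	  ... exercise the code under test ...
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 */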

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
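
/*
 * Example (user-space usage): switch the automatic scan period to ten
 * minutes, then disable periodic scanning again:
 *
 *	# echo scan=600 > /sys/kernel/debug/kmemleak
 *	# echo scan=0 > /sys/kernel/debug/kmemleak
 *
 * Note that "scan=0" only stops the scan thread without disabling kmemleak;
 * one-off scans via "echo scan" keep working.
 */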

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;
	unsigned int cnt = 0;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or kmemleak_lock held.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);

		/* Call cond_resched() once per 64 iterations to avoid soft lockup */
		if (!(++cnt & 0x3f))
			cond_resched();
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks were found (otherwise, kmemleak may still hold useful
 * information on the leaked memory).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once the scan thread is guaranteed to have stopped, it is safe to
	 * no longer track object freeing. The ordering between the scan
	 * thread stopping and the memory accesses below is guaranteed by
	 * kthread_stop().
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_late_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0) {
		kmemleak_skip_disable = 1;
		stack_depot_request_early_init();
	} else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
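
/*
 * Example (kernel command line): booting with "kmemleak=off" disables the
 * detector irreversibly; with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
 * "kmemleak=on" is needed to enable it and additionally requests early
 * stack depot initialization for the allocation backtraces.
 */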

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = secs_to_jiffies(SECS_SCAN_WAIT);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}
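
/*
 * The data/bss objects above are created with KMEMLEAK_GREY (min_count 0),
 * so they are always treated as referenced: they are scanned as roots on
 * every pass but never reported as leaks themselves.
 */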

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_late_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_late_initialized, in which case we
		 * may end up with two clean-up runs, though they are
		 * serialized by the scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);