// SPDX-License-Identifier: GPL-2.0-only
#include <linux/alloc_tag.h>
#include <linux/execmem.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/page_ext.h>
#include <linux/proc_fs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>

#define ALLOCINFO_FILE_NAME		"allocinfo"
#define MODULE_ALLOC_TAG_VMAP_SIZE	(100000UL * sizeof(struct alloc_tag))
#define SECTION_START(NAME)		(CODETAG_SECTION_START_PREFIX NAME)
#define SECTION_STOP(NAME)		(CODETAG_SECTION_STOP_PREFIX NAME)
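
/*
 * Illustrative sizing note: MODULE_ALLOC_TAG_VMAP_SIZE reserves address
 * space for up to 100000 module allocation tags. Assuming, purely for
 * illustration, sizeof(struct alloc_tag) == 40 bytes, that is about 4 MB
 * of vmalloc address space; the backing pages are populated lazily by
 * vm_module_tags_populate() as modules are loaded.
 */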

#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
static bool mem_profiling_support = true;
#else
static bool mem_profiling_support;
#endif

static struct codetag_type *alloc_tag_cttype;

#ifdef CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU
DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
EXPORT_SYMBOL(_shared_alloc_tag);
#endif

DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);
EXPORT_SYMBOL(mem_alloc_profiling_key);

DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);

struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
unsigned long alloc_tag_ref_mask;
int alloc_tag_ref_offs;

struct allocinfo_private {
	struct codetag_iterator iter;
	bool print_header;
};

static void *allocinfo_start(struct seq_file *m, loff_t *pos)
{
	struct allocinfo_private *priv;
	loff_t node = *pos;

	priv = (struct allocinfo_private *)m->private;
	codetag_lock_module_list(alloc_tag_cttype, true);
	if (node == 0) {
		priv->print_header = true;
		priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
		codetag_next_ct(&priv->iter);
	}
	return priv->iter.ct ? priv : NULL;
}

static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	struct codetag *ct = codetag_next_ct(&priv->iter);

	(*pos)++;
	if (!ct)
		return NULL;

	return priv;
}

static void allocinfo_stop(struct seq_file *m, void *arg)
{
	codetag_lock_module_list(alloc_tag_cttype, false);
}

static void print_allocinfo_header(struct seq_buf *buf)
{
	/* Output format version, so we can change it. */
	seq_buf_printf(buf, "allocinfo - version: 1.0\n");
	seq_buf_printf(buf, "#     <size>  <calls> <tag info>\n");
}

static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
{
	struct alloc_tag *tag = ct_to_alloc_tag(ct);
	struct alloc_tag_counters counter = alloc_tag_read(tag);
	s64 bytes = counter.bytes;

	seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
	codetag_to_text(out, ct);
	seq_buf_putc(out, ' ');
	seq_buf_putc(out, '\n');
}

static int allocinfo_show(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	char *bufp;
	size_t n = seq_get_buf(m, &bufp);
	struct seq_buf buf;

	seq_buf_init(&buf, bufp, n);
	if (priv->print_header) {
		print_allocinfo_header(&buf);
		priv->print_header = false;
	}
	alloc_tag_to_text(&buf, priv->iter.ct);
	seq_commit(m, seq_buf_used(&buf));
	return 0;
}

static const struct seq_operations allocinfo_seq_op = {
	.start	= allocinfo_start,
	.next	= allocinfo_next,
	.stop	= allocinfo_stop,
	.show	= allocinfo_show,
};
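
/*
 * Illustrative example: reading /proc/allocinfo, which is backed by the
 * seq_operations above, yields output shaped like this (the sizes, call
 * counts and tag locations below are made up):
 *
 *	allocinfo - version: 1.0
 *	#     <size>  <calls> <tag info>
 *	      532480      130 mm/filemap.c:962 func:__filemap_get_folio
 *	        4096        1 lib/stackdepot.c:627 func:depot_alloc_stack
 */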

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
{
	struct codetag_iterator iter;
	struct codetag *ct;
	struct codetag_bytes n;
	unsigned int i, nr = 0;

	if (IS_ERR_OR_NULL(alloc_tag_cttype))
		return 0;

	if (can_sleep)
		codetag_lock_module_list(alloc_tag_cttype, true);
	else if (!codetag_trylock_module_list(alloc_tag_cttype))
		return 0;

	iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&iter))) {
		struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));

		n.ct	= ct;
		n.bytes = counter.bytes;

		for (i = 0; i < nr; i++)
			if (n.bytes > tags[i].bytes)
				break;

		if (i < count) {
			nr -= nr == count;
			memmove(&tags[i + 1],
				&tags[i],
				sizeof(tags[0]) * (nr - i));
			nr++;
			tags[i] = n;
		}
	}

	codetag_lock_module_list(alloc_tag_cttype, false);

	return nr;
}
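
/*
 * Illustrative sketch, not code from this file: a consumer (for example
 * an out-of-memory report) could dump the heaviest allocation sites like
 * this. The array size and message text are arbitrary:
 *
 *	struct codetag_bytes tags[10];
 *	size_t i, nr;
 *
 *	nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), false);
 *	for (i = 0; i < nr; i++)
 *		pr_notice("%lld bytes allocated at %s:%u\n",
 *			  tags[i].bytes, tags[i].ct->filename,
 *			  tags[i].ct->lineno);
 */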

void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
{
	int i;
	struct alloc_tag *tag;
	unsigned int nr_pages = 1 << new_order;

	if (!mem_alloc_profiling_enabled())
		return;

	tag = __pgalloc_tag_get(&folio->page);
	if (!tag)
		return;

	for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
			/* Set new reference to point to the original tag */
			alloc_tag_ref_set(&ref, tag);
			update_page_tag_ref(handle, &ref);
			put_page_tag_ref(handle);
		}
	}
}
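
/*
 * Worked example: splitting an order-4 folio (16 pages) into order-2
 * folios (4 pages each) runs the loop above with i = 4, 8 and 12, so the
 * head page of each new folio receives a reference to the original tag
 * and all four resulting folios stay accounted to the original call site.
 */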

void pgalloc_tag_swap(struct folio *new, struct folio *old)
{
	union pgtag_ref_handle handle_old, handle_new;
	union codetag_ref ref_old, ref_new;
	struct alloc_tag *tag_old, *tag_new;

	if (!mem_alloc_profiling_enabled())
		return;

	tag_old = __pgalloc_tag_get(&old->page);
	if (!tag_old)
		return;
	tag_new = __pgalloc_tag_get(&new->page);
	if (!tag_new)
		return;

	if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
		return;
	if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
		put_page_tag_ref(handle_old);
		return;
	}

	/*
	 * Clear tag references to avoid debug warning when using
	 * __alloc_tag_ref_set() with non-empty reference.
	 */
	set_codetag_empty(&ref_old);
	set_codetag_empty(&ref_new);

	/* swap tags */
	__alloc_tag_ref_set(&ref_old, tag_new);
	update_page_tag_ref(handle_old, &ref_old);
	__alloc_tag_ref_set(&ref_new, tag_old);
	update_page_tag_ref(handle_new, &ref_new);

	put_page_tag_ref(handle_old);
	put_page_tag_ref(handle_new);
}

static void shutdown_mem_profiling(bool remove_file)
{
	if (mem_alloc_profiling_enabled())
		static_branch_disable(&mem_alloc_profiling_key);

	if (!mem_profiling_support)
		return;

	if (remove_file)
		remove_proc_entry(ALLOCINFO_FILE_NAME, NULL);
	mem_profiling_support = false;
}

void __init alloc_tag_sec_init(void)
{
	struct alloc_tag *last_codetag;

	if (!mem_profiling_support)
		return;

	if (!static_key_enabled(&mem_profiling_compressed))
		return;

	kernel_tags.first_tag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_START(ALLOC_TAG_SECTION_NAME));
	last_codetag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_STOP(ALLOC_TAG_SECTION_NAME));
	kernel_tags.count = last_codetag - kernel_tags.first_tag;

	/* Check if kernel tags fit into page flags */
	if (kernel_tags.count > (1UL << NR_UNUSED_PAGEFLAG_BITS)) {
		shutdown_mem_profiling(false); /* allocinfo file does not exist yet */
		pr_err("%lu allocation tags cannot be referenced using %d available page flag bits. Memory allocation profiling is disabled!\n",
			kernel_tags.count, NR_UNUSED_PAGEFLAG_BITS);
		return;
	}

	alloc_tag_ref_offs = (LRU_REFS_PGOFF - NR_UNUSED_PAGEFLAG_BITS);
	alloc_tag_ref_mask = ((1UL << NR_UNUSED_PAGEFLAG_BITS) - 1);
	pr_debug("Memory allocation profiling compression is using %d page flag bits!\n",
		 NR_UNUSED_PAGEFLAG_BITS);
}
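
/*
 * Worked example of the compressed scheme (numbers for illustration
 * only): if the kernel image carries kernel_tags.count == 30000 tags and
 * NR_UNUSED_PAGEFLAG_BITS == 16, then 30000 <= 65536 and every tag can
 * be addressed by a 16-bit index stored directly in the unused page flag
 * bits (located via alloc_tag_ref_offs/alloc_tag_ref_mask), so no
 * page_ext storage is needed.
 */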

#ifdef CONFIG_MODULES

static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE);
static struct vm_struct *vm_module_tags;
/* A dummy object used to indicate an unloaded module */
static struct module unloaded_mod;
/* A dummy object used to indicate a module prepended area */
static struct module prepend_mod;

struct alloc_tag_module_section module_tags;

static inline unsigned long alloc_tag_align(unsigned long val)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return val;
	}

	if (val % sizeof(struct alloc_tag) == 0)
		return val;
	return ((val / sizeof(struct alloc_tag)) + 1) * sizeof(struct alloc_tag);
}
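
/*
 * Worked example (assuming, purely for illustration, that
 * sizeof(struct alloc_tag) == 40): alloc_tag_align(80) returns 80, which
 * is already a multiple of the tag size, while alloc_tag_align(100)
 * rounds up to 120 so that tag indexing stays exact.
 */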

static bool ensure_alignment(unsigned long align, unsigned int *prepend)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return true;
	}

	/*
	 * If alloc_tag size is not a multiple of required alignment, tag
	 * indexing does not work.
	 */
	if (!IS_ALIGNED(sizeof(struct alloc_tag), align))
		return false;

	/* Ensure prepend consumes multiple of alloc_tag-sized blocks */
	if (*prepend)
		*prepend = alloc_tag_align(*prepend);

	return true;
}

static inline bool tags_addressable(void)
{
	unsigned long tag_idx_count;

	if (!static_key_enabled(&mem_profiling_compressed))
		return true; /* with page_ext tags are always addressable */

	tag_idx_count = CODETAG_ID_FIRST + kernel_tags.count +
			module_tags.size / sizeof(struct alloc_tag);

	return tag_idx_count < (1UL << NR_UNUSED_PAGEFLAG_BITS);
}
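
/*
 * Worked example (illustrative numbers): with 30000 kernel tags, room
 * for 5000 module tags in module_tags.size and NR_UNUSED_PAGEFLAG_BITS
 * == 16, tag_idx_count is roughly 35000 plus the reserved
 * CODETAG_ID_FIRST ids, which is below 65536, so every tag index still
 * fits in the available page flag bits.
 */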

static bool needs_section_mem(struct module *mod, unsigned long size)
{
	if (!mem_profiling_support)
		return false;

	return size >= sizeof(struct alloc_tag);
}

static bool clean_unused_counters(struct alloc_tag *start_tag,
				  struct alloc_tag *end_tag)
{
	struct alloc_tag *tag;
	bool ret = true;

	for (tag = start_tag; tag <= end_tag; tag++) {
		struct alloc_tag_counters counter;

		if (!tag->counters)
			continue;

		counter = alloc_tag_read(tag);
		if (!counter.bytes) {
			free_percpu(tag->counters);
			tag->counters = NULL;
		} else {
			ret = false;
		}
	}

	return ret;
}

/* Called with mod_area_mt locked */
static void clean_unused_module_areas_locked(void)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_for_each(&mas, val, module_tags.size) {
		struct alloc_tag *start_tag;
		struct alloc_tag *end_tag;

		if (val != &unloaded_mod)
			continue;

		/* Release area if all tags are unused */
		start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
		end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
		if (clean_unused_counters(start_tag, end_tag))
			mas_erase(&mas);
	}
}

/* Called with mod_area_mt locked */
static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
			      unsigned long size, unsigned int prepend, unsigned long align)
{
	bool cleanup_done = false;

repeat:
	/* Try finding exact size and hope the start is aligned */
	if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) {
		if (IS_ALIGNED(mas->index + prepend, align))
			return true;

		/* Try finding larger area to align later */
		mas_reset(mas);
		if (!mas_empty_area(mas, 0, section_size - 1,
				    size + prepend + align - 1))
			return true;
	}
	/* No free area; try cleaning up stale data and repeat the search once */
	if (!cleanup_done) {
		clean_unused_module_areas_locked();
		cleanup_done = true;
		mas_reset(mas);
		goto repeat;
	}

	return false;
}
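
/*
 * Worked example: for size == 4096, prepend == 0 and align == 64, the
 * first mas_empty_area() pass looks for an exact 4096-byte gap and
 * succeeds only if that gap happens to start 64-byte aligned; otherwise
 * the second pass asks for 4096 + 63 bytes, so that any starting offset
 * can be rounded up to the alignment by the caller.
 */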

static int vm_module_tags_populate(void)
{
	unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
				 (vm_module_tags->nr_pages << PAGE_SHIFT);
	unsigned long new_end = module_tags.start_addr + module_tags.size;

	if (phys_end < new_end) {
		struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
		unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
		unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
		unsigned long more_pages;
		unsigned long nr = 0;

		more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
		while (nr < more_pages) {
			unsigned long allocated;

			allocated = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
				NUMA_NO_NODE, more_pages - nr, next_page + nr);

			if (!allocated)
				break;
			nr += allocated;
		}

		if (nr < more_pages ||
		    vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
				     next_page, PAGE_SHIFT) < 0) {
			/* Clean up and error out */
			for (int i = 0; i < nr; i++)
				__free_page(next_page[i]);
			return -ENOMEM;
		}

		vm_module_tags->nr_pages += nr;

		/*
		 * Kasan allocates 1 byte of shadow for every 8 bytes of data.
		 * When kasan_alloc_module_shadow allocates shadow memory,
		 * its unit of allocation is a page.
		 * Therefore, here we need to align to MODULE_ALIGN.
		 */
		if (old_shadow_end < new_shadow_end)
			kasan_alloc_module_shadow((void *)old_shadow_end,
						  new_shadow_end - old_shadow_end,
						  GFP_KERNEL);
	}

	/*
	 * Mark the pages as accessible, now that they are mapped.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	kasan_unpoison_vmalloc((void *)module_tags.start_addr,
				new_end - module_tags.start_addr,
				KASAN_VMALLOC_PROT_NORMAL);

	return 0;
}
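
/*
 * Worked example of the lazy population above (assuming PAGE_SIZE ==
 * 4096 and a page-aligned base): with nr_pages == 10, phys_end is
 * base + 40960; if module_tags.size then grows to 50000 bytes, new_end
 * is base + 50000 and more_pages = ALIGN(9040, 4096) >> PAGE_SHIFT == 3,
 * so three more pages are bulk-allocated and mapped.
 */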
472 
reserve_module_tags(struct module * mod,unsigned long size,unsigned int prepend,unsigned long align)473 static void *reserve_module_tags(struct module *mod, unsigned long size,
474 				 unsigned int prepend, unsigned long align)
475 {
476 	unsigned long section_size = module_tags.end_addr - module_tags.start_addr;
477 	MA_STATE(mas, &mod_area_mt, 0, section_size - 1);
478 	unsigned long offset;
479 	void *ret = NULL;
480 
481 	/* If no tags return error */
	/* If the area cannot hold even one tag, return an error */
	if (size < sizeof(struct alloc_tag))
		return ERR_PTR(-EINVAL);

	/*
	 * align is always a power of 2, so we can use IS_ALIGNED and ALIGN.
	 * align of 0 or 1 means no alignment; normalize it to 1.
	 */
	if (!align)
		align = 1;

	if (!ensure_alignment(align, &prepend)) {
		shutdown_mem_profiling(true);
		pr_err("%s: alignment %lu is incompatible with allocation tag indexing. Memory allocation profiling is disabled!\n",
			mod->name, align);
		return ERR_PTR(-EINVAL);
	}

	mas_lock(&mas);
	if (!find_aligned_area(&mas, section_size, size, prepend, align)) {
		ret = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	/* Mark found area as reserved */
	offset = mas.index;
	offset += prepend;
	offset = ALIGN(offset, align);
	if (offset != mas.index) {
		unsigned long pad_start = mas.index;

		mas.last = offset - 1;
		mas_store(&mas, &prepend_mod);
		if (mas_is_err(&mas)) {
			ret = ERR_PTR(xa_err(mas.node));
			goto unlock;
		}
		mas.index = offset;
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas)) {
			mas.index = pad_start;
			mas_erase(&mas);
			ret = ERR_PTR(xa_err(mas.node));
		}
	} else {
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas))
			ret = ERR_PTR(xa_err(mas.node));
	}
unlock:
	mas_unlock(&mas);

	if (IS_ERR(ret))
		return ret;

	if (module_tags.size < offset + size) {
		int grow_res;

		module_tags.size = offset + size;
		if (mem_alloc_profiling_enabled() && !tags_addressable()) {
			shutdown_mem_profiling(true);
			pr_warn("With module %s there are too many tags to fit in %d page flag bits. Memory allocation profiling is disabled!\n",
				mod->name, NR_UNUSED_PAGEFLAG_BITS);
		}

		grow_res = vm_module_tags_populate();
		if (grow_res) {
			shutdown_mem_profiling(true);
			pr_err("Failed to allocate memory for allocation tags in the module %s. Memory allocation profiling is disabled!\n",
			       mod->name);
			return ERR_PTR(grow_res);
		}
	}

	return (struct alloc_tag *)(module_tags.start_addr + offset);
}
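
/*
 * Worked example of the padding logic above: if find_aligned_area()
 * returns a gap at mas.index == 100 with prepend == 40 and align == 64,
 * then offset = ALIGN(140, 64) == 192; bytes [100, 191] are stored as a
 * prepend_mod padding entry and [192, 192 + size - 1] as the module's
 * tag area.
 */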

static void release_module_tags(struct module *mod, bool used)
{
	MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
	struct alloc_tag *start_tag;
	struct alloc_tag *end_tag;
	struct module *val;

	mas_lock(&mas);
	mas_for_each_rev(&mas, val, 0)
		if (val == mod)
			break;

	if (!val) /* module not found */
		goto out;

	if (!used)
		goto release_area;

	start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
	end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
	if (!clean_unused_counters(start_tag, end_tag)) {
		struct alloc_tag *tag;

		for (tag = start_tag; tag <= end_tag; tag++) {
			struct alloc_tag_counters counter;

			if (!tag->counters)
				continue;

			counter = alloc_tag_read(tag);
			pr_info("%s:%u module %s func:%s has %llu bytes allocated at module unload\n",
				tag->ct.filename, tag->ct.lineno, tag->ct.modname,
				tag->ct.function, counter.bytes);
		}
	} else {
		used = false;
	}
release_area:
	mas_store(&mas, used ? &unloaded_mod : NULL);
	val = mas_prev_range(&mas, 0);
	if (val == &prepend_mod)
		mas_store(&mas, NULL);
out:
	mas_unlock(&mas);
}

static int load_module(struct module *mod, struct codetag *start, struct codetag *stop)
{
	/* Allocate module alloc_tag percpu counters */
	struct alloc_tag *start_tag;
	struct alloc_tag *stop_tag;
	struct alloc_tag *tag;

	/* percpu counters for core allocations are already statically allocated */
	if (!mod)
		return 0;

	start_tag = ct_to_alloc_tag(start);
	stop_tag = ct_to_alloc_tag(stop);
	for (tag = start_tag; tag < stop_tag; tag++) {
		WARN_ON(tag->counters);
		tag->counters = alloc_percpu(struct alloc_tag_counters);
		if (!tag->counters) {
			while (--tag >= start_tag) {
				free_percpu(tag->counters);
				tag->counters = NULL;
			}
			pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s\n",
			       mod->name);
			return -ENOMEM;
		}

		/*
		 * Avoid a kmemleak false positive. The pointer to the counters is stored
		 * in the alloc_tag section of the module and cannot be directly accessed.
		 */
		kmemleak_ignore_percpu(tag->counters);
	}
	return 0;
}

static void replace_module(struct module *mod, struct module *new_mod)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_lock(&mas);
	mas_for_each(&mas, val, module_tags.size) {
		if (val != mod)
			continue;

		mas_store_gfp(&mas, new_mod, GFP_KERNEL);
		break;
	}
	mas_unlock(&mas);
}

static int __init alloc_mod_tags_mem(void)
{
	/* Map space to copy allocation tags */
	vm_module_tags = execmem_vmap(MODULE_ALLOC_TAG_VMAP_SIZE);
	if (!vm_module_tags) {
		pr_err("Failed to map %lu bytes for module allocation tags\n",
			MODULE_ALLOC_TAG_VMAP_SIZE);
		module_tags.start_addr = 0;
		return -ENOMEM;
	}

	vm_module_tags->pages = kmalloc_array(get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
					sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
	if (!vm_module_tags->pages) {
		free_vm_area(vm_module_tags);
		return -ENOMEM;
	}

	module_tags.start_addr = (unsigned long)vm_module_tags->addr;
	module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE;
	/* Ensure the base is alloc_tag aligned when required for indexing */
	module_tags.start_addr = alloc_tag_align(module_tags.start_addr);

	return 0;
}

static void __init free_mod_tags_mem(void)
{
	int i;

	module_tags.start_addr = 0;
	for (i = 0; i < vm_module_tags->nr_pages; i++)
		__free_page(vm_module_tags->pages[i]);
	kfree(vm_module_tags->pages);
	free_vm_area(vm_module_tags);
}

#else /* CONFIG_MODULES */

static inline int alloc_mod_tags_mem(void) { return 0; }
static inline void free_mod_tags_mem(void) {}

#endif /* CONFIG_MODULES */

/* See: Documentation/mm/allocation-profiling.rst */
static int __init setup_early_mem_profiling(char *str)
{
	bool compressed = false;
	bool enable;

	if (!str || !str[0])
		return -EINVAL;

	if (!strncmp(str, "never", 5)) {
		enable = false;
		mem_profiling_support = false;
		pr_info("Memory allocation profiling is disabled!\n");
	} else {
		char *token = strsep(&str, ",");

		if (kstrtobool(token, &enable))
			return -EINVAL;

		if (str) {
			if (strcmp(str, "compressed"))
				return -EINVAL;

			compressed = true;
		}
		mem_profiling_support = true;
		pr_info("Memory allocation profiling is enabled %s compression and is turned %s!\n",
			compressed ? "with" : "without", enable ? "on" : "off");
	}

	if (enable != mem_alloc_profiling_enabled()) {
		if (enable)
			static_branch_enable(&mem_alloc_profiling_key);
		else
			static_branch_disable(&mem_alloc_profiling_key);
	}
	if (compressed != static_key_enabled(&mem_profiling_compressed)) {
		if (compressed)
			static_branch_enable(&mem_profiling_compressed);
		else
			static_branch_disable(&mem_profiling_compressed);
	}

	return 0;
}
early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);
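
/*
 * Example boot command line usage, as parsed above (see also
 * Documentation/mm/allocation-profiling.rst):
 *
 *	sysctl.vm.mem_profiling=never		no support, no /proc/allocinfo
 *	sysctl.vm.mem_profiling=0		supported but turned off
 *	sysctl.vm.mem_profiling=1		turned on at boot
 *	sysctl.vm.mem_profiling=1,compressed	on, tag refs in page flags
 */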

static __init bool need_page_alloc_tagging(void)
{
	if (static_key_enabled(&mem_profiling_compressed))
		return false;

	return mem_profiling_support;
}

static __init void init_page_alloc_tagging(void)
{
}

struct page_ext_operations page_alloc_tagging_ops = {
	.size = sizeof(union codetag_ref),
	.need = need_page_alloc_tagging,
	.init = init_page_alloc_tagging,
};
EXPORT_SYMBOL(page_alloc_tagging_ops);

#ifdef CONFIG_SYSCTL
static struct ctl_table memory_allocation_profiling_sysctls[] = {
	{
		.procname	= "mem_profiling",
		.data		= &mem_alloc_profiling_key,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
		.mode		= 0444,
#else
		.mode		= 0644,
#endif
		.proc_handler	= proc_do_static_key,
	},
};

static void __init sysctl_init(void)
{
	if (!mem_profiling_support)
		memory_allocation_profiling_sysctls[0].mode = 0444;

	register_sysctl_init("vm", memory_allocation_profiling_sysctls);
}
#else /* CONFIG_SYSCTL */
static inline void sysctl_init(void) {}
#endif /* CONFIG_SYSCTL */
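
/*
 * Illustrative runtime usage: with CONFIG_SYSCTL and profiling support,
 * the static key can be flipped through the sysctl registered above:
 *
 *	echo 1 > /proc/sys/vm/mem_profiling	(enable)
 *	echo 0 > /proc/sys/vm/mem_profiling	(disable)
 *
 * The file is read-only (0444) when CONFIG_MEM_ALLOC_PROFILING_DEBUG is
 * set or when profiling support was disabled.
 */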

static int __init alloc_tag_init(void)
{
	const struct codetag_type_desc desc = {
		.section		= ALLOC_TAG_SECTION_NAME,
		.tag_size		= sizeof(struct alloc_tag),
#ifdef CONFIG_MODULES
		.needs_section_mem	= needs_section_mem,
		.alloc_section_mem	= reserve_module_tags,
		.free_section_mem	= release_module_tags,
		.module_load		= load_module,
		.module_replaced	= replace_module,
#endif
	};
	int res;

	sysctl_init();

	if (!mem_profiling_support) {
		pr_info("Memory allocation profiling is not supported!\n");
		return 0;
	}

	if (!proc_create_seq_private(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op,
				     sizeof(struct allocinfo_private), NULL)) {
		pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
		shutdown_mem_profiling(false);
		return -ENOMEM;
	}

	res = alloc_mod_tags_mem();
	if (res) {
		pr_err("Failed to reserve address space for module tags, errno = %d\n", res);
		shutdown_mem_profiling(true);
		return res;
	}

	alloc_tag_cttype = codetag_register_type(&desc);
	if (IS_ERR(alloc_tag_cttype)) {
		pr_err("Allocation tags registration failed, errno = %ld\n", PTR_ERR(alloc_tag_cttype));
		free_mod_tags_mem();
		shutdown_mem_profiling(true);
		return PTR_ERR(alloc_tag_cttype);
	}

	return 0;
}
module_init(alloc_tag_init);