// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
/*
 * CONFIG_GENERIC_ENTRY relies on compiler-emitted mem*() calls not being
 * instrumented. KASAN-enabled toolchains should emit __asan_mem*() functions
 * for the sites they want to instrument.
 *
 * If the compiler can instrument memintrinsics, never override these, so
 * that non-instrumented files can safely consider them as builtins.
 */
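/*
 * For illustration only (a sketch, not tied to a specific toolchain): with
 * memintrinsic instrumentation (CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX),
 * a memset(p, 0, n) call in an instrumented file is compiled as a call to
 * __asan_memset(p, 0, n), so the range check happens there and the
 * definitions below are not needed.
 */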
#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
#endif

void *__asan_memset(void *addr, int c, ssize_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}
EXPORT_SYMBOL(__asan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__asan_memmove(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
EXPORT_SYMBOL(__asan_memmove);
#endif

void *__asan_memcpy(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
EXPORT_SYMBOL(__asan_memcpy);

#ifdef CONFIG_KASAN_SW_TAGS
void *__hwasan_memset(void *addr, int c, ssize_t len) __alias(__asan_memset);
EXPORT_SYMBOL(__hwasan_memset);
#ifdef __HAVE_ARCH_MEMMOVE
void *__hwasan_memmove(void *dest, const void *src, ssize_t len) __alias(__asan_memmove);
EXPORT_SYMBOL(__hwasan_memmove);
#endif
void *__hwasan_memcpy(void *dest, const void *src, ssize_t len) __alias(__asan_memcpy);
EXPORT_SYMBOL(__hwasan_memcpy);
#endif
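
/*
 * Note (illustrative): with CONFIG_KASAN_SW_TAGS the compiler emits
 * __hwasan_mem*() calls rather than __asan_mem*(), so the aliases above
 * let both instrumentation schemes funnel into the same range checks.
 */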

void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	void *shadow_start, *shadow_end;

	if (!kasan_arch_is_ready())
		return;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	shadow_start = kasan_mem_to_shadow(addr);
	shadow_end = kasan_mem_to_shadow(addr + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL_GPL(kasan_poison);
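
/*
 * Worked example (a sketch, assuming KASAN_GRANULE_SIZE == 8 and
 * KASAN_SHADOW_SCALE_SHIFT == 3): kasan_poison(addr, 256, value, false) on
 * an 8-byte-aligned addr spans 256 >> 3 == 32 shadow bytes, so the
 * __memset() above writes 32 bytes of 'value' starting at
 * kasan_mem_to_shadow(addr).
 */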

#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
	if (!kasan_arch_is_ready())
		return;

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
#endif
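
/*
 * Example (generic mode, assuming KASAN_GRANULE_SIZE == 8): for a 13-byte
 * object, the last granule is only partially used, so its shadow byte is
 * set to 13 & 7 == 5, i.e. only the first 5 bytes of that granule are
 * accessible.
 */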

void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;

	/* Unpoison all granules that cover the object. */
	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(addr, size);
}
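
/*
 * Example (a sketch, assuming KASAN_GRANULE_SIZE == 8):
 * kasan_unpoison(addr, 13, false) first unpoisons round_up(13, 8) == 16
 * bytes worth of shadow with the pointer's tag, then, in generic mode,
 * marks the last granule as only 5 bytes accessible via
 * kasan_poison_last_granule().
 */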

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;
	if (pud_leaf(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_leaf(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(ptep_get(pte));
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;
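	/*
	 * Sizing example (a sketch, assuming 4K pages and
	 * KASAN_SHADOW_SCALE_SHIFT == 3): onlining a 128M block (32768
	 * pages) needs 32768 >> 3 == 4096 shadow pages, i.e. 16M of shadow
	 * covering the block's direct mapping.
	 */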

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen when onlining previously
		 * offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that it was
		 * indeed the latter case.
		 *
		 * Currently it's not possible to free shadow mapped during
		 * boot by kasan_init(): the code to do that hasn't been
		 * written yet, so we simply leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, DEFAULT_CALLBACK_PRI);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

void __init __weak kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{
}

struct vmalloc_populate_data {
	unsigned long start;
	struct page **pages;
};

static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *_data)
{
	struct vmalloc_populate_data *data = _data;
	struct page *page;
	pte_t pte;
	int index;

	/* Lockless fast path: the shadow PTE is already populated. */
	if (likely(!pte_none(ptep_get(ptep))))
		return 0;

	index = PFN_DOWN(addr - data->start);
	page = data->pages[index];
	__memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);

	/* Recheck under the lock: another CPU may have installed the PTE. */
	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(ptep_get(ptep)))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		/* Mark the page consumed so ___free_pages_bulk() skips it. */
		data->pages[index] = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

static void ___free_pages_bulk(struct page **pages, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (pages[i]) {
			__free_pages(pages[i], 0);
			pages[i] = NULL;
		}
	}
}

static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
{
	unsigned long nr_populated, nr_total = nr_pages;
	struct page **page_array = pages;

	while (nr_pages) {
		/* alloc_pages_bulk() may populate fewer pages than asked. */
		nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
		if (!nr_populated) {
			/* No progress at all: roll back and report failure. */
			___free_pages_bulk(page_array, nr_total - nr_pages);
			return -ENOMEM;
		}
		pages += nr_populated;
		nr_pages -= nr_populated;
	}

	return 0;
}

static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
{
	unsigned long nr_pages, nr_total = PFN_UP(end - start);
	struct vmalloc_populate_data data;
	int ret = 0;

	data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!data.pages)
		return -ENOMEM;

	while (nr_total) {
		nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
		ret = ___alloc_pages_bulk(data.pages, nr_pages);
		if (ret)
			break;

		data.start = start;
		ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
					  kasan_populate_vmalloc_pte, &data);
		___free_pages_bulk(data.pages, nr_pages);
		if (ret)
			break;

		start += nr_pages * PAGE_SIZE;
		nr_total -= nr_pages;
	}

	free_page((unsigned long)data.pages);

	return ret;
}
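
/*
 * Batching note (illustrative, assuming a 64-bit kernel with 4K pages):
 * data.pages is a single page holding PAGE_SIZE / sizeof(struct page *) ==
 * 512 entries, so shadow is populated in batches of up to 512 pages (2M of
 * shadow) per loop iteration.
 */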

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!kasan_arch_is_ready())
		return 0;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);

	/*
	 * User Mode Linux maps enough shadow memory for all of virtual
	 * memory at boot, so it doesn't need to allocate more for vmalloc;
	 * the shadow just needs to be cleared.
	 *
	 * The remaining CONFIG_UML checks in this file exist for the same
	 * reason.
	 */
	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
		return 0;
	}

	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
	shadow_end = PAGE_ALIGN(shadow_end);

	ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				  CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				  CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val	x = LOAD p
	 * // rest of vmalloc process		<data dependency>
	 * STORE p, a				LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}
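
/*
 * Range example (a sketch, assuming KASAN_SHADOW_SCALE_SHIFT == 3 and 4K
 * pages): a 16K vmalloc area needs 2K of shadow, so after the page
 * alignment above this typically means populating a single 4K shadow page;
 * neighbouring areas whose shadow lands in the same page are handled by
 * the pte_none() checks in kasan_populate_vmalloc_pte().
 */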

static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	/* Only clear the PTE and free the page if it is still populated. */
	if (likely(!pte_none(ptep_get(ptep)))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labeled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end,
			   unsigned long flags)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	if (!kasan_arch_is_ready())
		return;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		if (IS_ENABLED(CONFIG_UML)) {
			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
			return;
		}

		if (flags & KASAN_VMALLOC_PAGE_RANGE)
			apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);

		if (flags & KASAN_VMALLOC_TLB_FLUSH)
			flush_tlb_kernel_range((unsigned long)shadow_start,
					       (unsigned long)shadow_end);
	}
}
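
/*
 * Alignment example (a sketch, assuming 4K pages and KASAN_GRANULE_SIZE ==
 * 8): KASAN_MEMORY_PER_SHADOW_PAGE is 8 * 4K == 32K, so a shadow page is
 * freed only when the free region fully covers the corresponding
 * 32K-aligned chunk of vmalloc address space.
 */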

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags)
{
	/*
	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
	 * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
	 * Software KASAN modes can't optimize zeroing memory by combining it
	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
	 */

	if (!kasan_arch_is_ready())
		return (void *)start;

	if (!is_vmalloc_or_module_addr(start))
		return (void *)start;

	/*
	 * Don't tag executable memory with the tag-based mode.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
		return (void *)start;

	start = set_tag(start, kasan_random_tag());
	kasan_unpoison(start, size, false);
	return (void *)start;
}
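
/*
 * Tagging sketch (SW_TAGS, illustrative): for a normal-protection vmalloc
 * mapping, the returned pointer carries a random tag, e.g. 0xAB, and
 * kasan_unpoison() stores that same tag in the shadow of the whole area,
 * so accesses through a pointer with a different tag are reported.
 */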

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!kasan_arch_is_ready())
		return;

	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

#else /* CONFIG_KASAN_VMALLOC */

int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
		return 0;
	}

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		struct vm_struct *vm = find_vm_area(addr);
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		vm->flags |= VM_KASAN;
		kmemleak_ignore(ret);

		if (vm->flags & VM_DEFER_KMEMLEAK)
			kmemleak_vmalloc(vm, size, gfp_mask);

		return 0;
	}

	return -ENOMEM;
}
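
/*
 * Sizing example (a sketch, assuming KASAN_GRANULE_SIZE == 8,
 * KASAN_SHADOW_SCALE_SHIFT == 3, and 4K pages): a 1M module allocation
 * scales to 1M >> 3 == 128K of shadow, rounded up to 32 pages; a 100-byte
 * size scales to 13 bytes and rounds up to a single page.
 */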

void kasan_free_module_shadow(const struct vm_struct *vm)
{
	if (IS_ENABLED(CONFIG_UML))
		return;

	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif