// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
#include <linux/page_owner.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	struct page *page;
	unsigned long size = PAGE_SIZE;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(end - addr)))
		return -EINVAL;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;

	lazy_mmu_mode_enable();

	do {
		if (unlikely(!pte_none(ptep_get(pte)))) {
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				dump_page(page, "remapping already mapped page");
			}
			BUG();
		}

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);

	lazy_mmu_mode_disable();
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}
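
/*
 * A note on the contiguous-PTE case above: arch_vmap_pte_range_map_size()
 * lets an architecture claim a run of PTEs as a single larger mapping.
 * As an illustration (an arm64-style contiguous-PTE scheme with 4K base
 * pages is assumed here), the helper could return SZ_64K, in which case
 * one huge PTE covering 16 base pages is installed and pfn/addr/pte all
 * advance by 16 pages at once.
 */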

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}
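
/*
 * Worked example for the checks above (a configuration with 2MB PMDs,
 * e.g. x86_64 with 4K pages, is assumed purely for illustration): a
 * single PMD leaf entry is installed only when all of the following hold:
 *
 *	addr      = 0xffffc90000200000;	// 2MB-aligned virtual address
 *	end       = addr + PMD_SIZE;	// exactly one PMD worth of VA
 *	phys_addr = 0x100200000;	// 2MB-aligned physical address
 *
 * If any check fails, 0 is returned and the caller falls back to mapping
 * the range with base-page PTEs via vmap_pte_range().
 */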

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		err = vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask);
		if (err)
			break;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return err;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		err = vmap_pmd_range(pud, addr, next, phys_addr, prot, max_page_shift, mask);
		if (err)
			break;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return err;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;
	int err = 0;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		err = vmap_pud_range(p4d, addr, next, phys_addr, prot, max_page_shift, mask);
		if (err)
			break;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return err;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	/*
	 * Might allocate pagetables (for most archs a more precise annotation
	 * would be might_alloc(GFP_PGTABLE_KERNEL)). Also might shootdown TLB
	 * (requires IRQs enabled on x86).
	 */
	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int vmap_page_range(unsigned long addr, unsigned long end,
		    phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	struct vm_struct *area;

	area = find_vm_area((void *)addr);
	if (!area || !(area->flags & VM_IOREMAP)) {
		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
		return -EINVAL;
	}
	if (addr != (unsigned long)area->addr ||
	    (void *)end != area->addr + get_vm_area_size(area)) {
		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
			  addr, end, (long)area->addr,
			  (long)area->addr + get_vm_area_size(area));
		return -ERANGE;
	}
	return vmap_page_range(addr, end, phys_addr, prot);
}
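
/*
 * A minimal usage sketch (illustrative only; the MMIO base address is
 * made up): drivers normally reach ioremap_page_range() indirectly via
 * ioremap(), which first allocates a VM_IOREMAP area and then maps the
 * device range into it:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(0xfed00000, SZ_4K);	// hypothetical MMIO base
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);		// poke a device register
 *	iounmap(regs);
 *
 * Calling ioremap_page_range() on a range that does not exactly cover
 * a VM_IOREMAP vm_struct fails with the warnings above.
 */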

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;
	pte_t ptent;
	unsigned long size = PAGE_SIZE;

	pte = pte_offset_kernel(pmd, addr);
	lazy_mmu_mode_enable();

	do {
#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_unmap_size(addr, pte);
		if (size != PAGE_SIZE) {
			if (WARN_ON(!IS_ALIGNED(addr, size))) {
				addr = ALIGN_DOWN(addr, size);
				pte = PTR_ALIGN_DOWN(pte, sizeof(*pte) * (size >> PAGE_SHIFT));
			}
			ptent = huge_ptep_get_and_clear(&init_mm, addr, pte, size);
			if (WARN_ON(end - addr < size))
				size = end - addr;
		} else
#endif
			ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);

	lazy_mmu_mode_disable();
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared) {
			WARN_ON(next - addr < PMD_SIZE);
			continue;
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared) {
			WARN_ON(next - addr < PUD_SIZE);
			continue;
		}
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range() after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
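
/*
 * The body of vunmap_range() is also the contract for the _noflush
 * variants, spelled out as a sequence: flush caches for the virtual
 * range first, tear down the page tables next, and invalidate the TLB
 * last. Open-coding the equivalent (only legitimate inside mm/):
 *
 *	flush_cache_vunmap(addr, end);		// pre-teardown cache flush
 *	vunmap_range_noflush(addr, end);	// clear kernel PTEs
 *	flush_tlb_kernel_range(addr, end);	// drop stale translations
 */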

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	int err = 0;
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;

	lazy_mmu_mode_enable();

	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte)))) {
			err = -EBUSY;
			break;
		}
		if (WARN_ON(!page)) {
			err = -ENOMEM;
			break;
		}
		if (WARN_ON(!pfn_valid(page_to_pfn(page)))) {
			err = -EINVAL;
			break;
		}

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	lazy_mmu_mode_disable();
	*mask |= PGTBL_PTE_MODIFIED;

	return err;
}

vmap_pages_pmd_range(pud_t * pud,unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,int * nr,pgtbl_mod_mask * mask)569 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
570 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
571 		pgtbl_mod_mask *mask)
572 {
573 	pmd_t *pmd;
574 	unsigned long next;
575 
576 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
577 	if (!pmd)
578 		return -ENOMEM;
579 	do {
580 		next = pmd_addr_end(addr, end);
581 		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
582 			return -ENOMEM;
583 	} while (pmd++, addr = next, addr != end);
584 	return 0;
585 }
586 
vmap_pages_pud_range(p4d_t * p4d,unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,int * nr,pgtbl_mod_mask * mask)587 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
588 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
589 		pgtbl_mod_mask *mask)
590 {
591 	pud_t *pud;
592 	unsigned long next;
593 
594 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
595 	if (!pud)
596 		return -ENOMEM;
597 	do {
598 		next = pud_addr_end(addr, end);
599 		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
600 			return -ENOMEM;
601 	} while (pud++, addr = next, addr != end);
602 	return 0;
603 }
604 
vmap_pages_p4d_range(pgd_t * pgd,unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages,int * nr,pgtbl_mod_mask * mask)605 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
606 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
607 		pgtbl_mod_mask *mask)
608 {
609 	p4d_t *p4d;
610 	unsigned long next;
611 
612 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
613 	if (!p4d)
614 		return -ENOMEM;
615 	do {
616 		next = p4d_addr_end(addr, end);
617 		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
618 			return -ENOMEM;
619 	} while (p4d++, addr = next, addr != end);
620 	return 0;
621 }
622 
vmap_small_pages_range_noflush(unsigned long addr,unsigned long end,pgprot_t prot,struct page ** pages)623 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
624 		pgprot_t prot, struct page **pages)
625 {
626 	unsigned long start = addr;
627 	pgd_t *pgd;
628 	unsigned long next;
629 	int err = 0;
630 	int nr = 0;
631 	pgtbl_mod_mask mask = 0;
632 
633 	BUG_ON(addr >= end);
634 	pgd = pgd_offset_k(addr);
635 	do {
636 		next = pgd_addr_end(addr, end);
637 		if (pgd_bad(*pgd))
638 			mask |= PGTBL_PGD_MODIFIED;
639 		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
640 		if (err)
641 			break;
642 	} while (pgd++, addr = next, addr != end);
643 
644 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
645 		arch_sync_kernel_mappings(start, end);
646 
647 	return err;
648 }

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}
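
/*
 * Illustration of the huge-page stride above (4K base pages and
 * PMD_SHIFT == 21 are assumed): with page_shift == PMD_SHIFT the loop
 * consumes the pages array in strides of 512 entries, mapping each 2MB
 * group with a single huge mapping:
 *
 *	nr   = (end - addr) >> PAGE_SHIFT;	// e.g. 1024 pages
 *	step = 1U << (page_shift - PAGE_SHIFT);	// 512
 *	// i = 0 and i = 512: two huge mappings, backed by pages[0]
 *	// and pages[512] respectively
 *
 * The pages within each group must be physically contiguous with their
 * group leader, which the caller guarantees.
 */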

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift,
		gfp_t gfp_mask)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						page_shift, gfp_mask);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

static int __vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift,
		gfp_t gfp_mask)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift, gfp_mask);
	flush_cache_vmap(addr, end);
	return err;
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return __vmap_pages_range(addr, end, prot, pages, page_shift, GFP_KERNEL);
}
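
/*
 * A sketch of a typical caller (illustrative, not code from this file):
 * vmap() works along these lines, allocating a vm area first and then
 * mapping the caller's pages into it with base-page PTEs:
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(nr_pages << PAGE_SHIFT, VM_MAP);
 *	if (!area)
 *		return NULL;
 *	if (vmap_pages_range((unsigned long)area->addr,
 *			(unsigned long)area->addr + get_vm_area_size(area),
 *			PAGE_KERNEL, pages, PAGE_SHIFT) < 0) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *	return area->addr;
 */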

static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
				unsigned long end)
{
	might_sleep();
	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
		return -EINVAL;
	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
		return -EINVAL;
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;
	return 0;
}

/**
 * vm_area_map_pages - map pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 * @pages: pages to map (always PAGE_SIZE pages)
 */
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages)
{
	int err;

	err = check_sparse_vm_area(area, start, end);
	if (err)
		return err;

	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
}
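
/*
 * Usage sketch (illustrative): a subsystem can reserve a large sparse
 * area once and populate small windows of it on demand, for example:
 *
 *	struct vm_struct *area = get_vm_area(SZ_4M, VM_SPARSE);
 *	unsigned long start = (unsigned long)area->addr;
 *	int err;
 *
 *	// map a single page at offset 0 of the sparse area
 *	err = vm_area_map_pages(area, start, start + PAGE_SIZE, &page);
 *
 * The window can later be torn down again with vm_area_unmap_pages(),
 * defined below.
 */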

/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 */
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end)
{
	if (check_sparse_vm_area(area, start, end))
		return;

	vunmap_range(start, end);
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
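
/*
 * Example (illustrative): these helpers let code that only holds a
 * vmalloc()'ed pointer recover the backing page, e.g. to build a
 * scatterlist one page at a time:
 *
 *	void *buf = vmalloc(SZ_64K);
 *	struct page *pg = vmalloc_to_page(buf + PAGE_SIZE);	// 2nd page
 *	unsigned long pfn = vmalloc_to_pfn(buf);
 *
 * Note that the pages behind a vmalloc buffer are generally not
 * physically contiguous, so pfn/page arithmetic across page boundaries
 * is not valid.
 */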


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(free_vmap_area_lock);
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating a fresh object from the slab each time, an object
 * from this cache is reused, which makes things faster, especially
 * for the "no edge" split of a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * size of its sub-tree, left or right. Therefore it is possible
 * to find the lowest-address free area that satisfies a request.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from atomic context, and thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

/*
 * This structure defines a single, solid model where a list and
 * rb-tree are part of one entity protected by the lock. Nodes are
 * sorted in ascending order, thus for O(1) access to left/right
 * neighbors a list is used as well as for sequential traversal.
 */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};

/*
 * A fast size storage contains VAs up to 1M size. A pool consists
 * of ready-to-go VAs of a certain size, linked to each other. Pool
 * index "i" in the pool array holds VAs of (i + 1) pages.
 */
#define MAX_VA_SIZE_PAGES 256

struct vmap_pool {
	struct list_head head;
	unsigned long len;
};

/*
 * An effective vmap-node logic. Users make use of nodes instead
 * of a global heap, which balances accesses across nodes and
 * mitigates lock contention.
 */
static struct vmap_node {
	/* Simple size segregated storage. */
	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
	spinlock_t pool_lock;
	bool skip_populate;

	/* Bookkeeping data of this node. */
	struct rb_list busy;
	struct rb_list lazy;

	/*
	 * Ready-to-free areas.
	 */
	struct list_head purge_list;
	struct work_struct purge_work;
	unsigned long nr_purged;
} single;

/*
 * The initial setup consists of one single node, i.e. balancing
 * is fully disabled. Later on, after vmap is initialized, these
 * parameters are updated based on the system capacity.
 */
static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;

/* A simple iterator over all vmap-nodes. */
#define for_each_vmap_node(vn)	\
	for ((vn) = &vmap_nodes[0];	\
		(vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)

static inline unsigned int
addr_to_node_id(unsigned long addr)
{
	return (addr / vmap_zone_size) % nr_vmap_nodes;
}

static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
	return &vmap_nodes[addr_to_node_id(addr)];
}
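
/*
 * Worked example (the numbers are illustrative, not the real tuned
 * values): with nr_vmap_nodes == 4 and vmap_zone_size == 16M, the
 * address space is striped across nodes in 16M zones:
 *
 *	addr_to_node_id(addr) == (addr / SZ_16M) % 4;
 *
 * so zone 0 maps to node 0, zone 1 to node 1, ..., and zone 4 wraps
 * around to node 0 again.
 */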

static inline struct vmap_node *
id_to_node(unsigned int id)
{
	return &vmap_nodes[id % nr_vmap_nodes];
}

static inline unsigned int
node_to_id(struct vmap_node *node)
{
	/* Pointer arithmetic. */
	unsigned int id = node - vmap_nodes;

	if (likely(id < nr_vmap_nodes))
		return id;

	WARN_ONCE(1, "An address 0x%p is out-of-bounds.\n", node);
	return 0;
}

/*
 * We use the value 0 to represent "no node", which is why
 * an encoded value is the node-id incremented by 1. It is
 * always greater than 0. A valid node_id that can be encoded
 * is [0:nr_vmap_nodes - 1]. If a passed node_id is not valid,
 * 0 is returned.
 */
static unsigned int
encode_vn_id(unsigned int node_id)
{
	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return (node_id + 1) << BITS_PER_BYTE;

	/* Warn and no node encoded. */
	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
	return 0;
}

/*
 * Returns the decoded node-id; the valid range is within
 * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is
 * returned if the extracted data is wrong.
 */
static unsigned int
decode_vn_id(unsigned int val)
{
	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;

	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return node_id;

	/* If it was _not_ zero, warn. */
	WARN_ONCE(node_id != UINT_MAX,
		"Decode wrong node id (%d)\n", node_id);

	return nr_vmap_nodes;
}
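
/*
 * Round-trip example: for node_id == 3, encode_vn_id() returns
 * (3 + 1) << BITS_PER_BYTE == 0x400; decode_vn_id(0x400) then
 * recovers (0x400 >> 8) - 1 == 3. A plain value of 0 ("no node"
 * encoded) decodes to UINT_MAX after the subtraction, falls outside
 * [0:nr_vmap_nodes - 1] and is reported as nr_vmap_nodes without
 * triggering the warning.
 */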

static bool
is_vn_id_valid(unsigned int node_id)
{
	if (node_id < nr_vmap_nodes)
		return true;

	return false;
}

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr;

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *
__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

/*
 * Returns the node where the first VA that satisfies addr < va_end
 * resides. On success the node is locked; the caller is responsible
 * for unlocking it once the VA no longer needs to be accessed.
 *
 * Returns NULL if nothing is found.
 */
static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
{
	unsigned long va_start_lowest;
	struct vmap_node *vn;

repeat:
	va_start_lowest = 0;

	for_each_vmap_node(vn) {
		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);

		if (*va)
			if (!va_start_lowest || (*va)->va_start < va_start_lowest)
				va_start_lowest = (*va)->va_start;
		spin_unlock(&vn->busy.lock);
	}

	/*
	 * Check that the found VA still exists, it might have gone away.
	 * In that case we repeat the search, because the VA has been
	 * removed concurrently and we need to proceed to the next one,
	 * which is a rare case.
	 */
	if (va_start_lowest) {
		vn = addr_to_node(va_start_lowest);

		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area(va_start_lowest, &vn->busy.root);

		if (*va)
			return vn;

		spin_unlock(&vn->busy.lock);
		goto repeat;
	}

	return NULL;
}

/*
 * This function returns the addresses of the parent node and of
 * its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further steps
 * regarding inserting of a conflicting overlapping range have to
 * be declined and are actually considered a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * named "link" here, where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checking.
		 * Trigger a warning if there are partial (left/right)
		 * or full overlaps.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}

static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when removing a node or rotating.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from the bottom to the
 * upper levels, starting from the VA point. The propagation must be
 * done when the VA size is modified by changing its va_start/va_end,
 * or when a new VA is inserted into the tree.
 *
 * It means that augment_tree_propagate_from() must be called:
 * - after a VA has been inserted into the tree (free path);
 * - after a VA has been shrunk (allocation path);
 * - after a VA has been increased (merging path).
 *
 * Please note that it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the way up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example, if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1, only
 * its subtree_max_size is updated and set to 1. If we shrink the
 * node 8 to 6, then its subtree_max_size is set to 6 and the parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If no coalescing is done, a new free
 * area is inserted. If the VA has been merged, it is freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Despite this being
 * buggy behaviour, a system can stay alive and keep going.
 */
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head, bool augment)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				__unlink_va(va, root, augment);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		__link_va(va, root, parent, link, head, augment);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	return __merge_or_add_vmap_area(va, root, head, false);
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = __merge_or_add_vmap_area(va, root, head, true);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that satisfies the request given by the passed parameters.
 * Please note, with an alignment bigger than PAGE_SIZE, the
 * search length is adjusted to account for the worst-case
 * alignment overhead.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root *root, unsigned long size,
	unsigned long align, unsigned long vstart, bool adjust_search_size)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = root->rb_node;

	/* Adjust the search size for alignment overhead. */
	length = adjust_search_size ? size + align - 1 : size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * It does not make sense to go deeper into the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that satisfies the search criteria. It can happen
			 * due to a "vstart" restriction or an alignment overhead
			 * that is bigger than PAGE_SIZE.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					/*
					 * Shift the vstart forward. Please note, we update it with
					 * parent's start address adding "1" because we do not want
					 * to enter same sub-tree after it has already been checked
					 * and no suitable free block found there.
					 */
					vstart = va->va_start + 1;
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}
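
/*
 * Example of the search-length adjustment above (illustrative numbers):
 * for size == 8K and align == 16K, a free block of exactly 8K may still
 * be unusable if its start address is poorly aligned. Searching for
 * length == size + align - 1 == 24K - 1 guarantees that any block found
 * contains a 16K-aligned start address with 8K of room behind it; the
 * final is_within_this_va() check then validates the exact fit.
 */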

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, head, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
			     unsigned long size, unsigned long align)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
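
/*
 * Concrete example (illustrative addresses): for a free VA spanning
 * [0x1000, 0x9000) and a request of size 0x2000:
 *
 *	nva_start_addr == 0x1000 -> LE_FIT_TYPE (right part stays free)
 *	nva_start_addr == 0x7000 -> RE_FIT_TYPE (left part stays free)
 *	nva_start_addr == 0x4000 -> NE_FIT_TYPE (free space on both sides)
 *	size == 0x8000 at 0x1000 -> FL_FIT_TYPE (VA is consumed entirely)
 *
 * va_clip() below carves the allocated range out of the VA accordingly.
 */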
1699 
1700 static __always_inline int
va_clip(struct rb_root * root,struct list_head * head,struct vmap_area * va,unsigned long nva_start_addr,unsigned long size)1701 va_clip(struct rb_root *root, struct list_head *head,
1702 		struct vmap_area *va, unsigned long nva_start_addr,
1703 		unsigned long size)
1704 {
1705 	struct vmap_area *lva = NULL;
1706 	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
1707 
1708 	if (type == FL_FIT_TYPE) {
1709 		/*
1710 		 * No need to split VA, it fully fits.
1711 		 *
1712 		 * |               |
1713 		 * V      NVA      V
1714 		 * |---------------|
1715 		 */
1716 		unlink_va_augment(va, root);
1717 		kmem_cache_free(vmap_area_cachep, va);
1718 	} else if (type == LE_FIT_TYPE) {
1719 		/*
1720 		 * Split left edge of fit VA.
1721 		 *
1722 		 * |       |
1723 		 * V  NVA  V   R
1724 		 * |-------|-------|
1725 		 */
1726 		va->va_start += size;
1727 	} else if (type == RE_FIT_TYPE) {
1728 		/*
1729 		 * Split right edge of fit VA.
1730 		 *
1731 		 *         |       |
1732 		 *     L   V  NVA  V
1733 		 * |-------|-------|
1734 		 */
1735 		va->va_end = nva_start_addr;
1736 	} else if (type == NE_FIT_TYPE) {
1737 		/*
1738 		 * Split no edge of fit VA.
1739 		 *
1740 		 *     |       |
1741 		 *   L V  NVA  V R
1742 		 * |---|-------|---|
1743 		 */
1744 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1745 		if (unlikely(!lva)) {
1746 			/*
1747 			 * For the percpu allocator we do not do any pre-allocation
1748 			 * and leave it as it is. The reason is that it most likely
1749 			 * never ends up with NE_FIT_TYPE splitting. In case of
1750 			 * percpu allocations, offsets and sizes are aligned to a
1751 			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1752 			 * are its main fitting cases.
1753 			 *
1754 			 * There are a few exceptions though; for example, the
1755 			 * first allocation (early boot) when we have "one"
1756 			 * big free space that has to be split.
1757 			 *
1758 			 * We can also hit this path in case of regular "vmap"
1759 			 * allocations, if "this" current CPU was not preloaded.
1760 			 * See the comment in alloc_vmap_area() for why. If so,
1761 			 * GFP_NOWAIT is used instead to get an extra object for
1762 			 * split purposes. That is rare and most of the time does
1763 			 * not occur.
1764 			 *
1765 			 * What happens if an allocation fails? Basically, an
1766 			 * "overflow" path is triggered to purge lazily freed
1767 			 * areas to free some memory, then the "retry" path is
1768 			 * triggered to repeat one more time. See more details
1769 			 * in the alloc_vmap_area() function.
1770 			 */
1771 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1772 			if (!lva)
1773 				return -ENOMEM;
1774 		}
1775 
1776 		/*
1777 		 * Build the remainder.
1778 		 */
1779 		lva->va_start = va->va_start;
1780 		lva->va_end = nva_start_addr;
1781 
1782 		/*
1783 		 * Shrink this VA to remaining size.
1784 		 */
1785 		va->va_start = nva_start_addr + size;
1786 	} else {
1787 		return -EINVAL;
1788 	}
1789 
1790 	if (type != FL_FIT_TYPE) {
1791 		augment_tree_propagate_from(va);
1792 
1793 		if (lva)	/* type == NE_FIT_TYPE */
1794 			insert_vmap_area_augment(lva, &va->rb_node, root, head);
1795 	}
1796 
1797 	return 0;
1798 }
1799 
1800 static unsigned long
1801 va_alloc(struct vmap_area *va,
1802 		struct rb_root *root, struct list_head *head,
1803 		unsigned long size, unsigned long align,
1804 		unsigned long vstart, unsigned long vend)
1805 {
1806 	unsigned long nva_start_addr;
1807 	int ret;
1808 
1809 	if (va->va_start > vstart)
1810 		nva_start_addr = ALIGN(va->va_start, align);
1811 	else
1812 		nva_start_addr = ALIGN(vstart, align);
1813 
1814 	/* Check the "vend" restriction. */
1815 	if (nva_start_addr + size > vend)
1816 		return -ERANGE;
1817 
1818 	/* Update the free vmap_area. */
1819 	ret = va_clip(root, head, va, nva_start_addr, size);
1820 	if (WARN_ON_ONCE(ret))
1821 		return ret;
1822 
1823 	return nva_start_addr;
1824 }
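/*
 * Illustrative sketch, not part of the kernel source: how the aligned
 * start address above is derived. The macro and the values below are
 * assumptions made for this example; it rounds up to a power-of-two
 * boundary exactly as the kernel's ALIGN() macro does.
 */
#if 0 /* example only */
#define EXAMPLE_ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

	/* va->va_start = 0x12345000, vstart = 0x10000000, align = 64 KiB: */
	/* va_start > vstart, so the block start is rounded up:            */
	/*   EXAMPLE_ALIGN(0x12345000, 0x10000) == 0x12350000              */
	/* va_alloc() then fails with -ERANGE if 0x12350000 + size > vend. */
#endif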
1825 
1826 /*
1827  * Returns the start address of the newly allocated area on success.
1828  * Otherwise an error value indicating the failure is returned.
1829  */
1830 static __always_inline unsigned long
1831 __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1832 	unsigned long size, unsigned long align,
1833 	unsigned long vstart, unsigned long vend)
1834 {
1835 	bool adjust_search_size = true;
1836 	unsigned long nva_start_addr;
1837 	struct vmap_area *va;
1838 
1839 	/*
1840 	 * Do not adjust when:
1841 	 *   a) align <= PAGE_SIZE, because it does not make any sense.
1842 	 *      All blocks (their start addresses) are at least PAGE_SIZE
1843 	 *      aligned anyway;
1844 	 *   b) a short range where the requested size corresponds exactly to
1845 	 *      the specified [vstart:vend] interval and the alignment > PAGE_SIZE.
1846 	 *      With an adjusted search length the allocation would not succeed.
1847 	 */
1848 	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
1849 		adjust_search_size = false;
1850 
1851 	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
1852 	if (unlikely(!va))
1853 		return -ENOENT;
1854 
1855 	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
1856 
1857 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1858 	if (!IS_ERR_VALUE(nva_start_addr))
1859 		find_vmap_lowest_match_check(root, head, size, align);
1860 #endif
1861 
1862 	return nva_start_addr;
1863 }
1864 
1865 /*
1866  * Free a region of KVA allocated by alloc_vmap_area
1867  */
1868 static void free_vmap_area(struct vmap_area *va)
1869 {
1870 	struct vmap_node *vn = addr_to_node(va->va_start);
1871 
1872 	/*
1873 	 * Remove from the busy tree/list.
1874 	 */
1875 	spin_lock(&vn->busy.lock);
1876 	unlink_va(va, &vn->busy.root);
1877 	spin_unlock(&vn->busy.lock);
1878 
1879 	/*
1880 	 * Insert/Merge it back to the free tree/list.
1881 	 */
1882 	spin_lock(&free_vmap_area_lock);
1883 	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1884 	spin_unlock(&free_vmap_area_lock);
1885 }
1886 
1887 static inline void
1888 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1889 {
1890 	struct vmap_area *va = NULL, *tmp;
1891 
1892 	/*
1893 	 * Preload this CPU with one extra vmap_area object. It is used
1894 	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
1895 	 * a CPU that does an allocation is preloaded.
1896 	 *
1897 	 * We do it in a non-atomic context, which allows us to use more
1898 	 * permissive allocation masks and thus be more stable under low
1899 	 * memory conditions and high memory pressure.
1900 	 */
1901 	if (!this_cpu_read(ne_fit_preload_node))
1902 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1903 
1904 	spin_lock(lock);
1905 
1906 	tmp = NULL;
1907 	if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va))
1908 		kmem_cache_free(vmap_area_cachep, va);
1909 }
1910 
1911 static struct vmap_pool *
1912 size_to_va_pool(struct vmap_node *vn, unsigned long size)
1913 {
1914 	unsigned int idx = (size - 1) / PAGE_SIZE;
1915 
1916 	if (idx < MAX_VA_SIZE_PAGES)
1917 		return &vn->pool[idx];
1918 
1919 	return NULL;
1920 }
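/*
 * Illustrative sketch, not part of the kernel source: the size-to-pool
 * mapping above, assuming 4 KiB pages and MAX_VA_SIZE_PAGES == 256
 * (both are assumptions of this example). The helper is hypothetical.
 */
#if 0 /* example only */
static unsigned int example_pool_idx(unsigned long size)
{
	return (size - 1) / 4096;	/* one pool per page count */
}

	/* example_pool_idx(4096)    == 0   -> pool[0], one-page areas   */
	/* example_pool_idx(8192)    == 1   -> pool[1], two-page areas   */
	/* example_pool_idx(1048576) == 255 -> the last pool (256 pages) */
	/* Anything larger makes size_to_va_pool() return NULL and the   */
	/* allocation falls back to the global free tree.                */
#endif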
1921 
1922 static bool
1923 node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
1924 {
1925 	struct vmap_pool *vp;
1926 
1927 	vp = size_to_va_pool(n, va_size(va));
1928 	if (!vp)
1929 		return false;
1930 
1931 	spin_lock(&n->pool_lock);
1932 	list_add(&va->list, &vp->head);
1933 	WRITE_ONCE(vp->len, vp->len + 1);
1934 	spin_unlock(&n->pool_lock);
1935 
1936 	return true;
1937 }
1938 
1939 static struct vmap_area *
1940 node_pool_del_va(struct vmap_node *vn, unsigned long size,
1941 		unsigned long align, unsigned long vstart,
1942 		unsigned long vend)
1943 {
1944 	struct vmap_area *va = NULL;
1945 	struct vmap_pool *vp;
1946 	int err = 0;
1947 
1948 	vp = size_to_va_pool(vn, size);
1949 	if (!vp || list_empty(&vp->head))
1950 		return NULL;
1951 
1952 	spin_lock(&vn->pool_lock);
1953 	if (!list_empty(&vp->head)) {
1954 		va = list_first_entry(&vp->head, struct vmap_area, list);
1955 
1956 		if (IS_ALIGNED(va->va_start, align)) {
1957 			/*
1958 			 * Do some sanity checks and emit a warning
1959 			 * if any of the below checks detects an error.
1960 			 */
1961 			err |= (va_size(va) != size);
1962 			err |= (va->va_start < vstart);
1963 			err |= (va->va_end > vend);
1964 
1965 			if (!WARN_ON_ONCE(err)) {
1966 				list_del_init(&va->list);
1967 				WRITE_ONCE(vp->len, vp->len - 1);
1968 			} else {
1969 				va = NULL;
1970 			}
1971 		} else {
1972 			list_move_tail(&va->list, &vp->head);
1973 			va = NULL;
1974 		}
1975 	}
1976 	spin_unlock(&vn->pool_lock);
1977 
1978 	return va;
1979 }
1980 
1981 static struct vmap_area *
1982 node_alloc(unsigned long size, unsigned long align,
1983 		unsigned long vstart, unsigned long vend,
1984 		unsigned long *addr, unsigned int *vn_id)
1985 {
1986 	struct vmap_area *va;
1987 
1988 	*vn_id = 0;
1989 	*addr = -EINVAL;
1990 
1991 	/*
1992 	 * Fall back to the global heap if the range is not the
1993 	 * vmalloc one or there is only one node.
1994 	 */
1995 	if (vstart != VMALLOC_START || vend != VMALLOC_END ||
1996 			nr_vmap_nodes == 1)
1997 		return NULL;
1998 
1999 	*vn_id = raw_smp_processor_id() % nr_vmap_nodes;
2000 	va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
2001 	*vn_id = encode_vn_id(*vn_id);
2002 
2003 	if (va)
2004 		*addr = va->va_start;
2005 
2006 	return va;
2007 }
2008 
2009 static inline void setup_vmalloc_vm(struct vm_struct *vm,
2010 	struct vmap_area *va, unsigned long flags, const void *caller)
2011 {
2012 	vm->flags = flags;
2013 	vm->addr = (void *)va->va_start;
2014 	vm->size = vm->requested_size = va_size(va);
2015 	vm->caller = caller;
2016 	va->vm = vm;
2017 }
2018 
2019 /*
2020  * Allocate a region of KVA of the specified size and alignment, within
2021  * the range [vstart, vend). If vm is passed in, the two will also be bound.
2022  */
2023 static struct vmap_area *alloc_vmap_area(unsigned long size,
2024 				unsigned long align,
2025 				unsigned long vstart, unsigned long vend,
2026 				int node, gfp_t gfp_mask,
2027 				unsigned long va_flags, struct vm_struct *vm)
2028 {
2029 	struct vmap_node *vn;
2030 	struct vmap_area *va;
2031 	unsigned long freed;
2032 	unsigned long addr;
2033 	unsigned int vn_id;
2034 	bool allow_block;
2035 	int purged = 0;
2036 	int ret;
2037 
2038 	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
2039 		return ERR_PTR(-EINVAL);
2040 
2041 	if (unlikely(!vmap_initialized))
2042 		return ERR_PTR(-EBUSY);
2043 
2044 	/* Only reclaim behaviour flags are relevant. */
2045 	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
2046 	allow_block = gfpflags_allow_blocking(gfp_mask);
2047 	might_sleep_if(allow_block);
2048 
2049 	/*
2050 	 * If a VA is obtained from the global heap (i.e. if it fails
2051 	 * here), it is still marked with this "vn_id" so it is returned
2052 	 * to this node's pool later. This makes it possible to populate
2053 	 * the pools based on user demand.
2054 	 *
2055 	 * On success a ready-to-go VA is returned.
2056 	 */
2057 	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
2058 	if (!va) {
2059 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
2060 		if (unlikely(!va))
2061 			return ERR_PTR(-ENOMEM);
2062 
2063 		/*
2064 		 * Only scan the relevant parts containing pointers to other objects
2065 		 * to avoid false negatives.
2066 		 */
2067 		kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
2068 	}
2069 
2070 retry:
2071 	if (IS_ERR_VALUE(addr)) {
2072 		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
2073 		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
2074 			size, align, vstart, vend);
2075 		spin_unlock(&free_vmap_area_lock);
2076 
2077 		/*
2078 		 * This is not a fast path.  Check if yielding is needed. This
2079 		 * is the only reschedule point in the vmalloc() path.
2080 		 */
2081 		if (allow_block)
2082 			cond_resched();
2083 	}
2084 
2085 	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
2086 
2087 	/*
2088 	 * If the allocation failed, an error value was
2089 	 * returned, so trigger the overflow path.
2090 	 */
2091 	if (IS_ERR_VALUE(addr)) {
2092 		if (allow_block)
2093 			goto overflow;
2094 
2095 		/*
2096 		 * We can not trigger any reclaim logic because
2097 		 * sleeping is not allowed, thus fail an allocation.
2098 		 */
2099 		goto out_free_va;
2100 	}
2101 
2102 	va->va_start = addr;
2103 	va->va_end = addr + size;
2104 	va->vm = NULL;
2105 	va->flags = (va_flags | vn_id);
2106 
2107 	if (vm) {
2108 		vm->addr = (void *)va->va_start;
2109 		vm->size = va_size(va);
2110 		va->vm = vm;
2111 	}
2112 
2113 	vn = addr_to_node(va->va_start);
2114 
2115 	spin_lock(&vn->busy.lock);
2116 	insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
2117 	spin_unlock(&vn->busy.lock);
2118 
2119 	BUG_ON(!IS_ALIGNED(va->va_start, align));
2120 	BUG_ON(va->va_start < vstart);
2121 	BUG_ON(va->va_end > vend);
2122 
2123 	ret = kasan_populate_vmalloc(addr, size, gfp_mask);
2124 	if (ret) {
2125 		free_vmap_area(va);
2126 		return ERR_PTR(ret);
2127 	}
2128 
2129 	return va;
2130 
2131 overflow:
2132 	if (!purged) {
2133 		reclaim_and_purge_vmap_areas();
2134 		purged = 1;
2135 		goto retry;
2136 	}
2137 
2138 	freed = 0;
2139 	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
2140 
2141 	if (freed > 0) {
2142 		purged = 0;
2143 		goto retry;
2144 	}
2145 
2146 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
2147 		pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n",
2148 				size, vstart, vend);
2149 
2150 out_free_va:
2151 	kmem_cache_free(vmap_area_cachep, va);
2152 	return ERR_PTR(-EBUSY);
2153 }
2154 
2155 int register_vmap_purge_notifier(struct notifier_block *nb)
2156 {
2157 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
2158 }
2159 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
2160 
2161 int unregister_vmap_purge_notifier(struct notifier_block *nb)
2162 {
2163 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
2164 }
2165 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
2166 
2167 /*
2168  * lazy_max_pages is the maximum amount of virtual address space we gather up
2169  * before attempting to purge with a TLB flush.
2170  *
2171  * There is a tradeoff here: a larger number will cover more kernel page tables
2172  * and take slightly longer to purge, but it will linearly reduce the number of
2173  * global TLB flushes that must be performed. It would seem natural to scale
2174  * this number up linearly with the number of CPUs (because vmapping activity
2175  * could also scale linearly with the number of CPUs), however it is likely
2176  * that in practice, workloads might be constrained in other ways that mean
2177  * vmap activity will not scale linearly with CPUs. Also, I want to be
2178  * conservative and not introduce a big latency on huge systems, so go with
2179  * a less aggressive log scale. It will still be an improvement over the old
2180  * code, and it will be simple to change the scale factor if we find that it
2181  * becomes a problem on bigger systems.
2182  */
2183 static unsigned long lazy_max_pages(void)
2184 {
2185 	unsigned int log;
2186 
2187 	log = fls(num_online_cpus());
2188 
2189 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
2190 }
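/*
 * Illustrative sketch, not part of the kernel source: the log scale
 * above evaluated for a few CPU counts, assuming 4 KiB pages. fls(n)
 * returns the position of the most significant set bit, counting
 * from 1, so each step adds 32 MiB (8192 pages) to the threshold.
 */
#if 0 /* example only */
	/*    1 CPU  -> fls(1)    == 1  ->  8192 pages ( 32 MiB) */
	/*    4 CPUs -> fls(4)    == 3  -> 24576 pages ( 96 MiB) */
	/*   64 CPUs -> fls(64)   == 7  -> 57344 pages (224 MiB) */
	/* 1024 CPUs -> fls(1024) == 11 -> 90112 pages (352 MiB) */
#endif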
2191 
2192 /*
2193  * Serialize vmap purging.  There is no actual critical section protected
2194  * by this lock, but we want to avoid concurrent calls for performance
2195  * reasons and to make pcpu_get_vm_areas() more deterministic.
2196  */
2197 static DEFINE_MUTEX(vmap_purge_lock);
2198 
2199 /* for per-CPU blocks */
2200 static void purge_fragmented_blocks_allcpus(void);
2201 
2202 static void
2203 reclaim_list_global(struct list_head *head)
2204 {
2205 	struct vmap_area *va, *n;
2206 
2207 	if (list_empty(head))
2208 		return;
2209 
2210 	spin_lock(&free_vmap_area_lock);
2211 	list_for_each_entry_safe(va, n, head, list)
2212 		merge_or_add_vmap_area_augment(va,
2213 			&free_vmap_area_root, &free_vmap_area_list);
2214 	spin_unlock(&free_vmap_area_lock);
2215 }
2216 
2217 static void
2218 decay_va_pool_node(struct vmap_node *vn, bool full_decay)
2219 {
2220 	LIST_HEAD(decay_list);
2221 	struct rb_root decay_root = RB_ROOT;
2222 	struct vmap_area *va, *nva;
2223 	unsigned long n_decay, pool_len;
2224 	int i;
2225 
2226 	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
2227 		LIST_HEAD(tmp_list);
2228 
2229 		if (list_empty(&vn->pool[i].head))
2230 			continue;
2231 
2232 		/* Detach the pool, so no-one can access it. */
2233 		spin_lock(&vn->pool_lock);
2234 		list_replace_init(&vn->pool[i].head, &tmp_list);
2235 		spin_unlock(&vn->pool_lock);
2236 
2237 		pool_len = n_decay = vn->pool[i].len;
2238 		WRITE_ONCE(vn->pool[i].len, 0);
2239 
2240 		/* Decay the pool by ~25% of the remaining objects. */
2241 		if (!full_decay)
2242 			n_decay >>= 2;
2243 		pool_len -= n_decay;
2244 
2245 		list_for_each_entry_safe(va, nva, &tmp_list, list) {
2246 			if (!n_decay--)
2247 				break;
2248 
2249 			list_del_init(&va->list);
2250 			merge_or_add_vmap_area(va, &decay_root, &decay_list);
2251 		}
2252 
2253 		/*
2254 		 * Attach the pool back if it has been partly decayed.
2255 		 * Please note, it is assumed that no other context can
2256 		 * populate the pool, therefore a simple list replace
2257 		 * operation takes place here.
2258 		 */
2259 		if (!list_empty(&tmp_list)) {
2260 			spin_lock(&vn->pool_lock);
2261 			list_replace_init(&tmp_list, &vn->pool[i].head);
2262 			WRITE_ONCE(vn->pool[i].len, pool_len);
2263 			spin_unlock(&vn->pool_lock);
2264 		}
2265 	}
2266 
2267 	reclaim_list_global(&decay_list);
2268 }
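/*
 * Illustrative sketch, not part of the kernel source: the decay
 * arithmetic above for a pool holding 10 objects (a made-up count).
 */
#if 0 /* example only */
	/* pool_len = n_decay = 10;                                     */
	/* !full_decay: n_decay >>= 2 -> 2 objects (~25%) are released, */
	/*              pool_len -= 2 -> 8 objects are re-attached.     */
	/* full_decay:  n_decay stays 10, pool_len drops to 0 and the   */
	/*              whole pool is merged back into the global tree. */
#endif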
2269 
2270 #define KASAN_RELEASE_BATCH_SIZE 32
2271 
2272 static void
2273 kasan_release_vmalloc_node(struct vmap_node *vn)
2274 {
2275 	struct vmap_area *va;
2276 	unsigned long start, end;
2277 	unsigned int batch_count = 0;
2278 
2279 	start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
2280 	end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;
2281 
2282 	list_for_each_entry(va, &vn->purge_list, list) {
2283 		if (is_vmalloc_or_module_addr((void *) va->va_start))
2284 			kasan_release_vmalloc(va->va_start, va->va_end,
2285 				va->va_start, va->va_end,
2286 				KASAN_VMALLOC_PAGE_RANGE);
2287 
2288 		if (need_resched() || (++batch_count >= KASAN_RELEASE_BATCH_SIZE)) {
2289 			cond_resched();
2290 			batch_count = 0;
2291 		}
2292 	}
2293 
2294 	kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);
2295 }
2296 
2297 static void purge_vmap_node(struct work_struct *work)
2298 {
2299 	struct vmap_node *vn = container_of(work,
2300 		struct vmap_node, purge_work);
2301 	unsigned long nr_purged_pages = 0;
2302 	struct vmap_area *va, *n_va;
2303 	LIST_HEAD(local_list);
2304 
2305 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
2306 		kasan_release_vmalloc_node(vn);
2307 
2308 	vn->nr_purged = 0;
2309 
2310 	list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
2311 		unsigned long nr = va_size(va) >> PAGE_SHIFT;
2312 		unsigned int vn_id = decode_vn_id(va->flags);
2313 
2314 		list_del_init(&va->list);
2315 
2316 		nr_purged_pages += nr;
2317 		vn->nr_purged++;
2318 
2319 		if (is_vn_id_valid(vn_id) && !vn->skip_populate)
2320 			if (node_pool_add_va(vn, va))
2321 				continue;
2322 
2323 		/* Go back to global. */
2324 		list_add(&va->list, &local_list);
2325 	}
2326 
2327 	atomic_long_sub(nr_purged_pages, &vmap_lazy_nr);
2328 
2329 	reclaim_list_global(&local_list);
2330 }
2331 
2332 /*
2333  * Purges all lazily-freed vmap areas.
2334  */
2335 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
2336 		bool full_pool_decay)
2337 {
2338 	unsigned long nr_purged_areas = 0;
2339 	unsigned int nr_purge_helpers;
2340 	static cpumask_t purge_nodes;
2341 	unsigned int nr_purge_nodes;
2342 	struct vmap_node *vn;
2343 	int i;
2344 
2345 	lockdep_assert_held(&vmap_purge_lock);
2346 
2347 	/*
2348 	 * Use cpumask to mark which node has to be processed.
2349 	 */
2350 	purge_nodes = CPU_MASK_NONE;
2351 
2352 	for_each_vmap_node(vn) {
2353 		INIT_LIST_HEAD(&vn->purge_list);
2354 		vn->skip_populate = full_pool_decay;
2355 		decay_va_pool_node(vn, full_pool_decay);
2356 
2357 		if (RB_EMPTY_ROOT(&vn->lazy.root))
2358 			continue;
2359 
2360 		spin_lock(&vn->lazy.lock);
2361 		WRITE_ONCE(vn->lazy.root.rb_node, NULL);
2362 		list_replace_init(&vn->lazy.head, &vn->purge_list);
2363 		spin_unlock(&vn->lazy.lock);
2364 
2365 		start = min(start, list_first_entry(&vn->purge_list,
2366 			struct vmap_area, list)->va_start);
2367 
2368 		end = max(end, list_last_entry(&vn->purge_list,
2369 			struct vmap_area, list)->va_end);
2370 
2371 		cpumask_set_cpu(node_to_id(vn), &purge_nodes);
2372 	}
2373 
2374 	nr_purge_nodes = cpumask_weight(&purge_nodes);
2375 	if (nr_purge_nodes > 0) {
2376 		flush_tlb_kernel_range(start, end);
2377 
2378 		/* One extra worker is per a lazy_max_pages() full set minus one. */
2379 		/* Add one extra helper per full lazy_max_pages() set, minus one. */
2380 		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
2381 
2382 		for_each_cpu(i, &purge_nodes) {
2383 			vn = &vmap_nodes[i];
2384 
2385 			if (nr_purge_helpers > 0) {
2386 				INIT_WORK(&vn->purge_work, purge_vmap_node);
2387 
2388 				if (cpumask_test_cpu(i, cpu_online_mask))
2389 					schedule_work_on(i, &vn->purge_work);
2390 				else
2391 					schedule_work(&vn->purge_work);
2392 
2393 				nr_purge_helpers--;
2394 			} else {
2395 				vn->purge_work.func = NULL;
2396 				purge_vmap_node(&vn->purge_work);
2397 				nr_purged_areas += vn->nr_purged;
2398 			}
2399 		}
2400 
2401 		for_each_cpu(i, &purge_nodes) {
2402 			vn = &vmap_nodes[i];
2403 
2404 			if (vn->purge_work.func) {
2405 				flush_work(&vn->purge_work);
2406 				nr_purged_areas += vn->nr_purged;
2407 			}
2408 		}
2409 	}
2410 
2411 	trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
2412 	return nr_purged_areas > 0;
2413 }
2414 
2415 /*
2416  * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
2417  */
2418 static void reclaim_and_purge_vmap_areas(void)
2419 
2420 {
2421 	mutex_lock(&vmap_purge_lock);
2422 	purge_fragmented_blocks_allcpus();
2423 	__purge_vmap_area_lazy(ULONG_MAX, 0, true);
2424 	mutex_unlock(&vmap_purge_lock);
2425 }
2426 
2427 static void drain_vmap_area_work(struct work_struct *work)
2428 {
2429 	mutex_lock(&vmap_purge_lock);
2430 	__purge_vmap_area_lazy(ULONG_MAX, 0, false);
2431 	mutex_unlock(&vmap_purge_lock);
2432 }
2433 
2434 /*
2435  * Free a vmap area, the caller ensuring that the area has been unmapped
2436  * and unlinked, and that flush_cache_vunmap() has been called for the
2437  * correct range previously.
2438  */
2439 static void free_vmap_area_noflush(struct vmap_area *va)
2440 {
2441 	unsigned long nr_lazy_max = lazy_max_pages();
2442 	unsigned long va_start = va->va_start;
2443 	unsigned int vn_id = decode_vn_id(va->flags);
2444 	struct vmap_node *vn;
2445 	unsigned long nr_lazy;
2446 
2447 	if (WARN_ON_ONCE(!list_empty(&va->list)))
2448 		return;
2449 
2450 	nr_lazy = atomic_long_add_return_relaxed(va_size(va) >> PAGE_SHIFT,
2451 					 &vmap_lazy_nr);
2452 
2453 	/*
2454 	 * If it was requested by a certain node, we would like to
2455 	 * return it to that node's pool for later reuse.
2456 	 */
2457 	vn = is_vn_id_valid(vn_id) ?
2458 		id_to_node(vn_id) : addr_to_node(va->va_start);
2459 
2460 	spin_lock(&vn->lazy.lock);
2461 	insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
2462 	spin_unlock(&vn->lazy.lock);
2463 
2464 	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
2465 
2466 	/* After this point, we may free va at any time */
2467 	if (unlikely(nr_lazy > nr_lazy_max))
2468 		schedule_work(&drain_vmap_work);
2469 }
2470 
2471 /*
2472  * Free and unmap a vmap area
2473  */
2474 static void free_unmap_vmap_area(struct vmap_area *va)
2475 {
2476 	flush_cache_vunmap(va->va_start, va->va_end);
2477 	vunmap_range_noflush(va->va_start, va->va_end);
2478 	if (debug_pagealloc_enabled_static())
2479 		flush_tlb_kernel_range(va->va_start, va->va_end);
2480 
2481 	free_vmap_area_noflush(va);
2482 }
2483 
2484 struct vmap_area *find_vmap_area(unsigned long addr)
2485 {
2486 	struct vmap_node *vn;
2487 	struct vmap_area *va;
2488 	int i, j;
2489 
2490 	if (unlikely(!vmap_initialized))
2491 		return NULL;
2492 
2493 	/*
2494 	 * addr_to_node_id(addr) converts an address to the index of the
2495 	 * node where a VA is located. If the VA spans several nodes and
2496 	 * the passed addr is not the same as va->va_start, which is not
2497 	 * common, we may need to scan extra nodes. See an example:
2498 	 *
2499 	 *      <----va---->
2500 	 * -|-----|-----|-----|-----|-
2501 	 *     1     2     0     1
2502 	 *
2503 	 * The VA resides in node 1 whereas it spans nodes 1, 2 and 0. If
2504 	 * the passed addr is within node 2 or 0, we should do extra work.
2505 	 */
2506 	i = j = addr_to_node_id(addr);
2507 	do {
2508 		vn = &vmap_nodes[i];
2509 
2510 		spin_lock(&vn->busy.lock);
2511 		va = __find_vmap_area(addr, &vn->busy.root);
2512 		spin_unlock(&vn->busy.lock);
2513 
2514 		if (va)
2515 			return va;
2516 	} while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j);
2517 
2518 	return NULL;
2519 }
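/*
 * Illustrative sketch, not part of the kernel source: the backward
 * scan order produced by the loop above, assuming nr_vmap_nodes == 4
 * and a start index of 1. Scanning backwards matches the case where
 * a VA starts in an earlier node than the one the address hashes to.
 */
#if 0 /* example only */
	/* i = (i + 4 - 1) % 4 visits: 1 -> 0 -> 3 -> 2, then stops at j. */
#endif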
2520 
2521 static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
2522 {
2523 	struct vmap_node *vn;
2524 	struct vmap_area *va;
2525 	int i, j;
2526 
2527 	/*
2528 	 * Check the comment in the find_vmap_area() about the loop.
2529 	 */
2530 	i = j = addr_to_node_id(addr);
2531 	do {
2532 		vn = &vmap_nodes[i];
2533 
2534 		spin_lock(&vn->busy.lock);
2535 		va = __find_vmap_area(addr, &vn->busy.root);
2536 		if (va)
2537 			unlink_va(va, &vn->busy.root);
2538 		spin_unlock(&vn->busy.lock);
2539 
2540 		if (va)
2541 			return va;
2542 	} while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j);
2543 
2544 	return NULL;
2545 }
2546 
2547 /*** Per cpu kva allocator ***/
2548 
2549 /*
2550  * vmap space is limited especially on 32 bit architectures. Ensure there is
2551  * room for at least 16 percpu vmap blocks per CPU.
2552  */
2553 /*
2554  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
2555  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
2556  * instead (we just need a rough idea)
2557  */
2558 #if BITS_PER_LONG == 32
2559 #define VMALLOC_SPACE		(128UL*1024*1024)
2560 #else
2561 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
2562 #endif
2563 
2564 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
2565 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
2566 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
2567 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
2568 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
2569 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
2570 #define VMAP_BBMAP_BITS		\
2571 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
2572 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
2573 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
2574 
2575 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
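/*
 * Illustrative sketch, not part of the kernel source: the clamp above
 * evaluated for an assumed 64-bit configuration with 4 KiB pages and
 * NR_CPUS == 64.
 */
#if 0 /* example only */
	/* VMALLOC_PAGES = 128 GiB / 4 KiB = 33554432                      */
	/* 33554432 / 64 / 16 = 32768, clamped down to VMAP_BBMAP_BITS_MAX */
	/* so VMAP_BBMAP_BITS == 1024 and VMAP_BLOCK_SIZE == 4 MiB.        */
#endif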
2576 
2577 /*
2578  * Purge threshold to prevent overeager purging of fragmented blocks for
2579  * regular operations: Purge if vb->free is less than 1/4 of the capacity.
2580  */
2581 #define VMAP_PURGE_THRESHOLD	(VMAP_BBMAP_BITS / 4)
2582 
2583 #define VMAP_RAM		0x1 /* indicates vm_map_ram area*/
2584 #define VMAP_BLOCK		0x2 /* mark out the vmap_block sub-type*/
2585 #define VMAP_FLAGS_MASK		0x3
2586 
2587 struct vmap_block_queue {
2588 	spinlock_t lock;
2589 	struct list_head free;
2590 
2591 	/*
2592 	 * An xarray requires extra memory to be allocated
2593 	 * dynamically. If that is an issue, an rb-tree can
2594 	 * be used instead.
2595 	 */
2596 	struct xarray vmap_blocks;
2597 };
2598 
2599 struct vmap_block {
2600 	spinlock_t lock;
2601 	struct vmap_area *va;
2602 	unsigned long free, dirty;
2603 	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
2604 	unsigned long dirty_min, dirty_max; /*< dirty range */
2605 	struct list_head free_list;
2606 	struct rcu_head rcu_head;
2607 	struct list_head purge;
2608 	unsigned int cpu;
2609 };
2610 
2611 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
2612 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
2613 
2614 /*
2615  * In order to have fast access to any "vmap_block" associated with a
2616  * specific address, we use a hash.
2617  *
2618  * A per-cpu vmap_block_queue is used in both ways, to serialize
2619  * access to the free block chains among CPUs (alloc path), and it
2620  * also acts as a vmap_block hash (alloc/free paths). This means we
2621  * overload it, since we already have the per-cpu array which is
2622  * used as a hash table. When used as a hash, a 'cpu' passed to
2623  * per_cpu() is not actually a CPU but rather a hash index.
2624  *
2625  * The hash function is addr_to_vb_xa(), which hashes any address
2626  * to the specific index (in the hash) it belongs to. The per_cpu()
2627  * macro is then used to access the array with the generated index.
2628  *
2629  * An example:
2630  *
2631  *  CPU_1  CPU_2  CPU_0
2632  *    |      |      |
2633  *    V      V      V
2634  * 0     10     20     30     40     50     60
2635  * |------|------|------|------|------|------|...<vmap address space>
2636  *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
2637  *
2638  * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to the CPU0 zone, thus
2639  *   it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
2640  *
2641  * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to the CPU1 zone, thus
2642  *   it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
2643  *
2644  * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to the CPU2 zone, thus
2645  *   it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
2646  *
2647  * This technique almost always avoids lock contention on insert/remove,
2648  * however xarray spinlocks protect against any contention that remains.
2649  */
2650 static struct xarray *
2651 addr_to_vb_xa(unsigned long addr)
2652 {
2653 	int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
2654 
2655 	/*
2656 	 * Please note, the highest possible CPU, nr_cpu_ids - 1,
2657 	 * is always set in cpu_possible_mask, i.e. we never invoke
2658 	 * cpumask_next() with an index of nr_cpu_ids - 1.
2659 	 */
2660 	if (!cpu_possible(index))
2661 		index = cpumask_next(index, cpu_possible_mask);
2662 
2663 	return &per_cpu(vmap_block_queue, index).vmap_blocks;
2664 }
2665 
2666 /*
2667  * We should probably have a fallback mechanism to allocate virtual memory
2668  * out of partially filled vmap blocks. However vmap block sizing should be
2669  * fairly reasonable according to the vmalloc size, so it shouldn't be a
2670  * big problem.
2671  */
2672 
2673 static unsigned long addr_to_vb_idx(unsigned long addr)
2674 {
2675 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2676 	addr /= VMAP_BLOCK_SIZE;
2677 	return addr;
2678 }
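/*
 * Illustrative sketch, not part of the kernel source: the two index
 * computations above for a made-up address, assuming a 4 MiB
 * VMAP_BLOCK_SIZE, 8 possible CPUs and, purely for readability, a
 * pretend VMALLOC_START of 64 MiB.
 */
#if 0 /* example only */
	/* addr = 64 MiB + 21 MiB = 85 MiB:                             */
	/* addr_to_vb_idx(): (85 MiB - 64 MiB) / 4 MiB == 5, the xarray */
	/*                   key of the block containing addr.          */
	/* addr_to_vb_xa():  (85 MiB / 4 MiB) % 8 == 21 % 8 == 5, the   */
	/*                   per-cpu hash slot (coincidentally also 5   */
	/*                   here) whose xarray holds that key.         */
#endif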
2679 
2680 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2681 {
2682 	unsigned long addr;
2683 
2684 	addr = va_start + (pages_off << PAGE_SHIFT);
2685 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2686 	return (void *)addr;
2687 }
2688 
2689 /**
2690  * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in this
2691  *                  block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
2692  * @order:    how many 2^order pages should be occupied in newly allocated block
2693  * @gfp_mask: flags for the page level allocator
2694  *
2695  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2696  */
2697 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2698 {
2699 	struct vmap_block_queue *vbq;
2700 	struct vmap_block *vb;
2701 	struct vmap_area *va;
2702 	struct xarray *xa;
2703 	unsigned long vb_idx;
2704 	int node, err;
2705 	void *vaddr;
2706 
2707 	node = numa_node_id();
2708 
2709 	vb = kmalloc_node(sizeof(struct vmap_block), gfp_mask, node);
2710 	if (unlikely(!vb))
2711 		return ERR_PTR(-ENOMEM);
2712 
2713 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2714 					VMALLOC_START, VMALLOC_END,
2715 					node, gfp_mask,
2716 					VMAP_RAM|VMAP_BLOCK, NULL);
2717 	if (IS_ERR(va)) {
2718 		kfree(vb);
2719 		return ERR_CAST(va);
2720 	}
2721 
2722 	vaddr = vmap_block_vaddr(va->va_start, 0);
2723 	spin_lock_init(&vb->lock);
2724 	vb->va = va;
2725 	/* At least something should be left free */
2726 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2727 	bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2728 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
2729 	vb->dirty = 0;
2730 	vb->dirty_min = VMAP_BBMAP_BITS;
2731 	vb->dirty_max = 0;
2732 	bitmap_set(vb->used_map, 0, (1UL << order));
2733 	INIT_LIST_HEAD(&vb->free_list);
2734 	vb->cpu = raw_smp_processor_id();
2735 
2736 	xa = addr_to_vb_xa(va->va_start);
2737 	vb_idx = addr_to_vb_idx(va->va_start);
2738 	err = xa_insert(xa, vb_idx, vb, gfp_mask);
2739 	if (err) {
2740 		kfree(vb);
2741 		free_vmap_area(va);
2742 		return ERR_PTR(err);
2743 	}
2744 	/*
2745 	 * list_add_tail_rcu() could happen on another CPU
2746 	 * rather than vb->cpu due to task migration, which
2747 	 * is safe, as list_add_tail_rcu() ensures the list's
2748 	 * integrity together with list_for_each_rcu() on the
2749 	 * read side.
2750 	 */
2751 	vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
2752 	spin_lock(&vbq->lock);
2753 	list_add_tail_rcu(&vb->free_list, &vbq->free);
2754 	spin_unlock(&vbq->lock);
2755 
2756 	return vaddr;
2757 }
2758 
2759 static void free_vmap_block(struct vmap_block *vb)
2760 {
2761 	struct vmap_node *vn;
2762 	struct vmap_block *tmp;
2763 	struct xarray *xa;
2764 
2765 	xa = addr_to_vb_xa(vb->va->va_start);
2766 	tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2767 	BUG_ON(tmp != vb);
2768 
2769 	vn = addr_to_node(vb->va->va_start);
2770 	spin_lock(&vn->busy.lock);
2771 	unlink_va(vb->va, &vn->busy.root);
2772 	spin_unlock(&vn->busy.lock);
2773 
2774 	free_vmap_area_noflush(vb->va);
2775 	kfree_rcu(vb, rcu_head);
2776 }
2777 
2778 static bool purge_fragmented_block(struct vmap_block *vb,
2779 		struct list_head *purge_list, bool force_purge)
2780 {
2781 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
2782 
2783 	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2784 	    vb->dirty == VMAP_BBMAP_BITS)
2785 		return false;
2786 
2787 	/* Don't overeagerly purge usable blocks unless requested */
2788 	if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
2789 		return false;
2790 
2791 	/* prevent further allocs after releasing lock */
2792 	WRITE_ONCE(vb->free, 0);
2793 	/* prevent purging it again */
2794 	WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
2795 	vb->dirty_min = 0;
2796 	vb->dirty_max = VMAP_BBMAP_BITS;
2797 	spin_lock(&vbq->lock);
2798 	list_del_rcu(&vb->free_list);
2799 	spin_unlock(&vbq->lock);
2800 	list_add_tail(&vb->purge, purge_list);
2801 	return true;
2802 }
2803 
2804 static void free_purged_blocks(struct list_head *purge_list)
2805 {
2806 	struct vmap_block *vb, *n_vb;
2807 
2808 	list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
2809 		list_del(&vb->purge);
2810 		free_vmap_block(vb);
2811 	}
2812 }
2813 
2814 static void purge_fragmented_blocks(int cpu)
2815 {
2816 	LIST_HEAD(purge);
2817 	struct vmap_block *vb;
2818 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2819 
2820 	rcu_read_lock();
2821 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2822 		unsigned long free = READ_ONCE(vb->free);
2823 		unsigned long dirty = READ_ONCE(vb->dirty);
2824 
2825 		if (free + dirty != VMAP_BBMAP_BITS ||
2826 		    dirty == VMAP_BBMAP_BITS)
2827 			continue;
2828 
2829 		spin_lock(&vb->lock);
2830 		purge_fragmented_block(vb, &purge, true);
2831 		spin_unlock(&vb->lock);
2832 	}
2833 	rcu_read_unlock();
2834 	free_purged_blocks(&purge);
2835 }
2836 
2837 static void purge_fragmented_blocks_allcpus(void)
2838 {
2839 	int cpu;
2840 
2841 	for_each_possible_cpu(cpu)
2842 		purge_fragmented_blocks(cpu);
2843 }
2844 
2845 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2846 {
2847 	struct vmap_block_queue *vbq;
2848 	struct vmap_block *vb;
2849 	void *vaddr = NULL;
2850 	unsigned int order;
2851 
2852 	BUG_ON(offset_in_page(size));
2853 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2854 	if (WARN_ON(size == 0)) {
2855 		/*
2856 		 * Allocating 0 bytes isn't what the caller wants, since
2857 		 * get_order(0) returns a nonsensical result. Just warn
2858 		 * and terminate early.
2859 		 */
2860 		return ERR_PTR(-EINVAL);
2861 	}
2862 	order = get_order(size);
2863 
2864 	rcu_read_lock();
2865 	vbq = raw_cpu_ptr(&vmap_block_queue);
2866 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2867 		unsigned long pages_off;
2868 
2869 		if (READ_ONCE(vb->free) < (1UL << order))
2870 			continue;
2871 
2872 		spin_lock(&vb->lock);
2873 		if (vb->free < (1UL << order)) {
2874 			spin_unlock(&vb->lock);
2875 			continue;
2876 		}
2877 
2878 		pages_off = VMAP_BBMAP_BITS - vb->free;
2879 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2880 		WRITE_ONCE(vb->free, vb->free - (1UL << order));
2881 		bitmap_set(vb->used_map, pages_off, (1UL << order));
2882 		if (vb->free == 0) {
2883 			spin_lock(&vbq->lock);
2884 			list_del_rcu(&vb->free_list);
2885 			spin_unlock(&vbq->lock);
2886 		}
2887 
2888 		spin_unlock(&vb->lock);
2889 		break;
2890 	}
2891 
2892 	rcu_read_unlock();
2893 
2894 	/* Allocate new block if nothing was found */
2895 	if (!vaddr)
2896 		vaddr = new_vmap_block(order, gfp_mask);
2897 
2898 	return vaddr;
2899 }
2900 
2901 static void vb_free(unsigned long addr, unsigned long size)
2902 {
2903 	unsigned long offset;
2904 	unsigned int order;
2905 	struct vmap_block *vb;
2906 	struct xarray *xa;
2907 
2908 	BUG_ON(offset_in_page(size));
2909 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2910 
2911 	flush_cache_vunmap(addr, addr + size);
2912 
2913 	order = get_order(size);
2914 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2915 
2916 	xa = addr_to_vb_xa(addr);
2917 	vb = xa_load(xa, addr_to_vb_idx(addr));
2918 
2919 	spin_lock(&vb->lock);
2920 	bitmap_clear(vb->used_map, offset, (1UL << order));
2921 	spin_unlock(&vb->lock);
2922 
2923 	vunmap_range_noflush(addr, addr + size);
2924 
2925 	if (debug_pagealloc_enabled_static())
2926 		flush_tlb_kernel_range(addr, addr + size);
2927 
2928 	spin_lock(&vb->lock);
2929 
2930 	/* Expand the dirty range that has not yet been TLB flushed */
2931 	vb->dirty_min = min(vb->dirty_min, offset);
2932 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2933 
2934 	WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2935 	if (vb->dirty == VMAP_BBMAP_BITS) {
2936 		BUG_ON(vb->free);
2937 		spin_unlock(&vb->lock);
2938 		free_vmap_block(vb);
2939 	} else
2940 		spin_unlock(&vb->lock);
2941 }
2942 
2943 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2944 {
2945 	LIST_HEAD(purge_list);
2946 	int cpu;
2947 
2948 	if (unlikely(!vmap_initialized))
2949 		return;
2950 
2951 	mutex_lock(&vmap_purge_lock);
2952 
2953 	for_each_possible_cpu(cpu) {
2954 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2955 		struct vmap_block *vb;
2956 		unsigned long idx;
2957 
2958 		rcu_read_lock();
2959 		xa_for_each(&vbq->vmap_blocks, idx, vb) {
2960 			spin_lock(&vb->lock);
2961 
2962 			/*
2963 			 * Try to purge a fragmented block first. If it's
2964 			 * not purgeable, check whether there is dirty
2965 			 * space to be flushed.
2966 			 */
2967 			if (!purge_fragmented_block(vb, &purge_list, false) &&
2968 			    vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
2969 				unsigned long va_start = vb->va->va_start;
2970 				unsigned long s, e;
2971 
2972 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
2973 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2974 
2975 				start = min(s, start);
2976 				end   = max(e, end);
2977 
2978 				/* Prevent that this is flushed again */
2979 				vb->dirty_min = VMAP_BBMAP_BITS;
2980 				vb->dirty_max = 0;
2981 
2982 				flush = 1;
2983 			}
2984 			spin_unlock(&vb->lock);
2985 		}
2986 		rcu_read_unlock();
2987 	}
2988 	free_purged_blocks(&purge_list);
2989 
2990 	if (!__purge_vmap_area_lazy(start, end, false) && flush)
2991 		flush_tlb_kernel_range(start, end);
2992 	mutex_unlock(&vmap_purge_lock);
2993 }
2994 
2995 /**
2996  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2997  *
2998  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2999  * to amortize TLB flushing overheads. What this means is that any page you
3000  * have now may, in a former life, have been mapped into a kernel virtual
3001  * address by the vmap layer and so there might be some CPUs with TLB entries
3002  * still referencing that page (additional to the regular 1:1 kernel mapping).
3003  *
3004  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
3005  * be sure that none of the pages we have control over will have any aliases
3006  * from the vmap layer.
3007  */
3008 void vm_unmap_aliases(void)
3009 {
3010 	_vm_unmap_aliases(ULONG_MAX, 0, 0);
3011 }
3012 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
3013 
3014 /**
3015  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
3016  * @mem: the pointer returned by vm_map_ram
3017  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
3018  */
3019 void vm_unmap_ram(const void *mem, unsigned int count)
3020 {
3021 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
3022 	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
3023 	struct vmap_area *va;
3024 
3025 	might_sleep();
3026 	BUG_ON(!addr);
3027 	BUG_ON(addr < VMALLOC_START);
3028 	BUG_ON(addr > VMALLOC_END);
3029 	BUG_ON(!PAGE_ALIGNED(addr));
3030 
3031 	kasan_poison_vmalloc(mem, size);
3032 
3033 	if (likely(count <= VMAP_MAX_ALLOC)) {
3034 		debug_check_no_locks_freed(mem, size);
3035 		vb_free(addr, size);
3036 		return;
3037 	}
3038 
3039 	va = find_unlink_vmap_area(addr);
3040 	if (WARN_ON_ONCE(!va))
3041 		return;
3042 
3043 	debug_check_no_locks_freed((void *)va->va_start, va_size(va));
3044 	free_unmap_vmap_area(va);
3045 }
3046 EXPORT_SYMBOL(vm_unmap_ram);
3047 
3048 /**
3049  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
3050  * @pages: an array of pointers to the pages to be mapped
3051  * @count: number of pages
3052  * @node: prefer to allocate data structures on this node
3053  *
3054  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
3055  * faster than vmap so it's good.  But if you mix long-life and short-life
3056  * objects with vm_map_ram(), it could consume lots of address space through
3057  * fragmentation (especially on a 32bit machine).  You could see failures in
3058  * the end.  Please use this function for short-lived objects.
3059  *
3060  * Returns: a pointer to the address that has been mapped, or %NULL on failure
3061  */
3062 void *vm_map_ram(struct page **pages, unsigned int count, int node)
3063 {
3064 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
3065 	unsigned long addr;
3066 	void *mem;
3067 
3068 	if (likely(count <= VMAP_MAX_ALLOC)) {
3069 		mem = vb_alloc(size, GFP_KERNEL);
3070 		if (IS_ERR(mem))
3071 			return NULL;
3072 		addr = (unsigned long)mem;
3073 	} else {
3074 		struct vmap_area *va;
3075 		va = alloc_vmap_area(size, PAGE_SIZE,
3076 				VMALLOC_START, VMALLOC_END,
3077 				node, GFP_KERNEL, VMAP_RAM,
3078 				NULL);
3079 		if (IS_ERR(va))
3080 			return NULL;
3081 
3082 		addr = va->va_start;
3083 		mem = (void *)addr;
3084 	}
3085 
3086 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
3087 				pages, PAGE_SHIFT) < 0) {
3088 		vm_unmap_ram(mem, count);
3089 		return NULL;
3090 	}
3091 
3092 	/*
3093 	 * Mark the pages as accessible, now that they are mapped.
3094 	 * With hardware tag-based KASAN, marking is skipped for
3095 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3096 	 */
3097 	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
3098 
3099 	return mem;
3100 }
3101 EXPORT_SYMBOL(vm_map_ram);
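/*
 * Illustrative sketch, not part of the kernel source: the intended
 * pairing of vm_map_ram() and vm_unmap_ram() above for a short-lived
 * mapping. The helper name is hypothetical; error handling beyond
 * the NULL check is omitted.
 */
#if 0 /* example only */
static void example_use(struct page **pages, unsigned int count)
{
	void *addr = vm_map_ram(pages, count, NUMA_NO_NODE);

	if (!addr)
		return;

	/* ... short-lived accesses through the linear mapping ... */

	/* Unmap with the same count; partial unmaps are not allowed. */
	vm_unmap_ram(addr, count);
}
#endif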
3102 
3103 static struct vm_struct *vmlist __initdata;
3104 
3105 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
3106 {
3107 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
3108 	return vm->page_order;
3109 #else
3110 	return 0;
3111 #endif
3112 }
3113 
3114 unsigned int get_vm_area_page_order(struct vm_struct *vm)
3115 {
3116 	return vm_area_page_order(vm);
3117 }
3118 
3119 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
3120 {
3121 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
3122 	vm->page_order = order;
3123 #else
3124 	BUG_ON(order != 0);
3125 #endif
3126 }
3127 
3128 /**
3129  * vm_area_add_early - add vmap area early during boot
3130  * @vm: vm_struct to add
3131  *
3132  * This function is used to add a fixed kernel vm area to vmlist before
3133  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
3134  * should contain proper values and the other fields should be zero.
3135  *
3136  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3137  */
3138 void __init vm_area_add_early(struct vm_struct *vm)
3139 {
3140 	struct vm_struct *tmp, **p;
3141 
3142 	BUG_ON(vmap_initialized);
3143 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
3144 		if (tmp->addr >= vm->addr) {
3145 			BUG_ON(tmp->addr < vm->addr + vm->size);
3146 			break;
3147 		} else
3148 			BUG_ON(tmp->addr + tmp->size > vm->addr);
3149 	}
3150 	vm->next = *p;
3151 	*p = vm;
3152 }
3153 
3154 /**
3155  * vm_area_register_early - register vmap area early during boot
3156  * @vm: vm_struct to register
3157  * @align: requested alignment
3158  *
3159  * This function is used to register a kernel vm area before
3160  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
3161  * proper values on entry and other fields should be zero.  On return,
3162  * vm->addr contains the allocated address.
3163  *
3164  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3165  */
3166 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
3167 {
3168 	unsigned long addr = ALIGN(VMALLOC_START, align);
3169 	struct vm_struct *cur, **p;
3170 
3171 	BUG_ON(vmap_initialized);
3172 
3173 	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
3174 		if ((unsigned long)cur->addr - addr >= vm->size)
3175 			break;
3176 		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
3177 	}
3178 
3179 	BUG_ON(addr > VMALLOC_END - vm->size);
3180 	vm->addr = (void *)addr;
3181 	vm->next = *p;
3182 	*p = vm;
3183 	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
3184 }
3185 
3186 void clear_vm_uninitialized_flag(struct vm_struct *vm)
3187 {
3188 	/*
3189 	 * Before removing VM_UNINITIALIZED,
3190 	 * we should make sure that vm has proper values.
3191 	 * Pair with smp_rmb() in vread_iter() and vmalloc_info_show().
3192 	 */
3193 	smp_wmb();
3194 	vm->flags &= ~VM_UNINITIALIZED;
3195 }
3196 
3197 struct vm_struct *__get_vm_area_node(unsigned long size,
3198 		unsigned long align, unsigned long shift, unsigned long flags,
3199 		unsigned long start, unsigned long end, int node,
3200 		gfp_t gfp_mask, const void *caller)
3201 {
3202 	struct vmap_area *va;
3203 	struct vm_struct *area;
3204 	unsigned long requested_size = size;
3205 
3206 	BUG_ON(in_interrupt());
3207 	size = ALIGN(size, 1ul << shift);
3208 	if (unlikely(!size))
3209 		return NULL;
3210 
3211 	if (flags & VM_IOREMAP)
3212 		align = 1ul << clamp_t(int, get_count_order_long(size),
3213 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
3214 
3215 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
3216 	if (unlikely(!area))
3217 		return NULL;
3218 
3219 	if (!(flags & VM_NO_GUARD))
3220 		size += PAGE_SIZE;
3221 
3222 	area->flags = flags;
3223 	area->caller = caller;
3224 	area->requested_size = requested_size;
3225 
3226 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
3227 	if (IS_ERR(va)) {
3228 		kfree(area);
3229 		return NULL;
3230 	}
3231 
3232 	/*
3233 	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
3234 	 * best-effort approach, as they can be mapped outside of vmalloc code.
3235 	 * For VM_ALLOC mappings, the pages are marked as accessible after
3236 	 * getting mapped in __vmalloc_node_range().
3237 	 * With hardware tag-based KASAN, marking is skipped for
3238 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3239 	 */
3240 	if (!(flags & VM_ALLOC))
3241 		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
3242 						    KASAN_VMALLOC_PROT_NORMAL);
3243 
3244 	return area;
3245 }
3246 
3247 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
3248 				       unsigned long start, unsigned long end,
3249 				       const void *caller)
3250 {
3251 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
3252 				  NUMA_NO_NODE, GFP_KERNEL, caller);
3253 }
3254 
3255 /**
3256  * get_vm_area - reserve a contiguous kernel virtual area
3257  * @size:	 size of the area
3258  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
3259  *
3260  * Search for an area of @size in the kernel virtual mapping area,
3261  * and reserve it for our purposes.  Returns the area descriptor
3262  * on success or %NULL on failure.
3263  *
3264  * Return: the area descriptor on success or %NULL on failure.
3265  */
3266 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
3267 {
3268 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3269 				  VMALLOC_START, VMALLOC_END,
3270 				  NUMA_NO_NODE, GFP_KERNEL,
3271 				  __builtin_return_address(0));
3272 }
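/*
 * Illustrative sketch, not part of the kernel source: reserving a
 * guarded chunk of kernel virtual address space with the helper above
 * and releasing it again via free_vm_area() (declared in
 * <linux/vmalloc.h>). What gets mapped into the range in between is
 * up to the caller; the helper name is hypothetical.
 */
#if 0 /* example only */
static void example_reserve(void)
{
	struct vm_struct *area = get_vm_area(2 * PAGE_SIZE, VM_IOREMAP);

	if (!area)
		return;

	/* area->addr now points at the reserved KVA range. */

	free_vm_area(area);
}
#endif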
3273 
3274 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
3275 				const void *caller)
3276 {
3277 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3278 				  VMALLOC_START, VMALLOC_END,
3279 				  NUMA_NO_NODE, GFP_KERNEL, caller);
3280 }
3281 
3282 /**
3283  * find_vm_area - find a contiguous kernel virtual area
3284  * @addr:	  base address
3285  *
3286  * Search for the kernel VM area starting at @addr, and return it.
3287  * It is up to the caller to do all required locking to keep the returned
3288  * pointer valid.
3289  *
3290  * Return: the area descriptor on success or %NULL on failure.
3291  */
3292 struct vm_struct *find_vm_area(const void *addr)
3293 {
3294 	struct vmap_area *va;
3295 
3296 	va = find_vmap_area((unsigned long)addr);
3297 	if (!va)
3298 		return NULL;
3299 
3300 	return va->vm;
3301 }
3302 
3303 /**
3304  * remove_vm_area - find and remove a contiguous kernel virtual area
3305  * @addr:	    base address
3306  *
3307  * Search for the kernel VM area starting at @addr, and remove it.
3308  * This function returns the found VM area, but using it is NOT safe
3309  * on SMP machines, except for its size or flags.
3310  *
3311  * Return: the area descriptor on success or %NULL on failure.
3312  */
3313 struct vm_struct *remove_vm_area(const void *addr)
3314 {
3315 	struct vmap_area *va;
3316 	struct vm_struct *vm;
3317 
3318 	might_sleep();
3319 
3320 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
3321 			addr))
3322 		return NULL;
3323 
3324 	va = find_unlink_vmap_area((unsigned long)addr);
3325 	if (!va || !va->vm)
3326 		return NULL;
3327 	vm = va->vm;
3328 
3329 	debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
3330 	debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
3331 	kasan_free_module_shadow(vm);
3332 	kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
3333 
3334 	free_unmap_vmap_area(va);
3335 	return vm;
3336 }
3337 
3338 static inline void set_area_direct_map(const struct vm_struct *area,
3339 				       int (*set_direct_map)(struct page *page))
3340 {
3341 	int i;
3342 
3343 	/* HUGE_VMALLOC passes small pages to set_direct_map */
3344 	for (i = 0; i < area->nr_pages; i++)
3345 		if (page_address(area->pages[i]))
3346 			set_direct_map(area->pages[i]);
3347 }
3348 
3349 /*
3350  * Flush the vm mapping and reset the direct map.
3351  */
3352 static void vm_reset_perms(struct vm_struct *area)
3353 {
3354 	unsigned long start = ULONG_MAX, end = 0;
3355 	unsigned int page_order = vm_area_page_order(area);
3356 	int flush_dmap = 0;
3357 	int i;
3358 
3359 	/*
3360 	 * Find the start and end range of the direct mappings to make sure that
3361 	 * the vm_unmap_aliases() flush includes the direct map.
3362 	 */
3363 	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
3364 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
3365 
3366 		if (addr) {
3367 			unsigned long page_size;
3368 
3369 			page_size = PAGE_SIZE << page_order;
3370 			start = min(addr, start);
3371 			end = max(addr + page_size, end);
3372 			flush_dmap = 1;
3373 		}
3374 	}
3375 
3376 	/*
3377 	 * Set direct map to something invalid so that it won't be cached if
3378 	 * there are any accesses after the TLB flush, then flush the TLB and
3379 	 * reset the direct map permissions to the default.
3380 	 */
3381 	set_area_direct_map(area, set_direct_map_invalid_noflush);
3382 	_vm_unmap_aliases(start, end, flush_dmap);
3383 	set_area_direct_map(area, set_direct_map_default_noflush);
3384 }
3385 
3386 static void delayed_vfree_work(struct work_struct *w)
3387 {
3388 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
3389 	struct llist_node *t, *llnode;
3390 
3391 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
3392 		vfree(llnode);
3393 }
3394 
3395 /**
3396  * vfree_atomic - release memory allocated by vmalloc()
3397  * @addr:	  memory base address
3398  *
3399  * This one is just like vfree() but can be called in any atomic context
3400  * except NMIs.
3401  */
3402 void vfree_atomic(const void *addr)
3403 {
3404 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
3405 
3406 	BUG_ON(in_nmi());
3407 	kmemleak_free(addr);
3408 
3409 	/*
3410 	 * Use raw_cpu_ptr() because this can be called from preemptible
3411 	 * context. Preemption is absolutely fine here, because the llist_add()
3412 	 * implementation is lockless, so it works even if we are adding to
3413 	 * another cpu's list. schedule_work() should be fine with this too.
3414 	 */
3415 	if (addr && llist_add((struct llist_node *)addr, &p->list))
3416 		schedule_work(&p->wq);
3417 }
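
/*
 * Example (hypothetical sketch, not from this file): freeing a
 * vmalloc'ed buffer from a callback that runs in softirq context,
 * where plain vfree() must not be used. "my_dev" and its "big_buf"
 * member are made up for illustration.
 *
 *	static void my_free_cb(struct my_dev *dev)	// softirq context
 *	{
 *		vfree_atomic(dev->big_buf);
 *		dev->big_buf = NULL;
 *	}
 */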
3418 
3419 /**
3420  * vfree - Release memory allocated by vmalloc()
3421  * @addr:  Memory base address
3422  *
3423  * Free the virtually continuous memory area starting at @addr, as obtained
3424  * from one of the vmalloc() family of APIs.  This will usually also free the
3425  * physical memory underlying the virtual allocation, but that memory is
3426  * reference counted, so it will not be freed until the last user goes away.
3427  *
3428  * If @addr is NULL, no operation is performed.
3429  *
3430  * Context:
3431  * May sleep if called *not* from interrupt context.
3432  * Must not be called in NMI context (strictly speaking, it could be
3433  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
3434  * conventions for vfree() arch-dependent would be a really bad idea).
3435  */
3436 void vfree(const void *addr)
3437 {
3438 	struct vm_struct *vm;
3439 	int i;
3440 
3441 	if (unlikely(in_interrupt())) {
3442 		vfree_atomic(addr);
3443 		return;
3444 	}
3445 
3446 	BUG_ON(in_nmi());
3447 	kmemleak_free(addr);
3448 	might_sleep();
3449 
3450 	if (!addr)
3451 		return;
3452 
3453 	vm = remove_vm_area(addr);
3454 	if (unlikely(!vm)) {
3455 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
3456 				addr);
3457 		return;
3458 	}
3459 
3460 	if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
3461 		vm_reset_perms(vm);
3462 	for (i = 0; i < vm->nr_pages; i++) {
3463 		struct page *page = vm->pages[i];
3464 
3465 		BUG_ON(!page);
3466 		/*
3467 		 * High-order allocs for huge vmallocs are split, so
3468 		 * they can be freed as an array of order-0 allocations.
3469 		 */
3470 		if (!(vm->flags & VM_MAP_PUT_PAGES))
3471 			mod_lruvec_page_state(page, NR_VMALLOC, -1);
3472 		__free_page(page);
3473 		cond_resched();
3474 	}
3475 	kvfree(vm->pages);
3476 	kfree(vm);
3477 }
3478 EXPORT_SYMBOL(vfree);
3479 
3480 /**
3481  * vunmap - release virtual mapping obtained by vmap()
3482  * @addr:   memory base address
3483  *
3484  * Free the virtually contiguous memory area starting at @addr,
3485  * which was created from the page array passed to vmap().
3486  *
3487  * Must not be called in interrupt context.
3488  */
3489 void vunmap(const void *addr)
3490 {
3491 	struct vm_struct *vm;
3492 
3493 	BUG_ON(in_interrupt());
3494 	might_sleep();
3495 
3496 	if (!addr)
3497 		return;
3498 	vm = remove_vm_area(addr);
3499 	if (unlikely(!vm)) {
3500 		WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
3501 				addr);
3502 		return;
3503 	}
3504 	kfree(vm);
3505 }
3506 EXPORT_SYMBOL(vunmap);
3507 
3508 /**
3509  * vmap - map an array of pages into virtually contiguous space
3510  * @pages: array of page pointers
3511  * @count: number of pages to map
3512  * @flags: vm_area->flags
3513  * @prot: page protection for the mapping
3514  *
3515  * Maps @count pages from @pages into contiguous kernel virtual space.
3516  * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
3517  * (which must be kmalloc or vmalloc memory) and one reference per page in it
3518  * are transferred from the caller to vmap(), and will be freed / dropped when
3519  * vfree() is called on the return value.
3520  *
3521  * Return: the address of the area or %NULL on failure
3522  */
3523 void *vmap(struct page **pages, unsigned int count,
3524 	   unsigned long flags, pgprot_t prot)
3525 {
3526 	struct vm_struct *area;
3527 	unsigned long addr;
3528 	unsigned long size;		/* In bytes */
3529 
3530 	might_sleep();
3531 
3532 	if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
3533 		return NULL;
3534 
3535 	/*
3536 	 * Your top guard is someone else's bottom guard. Not having a top
3537 	 * guard compromises someone else's mappings too.
3538 	 */
3539 	if (WARN_ON_ONCE(flags & VM_NO_GUARD))
3540 		flags &= ~VM_NO_GUARD;
3541 
3542 	if (count > totalram_pages())
3543 		return NULL;
3544 
3545 	size = (unsigned long)count << PAGE_SHIFT;
3546 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
3547 	if (!area)
3548 		return NULL;
3549 
3550 	addr = (unsigned long)area->addr;
3551 	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
3552 				pages, PAGE_SHIFT) < 0) {
3553 		vunmap(area->addr);
3554 		return NULL;
3555 	}
3556 
3557 	if (flags & VM_MAP_PUT_PAGES) {
3558 		area->pages = pages;
3559 		area->nr_pages = count;
3560 	}
3561 	return area->addr;
3562 }
3563 EXPORT_SYMBOL(vmap);
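
/*
 * Example (illustrative sketch): mapping a caller-built page array and
 * transferring ownership with VM_MAP_PUT_PAGES, so that one vfree() of
 * the returned address later drops both the page references and the
 * array itself. "nr" is hypothetical; error handling is omitted.
 *
 *	struct page **pages;
 *	void *vaddr;
 *	unsigned int i;
 *
 *	pages = kvmalloc_array(nr, sizeof(*pages), GFP_KERNEL);
 *	if (!pages)
 *		return NULL;
 *	for (i = 0; i < nr; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	vaddr = vmap(pages, nr, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
 *	...
 *	vfree(vaddr);	// frees the pages and the pages[] array
 */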
3564 
3565 #ifdef CONFIG_VMAP_PFN
3566 struct vmap_pfn_data {
3567 	unsigned long	*pfns;
3568 	pgprot_t	prot;
3569 	unsigned int	idx;
3570 };
3571 
3572 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
3573 {
3574 	struct vmap_pfn_data *data = private;
3575 	unsigned long pfn = data->pfns[data->idx];
3576 	pte_t ptent;
3577 
3578 	if (WARN_ON_ONCE(pfn_valid(pfn)))
3579 		return -EINVAL;
3580 
3581 	ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
3582 	set_pte_at(&init_mm, addr, pte, ptent);
3583 
3584 	data->idx++;
3585 	return 0;
3586 }
3587 
3588 /**
3589  * vmap_pfn - map an array of PFNs into virtually contiguous space
3590  * @pfns: array of PFNs
3591  * @count: number of pages to map
3592  * @prot: page protection for the mapping
3593  *
3594  * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
3595  * the start address of the mapping.
3596  */
3597 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
3598 {
3599 	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
3600 	struct vm_struct *area;
3601 
3602 	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
3603 			__builtin_return_address(0));
3604 	if (!area)
3605 		return NULL;
3606 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3607 			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
3608 		free_vm_area(area);
3609 		return NULL;
3610 	}
3611 
3612 	flush_cache_vmap((unsigned long)area->addr,
3613 			 (unsigned long)area->addr + count * PAGE_SIZE);
3614 
3615 	return area->addr;
3616 }
3617 EXPORT_SYMBOL_GPL(vmap_pfn);
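
/*
 * Example (hedged sketch): a driver mapping a run of device PFNs that
 * have no struct page backing (pfn_valid() must be false for each of
 * them, see the WARN_ON_ONCE() in vmap_pfn_apply() above). "pfn_base"
 * and "nr" are hypothetical.
 *
 *	unsigned long *pfns;
 *	void *vaddr;
 *	unsigned int i;
 *
 *	pfns = kmalloc_array(nr, sizeof(*pfns), GFP_KERNEL);
 *	if (!pfns)
 *		return NULL;
 *	for (i = 0; i < nr; i++)
 *		pfns[i] = pfn_base + i;
 *	vaddr = vmap_pfn(pfns, nr, pgprot_writecombine(PAGE_KERNEL));
 *	kfree(pfns);	// the array is not referenced after vmap_pfn() returns
 */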
3618 #endif /* CONFIG_VMAP_PFN */
3619 
3620 /*
3621  * Helper for vmalloc to adjust the gfp flags for certain allocations.
3622  */
3623 static inline gfp_t vmalloc_gfp_adjust(gfp_t flags, const bool large)
3624 {
3625 	flags |= __GFP_NOWARN;
3626 	if (large)
3627 		flags &= ~__GFP_NOFAIL;
3628 	return flags;
3629 }
3630 
3631 static inline unsigned int
3632 vm_area_alloc_pages(gfp_t gfp, int nid,
3633 		unsigned int order, unsigned int nr_pages, struct page **pages)
3634 {
3635 	unsigned int nr_allocated = 0;
3636 	unsigned int nr_remaining = nr_pages;
3637 	unsigned int max_attempt_order = MAX_PAGE_ORDER;
3638 	struct page *page;
3639 	int i;
3640 	unsigned int large_order = ilog2(nr_remaining);
3641 	gfp_t large_gfp = vmalloc_gfp_adjust(gfp, large_order) & ~__GFP_DIRECT_RECLAIM;
3642 
3643 	large_order = min(max_attempt_order, large_order);
3644 
3645 	/*
3646 	 * Initially, attempt to have the page allocator give us large order
3647 	 * pages. Do not attempt allocations smaller than 1 << order, since
3648 	 * __vmap_pages_range() expects physically contiguous chunks of
3649 	 * exactly that size.
3650 	 */
3651 	while (large_order > order && nr_remaining) {
3652 		if (nid == NUMA_NO_NODE)
3653 			page = alloc_pages_noprof(large_gfp, large_order);
3654 		else
3655 			page = alloc_pages_node_noprof(nid, large_gfp, large_order);
3656 
3657 		if (unlikely(!page)) {
3658 			max_attempt_order = --large_order;
3659 			continue;
3660 		}
3661 
3662 		mod_lruvec_page_state(page, NR_VMALLOC, 1 << large_order);
3663 
3664 		split_page(page, large_order);
3665 		for (i = 0; i < (1U << large_order); i++)
3666 			pages[nr_allocated + i] = page + i;
3667 
3668 		nr_allocated += 1U << large_order;
3669 		nr_remaining = nr_pages - nr_allocated;
3670 
3671 		large_order = ilog2(nr_remaining);
3672 		large_order = min(max_attempt_order, large_order);
3673 	}
3674 
3675 	/*
3676 	 * For order-0 pages we make use of the bulk allocator. If
3677 	 * the page array ends up partly populated or not populated
3678 	 * at all due to failures, fall back to the single page
3679 	 * allocator, which is more permissive.
3680 	 */
3681 	if (!order) {
3682 		while (nr_allocated < nr_pages) {
3683 			unsigned int nr, nr_pages_request;
3684 			int i;
3685 
3686 			/*
3687 			 * The maximum allowed request is hard-coded to 100
3688 			 * pages per call, in order to prevent a long
3689 			 * preemption-off scenario in the bulk allocator,
3690 			 * so the range is [1:100].
3691 			 */
3692 			nr_pages_request = min(100U, nr_pages - nr_allocated);
3693 
3694 			/* Memory allocation should honor the mempolicy: when
3695 			 * nid == NUMA_NO_NODE we must not blindly use the
3696 			 * nearest node, otherwise memory may land on a single
3697 			 * node while the policy wants it interleaved.
3698 			 */
3699 			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
3700 				nr = alloc_pages_bulk_mempolicy_noprof(gfp,
3701 							nr_pages_request,
3702 							pages + nr_allocated);
3703 			else
3704 				nr = alloc_pages_bulk_node_noprof(gfp, nid,
3705 							nr_pages_request,
3706 							pages + nr_allocated);
3707 
3708 			for (i = nr_allocated; i < nr_allocated + nr; i++)
3709 				mod_lruvec_page_state(pages[i], NR_VMALLOC, 1);
3710 
3711 			nr_allocated += nr;
3712 
3713 			/*
3714 			 * If zero pages, or only part of the request, were
3715 			 * obtained, fall back to the single page allocator.
3716 			 */
3717 			if (nr != nr_pages_request)
3718 				break;
3719 		}
3720 	}
3721 
3722 	/* High-order pages or fallback path if "bulk" fails. */
3723 	while (nr_allocated < nr_pages) {
3724 		if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
3725 			break;
3726 
3727 		if (nid == NUMA_NO_NODE)
3728 			page = alloc_pages_noprof(gfp, order);
3729 		else
3730 			page = alloc_pages_node_noprof(nid, gfp, order);
3731 
3732 		if (unlikely(!page))
3733 			break;
3734 
3735 		mod_lruvec_page_state(page, NR_VMALLOC, 1 << order);
3736 
3737 		/*
3738 		 * High-order allocations must be able to be treated as
3739 		 * independent small pages by callers (as they can with
3740 		 * small-page vmallocs). Some drivers do their own refcounting
3741 		 * on vmalloc_to_page() pages, some use page->mapping,
3742 		 * page->lru, etc.
3743 		 */
3744 		if (order)
3745 			split_page(page, order);
3746 
3747 		/*
3748 		 * Careful, we allocate and map page-order pages, but
3749 		 * tracking is done per PAGE_SIZE page so as to keep the
3750 		 * vm_struct APIs independent of the physical/mapped size.
3751 		 */
3752 		for (i = 0; i < (1U << order); i++)
3753 			pages[nr_allocated + i] = page + i;
3754 
3755 		nr_allocated += 1U << order;
3756 	}
3757 
3758 	return nr_allocated;
3759 }
3760 
3761 static LLIST_HEAD(pending_vm_area_cleanup);
3762 static void cleanup_vm_area_work(struct work_struct *work)
3763 {
3764 	struct vm_struct *area, *tmp;
3765 	struct llist_node *head;
3766 
3767 	head = llist_del_all(&pending_vm_area_cleanup);
3768 	if (!head)
3769 		return;
3770 
3771 	llist_for_each_entry_safe(area, tmp, head, llnode) {
3772 		if (!area->pages)
3773 			free_vm_area(area);
3774 		else
3775 			vfree(area->addr);
3776 	}
3777 }
3778 
3779 /*
3780  * Helper for __vmalloc_area_node() to defer cleanup
3781  * of partially initialized vm_struct in error paths.
3782  */
3783 static DECLARE_WORK(cleanup_vm_area, cleanup_vm_area_work);
3784 static void defer_vm_area_cleanup(struct vm_struct *area)
3785 {
3786 	if (llist_add(&area->llnode, &pending_vm_area_cleanup))
3787 		schedule_work(&cleanup_vm_area);
3788 }
3789 
3790 /*
3791  * Page table allocations ignore the external GFP, so enforce it
3792  * via the memalloc scope API. This is used by vmalloc internals
3793  * and KASAN shadow population only.
3794  *
3795  * GFP to scope mapping:
3796  *
3797  * non-blocking (no __GFP_DIRECT_RECLAIM) - memalloc_noreclaim_save()
3798  * GFP_NOFS - memalloc_nofs_save()
3799  * GFP_NOIO - memalloc_noio_save()
3800  * __GFP_RETRY_MAYFAIL, __GFP_NORETRY - memalloc_noreclaim_save()
3801  * to prevent OOMs
3802  *
3803  * Returns a flag cookie to pair with restore.
3804  */
3805 unsigned int
3806 memalloc_apply_gfp_scope(gfp_t gfp_mask)
3807 {
3808 	unsigned int flags = 0;
3809 
3810 	if (!gfpflags_allow_blocking(gfp_mask) ||
3811 			(gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_NORETRY)))
3812 		flags = memalloc_noreclaim_save();
3813 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3814 		flags = memalloc_nofs_save();
3815 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3816 		flags = memalloc_noio_save();
3817 
3818 	/* 0 - no scope applied. */
3819 	return flags;
3820 }
3821 
3822 void
3823 memalloc_restore_scope(unsigned int flags)
3824 {
3825 	if (flags)
3826 		memalloc_flags_restore(flags);
3827 }
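
/*
 * Typical pairing (see __vmalloc_area_node() below for the in-tree
 * use): the scope brackets the page-table populating work so that its
 * nested allocations honor the caller's gfp_mask.
 *
 *	flags = memalloc_apply_gfp_scope(gfp_mask);
 *	ret = ...;	// work whose nested allocations ignore gfp_mask
 *	memalloc_restore_scope(flags);
 */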
3828 
3829 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3830 				 pgprot_t prot, unsigned int page_shift,
3831 				 int node)
3832 {
3833 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
3834 	bool nofail = gfp_mask & __GFP_NOFAIL;
3835 	unsigned long addr = (unsigned long)area->addr;
3836 	unsigned long size = get_vm_area_size(area);
3837 	unsigned long array_size;
3838 	unsigned int nr_small_pages = size >> PAGE_SHIFT;
3839 	unsigned int page_order;
3840 	unsigned int flags;
3841 	int ret;
3842 
3843 	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3844 
3845 	/* __GFP_NOFAIL and "noblock" flags are mutually exclusive. */
3846 	if (!gfpflags_allow_blocking(gfp_mask))
3847 		nofail = false;
3848 
3849 	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3850 		gfp_mask |= __GFP_HIGHMEM;
3851 
3852 	/* Please note that the recursion is strictly bounded. */
3853 	if (array_size > PAGE_SIZE) {
3854 		area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
3855 					area->caller);
3856 	} else {
3857 		area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
3858 	}
3859 
3860 	if (!area->pages) {
3861 		warn_alloc(gfp_mask, NULL,
3862 			"vmalloc error: size %lu, failed to allocated page array size %lu",
3863 			nr_small_pages * PAGE_SIZE, array_size);
3864 		goto fail;
3865 	}
3866 
3867 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3868 	page_order = vm_area_page_order(area);
3869 
3870 	/*
3871 	 * High-order nofail allocations are really expensive and
3872 	 * potentially dangerous (premature OOM, disruptive reclaim,
3873 	 * compaction etc.).
3874 	 *
3875 	 * Please note, __vmalloc_node_range_noprof() falls back
3876 	 * to order-0 pages if the high-order attempt is unsuccessful.
3877 	 */
3878 	area->nr_pages = vm_area_alloc_pages(
3879 			vmalloc_gfp_adjust(gfp_mask, page_order), node,
3880 			page_order, nr_small_pages, area->pages);
3881 
3882 	/*
3883 	 * If not enough pages were obtained to accomplish an
3884 	 * allocation request, free those that were obtained via vfree().
3885 	 */
3886 	if (area->nr_pages != nr_small_pages) {
3887 		/*
3888 		 * vm_area_alloc_pages() can fail due to insufficient memory but
3889 		 * also:
3890 		 *
3891 		 * - a pending fatal signal
3892 		 * - insufficient huge page-order pages
3893 		 *
3894 		 * Since we always retry allocations at order-0 in the huge page
3895 		 * case, a warning for either is spurious.
3896 		 */
3897 		if (!fatal_signal_pending(current) && page_order == 0)
3898 			warn_alloc(gfp_mask, NULL,
3899 				"vmalloc error: size %lu, failed to allocate pages",
3900 				nr_small_pages * PAGE_SIZE);
3901 		goto fail;
3902 	}
3903 
3904 	/*
3905 	 * Page table allocations ignore the external gfp mask; enforce
3906 	 * it via the scope API.
3907 	 */
3908 	flags = memalloc_apply_gfp_scope(gfp_mask);
3909 	do {
3910 		ret = __vmap_pages_range(addr, addr + size, prot, area->pages,
3911 				page_shift, nested_gfp);
3912 		if (nofail && (ret < 0))
3913 			schedule_timeout_uninterruptible(1);
3914 	} while (nofail && (ret < 0));
3915 	memalloc_restore_scope(flags);
3916 
3917 	if (ret < 0) {
3918 		warn_alloc(gfp_mask, NULL,
3919 			"vmalloc error: size %lu, failed to map pages",
3920 			area->nr_pages * PAGE_SIZE);
3921 		goto fail;
3922 	}
3923 
3924 	return area->addr;
3925 
3926 fail:
3927 	defer_vm_area_cleanup(area);
3928 	return NULL;
3929 }
3930 
3931 /*
3932  * See __vmalloc_node_range() for a clear list of supported vmalloc flags.
3933  * This mask lists all GFP flags currently passed through vmalloc. Currently,
3934  * __GFP_ZERO is used by BPF and __GFP_NORETRY is used by percpu. Both drm
3935  * and BPF also use GFP_USER. Additionally, various users pass
3936  * GFP_KERNEL_ACCOUNT. Xfs uses __GFP_NOLOCKDEP.
3937  */
3938 #define GFP_VMALLOC_SUPPORTED (GFP_KERNEL | GFP_ATOMIC | GFP_NOWAIT |\
3939 				__GFP_NOFAIL | __GFP_ZERO |\
3940 				__GFP_NORETRY | __GFP_RETRY_MAYFAIL |\
3941 				GFP_NOFS | GFP_NOIO | GFP_KERNEL_ACCOUNT |\
3942 				GFP_USER | __GFP_NOLOCKDEP)
3943 
3944 static gfp_t vmalloc_fix_flags(gfp_t flags)
3945 {
3946 	gfp_t invalid_mask = flags & ~GFP_VMALLOC_SUPPORTED;
3947 
3948 	flags &= GFP_VMALLOC_SUPPORTED;
3949 	WARN_ONCE(1, "Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
3950 		  invalid_mask, &invalid_mask, flags, &flags);
3951 	return flags;
3952 }
3953 
3954 /**
3955  * __vmalloc_node_range - allocate virtually contiguous memory
3956  * @size:		  allocation size
3957  * @align:		  desired alignment
3958  * @start:		  vm area range start
3959  * @end:		  vm area range end
3960  * @gfp_mask:		  flags for the page level allocator
3961  * @prot:		  protection mask for the allocated pages
3962  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
3963  * @node:		  node to use for allocation or NUMA_NO_NODE
3964  * @caller:		  caller's return address
3965  *
3966  * Allocate enough pages to cover @size from the page level
3967  * allocator with @gfp_mask flags and map them into contiguous
3968  * virtual range with protection @prot.
3969  *
3970  * Supported GFP classes: %GFP_KERNEL, %GFP_ATOMIC, %GFP_NOWAIT,
3971  * %__GFP_RETRY_MAYFAIL, %__GFP_NORETRY, %GFP_NOFS and %GFP_NOIO.
3972  * Zone modifiers are not supported.
3973  * Please note %GFP_ATOMIC and %GFP_NOWAIT are supported only
3974  * by __vmalloc().
3975  *
3976  * Retry modifiers: only %__GFP_NOFAIL is fully supported;
3977  * %__GFP_NORETRY and %__GFP_RETRY_MAYFAIL are supported with limitations,
3978  * i.e. page tables are allocated with NOWAIT semantics, so they might fail
3979  * under moderate memory pressure.
3980  *
3981  * %__GFP_NOWARN can be used to suppress failure messages.
3982  *
3983  * Cannot be called from interrupt or NMI contexts.
3984  * Return: the address of the area or %NULL on failure
3985  */
3986 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
3987 			unsigned long start, unsigned long end, gfp_t gfp_mask,
3988 			pgprot_t prot, unsigned long vm_flags, int node,
3989 			const void *caller)
3990 {
3991 	struct vm_struct *area;
3992 	void *ret;
3993 	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3994 	unsigned long original_align = align;
3995 	unsigned int shift = PAGE_SHIFT;
3996 
3997 	if (WARN_ON_ONCE(!size))
3998 		return NULL;
3999 
4000 	if ((size >> PAGE_SHIFT) > totalram_pages()) {
4001 		warn_alloc(gfp_mask, NULL,
4002 			"vmalloc error: size %lu, exceeds total pages",
4003 			size);
4004 		return NULL;
4005 	}
4006 
4007 	if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
4008 		/*
4009 		 * Try huge pages. Only try for PAGE_KERNEL allocations,
4010 		 * others like modules don't yet expect huge pages in
4011 		 * their allocations due to apply_to_page_range not
4012 		 * supporting them.
4013 		 */
4014 
4015 		if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE)
4016 			shift = PMD_SHIFT;
4017 		else
4018 			shift = arch_vmap_pte_supported_shift(size);
4019 
4020 		align = max(original_align, 1UL << shift);
4021 	}
4022 
4023 again:
4024 	area = __get_vm_area_node(size, align, shift, VM_ALLOC |
4025 				  VM_UNINITIALIZED | vm_flags, start, end, node,
4026 				  gfp_mask, caller);
4027 	if (!area) {
4028 		bool nofail = gfp_mask & __GFP_NOFAIL;
4029 		warn_alloc(gfp_mask, NULL,
4030 			"vmalloc error: size %lu, vm_struct allocation failed%s",
4031 			size, (nofail) ? ". Retrying." : "");
4032 		if (nofail) {
4033 			schedule_timeout_uninterruptible(1);
4034 			goto again;
4035 		}
4036 		goto fail;
4037 	}
4038 
4039 	/*
4040 	 * Prepare arguments for __vmalloc_area_node() and
4041 	 * kasan_unpoison_vmalloc().
4042 	 */
4043 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
4044 		if (kasan_hw_tags_enabled()) {
4045 			/*
4046 			 * Modify protection bits to allow tagging.
4047 			 * This must be done before mapping.
4048 			 */
4049 			prot = arch_vmap_pgprot_tagged(prot);
4050 
4051 			/*
4052 			 * Skip page_alloc poisoning and zeroing for physical
4053 			 * pages backing VM_ALLOC mapping. Memory is instead
4054 			 * poisoned and zeroed by kasan_unpoison_vmalloc().
4055 			 */
4056 			gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
4057 		}
4058 
4059 		/* Take note that the mapping is PAGE_KERNEL. */
4060 		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
4061 	}
4062 
4063 	/* Allocate physical pages and map them into vmalloc space. */
4064 	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
4065 	if (!ret)
4066 		goto fail;
4067 
4068 	/*
4069 	 * Mark the pages as accessible, now that they are mapped.
4070 	 * The condition for setting KASAN_VMALLOC_INIT should complement the
4071 	 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
4072 	 * to make sure that memory is initialized under the same conditions.
4073 	 * Tag-based KASAN modes only assign tags to normal non-executable
4074 	 * allocations, see __kasan_unpoison_vmalloc().
4075 	 */
4076 	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
4077 	if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
4078 	    (gfp_mask & __GFP_SKIP_ZERO))
4079 		kasan_flags |= KASAN_VMALLOC_INIT;
4080 	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
4081 	area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags);
4082 
4083 	/*
4084 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
4085 	 * flag. It means that vm_struct is not fully initialized.
4086 	 * Now, it is fully initialized, so remove this flag here.
4087 	 */
4088 	clear_vm_uninitialized_flag(area);
4089 
4090 	if (!(vm_flags & VM_DEFER_KMEMLEAK))
4091 		kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask);
4092 
4093 	return area->addr;
4094 
4095 fail:
4096 	if (shift > PAGE_SHIFT) {
4097 		shift = PAGE_SHIFT;
4098 		align = original_align;
4099 		goto again;
4100 	}
4101 
4102 	return NULL;
4103 }
4104 
4105 /**
4106  * __vmalloc_node - allocate virtually contiguous memory
4107  * @size:	    allocation size
4108  * @align:	    desired alignment
4109  * @gfp_mask:	    flags for the page level allocator
4110  * @node:	    node to use for allocation or NUMA_NO_NODE
4111  * @caller:	    caller's return address
4112  *
4113  * Allocate enough pages to cover @size from the page level allocator with
4114  * @gfp_mask flags.  Map them into contiguous kernel virtual space.
4115  *
4116  * Semantics of @gfp_mask (including reclaim/retry modifiers such as
4117  * __GFP_NOFAIL) are the same as in __vmalloc_node_range_noprof().
4118  *
4119  * Return: pointer to the allocated memory or %NULL on error
4120  */
4121 void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
4122 			    gfp_t gfp_mask, int node, const void *caller)
4123 {
4124 	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
4125 				gfp_mask, PAGE_KERNEL, 0, node, caller);
4126 }
4127 /*
4128  * This export exists only for performance analysis and stress testing
4129  * of vmalloc. It is required by the vmalloc test module; do not use it
4130  * for anything else.
4131  */
4132 #ifdef CONFIG_TEST_VMALLOC_MODULE
4133 EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
4134 #endif
4135 
4136 void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
4137 {
4138 	if (unlikely(gfp_mask & ~GFP_VMALLOC_SUPPORTED))
4139 		gfp_mask = vmalloc_fix_flags(gfp_mask);
4140 	return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
4141 				__builtin_return_address(0));
4142 }
4143 EXPORT_SYMBOL(__vmalloc_noprof);
4144 
4145 /**
4146  * vmalloc - allocate virtually contiguous memory
4147  * @size:    allocation size
4148  *
4149  * Allocate enough pages to cover @size from the page level
4150  * allocator and map them into contiguous kernel virtual space.
4151  *
4152  * For tight control over page level allocator and protection flags
4153  * use __vmalloc() instead.
4154  *
4155  * Return: pointer to the allocated memory or %NULL on error
4156  */
4157 void *vmalloc_noprof(unsigned long size)
4158 {
4159 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
4160 				__builtin_return_address(0));
4161 }
4162 EXPORT_SYMBOL(vmalloc_noprof);
4163 
4164 /**
4165  * vmalloc_huge_node - allocate virtually contiguous memory, allow huge pages
4166  * @size:      allocation size
4167  * @gfp_mask:  flags for the page level allocator
4168  * @node:	    node to use for allocation or NUMA_NO_NODE
4169  *
4170  * Allocate enough pages to cover @size from the page level
4171  * allocator and map them into contiguous kernel virtual space.
4172  * If @size is greater than or equal to PMD_SIZE, allow using
4173  * huge pages for the memory.
4174  *
4175  * Return: pointer to the allocated memory or %NULL on error
4176  */
4177 void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node)
4178 {
4179 	if (unlikely(gfp_mask & ~GFP_VMALLOC_SUPPORTED))
4180 		gfp_mask = vmalloc_fix_flags(gfp_mask);
4181 	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
4182 					   gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
4183 					   node, __builtin_return_address(0));
4184 }
4185 EXPORT_SYMBOL_GPL(vmalloc_huge_node_noprof);
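
/*
 * Example (sketch): a subsystem allocating a large, long-lived table
 * where PMD-sized mappings reduce TLB pressure. "table_size" is
 * hypothetical, and the vmalloc_huge_node() spelling assumes the usual
 * alloc_hooks() wrapper around the _noprof variant above.
 *
 *	table = vmalloc_huge_node(table_size, GFP_KERNEL, NUMA_NO_NODE);
 *	if (!table)
 *		return -ENOMEM;
 */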
4186 
4187 /**
4188  * vzalloc - allocate virtually contiguous memory with zero fill
4189  * @size:    allocation size
4190  *
4191  * Allocate enough pages to cover @size from the page level
4192  * allocator and map them into contiguous kernel virtual space.
4193  * The memory allocated is set to zero.
4194  *
4195  * For tight control over page level allocator and protection flags
4196  * use __vmalloc() instead.
4197  *
4198  * Return: pointer to the allocated memory or %NULL on error
4199  */
4200 void *vzalloc_noprof(unsigned long size)
4201 {
4202 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
4203 				__builtin_return_address(0));
4204 }
4205 EXPORT_SYMBOL(vzalloc_noprof);
4206 
4207 /**
4208  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
4209  * @size: allocation size
4210  *
4211  * The resulting memory area is zeroed so it can be mapped to userspace
4212  * without leaking data.
4213  *
4214  * Return: pointer to the allocated memory or %NULL on error
4215  */
4216 void *vmalloc_user_noprof(unsigned long size)
4217 {
4218 	return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
4219 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
4220 				    VM_USERMAP, NUMA_NO_NODE,
4221 				    __builtin_return_address(0));
4222 }
4223 EXPORT_SYMBOL(vmalloc_user_noprof);
4224 
4225 /**
4226  * vmalloc_node - allocate memory on a specific node
4227  * @size:	  allocation size
4228  * @node:	  numa node
4229  *
4230  * Allocate enough pages to cover @size from the page level
4231  * allocator and map them into contiguous kernel virtual space.
4232  *
4233  * For tight control over page level allocator and protection flags
4234  * use __vmalloc() instead.
4235  *
4236  * Return: pointer to the allocated memory or %NULL on error
4237  */
4238 void *vmalloc_node_noprof(unsigned long size, int node)
4239 {
4240 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
4241 			__builtin_return_address(0));
4242 }
4243 EXPORT_SYMBOL(vmalloc_node_noprof);
4244 
4245 /**
4246  * vzalloc_node - allocate memory on a specific node with zero fill
4247  * @size:	allocation size
4248  * @node:	numa node
4249  *
4250  * Allocate enough pages to cover @size from the page level
4251  * allocator and map them into contiguous kernel virtual space.
4252  * The memory allocated is set to zero.
4253  *
4254  * Return: pointer to the allocated memory or %NULL on error
4255  */
4256 void *vzalloc_node_noprof(unsigned long size, int node)
4257 {
4258 	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
4259 				__builtin_return_address(0));
4260 }
4261 EXPORT_SYMBOL(vzalloc_node_noprof);
4262 
4263 /**
4264  * vrealloc_node_align - reallocate virtually contiguous memory; contents
4265  * remain unchanged
4266  * @p: object to reallocate memory for
4267  * @size: the size to reallocate
4268  * @align: requested alignment
4269  * @flags: the flags for the page level allocator
4270  * @nid: node number of the target node
4271  *
4272  * If @p is %NULL, vrealloc_XXX() behaves exactly like vmalloc_XXX(). If @size
4273  * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
4274  *
4275  * If the caller wants the new memory to be on a specific node *only*,
4276  * __GFP_THISNODE flag should be set, otherwise the function will try to avoid
4277  * reallocation and possibly disregard the specified @nid.
4278  *
4279  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
4280  * initial memory allocation, every subsequent call to this API for the same
4281  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
4282  * __GFP_ZERO is not fully honored by this API.
4283  *
4284  * Requesting an alignment that is bigger than the alignment of the existing
4285  * allocation will fail.
4286  *
4287  * In any case, the contents of the object pointed to are preserved up to the
4288  * lesser of the new and old sizes.
4289  *
4290  * This function must not be called concurrently with itself or vfree() for the
4291  * same memory allocation.
4292  *
4293  * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
4294  *         failure
4295  */
4296 void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
4297 				 gfp_t flags, int nid)
4298 {
4299 	struct vm_struct *vm = NULL;
4300 	size_t alloced_size = 0;
4301 	size_t old_size = 0;
4302 	void *n;
4303 
4304 	if (!size) {
4305 		vfree(p);
4306 		return NULL;
4307 	}
4308 
4309 	if (p) {
4310 		vm = find_vm_area(p);
4311 		if (unlikely(!vm)) {
4312 			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
4313 			return NULL;
4314 		}
4315 
4316 		alloced_size = get_vm_area_size(vm);
4317 		old_size = vm->requested_size;
4318 		if (WARN(alloced_size < old_size,
4319 			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
4320 			return NULL;
4321 		if (WARN(!IS_ALIGNED((unsigned long)p, align),
4322 			 "will not reallocate with a bigger alignment (0x%lx)\n", align))
4323 			return NULL;
4324 		if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
4325 			     nid != page_to_nid(vmalloc_to_page(p)))
4326 			goto need_realloc;
4327 	}
4328 
4329 	/*
4330 	 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
4331 	 * would be a good heuristic for when to shrink the vm_area?
4332 	 */
4333 	if (size <= old_size) {
4334 		/* Zero out "freed" memory, potentially for future realloc. */
4335 		if (want_init_on_free() || want_init_on_alloc(flags))
4336 			memset((void *)p + size, 0, old_size - size);
4337 		vm->requested_size = size;
4338 		kasan_vrealloc(p, old_size, size);
4339 		return (void *)p;
4340 	}
4341 
4342 	/*
4343 	 * We already have the bytes available in the allocation; use them.
4344 	 */
4345 	if (size <= alloced_size) {
4346 		/*
4347 		 * No need to zero memory here, as unused memory will have
4348 		 * already been zeroed at initial allocation time or during
4349 		 * realloc shrink time.
4350 		 */
4351 		vm->requested_size = size;
4352 		kasan_vrealloc(p, old_size, size);
4353 		return (void *)p;
4354 	}
4355 
4356 need_realloc:
4357 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
4358 	n = __vmalloc_node_noprof(size, align, flags, nid, __builtin_return_address(0));
4359 
4360 	if (!n)
4361 		return NULL;
4362 
4363 	if (p) {
4364 		memcpy(n, p, old_size);
4365 		vfree(p);
4366 	}
4367 
4368 	return n;
4369 }
4370 EXPORT_SYMBOL(vrealloc_node_align_noprof);
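
/*
 * Example (sketch): growing a vmalloc'ed buffer via the vrealloc()
 * wrapper (assumed to be the usual alloc_hooks() spelling). As with
 * krealloc(), assign to a temporary first: on failure NULL is returned
 * and the old buffer is left intact.
 *
 *	void *new_buf;
 *
 *	new_buf = vrealloc(buf, new_size, GFP_KERNEL);
 *	if (!new_buf)
 *		return -ENOMEM;	// buf is still valid and unchanged
 *	buf = new_buf;
 */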
4371 
4372 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
4373 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4374 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
4375 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
4376 #else
4377 /*
4378  * 64b systems should always have either DMA or DMA32 zones. For others
4379  * GFP_DMA32 should do the right thing and use the normal zone.
4380  */
4381 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4382 #endif
4383 
4384 /**
4385  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
4386  * @size:	allocation size
4387  *
4388  * Allocate enough 32bit PA addressable pages to cover @size from the
4389  * page level allocator and map them into contiguous kernel virtual space.
4390  *
4391  * Return: pointer to the allocated memory or %NULL on error
4392  */
4393 void *vmalloc_32_noprof(unsigned long size)
4394 {
4395 	return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
4396 			__builtin_return_address(0));
4397 }
4398 EXPORT_SYMBOL(vmalloc_32_noprof);
4399 
4400 /**
4401  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
4402  * @size:	     allocation size
4403  *
4404  * The resulting memory area is 32bit addressable and zeroed so it can be
4405  * mapped to userspace without leaking data.
4406  *
4407  * Return: pointer to the allocated memory or %NULL on error
4408  */
4409 void *vmalloc_32_user_noprof(unsigned long size)
4410 {
4411 	return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
4412 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
4413 				    VM_USERMAP, NUMA_NO_NODE,
4414 				    __builtin_return_address(0));
4415 }
4416 EXPORT_SYMBOL(vmalloc_32_user_noprof);
4417 
4418 /*
4419  * Atomically zero bytes in the iterator.
4420  *
4421  * Returns the number of zeroed bytes.
4422  */
4423 static size_t zero_iter(struct iov_iter *iter, size_t count)
4424 {
4425 	size_t remains = count;
4426 
4427 	while (remains > 0) {
4428 		size_t num, copied;
4429 
4430 		num = min_t(size_t, remains, PAGE_SIZE);
4431 		copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
4432 		remains -= copied;
4433 
4434 		if (copied < num)
4435 			break;
4436 	}
4437 
4438 	return count - remains;
4439 }
4440 
4441 /*
4442  * Small helper routine that copies contents from addr to the iterator.
4443  * If a page is not present, it is zero-filled.
4444  *
4445  * Returns the number of copied bytes.
4446  */
4447 static size_t aligned_vread_iter(struct iov_iter *iter,
4448 				 const char *addr, size_t count)
4449 {
4450 	size_t remains = count;
4451 	struct page *page;
4452 
4453 	while (remains > 0) {
4454 		unsigned long offset, length;
4455 		size_t copied = 0;
4456 
4457 		offset = offset_in_page(addr);
4458 		length = PAGE_SIZE - offset;
4459 		if (length > remains)
4460 			length = remains;
4461 		page = vmalloc_to_page(addr);
4462 		/*
4463 		 * Safe access to this _mapped_ area would require a lock. But
4464 		 * taking a lock here would add overhead to every vmalloc()/
4465 		 * vfree() call just for this rarely used _debug_ interface.
4466 		 * Instead, we use a local mapping via
4467 		 * copy_page_to_iter_nofault() and accept a small overhead in
4468 		 * this access function.
4469 		 */
4470 		if (page)
4471 			copied = copy_page_to_iter_nofault(page, offset,
4472 							   length, iter);
4473 		else
4474 			copied = zero_iter(iter, length);
4475 
4476 		addr += copied;
4477 		remains -= copied;
4478 
4479 		if (copied != length)
4480 			break;
4481 	}
4482 
4483 	return count - remains;
4484 }
4485 
4486 /*
4487  * Read from a vm_map_ram region of memory.
4488  *
4489  * Returns the number of copied bytes.
4490  */
4491 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
4492 				  size_t count, unsigned long flags)
4493 {
4494 	char *start;
4495 	struct vmap_block *vb;
4496 	struct xarray *xa;
4497 	unsigned long offset;
4498 	unsigned int rs, re;
4499 	size_t remains, n;
4500 
4501 	/*
4502 	 * If the area was created by the vm_map_ram() interface directly,
4503 	 * without being further subdivided and delegated to a vmap_block,
4504 	 * handle it here.
4505 	 */
4506 	if (!(flags & VMAP_BLOCK))
4507 		return aligned_vread_iter(iter, addr, count);
4508 
4509 	remains = count;
4510 
4511 	/*
4512 	 * Area is split into regions and tracked with vmap_block, read out
4513 	 * each region and zero fill the hole between regions.
4514 	 */
4515 	xa = addr_to_vb_xa((unsigned long) addr);
4516 	vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
4517 	if (!vb)
4518 		goto finished_zero;
4519 
4520 	spin_lock(&vb->lock);
4521 	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
4522 		spin_unlock(&vb->lock);
4523 		goto finished_zero;
4524 	}
4525 
4526 	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
4527 		size_t copied;
4528 
4529 		if (remains == 0)
4530 			goto finished;
4531 
4532 		start = vmap_block_vaddr(vb->va->va_start, rs);
4533 
4534 		if (addr < start) {
4535 			size_t to_zero = min_t(size_t, start - addr, remains);
4536 			size_t zeroed = zero_iter(iter, to_zero);
4537 
4538 			addr += zeroed;
4539 			remains -= zeroed;
4540 
4541 			if (remains == 0 || zeroed != to_zero)
4542 				goto finished;
4543 		}
4544 
4545 		/* It could start reading from the middle of a used region. */
4546 		offset = offset_in_page(addr);
4547 		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
4548 		if (n > remains)
4549 			n = remains;
4550 
4551 		copied = aligned_vread_iter(iter, start + offset, n);
4552 
4553 		addr += copied;
4554 		remains -= copied;
4555 
4556 		if (copied != n)
4557 			goto finished;
4558 	}
4559 
4560 	spin_unlock(&vb->lock);
4561 
4562 finished_zero:
4563 	/* zero-fill the remaining dirty or free regions */
4564 	return count - remains + zero_iter(iter, remains);
4565 finished:
4566 	/* We couldn't copy/zero everything */
4567 	spin_unlock(&vb->lock);
4568 	return count - remains;
4569 }
4570 
4571 /**
4572  * vread_iter() - read vmalloc area in a safe way to an iterator.
4573  * @iter:         the iterator to which data should be written.
4574  * @addr:         vm address.
4575  * @count:        number of bytes to be read.
4576  *
4577  * This function checks that addr is a valid vmalloc'ed area, and
4578  * copies data from that area to a given iterator. If the given memory range
4579  * of [addr...addr+count) includes some valid address, data is copied to
4580  * proper area of @iter. If there are memory holes, they'll be zero-filled.
4581  * IOREMAP area is treated as memory hole and no copy is done.
4582  *
4583  * If [addr...addr+count) doesn't include any intersection with a live
4584  * vm_struct area, returns 0.
4585  *
4586  * Note: In usual ops, vread_iter() is never necessary because the caller
4587  * should know the vmalloc() area is valid and can use memcpy().
4588  * This is for routines which have to access the vmalloc area without
4589  * any prior information, such as /proc/kcore.
4590  *
4591  * Return: number of bytes for which addr and iter should be advanced
4592  * (same number as @count) or %0 if [addr...addr+count) doesn't
4593  * include any intersection with valid vmalloc area
4594  */
4595 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
4596 {
4597 	struct vmap_node *vn;
4598 	struct vmap_area *va;
4599 	struct vm_struct *vm;
4600 	char *vaddr;
4601 	size_t n, size, flags, remains;
4602 	unsigned long next;
4603 
4604 	addr = kasan_reset_tag(addr);
4605 
4606 	/* Don't allow overflow */
4607 	if ((unsigned long) addr + count < count)
4608 		count = -(unsigned long) addr;
4609 
4610 	remains = count;
4611 
4612 	vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
4613 	if (!vn)
4614 		goto finished_zero;
4615 
4616 	/* no intersection with a live vmap_area */
4617 	if ((unsigned long)addr + remains <= va->va_start)
4618 		goto finished_zero;
4619 
4620 	do {
4621 		size_t copied;
4622 
4623 		if (remains == 0)
4624 			goto finished;
4625 
4626 		vm = va->vm;
4627 		flags = va->flags & VMAP_FLAGS_MASK;
4628 		/*
4629 		 * VMAP_BLOCK indicates a sub-type of vm_map_ram area; it must
4630 		 * be set together with VMAP_RAM.
4631 		 */
4632 		WARN_ON(flags == VMAP_BLOCK);
4633 
4634 		if (!vm && !flags)
4635 			goto next_va;
4636 
4637 		if (vm && (vm->flags & VM_UNINITIALIZED))
4638 			goto next_va;
4639 
4640 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4641 		smp_rmb();
4642 
4643 		vaddr = (char *) va->va_start;
4644 		size = vm ? get_vm_area_size(vm) : va_size(va);
4645 
4646 		if (addr >= vaddr + size)
4647 			goto next_va;
4648 
4649 		if (addr < vaddr) {
4650 			size_t to_zero = min_t(size_t, vaddr - addr, remains);
4651 			size_t zeroed = zero_iter(iter, to_zero);
4652 
4653 			addr += zeroed;
4654 			remains -= zeroed;
4655 
4656 			if (remains == 0 || zeroed != to_zero)
4657 				goto finished;
4658 		}
4659 
4660 		n = vaddr + size - addr;
4661 		if (n > remains)
4662 			n = remains;
4663 
4664 		if (flags & VMAP_RAM)
4665 			copied = vmap_ram_vread_iter(iter, addr, n, flags);
4666 		else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
4667 			copied = aligned_vread_iter(iter, addr, n);
4668 		else /* IOREMAP | SPARSE area is treated as memory hole */
4669 			copied = zero_iter(iter, n);
4670 
4671 		addr += copied;
4672 		remains -= copied;
4673 
4674 		if (copied != n)
4675 			goto finished;
4676 
4677 	next_va:
4678 		next = va->va_end;
4679 		spin_unlock(&vn->busy.lock);
4680 	} while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
4681 
4682 finished_zero:
4683 	if (vn)
4684 		spin_unlock(&vn->busy.lock);
4685 
4686 	/* zero-fill memory holes */
4687 	return count - remains + zero_iter(iter, remains);
4688 finished:
4689 	/* Nothing remains, or we couldn't copy/zero everything. */
4690 	if (vn)
4691 		spin_unlock(&vn->busy.lock);
4692 
4693 	return count - remains;
4694 }
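
/*
 * Example (sketch of the /proc/kcore-style use mentioned in the
 * comment above): the caller owns an iov_iter from its read path and
 * treats a short return as a fault, since holes are zero-filled rather
 * than skipped. "start" and "tsz" are hypothetical.
 *
 *	if (vread_iter(iter, (char *)start, tsz) != tsz)
 *		return -EFAULT;
 */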
4695 
4696 /**
4697  * remap_vmalloc_range_partial - map vmalloc pages to userspace
4698  * @vma:		vma to cover
4699  * @uaddr:		target user address to start at
4700  * @kaddr:		virtual address of vmalloc kernel memory
4701  * @pgoff:		offset from @kaddr to start at
4702  * @size:		size of map area
4703  *
4704  * Returns:	0 for success, -Exxx on failure
4705  *
4706  * This function checks that @kaddr is a valid vmalloc'ed area,
4707  * and that it is big enough to cover the range starting at
4708  * @uaddr in @vma. Will return failure if that criteria isn't
4709  * met.
4710  *
4711  * Similar to remap_pfn_range() (see mm/memory.c)
4712  */
4713 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
4714 				void *kaddr, unsigned long pgoff,
4715 				unsigned long size)
4716 {
4717 	struct vm_struct *area;
4718 	unsigned long off;
4719 	unsigned long end_index;
4720 
4721 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
4722 		return -EINVAL;
4723 
4724 	size = PAGE_ALIGN(size);
4725 
4726 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
4727 		return -EINVAL;
4728 
4729 	area = find_vm_area(kaddr);
4730 	if (!area)
4731 		return -EINVAL;
4732 
4733 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
4734 		return -EINVAL;
4735 
4736 	if (check_add_overflow(size, off, &end_index) ||
4737 	    end_index > get_vm_area_size(area))
4738 		return -EINVAL;
4739 	kaddr += off;
4740 
4741 	do {
4742 		struct page *page = vmalloc_to_page(kaddr);
4743 		int ret;
4744 
4745 		ret = vm_insert_page(vma, uaddr, page);
4746 		if (ret)
4747 			return ret;
4748 
4749 		uaddr += PAGE_SIZE;
4750 		kaddr += PAGE_SIZE;
4751 		size -= PAGE_SIZE;
4752 	} while (size > 0);
4753 
4754 	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
4755 
4756 	return 0;
4757 }
4758 
4759 /**
4760  * remap_vmalloc_range - map vmalloc pages to userspace
4761  * @vma:		vma to cover (map full range of vma)
4762  * @addr:		vmalloc memory
4763  * @pgoff:		number of pages into addr before first page to map
4764  *
4765  * Returns:	0 for success, -Exxx on failure
4766  *
4767  * This function checks that addr is a valid vmalloc'ed area, and
4768  * that it is big enough to cover the vma. Will return failure if
4769  * those criteria aren't met.
4770  *
4771  * Similar to remap_pfn_range() (see mm/memory.c)
4772  */
4773 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
4774 						unsigned long pgoff)
4775 {
4776 	return remap_vmalloc_range_partial(vma, vma->vm_start,
4777 					   addr, pgoff,
4778 					   vma->vm_end - vma->vm_start);
4779 }
4780 EXPORT_SYMBOL(remap_vmalloc_range);
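
/*
 * Example (condensed sketch): a character device exposing a
 * vmalloc_user() buffer from its ->mmap() handler. The VM_USERMAP flag
 * set by vmalloc_user() is what makes the buffer eligible here;
 * "my_dev" and "shared_buf" are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->shared_buf, 0);
 *	}
 */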
4781 
4782 void free_vm_area(struct vm_struct *area)
4783 {
4784 	struct vm_struct *ret;
4785 	ret = remove_vm_area(area->addr);
4786 	BUG_ON(ret != area);
4787 	kfree(area);
4788 }
4789 EXPORT_SYMBOL_GPL(free_vm_area);
4790 
4791 #ifdef CONFIG_SMP
4792 static struct vmap_area *node_to_va(struct rb_node *n)
4793 {
4794 	return rb_entry_safe(n, struct vmap_area, rb_node);
4795 }
4796 
4797 /**
4798  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
4799  * @addr: target address
4800  *
4801  * Returns: the vmap_area if it is found. If there is no such area,
4802  *   the closest preceding vmap_area (the first one in reverse order)
4803  *   is returned, i.e. one with va->va_start < addr && va->va_end < addr,
4804  *   or NULL if there are no areas before @addr.
4805  */
4806 static struct vmap_area *
4807 pvm_find_va_enclose_addr(unsigned long addr)
4808 {
4809 	struct vmap_area *va, *tmp;
4810 	struct rb_node *n;
4811 
4812 	n = free_vmap_area_root.rb_node;
4813 	va = NULL;
4814 
4815 	while (n) {
4816 		tmp = rb_entry(n, struct vmap_area, rb_node);
4817 		if (tmp->va_start <= addr) {
4818 			va = tmp;
4819 			if (tmp->va_end >= addr)
4820 				break;
4821 
4822 			n = n->rb_right;
4823 		} else {
4824 			n = n->rb_left;
4825 		}
4826 	}
4827 
4828 	return va;
4829 }
4830 
4831 /**
4832  * pvm_determine_end_from_reverse - find the highest aligned address
4833  * of free block below VMALLOC_END
4834  * @va:
4835  *   in - the VA we start the search from (reverse order);
4836  *   out - the VA with the highest aligned end address.
4837  * @align: alignment for required highest address
4838  *
4839  * Returns: determined end address within vmap_area
4840  */
4841 static unsigned long
4842 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
4843 {
4844 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4845 	unsigned long addr;
4846 
4847 	if (likely(*va)) {
4848 		list_for_each_entry_from_reverse((*va),
4849 				&free_vmap_area_list, list) {
4850 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
4851 			if ((*va)->va_start < addr)
4852 				return addr;
4853 		}
4854 	}
4855 
4856 	return 0;
4857 }
4858 
4859 /**
4860  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4861  * @offsets: array containing offset of each area
4862  * @sizes: array containing size of each area
4863  * @nr_vms: the number of areas to allocate
4864  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4865  *
4866  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4867  *	    vm_structs on success, %NULL on failure
4868  *
4869  * Percpu allocator wants to use congruent vm areas so that it can
4870  * maintain the offsets among percpu areas.  This function allocates
4871  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
4872  * be scattered pretty far, distance between two areas easily going up
4873  * to gigabytes.  To avoid interacting with regular vmallocs, these
4874  * areas are allocated from top.
4875  *
4876  * Despite its complicated look, this allocator is rather simple. It
4877  * does everything top-down and scans free blocks from the end looking
4878  * for matching base. While scanning, if any of the areas do not fit the
4879  * base address is pulled down to fit the area. Scanning is repeated till
4880  * all the areas fit and then all necessary data structures are inserted
4881  * and the result is returned.
4882  */
4883 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
4884 				     const size_t *sizes, int nr_vms,
4885 				     size_t align)
4886 {
4887 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
4888 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4889 	struct vmap_area **vas, *va;
4890 	struct vm_struct **vms;
4891 	int area, area2, last_area, term_area;
4892 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
4893 	bool purged = false;
4894 
4895 	/* verify parameters and allocate data structures */
4896 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
4897 	for (last_area = 0, area = 0; area < nr_vms; area++) {
4898 		start = offsets[area];
4899 		end = start + sizes[area];
4900 
4901 		/* is everything aligned properly? */
4902 		BUG_ON(!IS_ALIGNED(offsets[area], align));
4903 		BUG_ON(!IS_ALIGNED(sizes[area], align));
4904 
4905 		/* detect the area with the highest address */
4906 		if (start > offsets[last_area])
4907 			last_area = area;
4908 
4909 		for (area2 = area + 1; area2 < nr_vms; area2++) {
4910 			unsigned long start2 = offsets[area2];
4911 			unsigned long end2 = start2 + sizes[area2];
4912 
4913 			BUG_ON(start2 < end && start < end2);
4914 		}
4915 	}
4916 	last_end = offsets[last_area] + sizes[last_area];
4917 
4918 	if (vmalloc_end - vmalloc_start < last_end) {
4919 		WARN_ON(true);
4920 		return NULL;
4921 	}
4922 
4923 	vms = kzalloc_objs(vms[0], nr_vms);
4924 	vas = kzalloc_objs(vas[0], nr_vms);
4925 	if (!vas || !vms)
4926 		goto err_free2;
4927 
4928 	for (area = 0; area < nr_vms; area++) {
4929 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
4930 		vms[area] = kzalloc_obj(struct vm_struct);
4931 		if (!vas[area] || !vms[area])
4932 			goto err_free;
4933 	}
4934 retry:
4935 	spin_lock(&free_vmap_area_lock);
4936 
4937 	/* start scanning - we scan from the top, begin with the last area */
4938 	area = term_area = last_area;
4939 	start = offsets[area];
4940 	end = start + sizes[area];
4941 
4942 	va = pvm_find_va_enclose_addr(vmalloc_end);
4943 	base = pvm_determine_end_from_reverse(&va, align) - end;
4944 
4945 	while (true) {
4946 		/*
4947 		 * base might have underflowed, add last_end before
4948 		 * comparing.
4949 		 */
4950 		if (base + last_end < vmalloc_start + last_end)
4951 			goto overflow;
4952 
4953 		/*
4954 		 * Fitting base has not been found.
4955 		 */
4956 		if (va == NULL)
4957 			goto overflow;
4958 
4959 		/*
4960 		 * If required width exceeds current VA block, move
4961 		 * base downwards and then recheck.
4962 		 */
4963 		if (base + end > va->va_end) {
4964 			base = pvm_determine_end_from_reverse(&va, align) - end;
4965 			term_area = area;
4966 			continue;
4967 		}
4968 
4969 		/*
4970 		 * If this VA does not fit, move base downwards and recheck.
4971 		 */
4972 		if (base + start < va->va_start) {
4973 			va = node_to_va(rb_prev(&va->rb_node));
4974 			base = pvm_determine_end_from_reverse(&va, align) - end;
4975 			term_area = area;
4976 			continue;
4977 		}
4978 
4979 		/*
4980 		 * This area fits, move on to the previous one.  If
4981 		 * the previous one is the terminal one, we're done.
4982 		 */
4983 		area = (area + nr_vms - 1) % nr_vms;
4984 		if (area == term_area)
4985 			break;
4986 
4987 		start = offsets[area];
4988 		end = start + sizes[area];
4989 		va = pvm_find_va_enclose_addr(base + end);
4990 	}
4991 
4992 	/* we've found a fitting base, insert all va's */
4993 	for (area = 0; area < nr_vms; area++) {
4994 		int ret;
4995 
4996 		start = base + offsets[area];
4997 		size = sizes[area];
4998 
4999 		va = pvm_find_va_enclose_addr(start);
5000 		if (WARN_ON_ONCE(va == NULL))
5001 			/* It is a BUG(), but trigger recovery instead. */
5002 			goto recovery;
5003 
5004 		ret = va_clip(&free_vmap_area_root,
5005 			&free_vmap_area_list, va, start, size);
5006 		if (WARN_ON_ONCE(unlikely(ret)))
5007 			/* It is a BUG(), but trigger recovery instead. */
5008 			goto recovery;
5009 
5010 		/* Allocated area. */
5011 		va = vas[area];
5012 		va->va_start = start;
5013 		va->va_end = start + size;
5014 	}
5015 
5016 	spin_unlock(&free_vmap_area_lock);
5017 
5018 	/* populate the kasan shadow space */
5019 	for (area = 0; area < nr_vms; area++) {
5020 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
5021 			goto err_free_shadow;
5022 	}
5023 
5024 	/* insert all vm's */
5025 	for (area = 0; area < nr_vms; area++) {
5026 		struct vmap_node *vn = addr_to_node(vas[area]->va_start);
5027 
5028 		spin_lock(&vn->busy.lock);
5029 		insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
5030 		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
5031 				 pcpu_get_vm_areas);
5032 		spin_unlock(&vn->busy.lock);
5033 	}
5034 
5035 	/*
5036 	 * Mark allocated areas as accessible. Do it now as a best-effort
5037 	 * approach, as they can be mapped outside of vmalloc code.
5038 	 * With hardware tag-based KASAN, marking is skipped for
5039 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
5040 	 */
5041 	kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL);
5042 
5043 	kfree(vas);
5044 	return vms;
5045 
5046 recovery:
5047 	/*
5048 	 * Remove previously allocated areas. There is no
5049 	 * need to remove these areas from the busy tree,
5050 	 * because they are inserted only on the final step
5051 	 * and only when pcpu_get_vm_areas() succeeds.
5052 	 */
5053 	while (area--) {
5054 		orig_start = vas[area]->va_start;
5055 		orig_end = vas[area]->va_end;
5056 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
5057 				&free_vmap_area_list);
5058 		if (va)
5059 			kasan_release_vmalloc(orig_start, orig_end,
5060 				va->va_start, va->va_end,
5061 				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
5062 		vas[area] = NULL;
5063 	}
5064 
5065 overflow:
5066 	spin_unlock(&free_vmap_area_lock);
5067 	if (!purged) {
5068 		reclaim_and_purge_vmap_areas();
5069 		purged = true;
5070 
5071 		/* Before "retry", check whether we recovered. */
5072 		for (area = 0; area < nr_vms; area++) {
5073 			if (vas[area])
5074 				continue;
5075 
5076 			vas[area] = kmem_cache_zalloc(
5077 				vmap_area_cachep, GFP_KERNEL);
5078 			if (!vas[area])
5079 				goto err_free;
5080 		}
5081 
5082 		goto retry;
5083 	}
5084 
5085 err_free:
5086 	for (area = 0; area < nr_vms; area++) {
5087 		if (vas[area])
5088 			kmem_cache_free(vmap_area_cachep, vas[area]);
5089 
5090 		kfree(vms[area]);
5091 	}
5092 err_free2:
5093 	kfree(vas);
5094 	kfree(vms);
5095 	return NULL;
5096 
5097 err_free_shadow:
5098 	spin_lock(&free_vmap_area_lock);
5099 	/*
5100 	 * We release all the vmalloc shadows, even the ones for regions that
5101 	 * hadn't been successfully added. This relies on kasan_release_vmalloc
5102 	 * being able to tolerate this case.
5103 	 */
5104 	for (area = 0; area < nr_vms; area++) {
5105 		orig_start = vas[area]->va_start;
5106 		orig_end = vas[area]->va_end;
5107 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
5108 				&free_vmap_area_list);
5109 		if (va)
5110 			kasan_release_vmalloc(orig_start, orig_end,
5111 				va->va_start, va->va_end,
5112 				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
5113 		vas[area] = NULL;
5114 		kfree(vms[area]);
5115 	}
5116 	spin_unlock(&free_vmap_area_lock);
5117 	kfree(vas);
5118 	kfree(vms);
5119 	return NULL;
5120 }
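
/*
 * Usage sketch (not part of this file): how a caller such as the percpu
 * allocator might pair pcpu_get_vm_areas() with pcpu_free_vm_areas().
 * The layout below -- two page-sized groups at hypothetical offsets --
 * is purely illustrative.
 */
static struct vm_struct **example_reserve_percpu_kva(void)
{
	unsigned long group_offsets[] = { 0, PAGE_SIZE };
	size_t group_sizes[] = { PAGE_SIZE, PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(group_offsets, group_sizes,
				ARRAY_SIZE(group_sizes), PAGE_SIZE);
	if (!vms)
		return NULL;	/* the whole set failed as a unit */

	/* ... map pages into each vms[i] here ... */
	return vms;		/* undo later with pcpu_free_vm_areas(vms, 2) */
}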
5121 
5122 /**
5123  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
5124  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
5125  * @nr_vms: the number of allocated areas
5126  *
5127  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
5128  */
pcpu_free_vm_areas(struct vm_struct ** vms,int nr_vms)5129 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
5130 {
5131 	int i;
5132 
5133 	for (i = 0; i < nr_vms; i++)
5134 		free_vm_area(vms[i]);
5135 	kfree(vms);
5136 }
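
/*
 * A minimal sketch (illustrative, not part of this file) of the
 * wraparound guard used by the search loop in pcpu_get_vm_areas():
 * "base" is computed by subtraction and may have wrapped below
 * vmalloc_start, so both sides are biased by the same last_end
 * before comparing.
 */
static inline bool example_base_underflowed(unsigned long base,
		unsigned long last_end, unsigned long vmalloc_start)
{
	/* If base wrapped, base + last_end falls below the biased start. */
	return base + last_end < vmalloc_start + last_end;
}
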
5137 #endif	/* CONFIG_SMP */
5138 
5139 #ifdef CONFIG_PRINTK
vmalloc_dump_obj(void * object)5140 bool vmalloc_dump_obj(void *object)
5141 {
5142 	const void *caller;
5143 	struct vm_struct *vm;
5144 	struct vmap_area *va;
5145 	struct vmap_node *vn;
5146 	unsigned long addr;
5147 	unsigned int nr_pages;
5148 
5149 	addr = PAGE_ALIGN((unsigned long) object);
5150 	vn = addr_to_node(addr);
5151 
5152 	if (!spin_trylock(&vn->busy.lock))
5153 		return false;
5154 
5155 	va = __find_vmap_area(addr, &vn->busy.root);
5156 	if (!va || !va->vm) {
5157 		spin_unlock(&vn->busy.lock);
5158 		return false;
5159 	}
5160 
5161 	vm = va->vm;
5162 	addr = (unsigned long) vm->addr;
5163 	caller = vm->caller;
5164 	nr_pages = vm->nr_pages;
5165 	spin_unlock(&vn->busy.lock);
5166 
5167 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
5168 		nr_pages, addr, caller);
5169 
5170 	return true;
5171 }
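
/*
 * Usage sketch (illustrative): vmalloc_dump_obj() is best-effort -- it
 * uses spin_trylock() so it is safe from any context and reports failure
 * rather than blocking. A caller in the style of mem_dump_obj() can try
 * it first and fall back to other identification methods:
 */
static void example_dump_obj(void *object)
{
	/* Prints provenance and returns true for vmalloc memory. */
	if (vmalloc_dump_obj(object))
		return;

	/* Fall back to slab/page identification here. */
	pr_cont(" non-vmalloc memory\n");
}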
5172 #endif
5173 
5174 #ifdef CONFIG_PROC_FS
5175 
5176 /*
5177  * Print number of pages allocated on each memory node.
5178  *
5179  * This function can only be called if CONFIG_NUMA is enabled
5180  * and the VM_UNINITIALIZED bit in v->flags is not set.
5181  */
show_numa_info(struct seq_file * m,struct vm_struct * v,unsigned int * counters)5182 static void show_numa_info(struct seq_file *m, struct vm_struct *v,
5183 				 unsigned int *counters)
5184 {
5185 	unsigned int nr;
5186 	unsigned int step = 1U << vm_area_page_order(v);
5187 
5188 	if (!counters)
5189 		return;
5190 
5191 	memset(counters, 0, nr_node_ids * sizeof(unsigned int));
5192 
5193 	for (nr = 0; nr < v->nr_pages; nr += step)
5194 		counters[page_to_nid(v->pages[nr])] += step;
5195 	for_each_node_state(nr, N_HIGH_MEMORY)
5196 		if (counters[nr])
5197 			seq_printf(m, " N%u=%u", nr, counters[nr]);
5198 }
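
/*
 * Stand-alone sketch (illustrative) of the counting logic above: every
 * 1 << page_order entries of v->pages come from one physical allocation,
 * so inspecting the first sub-page of each block and crediting the whole
 * block to its node is sufficient.
 */
static unsigned int example_pages_on_node(struct vm_struct *v, int nid)
{
	unsigned int step = 1U << vm_area_page_order(v);
	unsigned int i, count = 0;

	for (i = 0; i < v->nr_pages; i += step) {
		if (page_to_nid(v->pages[i]) == nid)
			count += step;	/* whole block sits on one node */
	}

	return count;
}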
5199 
show_purge_info(struct seq_file * m)5200 static void show_purge_info(struct seq_file *m)
5201 {
5202 	struct vmap_node *vn;
5203 	struct vmap_area *va;
5204 
5205 	for_each_vmap_node(vn) {
5206 		spin_lock(&vn->lazy.lock);
5207 		list_for_each_entry(va, &vn->lazy.head, list) {
5208 			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
5209 				(void *)va->va_start, (void *)va->va_end,
5210 				va_size(va));
5211 		}
5212 		spin_unlock(&vn->lazy.lock);
5213 	}
5214 }
5215 
vmalloc_info_show(struct seq_file * m,void * p)5216 static int vmalloc_info_show(struct seq_file *m, void *p)
5217 {
5218 	struct vmap_node *vn;
5219 	struct vmap_area *va;
5220 	struct vm_struct *v;
5221 	unsigned int *counters;
5222 
5223 	if (IS_ENABLED(CONFIG_NUMA))
5224 		counters = kmalloc_array(nr_node_ids, sizeof(unsigned int), GFP_KERNEL);
5225 
5226 	for_each_vmap_node(vn) {
5227 		spin_lock(&vn->busy.lock);
5228 		list_for_each_entry(va, &vn->busy.head, list) {
5229 			if (!va->vm) {
5230 				if (va->flags & VMAP_RAM)
5231 					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
5232 						(void *)va->va_start, (void *)va->va_end,
5233 						va_size(va));
5234 
5235 				continue;
5236 			}
5237 
5238 			v = va->vm;
5239 			if (v->flags & VM_UNINITIALIZED)
5240 				continue;
5241 
5242 			/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
5243 			smp_rmb();
5244 
5245 			seq_printf(m, "0x%pK-0x%pK %7ld",
5246 				v->addr, v->addr + v->size, v->size);
5247 
5248 			if (v->caller)
5249 				seq_printf(m, " %pS", v->caller);
5250 
5251 			if (v->nr_pages)
5252 				seq_printf(m, " pages=%d", v->nr_pages);
5253 
5254 			if (v->phys_addr)
5255 				seq_printf(m, " phys=%pa", &v->phys_addr);
5256 
5257 			if (v->flags & VM_IOREMAP)
5258 				seq_puts(m, " ioremap");
5259 
5260 			if (v->flags & VM_SPARSE)
5261 				seq_puts(m, " sparse");
5262 
5263 			if (v->flags & VM_ALLOC)
5264 				seq_puts(m, " vmalloc");
5265 
5266 			if (v->flags & VM_MAP)
5267 				seq_puts(m, " vmap");
5268 
5269 			if (v->flags & VM_USERMAP)
5270 				seq_puts(m, " user");
5271 
5272 			if (v->flags & VM_DMA_COHERENT)
5273 				seq_puts(m, " dma-coherent");
5274 
5275 			if (is_vmalloc_addr(v->pages))
5276 				seq_puts(m, " vpages");
5277 
5278 			if (IS_ENABLED(CONFIG_NUMA))
5279 				show_numa_info(m, v, counters);
5280 
5281 			seq_putc(m, '\n');
5282 		}
5283 		spin_unlock(&vn->busy.lock);
5284 	}
5285 
5286 	/*
5287 	 * As a final step, dump "unpurged" areas.
5288 	 */
5289 	show_purge_info(m);
5290 	if (IS_ENABLED(CONFIG_NUMA))
5291 		kfree(counters);
5292 	return 0;
5293 }
5294 
proc_vmalloc_init(void)5295 static int __init proc_vmalloc_init(void)
5296 {
5297 	proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show);
5298 	return 0;
5299 }
5300 module_init(proc_vmalloc_init);
5301 
5302 #endif
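
/*
 * Reading the interface registered above from user space (a sketch;
 * the file is mode 0400, so a privileged reader is assumed). Each line
 * follows the seq_printf() formats in vmalloc_info_show():
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[512];
 *		FILE *f = fopen("/proc/vmallocinfo", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */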
5303 
vmap_init_free_space(void)5304 static void __init vmap_init_free_space(void)
5305 {
5306 	unsigned long vmap_start = 1;
5307 	const unsigned long vmap_end = ULONG_MAX;
5308 	struct vmap_area *free;
5309 	struct vm_struct *busy;
5310 
5311 	/*
5312 	 *     B     F     B     B     B     F
5313 	 * -|-----|.....|-----|-----|-----|.....|-
5314 	 *  |           The KVA space           |
5315 	 *  |<--------------------------------->|
5316 	 */
5317 	for (busy = vmlist; busy; busy = busy->next) {
5318 		if ((unsigned long) busy->addr - vmap_start > 0) {
5319 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5320 			if (!WARN_ON_ONCE(!free)) {
5321 				free->va_start = vmap_start;
5322 				free->va_end = (unsigned long) busy->addr;
5323 
5324 				insert_vmap_area_augment(free, NULL,
5325 					&free_vmap_area_root,
5326 						&free_vmap_area_list);
5327 			}
5328 		}
5329 
5330 		vmap_start = (unsigned long) busy->addr + busy->size;
5331 	}
5332 
5333 	if (vmap_end - vmap_start > 0) {
5334 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5335 		if (!WARN_ON_ONCE(!free)) {
5336 			free->va_start = vmap_start;
5337 			free->va_end = vmap_end;
5338 
5339 			insert_vmap_area_augment(free, NULL,
5340 				&free_vmap_area_root,
5341 					&free_vmap_area_list);
5342 		}
5343 	}
5344 }
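
/*
 * Worked example of the gap-filling above (addresses illustrative):
 * with busy areas [0x100..0x200) and [0x300..0x400) imported from
 * vmlist, the loop inserts free areas [0x1..0x100) and [0x200..0x300),
 * and the trailing check inserts [0x400..ULONG_MAX). Busy and free
 * areas therefore partition the whole KVA space between them.
 */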
5345 
vmap_init_nodes(void)5346 static void vmap_init_nodes(void)
5347 {
5348 	struct vmap_node *vn;
5349 	int i;
5350 
5351 #if BITS_PER_LONG == 64
5352 	/*
5353 	 * The maximum number of nodes is capped at 128, so the scale
5354 	 * factor is 1 for systems whose number of cores is less than
5355 	 * or equal to that threshold.
5356 	 *
5357 	 * A note on NUMA awareness: for bigger systems, for example
5358 	 * multi-socket NUMA machines that can end up with thousands
5359 	 * of cores in total, "sub-numa-clustering" should be added.
5360 	 *
5361 	 * In that case a NUMA domain is treated as a single entity
5362 	 * with dedicated sub-nodes in it, each describing one group
5363 	 * or set of cores. Per-domain purging and per-domain
5364 	 * balancing would then need to be added as well.
5365 	 */
5366 	int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
5367 
5368 	if (n > 1) {
5369 		vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT);
5370 		if (vn) {
5371 			/* Node partition is 16 pages. */
5372 			vmap_zone_size = (1 << 4) * PAGE_SIZE;
5373 			nr_vmap_nodes = n;
5374 			vmap_nodes = vn;
5375 		} else {
5376 			pr_err("Failed to allocate the nodes array, disabling the node layer\n");
5377 		}
5378 	}
5379 #endif
5380 
5381 	for_each_vmap_node(vn) {
5382 		vn->busy.root = RB_ROOT;
5383 		INIT_LIST_HEAD(&vn->busy.head);
5384 		spin_lock_init(&vn->busy.lock);
5385 
5386 		vn->lazy.root = RB_ROOT;
5387 		INIT_LIST_HEAD(&vn->lazy.head);
5388 		spin_lock_init(&vn->lazy.lock);
5389 
5390 		for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
5391 			INIT_LIST_HEAD(&vn->pool[i].head);
5392 			WRITE_ONCE(vn->pool[i].len, 0);
5393 		}
5394 
5395 		spin_lock_init(&vn->pool_lock);
5396 	}
5397 }
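
/*
 * Sketch of how the node layer set up above is consumed elsewhere in
 * this file (illustrative; the real lookup is addr_to_node()): with
 * vmap_zone_size set to 16 pages, addresses are striped across the
 * nodes in 16-page zones:
 */
static inline struct vmap_node *example_addr_to_node(unsigned long addr)
{
	return &vmap_nodes[(addr / vmap_zone_size) % nr_vmap_nodes];
}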
5398 
5399 static unsigned long
vmap_node_shrink_count(struct shrinker * shrink,struct shrink_control * sc)5400 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5401 {
5402 	unsigned long count = 0;
5403 	struct vmap_node *vn;
5404 	int i;
5405 
5406 	for_each_vmap_node(vn) {
5407 		for (i = 0; i < MAX_VA_SIZE_PAGES; i++)
5408 			count += READ_ONCE(vn->pool[i].len);
5409 	}
5410 
5411 	return count ? count : SHRINK_EMPTY;
5412 }
5413 
5414 static unsigned long
vmap_node_shrink_scan(struct shrinker * shrink,struct shrink_control * sc)5415 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5416 {
5417 	struct vmap_node *vn;
5418 
5419 	for_each_vmap_node(vn)
5420 		decay_va_pool_node(vn, true);
5421 
5422 	return SHRINK_STOP;
5423 }
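
/*
 * Shrinker protocol recap for the two callbacks above (registered in
 * vmalloc_init() below): count_objects() returns an estimate, or
 * SHRINK_EMPTY when the per-node pools hold nothing so the core can
 * skip the scan; scan_objects() returns the number of objects freed,
 * or SHRINK_STOP to end this scan pass -- used here because
 * decay_va_pool_node() already drains each pool in one call.
 */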
5424 
vmalloc_init(void)5425 void __init vmalloc_init(void)
5426 {
5427 	struct shrinker *vmap_node_shrinker;
5428 	struct vmap_area *va;
5429 	struct vmap_node *vn;
5430 	struct vm_struct *tmp;
5431 	int i;
5432 
5433 	/*
5434 	 * Create the cache for vmap_area objects.
5435 	 */
5436 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
5437 
5438 	for_each_possible_cpu(i) {
5439 		struct vmap_block_queue *vbq;
5440 		struct vfree_deferred *p;
5441 
5442 		vbq = &per_cpu(vmap_block_queue, i);
5443 		spin_lock_init(&vbq->lock);
5444 		INIT_LIST_HEAD(&vbq->free);
5445 		p = &per_cpu(vfree_deferred, i);
5446 		init_llist_head(&p->list);
5447 		INIT_WORK(&p->wq, delayed_vfree_work);
5448 		xa_init(&vbq->vmap_blocks);
5449 	}
5450 
5451 	/*
5452 	 * Setup nodes before importing vmlist.
5453 	 */
5454 	vmap_init_nodes();
5455 
5456 	/* Import existing vmlist entries. */
5457 	for (tmp = vmlist; tmp; tmp = tmp->next) {
5458 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5459 		if (WARN_ON_ONCE(!va))
5460 			continue;
5461 
5462 		va->va_start = (unsigned long)tmp->addr;
5463 		va->va_end = va->va_start + tmp->size;
5464 		va->vm = tmp;
5465 
5466 		vn = addr_to_node(va->va_start);
5467 		insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
5468 	}
5469 
5470 	/*
5471 	 * Now we can initialize the free vmap space.
5472 	 */
5473 	vmap_init_free_space();
5474 	vmap_initialized = true;
5475 
5476 	vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
5477 	if (!vmap_node_shrinker) {
5478 		pr_err("Failed to allocate vmap-node shrinker!\n");
5479 		return;
5480 	}
5481 
5482 	vmap_node_shrinker->count_objects = vmap_node_shrink_count;
5483 	vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
5484 	shrinker_register(vmap_node_shrinker);
5485 }
5486