xref: /kvm-unit-tests/lib/vmalloc.c (revision 6ea7326a9de006edca66e342959b78c1fb4b776f)
1efd8e5aaSPaolo Bonzini /*
2efd8e5aaSPaolo Bonzini  * Copyright (C) 2012, 2017, Red Hat Inc.
3efd8e5aaSPaolo Bonzini  *
4efd8e5aaSPaolo Bonzini  * This allocator provides contiguous physical addresses with page
5efd8e5aaSPaolo Bonzini  * granularity.
6efd8e5aaSPaolo Bonzini  */
7efd8e5aaSPaolo Bonzini 
8efd8e5aaSPaolo Bonzini #include "libcflat.h"
9efd8e5aaSPaolo Bonzini #include "asm/spinlock.h"
10efd8e5aaSPaolo Bonzini #include "asm/page.h"
11937e2392SPaolo Bonzini #include "asm/io.h"
12dcda215bSPaolo Bonzini #include "alloc.h"
13937e2392SPaolo Bonzini #include "alloc_phys.h"
14937e2392SPaolo Bonzini #include "alloc_page.h"
15937e2392SPaolo Bonzini #include "vmalloc.h"
16efd8e5aaSPaolo Bonzini 
/* Serializes updates to vfree_top across CPUs. */
static struct spinlock lock;
/* Top of the free virtual address area; allocations grow downwards from here. */
static void *vfree_top = 0;
/* Root of the page table handed to install_page()/virt_to_pte_phys(). */
static void *page_root;
20efd8e5aaSPaolo Bonzini 
21efd8e5aaSPaolo Bonzini void *alloc_vpages(ulong nr)
22efd8e5aaSPaolo Bonzini {
23efd8e5aaSPaolo Bonzini 	spin_lock(&lock);
24efd8e5aaSPaolo Bonzini 	vfree_top -= PAGE_SIZE * nr;
25efd8e5aaSPaolo Bonzini 	spin_unlock(&lock);
26efd8e5aaSPaolo Bonzini 	return vfree_top;
27efd8e5aaSPaolo Bonzini }
28efd8e5aaSPaolo Bonzini 
/* Convenience wrapper: reserve exactly one page of virtual address space. */
void *alloc_vpage(void)
{
	return alloc_vpages(1);
}
33efd8e5aaSPaolo Bonzini 
34efd8e5aaSPaolo Bonzini void init_alloc_vpage(void *top)
35efd8e5aaSPaolo Bonzini {
36efd8e5aaSPaolo Bonzini 	vfree_top = top;
37efd8e5aaSPaolo Bonzini }
38937e2392SPaolo Bonzini 
39dcda215bSPaolo Bonzini void *vmap(phys_addr_t phys, size_t size)
40dcda215bSPaolo Bonzini {
41dcda215bSPaolo Bonzini 	void *mem, *p;
42dcda215bSPaolo Bonzini 	unsigned pages;
43dcda215bSPaolo Bonzini 
44*6ea7326aSClaudio Imbrenda 	size = PAGE_ALIGN(size);
45dcda215bSPaolo Bonzini 	pages = size / PAGE_SIZE;
46dcda215bSPaolo Bonzini 	mem = p = alloc_vpages(pages);
47dcda215bSPaolo Bonzini 
48dcda215bSPaolo Bonzini 	phys &= ~(unsigned long long)(PAGE_SIZE - 1);
49dcda215bSPaolo Bonzini 	while (pages--) {
50dcda215bSPaolo Bonzini 		install_page(page_root, phys, p);
51dcda215bSPaolo Bonzini 		phys += PAGE_SIZE;
52dcda215bSPaolo Bonzini 		p += PAGE_SIZE;
53dcda215bSPaolo Bonzini 	}
54dcda215bSPaolo Bonzini 	return mem;
55dcda215bSPaolo Bonzini }
56dcda215bSPaolo Bonzini 
57dcda215bSPaolo Bonzini static void *vm_memalign(size_t alignment, size_t size)
58dcda215bSPaolo Bonzini {
59dcda215bSPaolo Bonzini 	void *mem, *p;
60dcda215bSPaolo Bonzini 	unsigned pages;
61dcda215bSPaolo Bonzini 
62dcda215bSPaolo Bonzini 	assert(alignment <= PAGE_SIZE);
63*6ea7326aSClaudio Imbrenda 	size = PAGE_ALIGN(size);
64dcda215bSPaolo Bonzini 	pages = size / PAGE_SIZE;
65dcda215bSPaolo Bonzini 	mem = p = alloc_vpages(pages);
66dcda215bSPaolo Bonzini 	while (pages--) {
67dcda215bSPaolo Bonzini 		phys_addr_t pa = virt_to_phys(alloc_page());
68dcda215bSPaolo Bonzini 		install_page(page_root, pa, p);
69dcda215bSPaolo Bonzini 		p += PAGE_SIZE;
70dcda215bSPaolo Bonzini 	}
71dcda215bSPaolo Bonzini 	return mem;
72dcda215bSPaolo Bonzini }
73dcda215bSPaolo Bonzini 
74dcda215bSPaolo Bonzini static void vm_free(void *mem, size_t size)
75dcda215bSPaolo Bonzini {
76dcda215bSPaolo Bonzini 	while (size) {
77dcda215bSPaolo Bonzini 		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
78dcda215bSPaolo Bonzini 		mem += PAGE_SIZE;
79dcda215bSPaolo Bonzini 		size -= PAGE_SIZE;
80dcda215bSPaolo Bonzini 	}
81dcda215bSPaolo Bonzini }
82dcda215bSPaolo Bonzini 
/*
 * Allocator backend used once the MMU is up: memory is handed out as
 * page-granular virtual mappings backed by individual physical pages.
 */
static struct alloc_ops vmalloc_ops = {
	.memalign = vm_memalign,
	.free = vm_free,
	.align_min = PAGE_SIZE,
};
88dcda215bSPaolo Bonzini 
/*
 * Arch hook called from setup_vm() between freeing low memory and the
 * final phys_alloc_get_unused() pass.  This weak default does nothing;
 * an architecture may override it (e.g. to make high memory available
 * to the physical allocator).
 */
void __attribute__((__weak__)) find_highmem(void)
{
}
9248a0145fSPaolo Bonzini 
93937e2392SPaolo Bonzini void setup_vm()
94937e2392SPaolo Bonzini {
95937e2392SPaolo Bonzini 	phys_addr_t base, top;
96dcda215bSPaolo Bonzini 
97dcda215bSPaolo Bonzini 	if (alloc_ops == &vmalloc_ops)
98dcda215bSPaolo Bonzini 		return;
99dcda215bSPaolo Bonzini 
100937e2392SPaolo Bonzini 	phys_alloc_get_unused(&base, &top);
101bf62a925SAndrew Jones 	assert(base != top || page_alloc_initialized());
10248a0145fSPaolo Bonzini 	/*
10348a0145fSPaolo Bonzini 	 * Give low memory immediately to the page allocator,
10448a0145fSPaolo Bonzini 	 * so that it can be used to allocate page tables.
10548a0145fSPaolo Bonzini 	 */
106bf62a925SAndrew Jones 	if (!page_alloc_initialized()) {
107*6ea7326aSClaudio Imbrenda 		base = PAGE_ALIGN(base);
108937e2392SPaolo Bonzini 		top = top & -PAGE_SIZE;
109937e2392SPaolo Bonzini 		free_pages(phys_to_virt(base), top - base);
110bf62a925SAndrew Jones 	}
11148a0145fSPaolo Bonzini 
11248a0145fSPaolo Bonzini 	find_highmem();
11348a0145fSPaolo Bonzini 	phys_alloc_get_unused(&base, &top);
114937e2392SPaolo Bonzini 	page_root = setup_mmu(top);
11548a0145fSPaolo Bonzini 	if (base != top) {
116*6ea7326aSClaudio Imbrenda 		base = PAGE_ALIGN(base);
11748a0145fSPaolo Bonzini 		top = top & -PAGE_SIZE;
11848a0145fSPaolo Bonzini 		free_pages(phys_to_virt(base), top - base);
11948a0145fSPaolo Bonzini 	}
12048a0145fSPaolo Bonzini 
121dcda215bSPaolo Bonzini 	alloc_ops = &vmalloc_ops;
122937e2392SPaolo Bonzini }
123