xref: /kvm-unit-tests/lib/vmalloc.c (revision b9289d76b946dc45c5221e142c5473ebfff1ca86)
/*
 * Copyright (C) 2012, 2017, Red Hat Inc.
 *
 * This allocator provides contiguous virtual addresses with page
 * granularity.
 */

#include "libcflat.h"
#include "asm/spinlock.h"
#include "asm/page.h"
#include "asm/io.h"
#include "alloc.h"
#include "alloc_phys.h"
#include "alloc_page.h"
#include <bitops.h>
#include "vmalloc.h"

#define VM_MAGIC 0x7E57C0DE
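
/*
 * Both flavors of allocation store their metadata right before the
 * address returned to the caller: a full struct metadata for multi-page
 * allocations (GET_METADATA), just the magic value for single-page
 * allocations (GET_MAGIC).
 */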
#define GET_METADATA(x) (((struct metadata *)(x)) - 1)
#define GET_MAGIC(x) (*((unsigned long *)(x) - 1))

struct metadata {
	unsigned long npages;
	unsigned long magic;
};

/* protects vfree_top and the switch to the vmalloc alloc_ops */
static struct spinlock lock;
/* top of the downward-growing free virtual address space */
static void *vfree_top = 0;
/* root of the page tables, as returned by setup_mmu() */
static void *page_root;

/*
 * Allocate a certain number of pages from the virtual address space (without
 * physical backing).
 *
 * nr is the number of pages to allocate
 * align_order is the order of the alignment *in pages*; the allocation is
 * aligned to (1 << align_order) pages
 * metadata indicates whether an extra (unaligned) page needs to be allocated
 * right before the main (aligned) allocation.
 *
 * The return value points to the first allocated virtual page, which will
 * be the (potentially unaligned) metadata page if the metadata flag is
 * specified.
 */
static void *do_alloc_vpages(ulong nr, unsigned int align_order, bool metadata)
{
	uintptr_t ptr;

	spin_lock(&lock);
	ptr = (uintptr_t)vfree_top;
	ptr -= PAGE_SIZE * nr;
	ptr &= GENMASK_ULL(63, PAGE_SHIFT + align_order);
	if (metadata)
		ptr -= PAGE_SIZE;
	vfree_top = (void *)ptr;
	spin_unlock(&lock);

	/* Cannot return vfree_top here, we are outside the lock! */
	return (void *)ptr;
}
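
/*
 * Worked example (hypothetical numbers, 4k pages): with vfree_top at
 * 0x10000000, do_alloc_vpages(3, 0, true) lowers ptr to 0xfffd000,
 * the alignment mask leaves it unchanged (already page-aligned), and
 * the metadata flag lowers it once more to 0xfffc000, which becomes
 * both the new vfree_top and the return value.
 */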

void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
{
	return do_alloc_vpages(nr, align_order, false);
}

void *alloc_vpages(ulong nr)
{
	return alloc_vpages_aligned(nr, 0);
}

void *alloc_vpage(void)
{
	return alloc_vpages(1);
}
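
/*
 * Map the physical range [phys, phys + size) at newly allocated,
 * consecutive virtual addresses and return the base of the mapping;
 * phys is rounded down, and size up, to a page boundary.
 */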
void *vmap(phys_addr_t phys, size_t size)
{
	void *mem, *p;
	size_t pages;

	size = PAGE_ALIGN(size);
	pages = size / PAGE_SIZE;
	mem = p = alloc_vpages(pages);

	phys &= ~(unsigned long long)(PAGE_SIZE - 1);
	while (pages--) {
		install_page(page_root, phys, p);
		phys += PAGE_SIZE;
		p += PAGE_SIZE;
	}
	return mem;
}
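
/*
 * Example (hypothetical, x86-specific): give a test access to the local
 * APIC MMIO page after setup_vm() has run:
 *
 *	volatile u32 *lapic = vmap(0xfee00000, PAGE_SIZE);
 *	u32 id = lapic[0x20 / 4];	// APIC ID register
 */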

/*
 * Allocate one page, for an object with the specified alignment.
 * The resulting pointer will be aligned to the required alignment, but
 * intentionally not page-aligned.
 * The metadata for single-page allocations is just the magic value,
 * which is placed right before the returned pointer, as for bigger
 * allocations.
 */
static void *vm_alloc_one_page(size_t alignment)
{
	void *p;

	/* this guarantees that there will be space for the magic value */
	assert(alignment >= sizeof(uintptr_t));
	assert(alignment < PAGE_SIZE);
	p = alloc_vpage();
	install_page(page_root, virt_to_phys(alloc_page()), p);
	p = (void *)((uintptr_t)p + alignment);
	/* write the magic value right before the returned address */
	GET_MAGIC(p) = VM_MAGIC;
	return p;
}
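
/*
 * Resulting layout of the page, for alignment A:
 *
 *	[ A - sizeof(unsigned long) unused bytes | VM_MAGIC | object at A ]
 *
 * vm_free() recognizes such allocations by their non-page-aligned
 * pointer and frees the single backing page.
 */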

/*
 * Allocate virtual memory, with the specified minimum alignment.
 * If the allocation fits in one page, only one page is allocated. Otherwise,
 * enough pages are allocated for the object, plus one extra page to hold
 * its metadata.
 */
static void *vm_memalign(size_t alignment, size_t size)
{
	struct metadata *m;
	phys_addr_t pa;
	uintptr_t p;
	void *mem;
	size_t i;

	if (!size)
		return NULL;
	assert(is_power_of_2(alignment));

	if (alignment < sizeof(uintptr_t))
		alignment = sizeof(uintptr_t);
	/* it fits in one page, allocate only one page */
	if (alignment + size <= PAGE_SIZE)
		return vm_alloc_one_page(alignment);
	/* convert size to a page count, and alignment to a page order */
	size = PAGE_ALIGN(size) / PAGE_SIZE;
	alignment = get_order(PAGE_ALIGN(alignment) / PAGE_SIZE);
	mem = do_alloc_vpages(size, alignment, true);
	p = (uintptr_t)mem;
	/* skip the metadata page */
	mem = (void *)(p + PAGE_SIZE);
	/*
	 * time to actually allocate the physical pages to back our virtual
	 * allocation; note that we need to allocate one extra page (for the
	 * metadata), hence the <=
	 */
	for (i = 0; i <= size; i++, p += PAGE_SIZE) {
		pa = virt_to_phys(alloc_page());
		assert(pa);
		install_page(page_root, pa, (void *)p);
	}
	m = GET_METADATA(mem);
	m->npages = size;
	m->magic = VM_MAGIC;
	return mem;
}
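
/*
 * A multi-page allocation of npages pages thus occupies npages + 1
 * consecutive virtual pages, all with physical backing:
 *
 *	[ metadata page | npages aligned pages returned to the caller ]
 *
 * with the struct metadata sitting in the last bytes of the metadata page.
 */

/*
 * Free an allocation handed out by vm_memalign(): either a single page
 * (non-page-aligned pointer) or a multi-page allocation together with
 * its metadata page. Only the physical pages are given back to the page
 * allocator; the virtual range is not reused.
 */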
static void vm_free(void *mem)
{
	struct metadata *m;
	uintptr_t ptr, page, i;

	if (!mem)
		return;
	/* the pointer is not page-aligned, it was a single-page allocation */
	if (!IS_ALIGNED((uintptr_t)mem, PAGE_SIZE)) {
		assert(GET_MAGIC(mem) == VM_MAGIC);
		page = virt_to_pte_phys(page_root, mem) & PAGE_MASK;
		assert(page);
		free_page(phys_to_virt(page));
		return;
	}

	/* the pointer is page-aligned, it was a multi-page allocation */
	m = GET_METADATA(mem);
	assert(m->magic == VM_MAGIC);
	assert(m->npages > 0);
	assert(m->npages < BIT_ULL(BITS_PER_LONG - PAGE_SHIFT));
	/* free all the pages including the metadata page */
	ptr = (uintptr_t)m & PAGE_MASK;
	for (i = 0; i < m->npages + 1; i++, ptr += PAGE_SIZE) {
		page = virt_to_pte_phys(page_root, (void *)ptr) & PAGE_MASK;
		assert(page);
		free_page(phys_to_virt(page));
	}
}

static struct alloc_ops vmalloc_ops = {
	.memalign = vm_memalign,
	.free = vm_free,
};
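
/*
 * Weak hook: architectures can override this to make memory beyond the
 * region initially handed to the allocators available as well; the
 * default does nothing.
 */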
void __attribute__((__weak__)) find_highmem(void)
{
}
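
/*
 * Set the top of the virtual address space: do_alloc_vpages() grows
 * downwards from here. Must be called before the vmalloc ops are
 * installed, as the assert below checks.
 */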
void init_alloc_vpage(void *top)
{
	spin_lock(&lock);
	assert(alloc_ops != &vmalloc_ops);
	vfree_top = top;
	spin_unlock(&lock);
}
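
/*
 * Weak hook: architectures without usable virtual memory support
 * override this to return false, so that tests can skip setup_vm().
 */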
bool __attribute__((__weak__)) vm_available(void)
{
	return true;
}
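
/*
 * Hand all unused memory to the page allocator, enable the MMU, and
 * switch alloc_ops over to the vmalloc implementation above. The opaque
 * argument is passed through to the architecture's setup_mmu().
 */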
void __setup_vm(void *opaque)
{
	phys_addr_t base, top;

	assert_msg(vm_available(), "Virtual memory not available. Must check vm_available() before calling setup_vm()");

	if (alloc_ops == &vmalloc_ops)
		return;

	phys_alloc_get_unused(&base, &top);
	assert(base != top || page_alloc_initialized());
	/*
	 * Give low memory immediately to the page allocator,
	 * so that it can be used to allocate page tables.
	 */
	if (!page_alloc_initialized()) {
		base = PAGE_ALIGN(base) >> PAGE_SHIFT;
		top = top >> PAGE_SHIFT;
		page_alloc_init_area(AREA_ANY_NUMBER, base, top);
		page_alloc_ops_enable();
	}

	find_highmem();
	/*
	 * find_highmem() may have made more memory visible; give whatever
	 * is still unused to the page allocator as well.
	 */
	phys_alloc_get_unused(&base, &top);
	page_root = setup_mmu(top, opaque);
	if (base != top) {
		base = PAGE_ALIGN(base) >> PAGE_SHIFT;
		top = top >> PAGE_SHIFT;
		page_alloc_init_area(AREA_ANY_NUMBER, base, top);
	}

	spin_lock(&lock);
	assert(alloc_ops != &vmalloc_ops);
	alloc_ops = &vmalloc_ops;
	spin_unlock(&lock);
}

void setup_vm(void)
{
	__setup_vm(NULL);
}
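
/*
 * Typical use in a test (hypothetical sketch): once setup_vm() has run,
 * malloc() and free() are served by the vmalloc ops above:
 *
 *	if (vm_available())
 *		setup_vm();
 *	void *buf = malloc(3 * PAGE_SIZE);	// multi-page vm_memalign()
 *	free(buf);				// routed to vm_free()
 */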
255