/*
 * Copyright (C) 2012, 2017, Red Hat Inc.
 *
 * This allocator provides contiguous virtual addresses with page
 * granularity; the backing physical pages need not be contiguous.
 */

#include "libcflat.h"
#include "asm/spinlock.h"
#include "asm/page.h"
#include "asm/io.h"
#include "alloc.h"
#include "alloc_phys.h"
#include "alloc_page.h"
#include <bitops.h>
#include "vmalloc.h"

static struct spinlock lock;
static void *vfree_top = NULL;
static void *page_root;

/*
 * Allocate a certain number of pages from the virtual address space (without
 * physical backing).
 *
 * nr is the number of pages to allocate
 * align_order is the order of alignment of the allocation, in pages
 */
void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
{
	uintptr_t ptr;

	spin_lock(&lock);
	ptr = (uintptr_t)vfree_top;
	ptr -= PAGE_SIZE * nr;
	/* the allocator grows downwards; clearing the low bits aligns ptr */
	ptr &= GENMASK_ULL(63, PAGE_SHIFT + align_order);
	vfree_top = (void *)ptr;
	spin_unlock(&lock);

	/* Cannot return vfree_top here, we are outside the lock! */
	return (void *)ptr;
}
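
/*
 * Illustrative sketch (not from the original source): reserving a
 * 64KiB-aligned run of 16 virtual pages, assuming 4KiB pages
 * (PAGE_SHIFT == 12), would look like this:
 *
 *	void *va = alloc_vpages_aligned(16, 4);	// 2^4 pages = 64KiB alignment
 *
 * The returned range has no physical backing; it must be populated
 * with install_page() before being touched.
 */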

void *alloc_vpages(ulong nr)
{
	return alloc_vpages_aligned(nr, 0);
}

void *alloc_vpage(void)
{
	return alloc_vpages(1);
}

/*
 * Map the page-aligned region around the given physical range at a
 * freshly allocated virtual address and return that address.
 */
void *vmap(phys_addr_t phys, size_t size)
{
	void *mem, *p;
	size_t pages;

	size = PAGE_ALIGN(size);
	pages = size / PAGE_SIZE;
	mem = p = alloc_vpages(pages);

	phys &= ~(unsigned long long)(PAGE_SIZE - 1);
	while (pages--) {
		install_page(page_root, phys, p);
		phys += PAGE_SIZE;
		p += PAGE_SIZE;
	}
	return mem;
}
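
/*
 * Illustrative sketch (not from the original source): mapping two pages
 * of a device's MMIO region at 0xfe000000 (a hypothetical address):
 *
 *	volatile u32 *regs = vmap(0xfe000000, 2 * PAGE_SIZE);
 *	u32 id = regs[0];	// access through the new virtual mapping
 *
 * Note that vmap() rounds phys down to a page boundary, so callers
 * must add back any offset within the first page themselves.
 */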

/*
 * Allocate virtual memory, with the specified minimum alignment.
 */
static void *vm_memalign(size_t alignment, size_t size)
{
	phys_addr_t pa;
	void *mem, *p;

	assert(is_power_of_2(alignment));

	/* from here on, size is a page count and alignment an order */
	size = PAGE_ALIGN(size) / PAGE_SIZE;
	alignment = get_order(PAGE_ALIGN(alignment) / PAGE_SIZE);
	mem = p = alloc_vpages_aligned(size, alignment);
	while (size--) {
		/* back each virtual page with a freshly allocated page */
		pa = virt_to_phys(alloc_page());
		assert(pa);
		install_page(page_root, pa, p);
		p += PAGE_SIZE;
	}
	return mem;
}

static void vm_free(void *mem, size_t size)
{
	/* round up to a whole number of pages so size cannot underflow */
	size = PAGE_ALIGN(size);
	while (size) {
		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
		mem += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

static struct alloc_ops vmalloc_ops = {
	.memalign = vm_memalign,
	.free = vm_free,
	.align_min = PAGE_SIZE,
};
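
/*
 * Illustrative note (not from the original source): once setup_vm()
 * has installed vmalloc_ops, generic allocations from alloc.h are
 * served by the functions above; for example
 *
 *	void *buf = memalign(0x10000, 3 * PAGE_SIZE);
 *
 * would reach vm_memalign(), returning a 64KiB-aligned virtual range
 * whose backing pages need not be physically contiguous.
 */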

/* Weak stub; architectures can override it to make high memory available. */
void __attribute__((__weak__)) find_highmem(void)
{
}

/*
 * Set the top of the virtual address space handed out by the vpage
 * allocator; only valid before the vmalloc allocator is activated.
 */
void init_alloc_vpage(void *top)
{
	spin_lock(&lock);
	assert(alloc_ops != &vmalloc_ops);
	vfree_top = top;
	spin_unlock(&lock);
}
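
/*
 * Illustrative sketch (not from the original source): architecture
 * setup code can seed the top of the virtual allocation area before
 * setup_vm() runs, e.g. with a hypothetical 3GiB ceiling:
 *
 *	init_alloc_vpage((void *)(3ul << 30));
 */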

void setup_vm(void)
{
	phys_addr_t base, top;

	if (alloc_ops == &vmalloc_ops)
		return;

	phys_alloc_get_unused(&base, &top);
	assert(base != top || page_alloc_initialized());
	/*
	 * Give low memory immediately to the page allocator,
	 * so that it can be used to allocate page tables.
	 */
	if (!page_alloc_initialized()) {
		base = PAGE_ALIGN(base);
		top = top & -PAGE_SIZE;
		free_pages(phys_to_virt(base), top - base);
	}

	find_highmem();
	phys_alloc_get_unused(&base, &top);
	page_root = setup_mmu(top);
	if (base != top) {
		base = PAGE_ALIGN(base);
		top = top & -PAGE_SIZE;
		free_pages(phys_to_virt(base), top - base);
	}

	spin_lock(&lock);
	assert(alloc_ops != &vmalloc_ops);
	alloc_ops = &vmalloc_ops;
	spin_unlock(&lock);
}
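
/*
 * Illustrative sketch (not from the original source): a typical unit
 * test enables virtual memory once and then allocates normally:
 *
 *	int main(int argc, char **argv)
 *	{
 *		setup_vm();			// switch to the vmalloc allocator
 *		void *p = malloc(8192);		// now backed by installed pages
 *		...
 *	}
 *
 * setup_vm() is idempotent: a second call returns early because
 * alloc_ops already points at vmalloc_ops.
 */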