/*
 * This work is licensed under the terms of the GNU LGPL, version 2.
 *
 * This is a simple allocator that provides contiguous physical addresses
 * with page granularity.
 */
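
/*
 * Minimal usage sketch (illustrative only, not part of the allocator): a test
 * typically seeds the free list from its setup code and then allocates and
 * frees pages as needed. The region "base" below is hypothetical.
 *
 *	// donate a page-aligned region of 64 pages to the allocator
 *	free_pages(base, 64 * PAGE_SIZE);
 *
 *	void *p = alloc_page();		// one zeroed page
 *	void *q = alloc_pages(2);	// four contiguous, naturally aligned pages
 *
 *	free_page(p);
 *	free_pages_by_order(q, 2);	// returns the four-page block
 */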
#include "libcflat.h"
#include "alloc.h"
#include "alloc_phys.h"
#include "alloc_page.h"
#include "bitops.h"
#include <asm/page.h>
#include <asm/io.h>
#include <asm/spinlock.h>

static struct spinlock lock;
/* Singly linked list of free pages, threaded through the first word of each page. */
static void *freelist = 0;

bool page_alloc_initialized(void)
{
	return freelist != 0;
}

/* Hand the page-aligned region [mem, mem + size) to the allocator's free list. */
void free_pages(void *mem, unsigned long size)
{
	void *old_freelist;
	void *end;

	assert_msg((unsigned long) mem % PAGE_SIZE == 0,
		   "mem not page aligned: %p", mem);

	assert_msg(size % PAGE_SIZE == 0, "size not page aligned: %#lx", size);

	/*
	 * The region must not wrap around, unless it ends exactly at the top
	 * of the address space.
	 */
	assert_msg(size == 0 || (uintptr_t)mem == -size ||
		   (uintptr_t)mem + size > (uintptr_t)mem,
		   "mem + size overflow: %p + %#lx", mem, size);

	/* Freeing a zero-sized region empties the free list. */
	if (size == 0) {
		freelist = NULL;
		return;
	}

	spin_lock(&lock);
	old_freelist = freelist;
	freelist = mem;
	end = mem + size;
	/*
	 * Link each page to the next one, in ascending address order, and
	 * splice the old free list onto the last page.
	 */
	while (mem + PAGE_SIZE != end) {
		*(void **)mem = (mem + PAGE_SIZE);
		mem += PAGE_SIZE;
	}

	*(void **)mem = old_freelist;
	spin_unlock(&lock);
}
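
/*
 * Worked example of the loop above: after free_pages(base, 3 * PAGE_SIZE) the
 * free list is threaded through the first word of each page, in ascending
 * address order:
 *
 *	freelist -> base -> base + PAGE_SIZE -> base + 2 * PAGE_SIZE -> old list
 */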

void free_pages_by_order(void *mem, unsigned long order)
{
	free_pages(mem, 1ul << (order + PAGE_SHIFT));
}
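
/*
 * For example, with PAGE_SHIFT == 12 (4 KiB pages), free_pages_by_order(p, 4)
 * frees 1ul << (4 + 12) bytes starting at p, i.e. 16 pages (64 KiB).
 */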

/* Allocate and zero a single page, or return NULL if none are free. */
void *alloc_page(void)
{
	void *p;

	spin_lock(&lock);
	/*
	 * Pop the head of the free list; check it under the lock so that a
	 * concurrent allocation cannot empty the list under our feet.
	 */
	p = freelist;
	if (p)
		freelist = *(void **)p;
	spin_unlock(&lock);

	if (p)
		memset(p, 0, PAGE_SIZE);
	return p;
}

/*
 * Allocates (1 << order) physically contiguous and naturally aligned pages.
 * Returns NULL if there's no memory left.
 */
void *alloc_pages(unsigned long order)
{
	/* Generic list traversal. */
	void *prev;
	void *curr = NULL;
	void *next = freelist;

	/* Looking for a run of length (1 << order). */
	unsigned long run = 0;
	const unsigned long n = 1ul << order;
	const unsigned long align_mask = (n << PAGE_SHIFT) - 1;
	void *run_start = NULL;
	void *run_prev = NULL;
	unsigned long run_next_pa = 0;
	unsigned long pa;

	assert(order < sizeof(unsigned long) * 8);

	spin_lock(&lock);
	for (;;) {
		prev = curr;
		curr = next;

		if (!curr) {
			run_start = NULL;
			break;
		}

		next = *((void **) curr);
		pa = virt_to_phys(curr);

		if (run == 0) {
			/* A run may only start at a naturally aligned page. */
			if (!(pa & align_mask)) {
				run_start = curr;
				run_prev = prev;
				run_next_pa = pa + PAGE_SIZE;
				run = 1;
			}
		} else if (pa == run_next_pa) {
			run_next_pa += PAGE_SIZE;
			run += 1;
		} else {
			run = 0;
		}

		if (run == n) {
			/* Full run found: unlink its pages from the free list. */
			if (run_prev)
				*((void **) run_prev) = next;
			else
				freelist = next;
			break;
		}
	}
	spin_unlock(&lock);
	if (run_start)
		memset(run_start, 0, n * PAGE_SIZE);
	return run_start;
}
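
/*
 * A sketch of the guarantee above (illustrative only): with 4 KiB pages,
 * alloc_pages(2) returns either NULL or 4 physically contiguous, zeroed pages
 * whose physical address is a multiple of 4 * PAGE_SIZE. The natural pairing
 * for releasing such a block is:
 *
 *	void *p = alloc_pages(2);
 *	if (p)
 *		free_pages_by_order(p, 2);
 */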

/* Push a single page back onto the free list. */
void free_page(void *page)
{
	spin_lock(&lock);
	*(void **)page = freelist;
	freelist = page;
	spin_unlock(&lock);
}

/*
 * alloc_ops->memalign hook: the size is rounded up to a power-of-two number
 * of pages. alloc_pages() returns naturally aligned blocks, which satisfies
 * any alignment request up to the size of the allocation; the alignment
 * argument is not used beyond that.
 */
static void *page_memalign(size_t alignment, size_t size)
{
	unsigned long n = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long order;

	if (!size)
		return NULL;

	order = get_order(n);

	return alloc_pages(order);
}
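
/*
 * Worked example, assuming the 0-based fls() from lib/bitops.h: a request for
 * 3 pages' worth of bytes gives n = 3 and get_order(3) = 2, so alloc_pages(2)
 * hands back a naturally aligned 4-page block and the tail simply goes unused.
 */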

static void page_free(void *mem, size_t size)
{
	free_pages(mem, size);
}

static struct alloc_ops page_alloc_ops = {
	.memalign = page_memalign,
	.free = page_free,
	.align_min = PAGE_SIZE,
};

void page_alloc_ops_enable(void)
{
	alloc_ops = &page_alloc_ops;
}
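
/*
 * After page_alloc_ops_enable() the generic entry points in lib/alloc.c
 * (malloc() and friends dispatch through alloc_ops) are backed by this page
 * allocator, so every allocation consumes at least one full page. A typical
 * test setup might look like:
 *
 *	page_alloc_ops_enable();
 *	char *buf = malloc(16);		// rounded up to a whole zeroed page
 */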

unsigned int get_order(size_t size)
{
	return is_power_of_2(size) ? fls(size) : fls(size) + 1;
}
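
/*
 * Assuming the 0-based fls() from lib/bitops.h, get_order() above returns the
 * smallest order whose block covers the request, e.g. get_order(1) == 0,
 * get_order(4) == 2 and get_order(5) == 3.
 */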