xref: /kvm-unit-tests/lib/alloc_page.c (revision bd5f4c8f7f9d16e9574ce77db681ed240b76e82a)
/*
 * This work is licensed under the terms of the GNU LGPL, version 2.
 *
 * This is a simple allocator that provides contiguous physical addresses
 * with page granularity.
 */
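/*
 * Typical usage (an illustrative sketch: the base and size values below are
 * hypothetical, and in practice the architecture setup code decides which
 * memory region gets handed to this allocator):
 *
 *	free_pages(phys_to_virt(base), size);	// seed the free list
 *	page_alloc_ops_enable();		// let malloc() and friends use it
 *
 *	void *p = alloc_page();			// one zeroed page
 *	void *q = alloc_pages(2);		// (1 << 2) contiguous, aligned pages
 *
 *	free_page(p);
 *	free_pages(q, 4 * PAGE_SIZE);
 */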
#include "libcflat.h"
#include "alloc.h"
#include "alloc_phys.h"
#include "alloc_page.h"
#include "bitops.h"
#include <asm/page.h>
#include <asm/io.h>
#include <asm/spinlock.h>

static struct spinlock lock;
static void *freelist = 0;

bool page_alloc_initialized(void)
{
	return freelist != 0;
}

void free_pages(void *mem, unsigned long size)
{
	void *old_freelist;
	void *end;

	assert_msg((unsigned long) mem % PAGE_SIZE == 0,
		   "mem not page aligned: %p", mem);

	assert_msg(size % PAGE_SIZE == 0, "size not page aligned: %#lx", size);

	assert_msg(size == 0 || (uintptr_t)mem == -size ||
		   (uintptr_t)mem + size > (uintptr_t)mem,
		   "mem + size overflow: %p + %#lx", mem, size);

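	/* A size of zero resets the allocator: drop the current free list. */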
	if (size == 0) {
		freelist = NULL;
		return;
	}

	spin_lock(&lock);
	old_freelist = freelist;
	freelist = mem;
	end = mem + size;
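	/*
	 * Chain the pages together: the first word of each page points at the
	 * next page, and the last page then points at the old free list head.
	 */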
	while (mem + PAGE_SIZE != end) {
		*(void **)mem = (mem + PAGE_SIZE);
		mem += PAGE_SIZE;
	}

	*(void **)mem = old_freelist;
	spin_unlock(&lock);
}

void *alloc_page(void)
{
	void *p;

	if (!freelist)
		return 0;

	spin_lock(&lock);
	p = freelist;
	/* Re-check under the lock: another CPU may have emptied the list. */
	if (p)
		freelist = *(void **)p;
	spin_unlock(&lock);

	if (p)
		memset(p, 0, PAGE_SIZE);
	return p;
}

/*
 * Allocates (1 << order) physically contiguous and naturally aligned pages.
 * Returns NULL if there's no memory left.
 */
void *alloc_pages(unsigned long order)
{
	/* Generic list traversal. */
	void *prev;
	void *curr = NULL;
	void *next = freelist;

	/* Looking for a run of length (1 << order). */
	unsigned long run = 0;
	const unsigned long n = 1ul << order;
	const unsigned long align_mask = (n << PAGE_SHIFT) - 1;
	void *run_start = NULL;
	void *run_prev = NULL;
	unsigned long run_next_pa = 0;
	unsigned long pa;

	assert(order < sizeof(unsigned long) * 8);

	spin_lock(&lock);
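	/*
	 * Scan the free list for n physically consecutive pages whose first
	 * page is naturally aligned (physical address a multiple of
	 * n * PAGE_SIZE).  free_pages() links pages in ascending address
	 * order, so consecutive list entries are usually contiguous as well.
	 */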
	for (;;) {
		prev = curr;
		curr = next;

		if (!curr) {
			run_start = NULL;
			break;
		}

		next = *((void **) curr);
		pa = virt_to_phys(curr);

		if (run == 0) {
			if (!(pa & align_mask)) {
				run_start = curr;
				run_prev = prev;
				run_next_pa = pa + PAGE_SIZE;
				run = 1;
			}
		} else if (pa == run_next_pa) {
			run_next_pa += PAGE_SIZE;
			run += 1;
		} else {
			run = 0;
		}

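		/*
		 * Found a complete run: unlink all n pages at once.  run_prev
		 * is the node that pointed at the start of the run, or NULL
		 * if the run began at the head of the list.
		 */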
		if (run == n) {
			if (run_prev)
				*((void **) run_prev) = next;
			else
				freelist = next;
			break;
		}
	}
	spin_unlock(&lock);
	if (run_start)
		memset(run_start, 0, n * PAGE_SIZE);
	return run_start;
}

void free_page(void *page)
{
	spin_lock(&lock);
	*(void **)page = freelist;
	freelist = page;
	spin_unlock(&lock);
}

static void *page_memalign(size_t alignment, size_t size)
{
	unsigned long n = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long order;

	if (!size)
		return NULL;

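	/*
	 * Round the page count up to a power of two; alloc_pages() only hands
	 * out naturally aligned power-of-two runs.  The alignment argument is
	 * not used directly: natural alignment already covers any alignment
	 * up to the rounded-up allocation size.
	 */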
	order = is_power_of_2(n) ? fls(n) : fls(n) + 1;

	return alloc_pages(order);
}

static void page_free(void *mem, size_t size)
{
	free_pages(mem, size);
}

static struct alloc_ops page_alloc_ops = {
	.memalign = page_memalign,
	.free = page_free,
	.align_min = PAGE_SIZE,
};

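/*
 * Make the generic alloc_ops used by malloc() and friends point at this
 * page allocator.
 */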
void page_alloc_ops_enable(void)
{
	alloc_ops = &page_alloc_ops;
}