xref: /kvm-unit-tests/lib/alloc_page.c (revision 2352e986e4599bc4842c225762c78fa49f18648d)
/*
 * This work is licensed under the terms of the GNU LGPL, version 2.
 *
 * This is a simple allocator that provides contiguous physical addresses
 * with page granularity.
 */
#include "libcflat.h"
#include "alloc.h"
#include "alloc_phys.h"
#include "alloc_page.h"
#include "bitops.h"
#include <asm/page.h>
#include <asm/io.h>
#include <asm/spinlock.h>

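/*
 * Allocator state: a lock plus a singly linked free list of pages, chained
 * through the first word of each free page.
 */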
static struct spinlock lock;
static void *freelist = NULL;

bool page_alloc_initialized(void)
{
	return freelist != NULL;
}

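/*
 * Adds the pages in [mem, mem + size) to the free list, one page at a time.
 * A size of zero resets the free list instead.
 */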
void free_pages(void *mem, unsigned long size)
{
	void *old_freelist;
	void *end;

	assert_msg((unsigned long) mem % PAGE_SIZE == 0,
		   "mem not page aligned: %p", mem);

	assert_msg(size % PAGE_SIZE == 0, "size not page aligned: %#lx", size);

	assert_msg(size == 0 || (uintptr_t)mem == -size ||
		   (uintptr_t)mem + size > (uintptr_t)mem,
		   "mem + size overflow: %p + %#lx", mem, size);

	if (size == 0) {
		freelist = NULL;
		return;
	}

	spin_lock(&lock);
	old_freelist = freelist;
	freelist = mem;
	end = mem + size;
	/* Link each page in the region to the one after it... */
	while (mem + PAGE_SIZE != end) {
		*(void **)mem = (mem + PAGE_SIZE);
		mem += PAGE_SIZE;
	}

	/* ...and the last page to the old head of the free list. */
	*(void **)mem = old_freelist;
	spin_unlock(&lock);
}

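/*
 * Pops one page off the free list.  Returns NULL if the list is empty.
 */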
void *alloc_page(void)
{
	void *p;

	spin_lock(&lock);
	/* Check and update the list head under the lock to avoid racing. */
	p = freelist;
	if (p)
		freelist = *(void **)p;
	spin_unlock(&lock);

	return p;
}

/*
 * Allocates (1 << order) physically contiguous and naturally aligned pages.
 * Returns NULL if there's no memory left.
 */
void *alloc_pages(unsigned long order)
{
	/* Generic list traversal. */
	void *prev;
	void *curr = NULL;
	void *next;

	/* Looking for a run of length (1 << order). */
	unsigned long run = 0;
	const unsigned long n = 1ul << order;
	const unsigned long align_mask = (n << PAGE_SHIFT) - 1;
	void *run_start = NULL;
	void *run_prev = NULL;
	unsigned long run_next_pa = 0;
	unsigned long pa;

	assert(order < sizeof(unsigned long) * 8);

	spin_lock(&lock);
	next = freelist;
	for (;;) {
		prev = curr;
		curr = next;

		if (!curr) {
			run_start = NULL;
			break;
		}

		next = *((void **) curr);
		pa = virt_to_phys(curr);

		if (run == 0) {
			/* A run can only start at a naturally aligned page. */
			if (!(pa & align_mask)) {
				run_start = curr;
				run_prev = prev;
				run_next_pa = pa + PAGE_SIZE;
				run = 1;
			}
		} else if (pa == run_next_pa) {
			run_next_pa += PAGE_SIZE;
			run += 1;
		} else {
			/* The run was broken; look for a new one. */
			run = 0;
		}

		if (run == n) {
			/* Unlink the whole run from the free list. */
			if (run_prev)
				*((void **) run_prev) = next;
			else
				freelist = next;
			break;
		}
	}
	spin_unlock(&lock);
	return run_start;
}
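/*
 * Illustrative sketch only (the variable name and sizes below are made up
 * for the example): grab two contiguous, naturally aligned pages and hand
 * them back when done.
 *
 *	void *buf = alloc_pages(1);	// 2 pages, aligned to 2 * PAGE_SIZE
 *
 *	if (buf) {
 *		... use the pages ...
 *		free_pages(buf, 2 * PAGE_SIZE);
 *	}
 */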
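/*
 * Returns a single page to the head of the free list.
 */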
void free_page(void *page)
{
	spin_lock(&lock);
	*(void **)page = freelist;
	freelist = page;
	spin_unlock(&lock);
}

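/*
 * alloc_ops->memalign hook: rounds size up to whole pages and allocates the
 * smallest power-of-two number of pages that covers it.  The alignment
 * argument is not used directly; alloc_pages() returns naturally aligned
 * blocks, so any alignment up to the allocated size is met implicitly.
 */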
static void *page_memalign(size_t alignment, size_t size)
{
	unsigned long n = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long order;

	if (!size)
		return NULL;

	/* Smallest order such that (1ul << order) >= n. */
	order = is_power_of_2(n) ? fls(n) : fls(n) + 1;

	return alloc_pages(order);
}

static void page_free(void *mem, size_t size)
{
	free_pages(mem, size);
}

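/* Glue between the generic alloc_ops interface and the page allocator. */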
static struct alloc_ops page_alloc_ops = {
	.memalign = page_memalign,
	.free = page_free,
	.align_min = PAGE_SIZE,
};

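/*
 * Points the library-wide alloc_ops at this page allocator.  Allocations
 * return NULL until free_pages() has seeded the free list.
 */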
void page_alloc_ops_enable(void)
{
	alloc_ops = &page_alloc_ops;
}