xref: /kvm-unit-tests/lib/alloc_page.c (revision be60de6f6f8d3cd83d246af557229e5463febb2c)
/*
 * This work is licensed under the terms of the GNU LGPL, version 2.
 *
 * This is a simple allocator that provides contiguous physical addresses
 * with page granularity.
 */
#include "libcflat.h"
#include "alloc.h"
#include "alloc_phys.h"
#include "bitops.h"
#include <asm/page.h>
#include <asm/io.h>
#include <asm/spinlock.h>

static struct spinlock lock;
/*
 * Singly linked list of free pages, threaded through the pages themselves:
 * the first word of each free page holds the pointer to the next one.
 */
static void *freelist = NULL;

bool page_alloc_initialized(void)
{
	return freelist != NULL;
}

void free_pages(void *mem, unsigned long size)
{
	void *old_freelist;
	void *end;

	assert_msg((unsigned long) mem % PAGE_SIZE == 0,
		   "mem not page aligned: %p", mem);

	assert_msg(size % PAGE_SIZE == 0, "size not page aligned: %#lx", size);

	/* A region ending exactly at the top of the address space is allowed. */
	assert_msg(size == 0 || (uintptr_t)mem == -size ||
		   (uintptr_t)mem + size > (uintptr_t)mem,
		   "mem + size overflow: %p + %#lx", mem, size);

	/* Freeing a zero-sized region clears the freelist. */
	if (size == 0) {
		freelist = NULL;
		return;
	}

	spin_lock(&lock);
	old_freelist = freelist;
	freelist = mem;
	end = mem + size;

	/*
	 * Thread the region into a chain of free pages, each page pointing
	 * to the next one, then splice the previous freelist onto the end.
	 */
	while (mem + PAGE_SIZE != end) {
		*(void **)mem = (mem + PAGE_SIZE);
		mem += PAGE_SIZE;
	}

	*(void **)mem = old_freelist;
	spin_unlock(&lock);
}

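/*
 * Illustrative sketch, not part of this file: environment setup code that
 * owns a page-aligned scratch region can donate it to this allocator with
 * free_pages() and then hand out individual pages.  `base` and `nr_pages`
 * are placeholders for whatever region the caller actually reserves.
 *
 *	void *base = ...;
 *	unsigned long nr_pages = ...;
 *
 *	free_pages(base, nr_pages * PAGE_SIZE);
 *	assert(page_alloc_initialized());
 *
 *	void *p = alloc_page();
 *	...
 *	free_page(p);
 */
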
void *alloc_page(void)
{
	void *p;

	spin_lock(&lock);
	/* Pop the first page off the freelist, if there is one. */
	p = freelist;
	if (p)
		freelist = *(void **)freelist;
	spin_unlock(&lock);

	return p;
}

/*
 * Allocates (1 << order) physically contiguous and naturally aligned pages.
 * Returns NULL if there's no memory left.
 */
void *alloc_pages(unsigned long order)
{
	/* Generic list traversal. */
	void *prev;
	void *curr = NULL;
	void *next;

	/* Looking for a run of length (1 << order). */
	unsigned long run = 0;
	const unsigned long n = 1ul << order;
	const unsigned long align_mask = (n << PAGE_SHIFT) - 1;
	void *run_start = NULL;
	void *run_prev = NULL;
	unsigned long run_next_pa = 0;
	unsigned long pa;

	assert(order < sizeof(unsigned long) * 8);

	spin_lock(&lock);
	next = freelist;
	for (;;) {
		prev = curr;
		curr = next;

		if (!curr) {
			/* Reached the end of the list without finding a run. */
			run_start = NULL;
			break;
		}

		next = *((void **) curr);
		pa = virt_to_phys(curr);

		if (run == 0) {
			/* A run can only start at a naturally aligned page. */
			if (!(pa & align_mask)) {
				run_start = curr;
				run_prev = prev;
				run_next_pa = pa + PAGE_SIZE;
				run = 1;
			}
		} else if (pa == run_next_pa) {
			run_next_pa += PAGE_SIZE;
			run += 1;
		} else {
			run = 0;
		}

		if (run == n) {
			/*
			 * Unlink the whole run: its pages are consecutive
			 * list entries, so one pointer update suffices.
			 */
			if (run_prev)
				*((void **) run_prev) = next;
			else
				freelist = next;
			break;
		}
	}
	spin_unlock(&lock);
	return run_start;
}
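
/*
 * Illustrative sketch, not part of this file: a caller that needs a
 * physically contiguous, naturally aligned buffer requests it by order.
 * For instance, order 2 yields four contiguous pages (16K with 4K pages),
 * which might back a hypothetical DMA buffer:
 *
 *	void *buf = alloc_pages(2);
 *	if (buf) {
 *		... use the 4 * PAGE_SIZE bytes starting at buf ...
 *		free_pages(buf, 4 * PAGE_SIZE);
 *	}
 */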

void free_page(void *page)
{
	/* Push the page back onto the head of the freelist. */
	spin_lock(&lock);
	*(void **)page = freelist;
	freelist = page;
	spin_unlock(&lock);
}

static void *page_memalign(size_t alignment, size_t size)
{
	unsigned long n = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long order;

	if (!size)
		return NULL;

	/*
	 * Round the request up to a power-of-two number of pages; the block
	 * returned by alloc_pages() is aligned to its own size, so any
	 * power-of-two alignment up to the block size is satisfied without
	 * consulting the alignment argument.
	 */
	order = is_power_of_2(n) ? fls(n) : fls(n) + 1;

	return alloc_pages(order);
}

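/*
 * Worked example for the order computation above, assuming fls() returns
 * the zero-based index of the most significant set bit: a request for three
 * pages gives n = 3, not a power of two, so order = fls(3) + 1 = 2 and
 * alloc_pages() returns a four-page block; a request for exactly four pages
 * gives n = 4 and order = fls(4) = 2, i.e. exactly four pages.  Requests are
 * only ever rounded up, never down.
 */
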
static void page_free(void *mem, size_t size)
{
	free_pages(mem, size);
}

static struct alloc_ops page_alloc_ops = {
	.memalign = page_memalign,
	.free = page_free,
	.align_min = PAGE_SIZE,
};

void page_alloc_ops_enable(void)
{
	alloc_ops = &page_alloc_ops;
}
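
/*
 * Illustrative sketch, not part of this file: once the freelist has been
 * seeded with free_pages(), a single call routes the generic alloc.h
 * interfaces (malloc(), memalign()) through this page allocator.  `base`
 * and `nr_pages` are placeholders for the donated region.
 *
 *	free_pages(base, nr_pages * PAGE_SIZE);
 *	page_alloc_ops_enable();
 *	void *p = memalign(PAGE_SIZE, 2 * PAGE_SIZE);
 */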