xref: /kvm-unit-tests/lib/alloc_page.c (revision 71a6a145226927f50d938b0f2befc24363a496bc)
/*
 * This work is licensed under the terms of the GNU LGPL, version 2.
 *
 * This is a simple allocator that provides contiguous physical addresses
 * with page granularity.
 */
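
/*
 * Typical usage (a sketch; "base" is a hypothetical page-aligned region
 * owned by the caller, not something defined in this file):
 *
 *	free_pages(base, 64 * PAGE_SIZE);	// seed the freelist
 *	void *p = alloc_page();			// pop one page off the head
 *	free_page(p);				// push it back
 */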
#include "libcflat.h"
#include "alloc_phys.h"
#include <asm/page.h>
#include <asm/io.h>
#include <asm/spinlock.h>

/*
 * The freelist is an intrusive singly linked list: each free page
 * stores the address of the next free page in its own first word.
 */
static struct spinlock lock;
static void *freelist = NULL;

bool page_alloc_initialized(void)
{
	return freelist != NULL;
}

void free_pages(void *mem, unsigned long size)
{
	void *old_freelist;
	void *end;

	assert_msg((unsigned long) mem % PAGE_SIZE == 0,
		   "mem not page aligned: %p", mem);

	assert_msg(size % PAGE_SIZE == 0, "size not page aligned: %#lx", size);

	/*
	 * mem + size must not wrap around, except when the region ends
	 * exactly at the top of the address space ((uintptr_t)mem == -size).
	 */
	assert_msg(size == 0 || (uintptr_t)mem == -size ||
		   (uintptr_t)mem + size > (uintptr_t)mem,
		   "mem + size overflow: %p + %#lx", mem, size);

	/* Freeing a zero-sized region empties the freelist. */
	if (size == 0) {
		spin_lock(&lock);
		freelist = NULL;
		spin_unlock(&lock);
		return;
	}

	spin_lock(&lock);
	old_freelist = freelist;
	freelist = mem;
	end = mem + size;

	/* Link each page in the region to the one that follows it... */
	while (mem + PAGE_SIZE != end) {
		*(void **)mem = (mem + PAGE_SIZE);
		mem += PAGE_SIZE;
	}

	/* ...and chain the last page to the previous head of the freelist. */
	*(void **)mem = old_freelist;
	spin_unlock(&lock);
}

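/*
 * For example (a sketch, assuming 4K pages, an identity mapping and a
 * hypothetical region at 0x10000): free_pages((void *)0x10000,
 * 3 * PAGE_SIZE) on an empty freelist leaves
 *
 *	freelist -> 0x10000 -> 0x11000 -> 0x12000 -> NULL
 *
 * with each free page's first word holding the address of the next.
 */
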
void *alloc_page(void)
{
	void *p;

	spin_lock(&lock);
	/*
	 * Check for an empty freelist under the lock, so that a
	 * concurrent allocation cannot empty it after the check.
	 */
	p = freelist;
	if (p)
		freelist = *(void **)p;
	spin_unlock(&lock);

	return p;
}

/*
 * Allocates (1 << order) physically contiguous and naturally aligned pages.
 * Returns NULL if no such run of free pages is found.
 */
void *alloc_pages(unsigned long order)
{
	/* Generic list traversal. */
	void *prev;
	void *curr = NULL;
	void *next;

	/* Looking for a run of length (1 << order). */
	unsigned long run = 0;
	const unsigned long n = 1ul << order;
	const unsigned long align_mask = (n << PAGE_SHIFT) - 1;
	void *run_start = NULL;
	void *run_prev = NULL;
	unsigned long run_next_pa = 0;
	unsigned long pa;

	assert(order < sizeof(unsigned long) * 8);

	spin_lock(&lock);
	next = freelist;	/* read the list head under the lock */
	for (;;) {
		prev = curr;
		curr = next;

		/* Reached the end of the list without finding a run. */
		if (!curr) {
			run_start = NULL;
			break;
		}

		next = *((void **) curr);
		pa = virt_to_phys(curr);

		if (run && pa == run_next_pa) {
			/* The page extends the current run. */
			run_next_pa += PAGE_SIZE;
			run += 1;
		} else if (!(pa & align_mask)) {
			/*
			 * The page is naturally aligned for this order:
			 * (re)start the run here.
			 */
			run_start = curr;
			run_prev = prev;
			run_next_pa = pa + PAGE_SIZE;
			run = 1;
		} else {
			/* The run is broken and curr cannot start a new one. */
			run = 0;
		}

		if (run == n) {
			/* Unlink the whole run from the freelist. */
			if (run_prev)
				*((void **) run_prev) = next;
			else
				freelist = next;
			break;
		}
	}
	spin_unlock(&lock);
	return run_start;
}
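
/*
 * For example (a sketch, assuming 4K pages): alloc_pages(2) hands back
 * four physically contiguous pages (16K) starting at a 16K-aligned
 * physical address, or NULL when no such run is on the freelist:
 *
 *	void *q = alloc_pages(2);
 *	assert(q == NULL || (virt_to_phys(q) & ((4 * PAGE_SIZE) - 1)) == 0);
 */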

void free_page(void *page)
{
	spin_lock(&lock);
	/* Push the page back on the head of the freelist. */
	*(void **)page = freelist;
	freelist = page;
	spin_unlock(&lock);
}
137