/*
 * kvm-unit-tests lib/alloc_page.c
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 *
 * This is a simple allocator that provides contiguous physical addresses
 * with page granularity.
 */
#include "libcflat.h"
#include "alloc_phys.h"
#include <asm/page.h>
#include <asm/io.h>
#include <asm/spinlock.h>

static struct spinlock lock;

/*
 * The freelist is intrusive: the first pointer-sized word of each free
 * page stores the pointer to the next free page.
 */
static void *freelist = NULL;

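/*
 * Usage sketch (illustrative only): a test first hands the allocator a
 * free physical range, then draws pages from it. The base address below
 * is hypothetical; real callers take it from the platform's memory map.
 *
 *	free_pages(phys_to_virt(0x40000000), 64 * PAGE_SIZE);
 *	void *page = alloc_page();	// one page, or NULL when empty
 *	void *quad = alloc_pages(2);	// 4 contiguous, naturally aligned pages
 *	free_page(page);
 *	free_pages(quad, 4 * PAGE_SIZE);
 */
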
void free_pages(void *mem, unsigned long size)
{
	void *old_freelist;
	void *end;

	assert_msg((unsigned long) mem % PAGE_SIZE == 0,
		   "mem not page aligned: %p", mem);

	assert_msg(size % PAGE_SIZE == 0, "size not page aligned: %#lx", size);

	/* Allow a range that ends exactly at the top of the address space. */
	assert_msg(size == 0 || (uintptr_t)mem == -size ||
		   (uintptr_t)mem + size > (uintptr_t)mem,
		   "mem + size overflow: %p + %#lx", mem, size);

	/* Freeing zero bytes resets the allocator to an empty freelist. */
	if (size == 0) {
		freelist = NULL;
		return;
	}

	spin_lock(&lock);
	old_freelist = freelist;
	freelist = mem;
	end = mem + size;

	/* Chain the pages of [mem, end) together in address order. */
	while (mem + PAGE_SIZE != end) {
		*(void **)mem = (mem + PAGE_SIZE);
		mem += PAGE_SIZE;
	}

	/* Splice the previous freelist onto the tail of the new chain. */
	*(void **)mem = old_freelist;
	spin_unlock(&lock);
}

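/*
 * For example, after free_pages(mem, 3 * PAGE_SIZE) on an empty list the
 * three pages are chained in address order (the step is PAGE_SIZE; 4K
 * below is only illustrative):
 *
 *	freelist -> mem -> mem+4K -> mem+8K -> NULL
 */
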
void *alloc_page(void)
{
	void *p;

	spin_lock(&lock);

	/* Pop the head of the freelist; it must be checked under the lock. */
	p = freelist;
	if (p)
		freelist = *(void **)p;

	spin_unlock(&lock);

	return p;
}

/*
 * Allocates (1 << order) physically contiguous and naturally aligned pages.
 * Returns NULL if there's no memory left.
 */
void *alloc_pages(unsigned long order)
{
	/* Generic list traversal. */
	void *prev;
	void *curr = NULL;
	void *next;

	/* Looking for a run of length (1 << order). */
	unsigned long run = 0;
	const unsigned long n = 1ul << order;
	const unsigned long align_mask = (n << PAGE_SHIFT) - 1;
	void *run_start = NULL;
	void *run_prev = NULL;
	unsigned long run_next_pa = 0;
	unsigned long pa;

	assert(order < sizeof(unsigned long) * 8);

	spin_lock(&lock);
	next = freelist;	/* the list head must be read under the lock */
	for (;;) {
		prev = curr;
		curr = next;

		if (!curr) {
			run_start = NULL;
			break;
		}

		next = *((void **) curr);
		pa = virt_to_phys(curr);

		/* A page that breaks the run may still start a new one. */
		if (run != 0 && pa != run_next_pa)
			run = 0;

		if (run == 0) {
			/* A run can only start on a naturally aligned page. */
			if (!(pa & align_mask)) {
				run_start = curr;
				run_prev = prev;
				run_next_pa = pa + PAGE_SIZE;
				run = 1;
			}
		} else {
			run_next_pa += PAGE_SIZE;
			run += 1;
		}

		if (run == n) {
			/*
			 * The run's pages are consecutive nodes in the list,
			 * so the whole run is unlinked in a single step.
			 */
			if (run_prev)
				*((void **) run_prev) = next;
			else
				freelist = next;
			break;
		}
	}
	spin_unlock(&lock);
	return run_start;
}
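
/*
 * Example of the alignment contract (assuming 4K pages; PAGE_SIZE is set
 * per architecture): alloc_pages(2) returns 1 << 2 = 4 consecutive pages,
 * 16K in total, whose start satisfies (virt_to_phys(p) & 0x3fff) == 0.
 */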

void free_page(void *page)
{
	spin_lock(&lock);

	/* Push the page onto the head of the freelist. */
	*(void **)page = freelist;
	freelist = page;

	spin_unlock(&lock);
}