xref: /kvm-unit-tests/lib/alloc.c (revision b006d7eb9c64ed1046041c4eb3c4077be11d8a3d)
1 /*
2  * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
3  *
4  * This work is licensed under the terms of the GNU LGPL, version 2.
5  */
6 #include "alloc.h"
7 #include "asm/spinlock.h"
8 #include "asm/io.h"
9 
/* NOTE: classic two-evaluation macros — do not pass expressions with side effects. */
#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

/* Capacity of the allocation log shown by phys_alloc_show(). */
#define PHYS_ALLOC_NR_REGIONS	256

/* One logged allocation: the range [base, base + size). */
struct phys_alloc_region {
	phys_addr_t base;
	phys_addr_t size;
};

static struct phys_alloc_region regions[PHYS_ALLOC_NR_REGIONS];
static int nr_regions;

/* Protects all allocator state below and the regions[] log above. */
static struct spinlock lock;
/* Free memory is [base, top); align_min is the minimum allocation alignment. */
static phys_addr_t base, top, align_min;
25 
26 void phys_alloc_show(void)
27 {
28 	int i;
29 
30 	spin_lock(&lock);
31 	printf("phys_alloc minimum alignment: 0x%" PRIx64 "\n",
32 		(u64)align_min);
33 	for (i = 0; i < nr_regions; ++i)
34 		printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
35 			(u64)regions[i].base,
36 			(u64)(regions[i].base + regions[i].size - 1),
37 			"USED");
38 	printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
39 		(u64)base, (u64)(top - 1), "FREE");
40 	spin_unlock(&lock);
41 }
42 
43 void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
44 {
45 	spin_lock(&lock);
46 	base = base_addr;
47 	top = base + size;
48 	align_min = DEFAULT_MINIMUM_ALIGNMENT;
49 	nr_regions = 0;
50 	spin_unlock(&lock);
51 }
52 
/*
 * Set the minimum alignment applied to all subsequent allocations.
 * @align must be a nonzero power of two (enforced by the assert:
 * a power of two has exactly one bit set).
 */
void phys_alloc_set_minimum_alignment(phys_addr_t align)
{
	assert(align && !(align & (align - 1)));
	spin_lock(&lock);
	align_min = align;
	spin_unlock(&lock);
}
60 
61 static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
62 					   phys_addr_t align, bool safe)
63 {
64 	static bool warned = false;
65 	phys_addr_t addr, size_orig = size;
66 	u64 top_safe;
67 
68 	spin_lock(&lock);
69 
70 	top_safe = top;
71 
72 	if (safe && sizeof(long) == 4)
73 		top_safe = MIN(top_safe, 1ULL << 32);
74 
75 	align = MAX(align, align_min);
76 
77 	addr = ALIGN(base, align);
78 	size += addr - base;
79 
80 	if ((top_safe - base) < size) {
81 		printf("phys_alloc: requested=0x%" PRIx64
82 		       " (align=0x%" PRIx64 "), "
83 		       "need=0x%" PRIx64 ", but free=0x%" PRIx64 ". "
84 		       "top=0x%" PRIx64 ", top_safe=0x%" PRIx64 "\n",
85 		       (u64)size_orig, (u64)align, (u64)size, top_safe - base,
86 		       (u64)top, top_safe);
87 		spin_unlock(&lock);
88 		return INVALID_PHYS_ADDR;
89 	}
90 
91 	base += size;
92 
93 	if (nr_regions < PHYS_ALLOC_NR_REGIONS) {
94 		regions[nr_regions].base = addr;
95 		regions[nr_regions].size = size_orig;
96 		++nr_regions;
97 	} else if (!warned) {
98 		printf("WARNING: phys_alloc: No free log entries, "
99 		       "can no longer log allocations...\n");
100 		warned = true;
101 	}
102 
103 	spin_unlock(&lock);
104 
105 	return addr;
106 }
107 
108 static phys_addr_t phys_zalloc_aligned_safe(phys_addr_t size,
109 					    phys_addr_t align, bool safe)
110 {
111 	phys_addr_t addr = phys_alloc_aligned_safe(size, align, safe);
112 	if (addr == INVALID_PHYS_ADDR)
113 		return addr;
114 
115 	memset(phys_to_virt(addr), 0, size);
116 	return addr;
117 }
118 
119 phys_addr_t phys_alloc_aligned(phys_addr_t size, phys_addr_t align)
120 {
121 	return phys_alloc_aligned_safe(size, align, false);
122 }
123 
124 phys_addr_t phys_zalloc_aligned(phys_addr_t size, phys_addr_t align)
125 {
126 	return phys_zalloc_aligned_safe(size, align, false);
127 }
128 
129 phys_addr_t phys_alloc(phys_addr_t size)
130 {
131 	return phys_alloc_aligned(size, align_min);
132 }
133 
134 phys_addr_t phys_zalloc(phys_addr_t size)
135 {
136 	return phys_zalloc_aligned(size, align_min);
137 }
138 
139 static void *early_malloc(size_t size)
140 {
141 	phys_addr_t addr = phys_alloc_aligned_safe(size, align_min, true);
142 	if (addr == INVALID_PHYS_ADDR)
143 		return NULL;
144 
145 	return phys_to_virt(addr);
146 }
147 
148 static void *early_calloc(size_t nmemb, size_t size)
149 {
150 	phys_addr_t addr = phys_zalloc_aligned_safe(nmemb * size,
151 						    align_min, true);
152 	if (addr == INVALID_PHYS_ADDR)
153 		return NULL;
154 
155 	return phys_to_virt(addr);
156 }
157 
/*
 * free() backend: the early allocator only bumps `base` forward and
 * cannot reclaim memory, so freeing is a deliberate no-op.
 */
static void early_free(void *ptr __unused)
{
}
161 
162 static void *early_memalign(size_t alignment, size_t size)
163 {
164 	phys_addr_t addr;
165 
166 	assert(alignment && !(alignment & (alignment - 1)));
167 
168 	addr = phys_alloc_aligned_safe(size, alignment, true);
169 	if (addr == INVALID_PHYS_ADDR)
170 		return NULL;
171 
172 	return phys_to_virt(addr);
173 }
174 
/* Boot-time implementations of the generic allocation interface. */
static struct alloc_ops early_alloc_ops = {
	.malloc = early_malloc,
	.calloc = early_calloc,
	.free = early_free,
	.memalign = early_memalign,
};

/* The active backend; starts as the early bump allocator above.
 * NOTE(review): presumably swapped for a richer allocator later in
 * setup — confirm against the code that reassigns alloc_ops. */
struct alloc_ops *alloc_ops = &early_alloc_ops;
183