/*
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include "alloc.h"
#include "asm/spinlock.h"
#include "asm/io.h"

#define ALIGN_UP_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define ALIGN_UP(x, a)		ALIGN_UP_MASK(x, (typeof(x))(a) - 1)
#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

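/*
 * Illustrative sanity check of the alignment arithmetic (values are
 * examples only): ALIGN_UP rounds up to the next multiple of a
 * power-of-two alignment and leaves already-aligned values untouched.
 *
 *	ALIGN_UP(0x1001, 0x1000) == 0x2000
 *	ALIGN_UP(0x1000, 0x1000) == 0x1000
 *	ALIGN_UP(0x0,    0x1000) == 0x0
 */
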
#define PHYS_ALLOC_NR_REGIONS	256

struct phys_alloc_region {
	phys_addr_t base;
	phys_addr_t size;
};

/* Log of allocations handed out, consumed only by phys_alloc_show(). */
static struct phys_alloc_region regions[PHYS_ALLOC_NR_REGIONS];
static int nr_regions;

static struct spinlock lock;
/* [base, top) is the remaining free range; allocations only bump base. */
static phys_addr_t base, top, align_min;

void phys_alloc_show(void)
{
	int i;

	spin_lock(&lock);
	printf("phys_alloc minimum alignment: 0x%llx\n", align_min);
	for (i = 0; i < nr_regions; ++i)
		printf("%016llx-%016llx [%s]\n",
			regions[i].base,
			regions[i].base + regions[i].size - 1,
			"USED");
	printf("%016llx-%016llx [%s]\n", base, top - 1, "FREE");
	spin_unlock(&lock);
}

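/*
 * Example output of phys_alloc_show(), with illustrative values only
 * (one USED line per logged allocation, then the free range):
 *
 *	phys_alloc minimum alignment: 0x20
 *	0000000040000000-000000004000ffff [USED]
 *	0000000040010000-000000007fffffff [FREE]
 */
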
void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
{
	spin_lock(&lock);
	base = base_addr;
	top = base + size;
	align_min = DEFAULT_MINIMUM_ALIGNMENT;
	spin_unlock(&lock);
}

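/*
 * Typical bring-up sequence, as a sketch; the addresses and sizes are
 * hypothetical and would normally come from the platform's memory map:
 *
 *	phys_alloc_init(0x40000000, 0x40000000);
 *	phys_alloc_set_minimum_alignment(32);
 *	paddr = phys_alloc_aligned(4096, 4096);
 */
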
void phys_alloc_set_minimum_alignment(phys_addr_t align)
{
	assert(align && !(align & (align - 1)));
	spin_lock(&lock);
	align_min = align;
	spin_unlock(&lock);
}

static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
					   phys_addr_t align, bool safe)
{
	phys_addr_t addr, size_orig = size;
	u64 top_safe;

	spin_lock(&lock);

	/* top and align_min are shared state: read them under the lock. */
	top_safe = top;

	if (safe && sizeof(long) == 4)
		top_safe = MIN(top_safe, 1ULL << 32);

	align = MAX(align, align_min);

	/* Grow the request to account for the alignment padding. */
	addr = ALIGN_UP(base, align);
	size += addr - base;

	/* base may already sit above the 32-bit safe limit. */
	if (base > top_safe || (top_safe - base) < size) {
		printf("%s: requested=0x%llx (align=0x%llx), "
		       "need=0x%llx, but free=0x%llx. "
		       "top=0x%llx, top_safe=0x%llx\n", __func__,
		       size_orig, align, size, top_safe - base,
		       top, top_safe);
		spin_unlock(&lock);
		return INVALID_PHYS_ADDR;
	}

	base += size;

	if (nr_regions < PHYS_ALLOC_NR_REGIONS) {
		regions[nr_regions].base = addr;
		regions[nr_regions].size = size_orig;
		++nr_regions;
	} else {
		printf("%s: WARNING: no free log entries, "
		       "can't log allocation...\n", __func__);
	}

	spin_unlock(&lock);

	return addr;
}

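/*
 * Worked example of the padding accounting above (illustrative values):
 * with base = 0x1004 and a request of size 0x100 aligned to 0x100, the
 * returned address is ALIGN_UP(0x1004, 0x100) = 0x1100, the request
 * grows by the 0xfc padding bytes to 0x1fc, and base advances to
 * 0x1200, so the padding is never handed out again.
 */
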
static phys_addr_t phys_zalloc_aligned_safe(phys_addr_t size,
					    phys_addr_t align, bool safe)
{
	phys_addr_t addr = phys_alloc_aligned_safe(size, align, safe);
	if (addr == INVALID_PHYS_ADDR)
		return addr;

	memset(phys_to_virt(addr), 0, size);
	return addr;
}

phys_addr_t phys_alloc_aligned(phys_addr_t size, phys_addr_t align)
{
	return phys_alloc_aligned_safe(size, align, false);
}

phys_addr_t phys_zalloc_aligned(phys_addr_t size, phys_addr_t align)
{
	return phys_zalloc_aligned_safe(size, align, false);
}

phys_addr_t phys_alloc(phys_addr_t size)
{
	return phys_alloc_aligned(size, align_min);
}

phys_addr_t phys_zalloc(phys_addr_t size)
{
	return phys_zalloc_aligned(size, align_min);
}

static void *early_malloc(size_t size)
{
	phys_addr_t addr = phys_alloc_aligned_safe(size, align_min, true);
	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}

static void *early_calloc(size_t nmemb, size_t size)
{
	phys_addr_t addr;

	/* Guard against nmemb * size overflowing size_t. */
	assert(size == 0 || nmemb <= (size_t)-1 / size);

	addr = phys_zalloc_aligned_safe(nmemb * size, align_min, true);
	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}

static void early_free(void *ptr __unused)
{
	/* The bump allocator never reclaims memory, so free is a no-op. */
}

static void *early_memalign(size_t alignment, size_t size)
{
	phys_addr_t addr;

	assert(alignment && !(alignment & (alignment - 1)));

	addr = phys_alloc_aligned_safe(size, alignment, true);
	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}

static struct alloc_ops early_alloc_ops = {
	.malloc = early_malloc,
	.calloc = early_calloc,
	.free = early_free,
	.memalign = early_memalign,
};

struct alloc_ops *alloc_ops = &early_alloc_ops;
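
/*
 * Sketch of how the dispatch table is consumed, assuming the usual
 * wrapper pattern in alloc.h (the wrapper shown here is illustrative):
 *
 *	void *malloc(size_t size)
 *	{
 *		return alloc_ops->malloc(size);
 *	}
 *
 * A later-phase allocator (e.g. one backed by a page allocator) can
 * take over simply by repointing alloc_ops at its own struct alloc_ops.
 */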
177