xref: /kvm-unit-tests/lib/alloc.c (revision 372e3528a7881eea82805fa7f3e206be9db6ed7e)
1 /*
2  * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
3  *
4  * This work is licensed under the terms of the GNU LGPL, version 2.
5  */
6 #include "alloc.h"
7 #include "asm/spinlock.h"
8 #include "asm/io.h"
9 
/*
 * NOTE(review): classic double-evaluation macros — safe here because all
 * call sites in this file pass side-effect-free arguments.
 */
#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

/* Capacity of the allocation log consumed by phys_alloc_show(). */
#define PHYS_ALLOC_NR_REGIONS	256

/* One logged allocation: the physical range [base, base + size). */
struct phys_alloc_region {
	phys_addr_t base;
	phys_addr_t size;
};

/* Log of past allocations; used only for phys_alloc_show() reporting. */
static struct phys_alloc_region regions[PHYS_ALLOC_NR_REGIONS];
static int nr_regions;

/* Protects base, top, align_min, regions and nr_regions. */
static struct spinlock lock;
/* Free memory is [base, top); align_min is the minimum granted alignment. */
static phys_addr_t base, top, align_min;
25 
26 void phys_alloc_show(void)
27 {
28 	int i;
29 
30 	spin_lock(&lock);
31 	printf("phys_alloc minimum alignment: 0x%llx\n", align_min);
32 	for (i = 0; i < nr_regions; ++i)
33 		printf("%016llx-%016llx [%s]\n",
34 			regions[i].base,
35 			regions[i].base + regions[i].size - 1,
36 			"USED");
37 	printf("%016llx-%016llx [%s]\n", base, top - 1, "FREE");
38 	spin_unlock(&lock);
39 }
40 
41 void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
42 {
43 	spin_lock(&lock);
44 	base = base_addr;
45 	top = base + size;
46 	align_min = DEFAULT_MINIMUM_ALIGNMENT;
47 	nr_regions = 0;
48 	spin_unlock(&lock);
49 }
50 
51 void phys_alloc_set_minimum_alignment(phys_addr_t align)
52 {
53 	assert(align && !(align & (align - 1)));
54 	spin_lock(&lock);
55 	align_min = align;
56 	spin_unlock(&lock);
57 }
58 
59 static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
60 					   phys_addr_t align, bool safe)
61 {
62 	static bool warned = false;
63 	phys_addr_t addr, size_orig = size;
64 	u64 top_safe = top;
65 
66 	if (safe && sizeof(long) == 4)
67 		top_safe = MIN(top, 1ULL << 32);
68 
69 	align = MAX(align, align_min);
70 
71 	spin_lock(&lock);
72 
73 	addr = ALIGN(base, align);
74 	size += addr - base;
75 
76 	if ((top_safe - base) < size) {
77 		printf("phys_alloc: requested=0x%llx (align=0x%llx), "
78 		       "need=0x%llx, but free=0x%llx. "
79 		       "top=0x%llx, top_safe=0x%llx\n",
80 		       size_orig, align, size, top_safe - base,
81 		       top, top_safe);
82 		spin_unlock(&lock);
83 		return INVALID_PHYS_ADDR;
84 	}
85 
86 	base += size;
87 
88 	if (nr_regions < PHYS_ALLOC_NR_REGIONS) {
89 		regions[nr_regions].base = addr;
90 		regions[nr_regions].size = size_orig;
91 		++nr_regions;
92 	} else if (!warned) {
93 		printf("WARNING: phys_alloc: No free log entries, "
94 		       "can no longer log allocations...\n");
95 		warned = true;
96 	}
97 
98 	spin_unlock(&lock);
99 
100 	return addr;
101 }
102 
103 static phys_addr_t phys_zalloc_aligned_safe(phys_addr_t size,
104 					    phys_addr_t align, bool safe)
105 {
106 	phys_addr_t addr = phys_alloc_aligned_safe(size, align, safe);
107 	if (addr == INVALID_PHYS_ADDR)
108 		return addr;
109 
110 	memset(phys_to_virt(addr), 0, size);
111 	return addr;
112 }
113 
114 phys_addr_t phys_alloc_aligned(phys_addr_t size, phys_addr_t align)
115 {
116 	return phys_alloc_aligned_safe(size, align, false);
117 }
118 
119 phys_addr_t phys_zalloc_aligned(phys_addr_t size, phys_addr_t align)
120 {
121 	return phys_zalloc_aligned_safe(size, align, false);
122 }
123 
124 phys_addr_t phys_alloc(phys_addr_t size)
125 {
126 	return phys_alloc_aligned(size, align_min);
127 }
128 
129 phys_addr_t phys_zalloc(phys_addr_t size)
130 {
131 	return phys_zalloc_aligned(size, align_min);
132 }
133 
134 static void *early_malloc(size_t size)
135 {
136 	phys_addr_t addr = phys_alloc_aligned_safe(size, align_min, true);
137 	if (addr == INVALID_PHYS_ADDR)
138 		return NULL;
139 
140 	return phys_to_virt(addr);
141 }
142 
143 static void *early_calloc(size_t nmemb, size_t size)
144 {
145 	phys_addr_t addr = phys_zalloc_aligned_safe(nmemb * size,
146 						    align_min, true);
147 	if (addr == INVALID_PHYS_ADDR)
148 		return NULL;
149 
150 	return phys_to_virt(addr);
151 }
152 
/*
 * The early allocator only bumps 'base' forward and never reclaims
 * memory, so free is deliberately a no-op.
 */
static void early_free(void *ptr __unused)
{
}
156 
157 static void *early_memalign(size_t alignment, size_t size)
158 {
159 	phys_addr_t addr;
160 
161 	assert(alignment && !(alignment & (alignment - 1)));
162 
163 	addr = phys_alloc_aligned_safe(size, alignment, true);
164 	if (addr == INVALID_PHYS_ADDR)
165 		return NULL;
166 
167 	return phys_to_virt(addr);
168 }
169 
/* Dispatch table wiring the early (bump) allocator into the alloc API. */
static struct alloc_ops early_alloc_ops = {
	.malloc = early_malloc,
	.calloc = early_calloc,
	.free = early_free,
	.memalign = early_memalign,
};

/*
 * Active allocator backend; starts as the early allocator and may be
 * repointed later (e.g. once a page allocator is available).
 */
struct alloc_ops *alloc_ops = &early_alloc_ops;
178