/*
 * kvm-unit-tests/lib/alloc_phys.c
 *
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include "alloc.h"
#include "asm/spinlock.h"
#include "asm/io.h"

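/*
 * Simple physical memory ("early") allocator: a bump allocator over a
 * single [base, top) range. Allocations only ever advance base; memory
 * is never returned. Each allocation is also recorded in a small log
 * so that phys_alloc_show() can dump what has been handed out.
 */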
#define PHYS_ALLOC_NR_REGIONS	256

struct phys_alloc_region {
	phys_addr_t base;
	phys_addr_t size;
};

/* Allocation log; consulted only by phys_alloc_show(), never for allocating. */
static struct phys_alloc_region regions[PHYS_ALLOC_NR_REGIONS];
static int nr_regions;

/* The lock protects all of the allocator state, including the log above. */
static struct spinlock lock;
/* Free memory is the single range [base, top); align_min is the alignment floor. */
static phys_addr_t base, top, align_min;

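/*
 * Dump the allocator state: the minimum alignment, every logged
 * allocation, and the remaining free range.
 */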
void phys_alloc_show(void)
{
	int i;

	spin_lock(&lock);
	printf("phys_alloc minimum alignment: %#" PRIx64 "\n",
		(u64)align_min);
	for (i = 0; i < nr_regions; ++i)
		printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
			(u64)regions[i].base,
			(u64)(regions[i].base + regions[i].size - 1),
			"USED");
	printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
		(u64)base, (u64)(top - 1), "FREE");
	spin_unlock(&lock);
}

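/*
 * (Re)initialize the allocator to hand out memory from [base_addr,
 * base_addr + size). Note this resets the allocation log and the
 * minimum alignment, so any earlier allocations are forgotten.
 */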
void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
{
	spin_lock(&lock);
	base = base_addr;
	top = base + size;
	align_min = DEFAULT_MINIMUM_ALIGNMENT;
	nr_regions = 0;
	spin_unlock(&lock);
}

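/*
 * Set the minimum alignment applied to every subsequent allocation.
 * The alignment must be a nonzero power of two.
 */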
void phys_alloc_set_minimum_alignment(phys_addr_t align)
{
	/* align must be a nonzero power of two */
	assert(align && !(align & (align - 1)));
	spin_lock(&lock);
	align_min = align;
	spin_unlock(&lock);
}

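/*
 * Carve 'size' bytes, aligned to 'align' (but never less than
 * align_min), off the bottom of the free range. With 'safe' set on a
 * 32-bit build, only memory below 4 GiB is considered, so the returned
 * address fits in an unsigned long. Returns the allocated physical
 * address, or INVALID_PHYS_ADDR when the request cannot be satisfied.
 */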
static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
					   phys_addr_t align, bool safe)
{
	static bool warned = false;
	phys_addr_t addr, size_orig = size;
	u64 top_safe;

	spin_lock(&lock);

	top_safe = top;

	/* On 32-bit builds, 'safe' restricts allocations to the first 4 GiB. */
	if (safe && sizeof(long) == 4)
		top_safe = MIN(top_safe, 1ULL << 32);

	align = MAX(align, align_min);

	/* Account for the padding needed to align base. */
	addr = ALIGN(base, align);
	size += addr - base;

	if ((top_safe - base) < size) {
		printf("phys_alloc: requested=%#" PRIx64
		       " (align=%#" PRIx64 "), "
		       "need=%#" PRIx64 ", but free=%#" PRIx64 ". "
		       "top=%#" PRIx64 ", top_safe=%#" PRIx64 "\n",
		       (u64)size_orig, (u64)align, (u64)size, top_safe - base,
		       (u64)top, top_safe);
		spin_unlock(&lock);
		return INVALID_PHYS_ADDR;
	}

	base += size;

	/* Record the allocation while there is still room in the log. */
	if (nr_regions < PHYS_ALLOC_NR_REGIONS) {
		regions[nr_regions].base = addr;
		regions[nr_regions].size = size_orig;
		++nr_regions;
	} else if (!warned) {
		printf("WARNING: phys_alloc: No free log entries, "
		       "can no longer log allocations...\n");
		warned = true;
	}

	spin_unlock(&lock);

	return addr;
}

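/* As phys_alloc_aligned_safe(), but the allocated memory is zeroed. */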
static phys_addr_t phys_zalloc_aligned_safe(phys_addr_t size,
					    phys_addr_t align, bool safe)
{
	phys_addr_t addr = phys_alloc_aligned_safe(size, align, safe);
	if (addr == INVALID_PHYS_ADDR)
		return addr;

	memset(phys_to_virt(addr), 0, size);
	return addr;
}

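/*
 * Public entry points. None of these pass 'safe', so on 32-bit builds
 * they may return memory at or above 4 GiB; callers that need to touch
 * the memory through a virtual address should use the early_* hooks
 * below instead.
 */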
phys_addr_t phys_alloc_aligned(phys_addr_t size, phys_addr_t align)
{
	return phys_alloc_aligned_safe(size, align, false);
}

phys_addr_t phys_zalloc_aligned(phys_addr_t size, phys_addr_t align)
{
	return phys_zalloc_aligned_safe(size, align, false);
}

phys_addr_t phys_alloc(phys_addr_t size)
{
	return phys_alloc_aligned(size, align_min);
}

phys_addr_t phys_zalloc(phys_addr_t size)
{
	return phys_zalloc_aligned(size, align_min);
}

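/*
 * The early_* functions below implement the alloc_ops hooks on top of
 * the physical allocator. They all pass safe=true, since their callers
 * work with the returned memory through phys_to_virt().
 */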
static void *early_malloc(size_t size)
{
	phys_addr_t addr = phys_alloc_aligned_safe(size, align_min, true);
	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}

static void *early_calloc(size_t nmemb, size_t size)
{
	phys_addr_t addr;

	/* Guard against nmemb * size overflowing size_t. */
	if (size && nmemb > (size_t)-1 / size)
		return NULL;

	addr = phys_zalloc_aligned_safe(nmemb * size, align_min, true);
	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}

static void early_free(void *ptr __unused)
{
	/* The early allocator cannot reclaim memory; freeing is a no-op. */
}

static void *early_memalign(size_t alignment, size_t size)
{
	phys_addr_t addr;

	/* alignment must be a nonzero power of two */
	assert(alignment && !(alignment & (alignment - 1)));

	addr = phys_alloc_aligned_safe(size, alignment, true);
	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}

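/*
 * The initial ops table. The generic allocation wrappers dispatch
 * through the alloc_ops pointer, so later setup code can repoint it at
 * a more capable allocator once one is available.
 */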
static struct alloc_ops early_alloc_ops = {
	.malloc = early_malloc,
	.calloc = early_calloc,
	.free = early_free,
	.memalign = early_memalign,
};

struct alloc_ops *alloc_ops = &early_alloc_ops;
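
/*
 * Example usage (illustrative only; free_ram_base and free_ram_size are
 * placeholders for values taken from the platform's memory map, not
 * symbols defined here):
 *
 *	phys_alloc_init(free_ram_base, free_ram_size);
 *	phys_alloc_set_minimum_alignment(16);
 *
 *	phys_addr_t paddr = phys_zalloc_aligned(4096, 4096);
 *	void *buf = calloc(4, 32);	// reaches early_calloc via alloc_ops,
 *					// assuming the generic wrappers
 *					// dispatch through the pointer above
 */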