xref: /kvm-unit-tests/lib/alloc_phys.c (revision 2352e986e4599bc4842c225762c78fa49f18648d)
/*
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 *
 * This is a simple allocator that provides contiguous physical addresses
 * with byte granularity.
 */
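/*
 * A minimal usage sketch (illustrative only; the base/size values below
 * are made up, and in practice each architecture's setup code does the
 * initialization):
 *
 *	phys_alloc_init(0x40000000, 0x10000000);  // manage 256 MiB at 1 GiB
 *	void *p = memalign(4096, 8192);           // routed via alloc_ops
 *
 *	phys_addr_t unused_base, unused_top;
 *	phys_alloc_get_unused(&unused_base, &unused_top); // hand off the rest
 */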
#include "alloc.h"
#include "asm/spinlock.h"
#include "asm/io.h"
#include "alloc_phys.h"

#define PHYS_ALLOC_NR_REGIONS	256

#define DEFAULT_MINIMUM_ALIGNMENT	32

struct phys_alloc_region {
	phys_addr_t base;
	phys_addr_t size;
};

static struct phys_alloc_region regions[PHYS_ALLOC_NR_REGIONS];
static int nr_regions;

static struct spinlock lock;
static phys_addr_t base, top;

static void *early_memalign(size_t alignment, size_t size);
static struct alloc_ops early_alloc_ops = {
	.memalign = early_memalign,
	.align_min = DEFAULT_MINIMUM_ALIGNMENT
};

/*
 * Start out with the early allocator; it may later be replaced,
 * e.g. once a page allocator has been initialized.
 */
struct alloc_ops *alloc_ops = &early_alloc_ops;

/* Dump the logged allocations and the remaining free range. */
void phys_alloc_show(void)
{
	int i;

	spin_lock(&lock);
	printf("phys_alloc minimum alignment: %#" PRIx64 "\n",
		(u64)early_alloc_ops.align_min);
	for (i = 0; i < nr_regions; ++i)
		printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
			(u64)regions[i].base,
			(u64)(regions[i].base + regions[i].size - 1),
			"USED");
	printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
		(u64)base, (u64)(top - 1), "FREE");
	spin_unlock(&lock);
}
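/*
 * Example output of phys_alloc_show() with the default minimum alignment,
 * after a single 0x1000-byte allocation (addresses are hypothetical):
 *
 *	phys_alloc minimum alignment: 0x20
 *	0000000040000000-0000000040000fff [USED]
 *	0000000040001000-000000004fffffff [FREE]
 */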

/*
 * Initialize the allocator with the physical range [base_addr,
 * base_addr + size) and empty the allocation log.
 */
void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
{
	spin_lock(&lock);
	base = base_addr;
	top = base + size;
	nr_regions = 0;
	spin_unlock(&lock);
}
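/*
 * For example (names hypothetical), to place the allocator over the free
 * memory discovered at boot:
 *
 *	phys_alloc_init(freemem_start, freemem_end - freemem_start);
 *
 * There is only one global free region; calling phys_alloc_init() again
 * simply replaces it and discards the log.
 */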

/* Set the minimum allocation alignment; it must be a power of two. */
void phys_alloc_set_minimum_alignment(phys_addr_t align)
{
	assert(align && !(align & (align - 1)));
	spin_lock(&lock);
	early_alloc_ops.align_min = align;
	spin_unlock(&lock);
}
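/*
 * The assert above enforces the power-of-two requirement, e.g.
 * (hypothetical calls):
 *
 *	phys_alloc_set_minimum_alignment(64);	// OK: power of two
 *	phys_alloc_set_minimum_alignment(48);	// would trip the assert
 */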

/*
 * Allocate @size bytes aligned to @align. When @safe is true and this
 * is a 32-bit build, only memory below 4 GiB is used, so that the
 * returned address also fits in a 32-bit virtual address
 * (early_memalign() hands it to callers through phys_to_virt()).
 */
static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
					   phys_addr_t align, bool safe)
{
	static bool warned = false;
	phys_addr_t addr, size_orig = size;
	u64 top_safe;

	spin_lock(&lock);

	top_safe = top;

	if (safe && sizeof(long) == 4)
		top_safe = MIN(top_safe, 1ULL << 32);

	assert(base < top_safe);

	/* Charge the bytes skipped for alignment to this allocation. */
	addr = ALIGN(base, align);
	size += addr - base;

	if ((top_safe - base) < size) {
		printf("phys_alloc: requested=%#" PRIx64
		       " (align=%#" PRIx64 "), "
		       "need=%#" PRIx64 ", but free=%#" PRIx64 ". "
		       "top=%#" PRIx64 ", top_safe=%#" PRIx64 "\n",
		       (u64)size_orig, (u64)align, (u64)size, top_safe - base,
		       (u64)top, top_safe);
		spin_unlock(&lock);
		return INVALID_PHYS_ADDR;
	}

	base += size;

	/* Log the allocation so phys_alloc_show() can report it. */
	if (nr_regions < PHYS_ALLOC_NR_REGIONS) {
		regions[nr_regions].base = addr;
		regions[nr_regions].size = size_orig;
		++nr_regions;
	} else if (!warned) {
		printf("WARNING: phys_alloc: No free log entries, "
		       "can no longer log allocations...\n");
		warned = true;
	}

	spin_unlock(&lock);

	return addr;
}
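/*
 * A worked example of the bookkeeping above (numbers hypothetical):
 * with base = 0x1008 and a request of size = 0x100, align = 0x100:
 *
 *	addr = ALIGN(0x1008, 0x100)      = 0x1100
 *	size = 0x100 + (0x1100 - 0x1008) = 0x1f8
 *	base = 0x1008 + 0x1f8            = 0x1200
 *
 * i.e. the bytes skipped for alignment are consumed by the allocation,
 * while the region log records only the caller's original size.
 */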

/*
 * Return the remaining free range and retire it: the leftover memory
 * is logged as used and the allocator is left empty, so the caller can
 * take over managing [*p_base, *p_top).
 */
void phys_alloc_get_unused(phys_addr_t *p_base, phys_addr_t *p_top)
{
	/* Read base/top under the lock to avoid racing with allocations. */
	spin_lock(&lock);
	*p_base = base;
	*p_top = top;
	if (base == top) {
		spin_unlock(&lock);
		return;
	}
	assert(nr_regions < PHYS_ALLOC_NR_REGIONS);
	regions[nr_regions].base = base;
	regions[nr_regions].size = top - base;
	++nr_regions;
	base = top;
	spin_unlock(&lock);
}
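/*
 * A typical (hypothetical) hand-off: drain the early allocator and give
 * the leftover range to a later, page-based allocator:
 *
 *	phys_addr_t base, top;
 *
 *	phys_alloc_get_unused(&base, &top);
 *	// feed [base, top) to the page allocator's init routine
 */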

/*
 * The memalign hook of the early allocator: allocate physical memory
 * (below 4 GiB on 32-bit builds) and return it as a virtual address,
 * or NULL when the request cannot be satisfied.
 */
static void *early_memalign(size_t alignment, size_t size)
{
	phys_addr_t addr;

	assert(alignment && !(alignment & (alignment - 1)));

	addr = phys_alloc_aligned_safe(size, alignment, true);
	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}
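/*
 * early_memalign() is not called directly; it is reached through the
 * alloc_ops indirection, e.g. (sizes hypothetical):
 *
 *	void *buf = memalign(4096, 16384);	// alloc_ops->memalign()
 *	void *p = malloc(32);			// also funnels into memalign()
 *
 * so the same call sites keep working after alloc_ops is switched to a
 * different allocator.
 */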