xref: /kvm-unit-tests/lib/alloc_phys.c (revision dc47ac613f1df343417d1a730d6b68e5e0b0c1a9)
1 /*
2  * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
3  *
4  * This work is licensed under the terms of the GNU LGPL, version 2.
5  *
6  * This is a simple allocator that provides contiguous physical addresses
7  * with byte granularity.
8  */
9 #include "alloc.h"
10 #include "asm/spinlock.h"
11 #include "asm/io.h"
12 
/* Maximum number of allocations recorded for phys_alloc_show(). */
#define PHYS_ALLOC_NR_REGIONS	256

/* Alignment applied when a caller requests something smaller. */
#define DEFAULT_MINIMUM_ALIGNMENT	32

/* One logged allocation: the range [base, base + size). */
struct phys_alloc_region {
	phys_addr_t base;
	phys_addr_t size;
};

/* Log of past allocations; used only for reporting by phys_alloc_show(). */
static struct phys_alloc_region regions[PHYS_ALLOC_NR_REGIONS];
static int nr_regions;

/* Protects the region log above and the allocator state below. */
static struct spinlock lock;
/* Free memory is [base, top); align_min is the minimum allocation alignment. */
static phys_addr_t base, top, align_min;
27 
28 void phys_alloc_show(void)
29 {
30 	int i;
31 
32 	spin_lock(&lock);
33 	printf("phys_alloc minimum alignment: %#" PRIx64 "\n",
34 		(u64)align_min);
35 	for (i = 0; i < nr_regions; ++i)
36 		printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
37 			(u64)regions[i].base,
38 			(u64)(regions[i].base + regions[i].size - 1),
39 			"USED");
40 	printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
41 		(u64)base, (u64)(top - 1), "FREE");
42 	spin_unlock(&lock);
43 }
44 
45 void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
46 {
47 	spin_lock(&lock);
48 	base = base_addr;
49 	top = base + size;
50 	align_min = DEFAULT_MINIMUM_ALIGNMENT;
51 	nr_regions = 0;
52 	spin_unlock(&lock);
53 }
54 
55 void phys_alloc_set_minimum_alignment(phys_addr_t align)
56 {
57 	assert(align && !(align & (align - 1)));
58 	spin_lock(&lock);
59 	align_min = align;
60 	spin_unlock(&lock);
61 }
62 
63 static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
64 					   phys_addr_t align, bool safe)
65 {
66 	static bool warned = false;
67 	phys_addr_t addr, size_orig = size;
68 	u64 top_safe;
69 
70 	spin_lock(&lock);
71 
72 	top_safe = top;
73 
74 	if (safe && sizeof(long) == 4)
75 		top_safe = MIN(top_safe, 1ULL << 32);
76 
77 	align = MAX(align, align_min);
78 
79 	addr = ALIGN(base, align);
80 	size += addr - base;
81 
82 	if ((top_safe - base) < size) {
83 		printf("phys_alloc: requested=%#" PRIx64
84 		       " (align=%#" PRIx64 "), "
85 		       "need=%#" PRIx64 ", but free=%#" PRIx64 ". "
86 		       "top=%#" PRIx64 ", top_safe=%#" PRIx64 "\n",
87 		       (u64)size_orig, (u64)align, (u64)size, top_safe - base,
88 		       (u64)top, top_safe);
89 		spin_unlock(&lock);
90 		return INVALID_PHYS_ADDR;
91 	}
92 
93 	base += size;
94 
95 	if (nr_regions < PHYS_ALLOC_NR_REGIONS) {
96 		regions[nr_regions].base = addr;
97 		regions[nr_regions].size = size_orig;
98 		++nr_regions;
99 	} else if (!warned) {
100 		printf("WARNING: phys_alloc: No free log entries, "
101 		       "can no longer log allocations...\n");
102 		warned = true;
103 	}
104 
105 	spin_unlock(&lock);
106 
107 	return addr;
108 }
109 
110 static void *early_memalign(size_t alignment, size_t size)
111 {
112 	phys_addr_t addr;
113 
114 	assert(alignment && !(alignment & (alignment - 1)));
115 
116 	addr = phys_alloc_aligned_safe(size, alignment, true);
117 	if (addr == INVALID_PHYS_ADDR)
118 		return NULL;
119 
120 	return phys_to_virt(addr);
121 }
122 
/* Allocation ops backed by the early physical allocator above. */
static struct alloc_ops early_alloc_ops = {
	.memalign = early_memalign,
};

/*
 * Global ops pointer used by the generic allocation wrappers (see
 * alloc.h); starts out pointing at the early allocator. NOTE(review):
 * presumably later setup code swaps this to a different allocator —
 * not visible in this file.
 */
struct alloc_ops *alloc_ops = &early_alloc_ops;
128