/*
 * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 *
 * This is a simple allocator that provides contiguous physical addresses
 * with byte granularity.
 */
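/*
 * Typical use (an illustrative sketch, not code from this file): setup
 * hands the allocator one free physical range, then early callers pull
 * aligned chunks out of it until a more capable allocator takes over.
 * The names and values below are examples only.
 *
 *	phys_alloc_init(freemem_start, freemem_size);
 *	phys_alloc_set_minimum_alignment(32);
 *	void *p = memalign(64, 4096);	// routed through alloc_ops
 */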
#include "alloc.h"
#include "asm/spinlock.h"
#include "asm/io.h"
#include "alloc_phys.h"

#define PHYS_ALLOC_NR_REGIONS 256

#define DEFAULT_MINIMUM_ALIGNMENT 32

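/*
 * Each allocation is logged in this fixed-size table so phys_alloc_show()
 * can dump it. Once the table fills up, allocations still succeed but are
 * no longer recorded (see the warning in phys_alloc_aligned_safe()).
 */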
struct phys_alloc_region {
	phys_addr_t base;
	phys_addr_t size;
};

static struct phys_alloc_region regions[PHYS_ALLOC_NR_REGIONS];
static int nr_regions;

static struct spinlock lock;
static phys_addr_t base, top;

static void *early_memalign(size_t alignment, size_t size);
static struct alloc_ops early_alloc_ops = {
	.memalign = early_memalign,
};
static size_t align_min;

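/*
 * Generic allocations start out routed to the early allocator; setup
 * code is expected to repoint alloc_ops once a more capable allocator
 * is available.
 */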
struct alloc_ops *alloc_ops = &early_alloc_ops;

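/*
 * Dump the allocation log followed by the remaining free range.
 * Ranges are printed with inclusive end addresses.
 */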
void phys_alloc_show(void)
{
	int i;

	spin_lock(&lock);
	printf("phys_alloc minimum alignment: %#" PRIx64 "\n", (u64)align_min);
	for (i = 0; i < nr_regions; ++i)
		printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
			(u64)regions[i].base,
			(u64)(regions[i].base + regions[i].size - 1),
			"USED");
	printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
		(u64)base, (u64)(top - 1), "FREE");
	spin_unlock(&lock);
}

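/*
 * Hand the allocator its free range. This resets the allocation log,
 * but does not reset the minimum alignment.
 */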
void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
{
	spin_lock(&lock);
	base = base_addr;
	top = base + size;
	nr_regions = 0;
	spin_unlock(&lock);
}

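/* Set the minimum alignment; it must be a nonzero power of two. */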
void phys_alloc_set_minimum_alignment(phys_addr_t align)
{
	assert(align && !(align & (align - 1)));
	spin_lock(&lock);
	align_min = align;
	spin_unlock(&lock);
}

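/*
 * Allocate @size bytes at the requested alignment by bumping @base.
 * With @safe set, the allocation is additionally kept below 4 GiB on
 * 32-bit hosts so the physical address also fits in a pointer. Returns
 * INVALID_PHYS_ADDR when the request doesn't fit.
 */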
static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
					   phys_addr_t align, bool safe)
{
	static bool warned = false;
	phys_addr_t addr, size_orig = size;
	u64 top_safe;

	spin_lock(&lock);

	top_safe = top;

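	/* On 32-bit, a "safe" allocation must stay below 4 GiB so its
	 * address survives conversion to a virtual pointer. */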
	if (safe && sizeof(long) == 4)
		top_safe = MIN(top_safe, 1ULL << 32);

	assert(base < top_safe);
	if (align < align_min)
		align = align_min;

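	/* Charge the padding between the current base and the aligned
	 * address to this allocation. */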
	addr = ALIGN(base, align);
	size += addr - base;

	if ((top_safe - base) < size) {
		printf("phys_alloc: requested=%#" PRIx64
		       " (align=%#" PRIx64 "), "
		       "need=%#" PRIx64 ", but free=%#" PRIx64 ". "
		       "top=%#" PRIx64 ", top_safe=%#" PRIx64 "\n",
		       (u64)size_orig, (u64)align, (u64)size,
		       top_safe - base, (u64)top, top_safe);
		spin_unlock(&lock);
		return INVALID_PHYS_ADDR;
	}

	base += size;

	if (nr_regions < PHYS_ALLOC_NR_REGIONS) {
		regions[nr_regions].base = addr;
		regions[nr_regions].size = size_orig;
		++nr_regions;
	} else if (!warned) {
		printf("WARNING: phys_alloc: No free log entries, "
		       "can no longer log allocations...\n");
		warned = true;
	}

	spin_unlock(&lock);

	return addr;
}

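/*
 * Hand the remaining free range to the caller (typically an allocator
 * taking over from this one) and log it as used so it won't be handed
 * out again.
 */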
void phys_alloc_get_unused(phys_addr_t *p_base, phys_addr_t *p_top)
{
	*p_base = base;
	*p_top = top;
	if (base == top)
		return;
	spin_lock(&lock);
	regions[nr_regions].base = base;
	regions[nr_regions].size = top - base;
	++nr_regions;
	base = top;
	spin_unlock(&lock);
}

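/*
 * The early alloc_ops hook: alignment must be a nonzero power of two.
 * Returns NULL when the underlying physical allocation fails.
 */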
static void *early_memalign(size_t alignment, size_t size)
{
	phys_addr_t addr;

	assert(alignment && !(alignment & (alignment - 1)));

	addr = phys_alloc_aligned_safe(size, alignment, true);
	if (addr == INVALID_PHYS_ADDR)
		return NULL;

	return phys_to_virt(addr);
}