xref: /cloud-hypervisor/vm-allocator/src/address.rs (revision 7d7bfb2034001d4cb15df2ddc56d2d350c8da30f)
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Copyright © 2019 Intel Corporation
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use std::collections::btree_map::BTreeMap;
use std::result;
use vm_memory::{Address, GuestAddress, GuestUsize};

#[derive(Debug)]
pub enum Error {
    Overflow,
    Overlap,
    UnalignedAddress,
}

pub type Result<T> = result::Result<T, Error>;

/// Manages allocating address ranges.
/// Use `AddressAllocator` whenever an address range needs to be allocated to different users.
///
/// # Examples
///
/// ```
/// # use vm_allocator::AddressAllocator;
/// # use vm_memory::{Address, GuestAddress, GuestUsize};
///   AddressAllocator::new(GuestAddress(0x1000), 0x10000).map(|mut pool| {
///       assert_eq!(pool.allocate(None, 0x110, Some(0x100)), Some(GuestAddress(0x10e00)));
///       assert_eq!(pool.allocate(None, 0x100, Some(0x100)), Some(GuestAddress(0x10d00)));
///   });
/// ```
#[derive(Debug, Eq, PartialEq)]
pub struct AddressAllocator {
    base: GuestAddress,
    end: GuestAddress,
    ranges: BTreeMap<GuestAddress, GuestUsize>,
}

impl AddressAllocator {
    /// Creates a new `AddressAllocator` for managing a range of addresses.
    /// Returns `None` if `size` is zero or if `base` + `size` overflows a u64.
    ///
    /// * `base` - The starting address of the range to manage.
    /// * `size` - The size of the address range in bytes.
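    ///
    /// A short illustrative sketch of the failure modes (mirroring the unit tests at the
    /// bottom of this file):
    ///
    /// ```
    /// # use vm_allocator::AddressAllocator;
    /// # use vm_memory::GuestAddress;
    /// assert!(AddressAllocator::new(GuestAddress(0x1000), 0x10000).is_some());
    /// // A zero-sized or overflowing range cannot be managed.
    /// assert!(AddressAllocator::new(GuestAddress(0x1000), 0).is_none());
    /// assert!(AddressAllocator::new(GuestAddress(u64::max_value()), 0x100).is_none());
    /// ```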
    pub fn new(base: GuestAddress, size: GuestUsize) -> Option<Self> {
        if size == 0 {
            return None;
        }

        let end = base.checked_add(size - 1)?;

        let mut allocator = AddressAllocator {
            base,
            end,
            ranges: BTreeMap::new(),
        };

        // Insert a zero-sized range just past the end of the managed space (base + size).
        // This is our end of address space marker.
        allocator.ranges.insert(base.checked_add(size)?, 0);

        Some(allocator)
    }

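    // Round `address` up to the next multiple of `alignment` (a no-op when it is already
    // aligned). Callers guarantee that `alignment` is non-zero.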
    fn align_address(&self, address: GuestAddress, alignment: GuestUsize) -> GuestAddress {
        let align_adjust = if address.raw_value() % alignment != 0 {
            alignment - (address.raw_value() % alignment)
        } else {
            0
        };

        address.unchecked_add(align_adjust)
    }

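    // Check whether an explicitly requested range [req_address, req_address + req_size) is
    // properly aligned, lies within the managed address space and does not overlap any
    // already allocated range. On success, returns the requested address unchanged.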
    fn available_range(
        &self,
        req_address: GuestAddress,
        req_size: GuestUsize,
        alignment: GuestUsize,
    ) -> Result<GuestAddress> {
        let aligned_address = self.align_address(req_address, alignment);

        // The requested address should be aligned.
        if aligned_address != req_address {
            return Err(Error::UnalignedAddress);
        }

        // The aligned address should be within the address space range.
        if aligned_address >= self.end || aligned_address < self.base {
            return Err(Error::Overflow);
        }

        let mut prev_end_address = self.base;
        for (address, size) in self.ranges.iter() {
            if aligned_address <= *address {
                // Do we overlap with the previous range?
                if prev_end_address > aligned_address {
                    return Err(Error::Overlap);
                }

                // Do we have enough space?
                if address
                    .unchecked_sub(aligned_address.raw_value())
                    .raw_value()
                    < req_size
                {
                    return Err(Error::Overlap);
                }

                return Ok(aligned_address);
            }

            prev_end_address = address.unchecked_add(*size);
        }

        // We have not found a range that starts after the requested address,
        // despite having a marker at the end of our range.
        Err(Error::Overflow)
    }

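    // Scan the existing ranges from the top of the address space down and return the
    // highest free, aligned slot large enough to hold `req_size` bytes.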
    fn first_available_range(
        &self,
        req_size: GuestUsize,
        alignment: GuestUsize,
    ) -> Option<GuestAddress> {
        let reversed_ranges: Vec<(&GuestAddress, &GuestUsize)> = self.ranges.iter().rev().collect();

        for (idx, (address, _size)) in reversed_ranges.iter().enumerate() {
            let next_range_idx = idx + 1;
            let prev_end_address = if next_range_idx >= reversed_ranges.len() {
                self.base
            } else {
                reversed_ranges[next_range_idx]
                    .0
                    .unchecked_add(*(reversed_ranges[next_range_idx].1))
            };

            // If we have enough space between this range and the previous one,
            // we return the start of this range minus the requested size.
            // As each new range is allocated at the end of the available address space,
            // we will tend to always allocate new ranges there as well. In other words,
            // ranges accumulate at the end of the address space.
            if let Some(size_delta) =
                address.checked_sub(self.align_address(prev_end_address, alignment).raw_value())
            {
                let adjust = if alignment > 1 { alignment - 1 } else { 0 };
                if size_delta.raw_value() >= req_size {
                    return Some(
                        self.align_address(address.unchecked_sub(req_size + adjust), alignment),
                    );
                }
            }
        }

        None
    }

    /// Allocates a range of addresses from the managed region.
    ///
    /// If `address` is provided, the range must fit exactly at that address; otherwise the
    /// allocator picks a free slot itself, starting from the top of the address space. When
    /// `align_size` is `None`, a default alignment of 4 bytes is used.
    ///
    /// Returns `Some(allocated_address)` when successful, or `None` if `size` is zero, if
    /// the alignment isn't a power of two, or if an area of `size` can't be allocated.
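    ///
    /// An illustrative sketch (the expected addresses assume the top-down placement
    /// implemented by `first_available_range`):
    ///
    /// ```
    /// # use vm_allocator::AddressAllocator;
    /// # use vm_memory::GuestAddress;
    /// let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
    /// // Request an explicit, aligned address.
    /// assert_eq!(
    ///     pool.allocate(Some(GuestAddress(0x1200)), 0x100, Some(0x100)),
    ///     Some(GuestAddress(0x1200))
    /// );
    /// // Let the allocator choose: the range is carved from the top of the space.
    /// assert_eq!(
    ///     pool.allocate(None, 0x100, Some(0x100)),
    ///     Some(GuestAddress(0x1f00))
    /// );
    /// ```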
    pub fn allocate(
        &mut self,
        address: Option<GuestAddress>,
        size: GuestUsize,
        align_size: Option<GuestUsize>,
    ) -> Option<GuestAddress> {
        if size == 0 {
            return None;
        }

        let alignment = align_size.unwrap_or(4);
        if !alignment.is_power_of_two() || alignment == 0 {
            return None;
        }

        let new_addr = match address {
            Some(req_address) => match self.available_range(req_address, size, alignment) {
                Ok(addr) => addr,
                Err(_) => {
                    return None;
                }
            },
            None => self.first_available_range(size, alignment)?,
        };

        self.ranges.insert(new_addr, size);

        Some(new_addr)
    }

    /// Free an already allocated address range.
    /// A range is only freed if both its address and size exactly match a previously
    /// allocated range; otherwise the call has no effect.
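    ///
    /// A short illustrative sketch (mirroring the free/realloc unit tests below):
    ///
    /// ```
    /// # use vm_allocator::AddressAllocator;
    /// # use vm_memory::GuestAddress;
    /// let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
    /// pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)).unwrap();
    /// // A mismatched size frees nothing, so the range cannot be reallocated.
    /// pool.free(GuestAddress(0x1200), 0x100);
    /// assert_eq!(
    ///     pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
    ///     None
    /// );
    /// // Freeing with the exact address and size releases the range for reuse.
    /// pool.free(GuestAddress(0x1200), 0x800);
    /// assert_eq!(
    ///     pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
    ///     Some(GuestAddress(0x1200))
    /// );
    /// ```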
    pub fn free(&mut self, address: GuestAddress, size: GuestUsize) {
        if let Some(&range_size) = self.ranges.get(&address) {
            if size == range_size {
                self.ranges.remove(&address);
            }
        }
    }

    /// Start address of the allocator
    pub fn base(&self) -> GuestAddress {
        self.base
    }

    /// Last address of the allocator
    pub fn end(&self) -> GuestAddress {
        self.end
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new_fails_overflow() {
        assert_eq!(
            AddressAllocator::new(GuestAddress(u64::max_value()), 0x100),
            None
        );
    }

    #[test]
    fn new_fails_size_zero() {
        assert_eq!(AddressAllocator::new(GuestAddress(0x1000), 0), None);
    }

    #[test]
    fn allocate_fails_alignment_zero() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1000)), 0x100, Some(0)),
            None
        );
    }

    #[test]
    fn allocate_fails_alignment_non_power_of_two() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1000)), 0x100, Some(200)),
            None
        );
    }

    #[test]
    fn allocate_fails_not_enough_space() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
        assert_eq!(
            pool.allocate(None, 0x800, Some(0x100)),
            Some(GuestAddress(0x1800))
        );
        assert_eq!(pool.allocate(None, 0x900, Some(0x100)), None);
        assert_eq!(
            pool.allocate(None, 0x400, Some(0x100)),
            Some(GuestAddress(0x1400))
        );
    }

    #[test]
    fn allocate_alignment() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
        assert_eq!(
            pool.allocate(None, 0x110, Some(0x100)),
            Some(GuestAddress(0x10e00))
        );
        assert_eq!(
            pool.allocate(None, 0x100, Some(0x100)),
            Some(GuestAddress(0x10d00))
        );
        assert_eq!(
            pool.allocate(None, 0x10, Some(0x100)),
            Some(GuestAddress(0x10c00))
        );
    }

    #[test]
    fn allocate_address() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, None),
            Some(GuestAddress(0x1200))
        );

        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1a00)), 0x100, None),
            Some(GuestAddress(0x1a00))
        );
    }

    #[test]
    fn allocate_address_alignment() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        // Unaligned request
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1210)), 0x800, Some(0x100)),
            None
        );

        // Aligned request
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1b00)), 0x100, Some(0x100)),
            Some(GuestAddress(0x1b00))
        );
    }

    #[test]
    fn allocate_address_not_enough_space() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First range is [0x1200:0x1a00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        // Second range is [0x1c00:0x1e00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1c00)), 0x200, Some(0x100)),
            Some(GuestAddress(0x1c00))
        );

        // There is 0x200 between the first 2 ranges.
        // We ask for an available address but the range is too big
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1b00)), 0x800, Some(0x100)),
            None
        );

        // We ask for an available address, with a small enough range
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1b00)), 0x100, Some(0x100)),
            Some(GuestAddress(0x1b00))
        );
    }

    #[test]
    fn allocate_address_free_and_realloc() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First range is [0x1200:0x1a00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        pool.free(GuestAddress(0x1200), 0x800);

        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );
    }

    #[test]
    fn allocate_address_free_fail_and_realloc() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First range is [0x1200:0x1a00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        // We try to free a range smaller than the allocated one.
        pool.free(GuestAddress(0x1200), 0x100);

        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            None
        );
    }

    #[test]
    fn allocate_address_fail_free_and_realloc() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First allocation fails
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x2000, Some(0x100)),
            None
        );

        // We try to free a range that was not allocated.
        pool.free(GuestAddress(0x1200), 0x2000);

        // Now we try an allocation that should succeed.
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );
    }
}