// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Copyright © 2019 Intel Corporation
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use std::collections::btree_map::BTreeMap;
use std::result;

use vm_memory::{Address, GuestAddress, GuestUsize};

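/// Errors that can occur while reserving or allocating an address range.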
#[derive(Debug)]
pub enum Error {
    Overflow,
    Overlap,
    UnalignedAddress,
}

pub type Result<T> = result::Result<T, Error>;

/// Manages allocating address ranges.
/// Use `AddressAllocator` whenever an address range needs to be allocated to different users.
///
/// # Examples
///
/// ```
/// # use vm_allocator::AddressAllocator;
/// # use vm_memory::{Address, GuestAddress, GuestUsize};
///   AddressAllocator::new(GuestAddress(0x1000), 0x10000).map(|mut pool| {
///       assert_eq!(pool.allocate(None, 0x110, Some(0x100)), Some(GuestAddress(0x10e00)));
///       assert_eq!(pool.allocate(None, 0x100, Some(0x100)), Some(GuestAddress(0x10d00)));
///   });
/// ```
#[derive(Debug, Eq, PartialEq)]
pub struct AddressAllocator {
    base: GuestAddress,
    end: GuestAddress,
    ranges: BTreeMap<GuestAddress, GuestUsize>,
}

impl AddressAllocator {
    /// Creates a new `AddressAllocator` for managing a range of addresses.
    /// Returns `None` if `size` is zero or if `base` + `size` overflows a u64.
    ///
    /// * `base` - The starting address of the range to manage.
    /// * `size` - The size of the address range in bytes.
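    ///
    /// An illustrative sketch of both outcomes:
    ///
    /// ```
    /// # use vm_allocator::AddressAllocator;
    /// # use vm_memory::GuestAddress;
    /// // A well-formed range is accepted.
    /// assert!(AddressAllocator::new(GuestAddress(0x1000), 0x1000).is_some());
    /// // A range whose end would overflow a u64 is rejected.
    /// assert!(AddressAllocator::new(GuestAddress(u64::MAX), 0x2).is_none());
    /// ```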
    pub fn new(base: GuestAddress, size: GuestUsize) -> Option<Self> {
        if size == 0 {
            return None;
        }

        let end = base.checked_add(size - 1)?;

        let mut allocator = AddressAllocator {
            base,
            end,
            ranges: BTreeMap::new(),
        };

        // Insert the last address as a zero size range.
        // This is our end of address space marker.
        allocator.ranges.insert(base.checked_add(size)?, 0);

        Some(allocator)
    }

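    /// Rounds `address` up to the nearest multiple of `alignment`, or returns it
    /// unchanged if it is already aligned. `alignment` must be non-zero.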
    fn align_address(&self, address: GuestAddress, alignment: GuestUsize) -> GuestAddress {
        let align_adjust = if address.raw_value() % alignment != 0 {
            alignment - (address.raw_value() % alignment)
        } else {
            0
        };

        address.unchecked_add(align_adjust)
    }

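    /// Checks whether `req_size` bytes can be reserved at exactly `req_address`
    /// with the given alignment. Fails if the address is unaligned, falls outside
    /// the managed range, or overlaps an existing allocation.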
    fn available_range(
        &self,
        req_address: GuestAddress,
        req_size: GuestUsize,
        alignment: GuestUsize,
    ) -> Result<GuestAddress> {
        let aligned_address = self.align_address(req_address, alignment);

        // The requested address should be aligned.
        if aligned_address != req_address {
            return Err(Error::UnalignedAddress);
        }

        // The aligned address should be within the address space range.
        if aligned_address >= self.end || aligned_address < self.base {
            return Err(Error::Overflow);
        }

        let mut prev_end_address = self.base;
        for (address, size) in self.ranges.iter() {
            if aligned_address <= *address {
                // Do we overlap with the previous range?
                if prev_end_address > aligned_address {
                    return Err(Error::Overlap);
                }

                // Do we have enough space?
                if address
                    .unchecked_sub(aligned_address.raw_value())
                    .raw_value()
                    < req_size
                {
                    return Err(Error::Overlap);
                }

                return Ok(aligned_address);
            }

            prev_end_address = address.unchecked_add(*size);
        }

        // We have not found a range that starts after the requested address,
        // despite having a marker at the end of our range.
        Err(Error::Overflow)
    }

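    /// Finds a free, aligned slot of `req_size` bytes. The allocated ranges are
    /// walked from the top of the managed space downwards, so new allocations tend
    /// to cluster at the end of the address space.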
    fn first_available_range(
        &self,
        req_size: GuestUsize,
        alignment: GuestUsize,
    ) -> Option<GuestAddress> {
        let reversed_ranges: Vec<(&GuestAddress, &GuestUsize)> = self.ranges.iter().rev().collect();

        for (idx, (address, _size)) in reversed_ranges.iter().enumerate() {
            let next_range_idx = idx + 1;
            let prev_end_address = if next_range_idx >= reversed_ranges.len() {
                self.base
            } else {
                reversed_ranges[next_range_idx]
                    .0
                    .unchecked_add(*(reversed_ranges[next_range_idx].1))
            };

            // If we have enough space between this range and the previous one,
            // we return the start of this range minus the requested size.
            // As each new range is allocated at the end of the available address space,
            // we will tend to always allocate new ranges there as well. In other words,
            // ranges accumulate at the end of the address space.
            if let Some(size_delta) =
                address.checked_sub(self.align_address(prev_end_address, alignment).raw_value())
            {
                let adjust = alignment.saturating_sub(1);
                if size_delta.raw_value() >= req_size {
                    return Some(
                        self.align_address(address.unchecked_sub(req_size + adjust), alignment),
                    );
                }
            }
        }

        None
    }

    /// Allocates a range of addresses from the managed region. Returns `Some(allocated_address)`
    /// when successful, or `None` if `size` is zero, if `align_size` isn't a power of two, or if
    /// no area of `size` bytes can be allocated.
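    ///
    /// An illustrative sketch of both allocation modes, fixed and allocator-chosen:
    ///
    /// ```
    /// # use vm_allocator::AddressAllocator;
    /// # use vm_memory::GuestAddress;
    /// let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
    /// // Fixed allocation at a specific, suitably aligned address.
    /// assert_eq!(
    ///     pool.allocate(Some(GuestAddress(0x2000)), 0x100, Some(0x100)),
    ///     Some(GuestAddress(0x2000))
    /// );
    /// // Floating allocation: the allocator picks an aligned address near the end of the range.
    /// assert_eq!(
    ///     pool.allocate(None, 0x100, Some(0x100)),
    ///     Some(GuestAddress(0x10f00))
    /// );
    /// ```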
    pub fn allocate(
        &mut self,
        address: Option<GuestAddress>,
        size: GuestUsize,
        align_size: Option<GuestUsize>,
    ) -> Option<GuestAddress> {
        if size == 0 {
            return None;
        }

        // Default to a 4-byte alignment when the caller does not request one.
        let alignment = align_size.unwrap_or(4);
        // `is_power_of_two()` returns false for zero, so a zero alignment is rejected as well.
        if !alignment.is_power_of_two() {
            return None;
        }

        let new_addr = match address {
            Some(req_address) => match self.available_range(req_address, size, alignment) {
                Ok(addr) => addr,
                Err(_) => {
                    return None;
                }
            },
            None => self.first_available_range(size, alignment)?,
        };

        self.ranges.insert(new_addr, size);

        Some(new_addr)
    }

    /// Frees an already allocated address range.
    /// A range is only freed if both its base address and size exactly match an allocated range.
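    ///
    /// An illustrative sketch of the exact-match requirement:
    ///
    /// ```
    /// # use vm_allocator::AddressAllocator;
    /// # use vm_memory::GuestAddress;
    /// let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
    /// assert_eq!(
    ///     pool.allocate(Some(GuestAddress(0x1200)), 0x200, None),
    ///     Some(GuestAddress(0x1200))
    /// );
    /// // Freeing with a mismatched size is a no-op; the range stays allocated.
    /// pool.free(GuestAddress(0x1200), 0x100);
    /// assert_eq!(pool.allocate(Some(GuestAddress(0x1200)), 0x200, None), None);
    /// // Freeing the exact range releases it for reuse.
    /// pool.free(GuestAddress(0x1200), 0x200);
    /// assert_eq!(
    ///     pool.allocate(Some(GuestAddress(0x1200)), 0x200, None),
    ///     Some(GuestAddress(0x1200))
    /// );
    /// ```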
    pub fn free(&mut self, address: GuestAddress, size: GuestUsize) {
        if let Some(&range_size) = self.ranges.get(&address) {
            if size == range_size {
                self.ranges.remove(&address);
            }
        }
    }

    /// Start address of the allocator
    pub fn base(&self) -> GuestAddress {
        self.base
    }

    /// Last address of the allocator
    pub fn end(&self) -> GuestAddress {
        self.end
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new_fails_overflow() {
        assert_eq!(AddressAllocator::new(GuestAddress(u64::MAX), 0x100), None);
    }

    #[test]
    fn new_fails_size_zero() {
        assert_eq!(AddressAllocator::new(GuestAddress(0x1000), 0), None);
    }

    #[test]
    fn allocate_fails_alignment_zero() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1000)), 0x100, Some(0)),
            None
        );
    }

    #[test]
    fn allocate_fails_alignment_non_power_of_two() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1000)), 0x100, Some(200)),
            None
        );
    }

    #[test]
    fn allocate_fails_not_enough_space() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
        assert_eq!(
            pool.allocate(None, 0x800, Some(0x100)),
            Some(GuestAddress(0x1800))
        );
        assert_eq!(pool.allocate(None, 0x900, Some(0x100)), None);
        assert_eq!(
            pool.allocate(None, 0x400, Some(0x100)),
            Some(GuestAddress(0x1400))
        );
    }

    #[test]
    fn allocate_alignment() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
        assert_eq!(
            pool.allocate(None, 0x110, Some(0x100)),
            Some(GuestAddress(0x10e00))
        );
        assert_eq!(
            pool.allocate(None, 0x100, Some(0x100)),
            Some(GuestAddress(0x10d00))
        );
        assert_eq!(
            pool.allocate(None, 0x10, Some(0x100)),
            Some(GuestAddress(0x10c00))
        );
    }

    #[test]
    fn allocate_address() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, None),
            Some(GuestAddress(0x1200))
        );

        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1a00)), 0x100, None),
            Some(GuestAddress(0x1a00))
        );
    }

    #[test]
    fn allocate_address_alignment() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        // Unaligned request
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1210)), 0x800, Some(0x100)),
            None
        );

        // Aligned request
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1b00)), 0x100, Some(0x100)),
            Some(GuestAddress(0x1b00))
        );
    }

    #[test]
    fn allocate_address_not_enough_space() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First range is [0x1200:0x1a00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        // Second range is [0x1c00:0x1e00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1c00)), 0x200, Some(0x100)),
            Some(GuestAddress(0x1c00))
        );

        // There is 0x200 between the first 2 ranges.
        // We ask for an available address but the range is too big
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1b00)), 0x800, Some(0x100)),
            None
        );

        // We ask for an available address, with a small enough range
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1b00)), 0x100, Some(0x100)),
            Some(GuestAddress(0x1b00))
        );
    }

    #[test]
    fn allocate_address_free_and_realloc() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First range is [0x1200:0x1a00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        pool.free(GuestAddress(0x1200), 0x800);

        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );
    }

    #[test]
    fn allocate_address_free_fail_and_realloc() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First range is [0x1200:0x1a00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        // We try to free a range smaller than the allocated one.
        pool.free(GuestAddress(0x1200), 0x100);

        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            None
        );
    }

    #[test]
    fn allocate_address_fail_free_and_realloc() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First allocation fails
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x2000, Some(0x100)),
            None
        );

        // We try to free a range that was not allocated.
        pool.free(GuestAddress(0x1200), 0x2000);

        // Now we try an allocation that should succeed.
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );
    }
}