// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Copyright © 2019 Intel Corporation
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use std::collections::btree_map::BTreeMap;
use std::result;
use vm_memory::{Address, GuestAddress, GuestUsize};

#[derive(Debug)]
pub enum Error {
    Overflow,
    Overlap,
    UnalignedAddress,
}

pub type Result<T> = result::Result<T, Error>;

/// Manages allocating address ranges.
/// Use `AddressAllocator` whenever an address range needs to be allocated to different users.
///
/// # Examples
///
/// ```
/// # use vm_allocator::AddressAllocator;
/// # use vm_memory::{Address, GuestAddress, GuestUsize};
/// AddressAllocator::new(GuestAddress(0x1000), 0x10000).map(|mut pool| {
///     assert_eq!(pool.allocate(None, 0x110, Some(0x100)), Some(GuestAddress(0x10e00)));
///     assert_eq!(pool.allocate(None, 0x100, Some(0x100)), Some(GuestAddress(0x10d00)));
/// });
/// ```
#[derive(Debug, Eq, PartialEq)]
pub struct AddressAllocator {
    base: GuestAddress,
    end: GuestAddress,
    ranges: BTreeMap<GuestAddress, GuestUsize>,
}

impl AddressAllocator {
    /// Creates a new `AddressAllocator` for managing a range of addresses.
    /// Can return `None` if `base` + `size` overflows a u64.
    ///
    /// * `base` - The starting address of the range to manage.
    /// * `size` - The size of the address range in bytes.
    pub fn new(base: GuestAddress, size: GuestUsize) -> Option<Self> {
        if size == 0 {
            return None;
        }

        let end = base.checked_add(size - 1)?;

        let mut allocator = AddressAllocator {
            base,
            end,
            ranges: BTreeMap::new(),
        };

        // Insert the last address as a zero size range.
        // This is our end of address space marker.
        allocator.ranges.insert(base.checked_add(size)?, 0);

        Some(allocator)
    }

    fn align_address(&self, address: GuestAddress, alignment: GuestUsize) -> GuestAddress {
        let align_adjust = if address.raw_value() % alignment != 0 {
            alignment - (address.raw_value() % alignment)
        } else {
            0
        };

        address.unchecked_add(align_adjust)
    }

    fn available_range(
        &self,
        req_address: GuestAddress,
        req_size: GuestUsize,
        alignment: GuestUsize,
    ) -> Result<GuestAddress> {
        let aligned_address = self.align_address(req_address, alignment);

        // The requested address should be aligned.
        if aligned_address != req_address {
            return Err(Error::UnalignedAddress);
        }

        // The aligned address should be within the address space range.
        if aligned_address >= self.end || aligned_address < self.base {
            return Err(Error::Overflow);
        }

        let mut prev_end_address = self.base;
        for (address, size) in self.ranges.iter() {
            if aligned_address <= *address {
                // Do we overlap with the previous range?
                if prev_end_address > aligned_address {
                    return Err(Error::Overlap);
                }

                // Do we have enough space?
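                // `address` is the start of the first existing range at or after the
                // requested address (possibly the end-of-space marker), so the gap up
                // to it must be large enough to hold `req_size` bytes.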
                if address
                    .unchecked_sub(aligned_address.raw_value())
                    .raw_value()
                    < req_size
                {
                    return Err(Error::Overlap);
                }

                return Ok(aligned_address);
            }

            prev_end_address = address.unchecked_add(*size);
        }

        // We have not found a range that starts after the requested address,
        // despite having a marker at the end of our range.
        Err(Error::Overflow)
    }

    fn first_available_range(
        &self,
        req_size: GuestUsize,
        alignment: GuestUsize,
    ) -> Option<GuestAddress> {
        let reversed_ranges: Vec<(&GuestAddress, &GuestUsize)> = self.ranges.iter().rev().collect();

        for (idx, (address, _size)) in reversed_ranges.iter().enumerate() {
            let next_range_idx = idx + 1;
            let prev_end_address = if next_range_idx >= reversed_ranges.len() {
                self.base
            } else {
                reversed_ranges[next_range_idx]
                    .0
                    .unchecked_add(*(reversed_ranges[next_range_idx].1))
            };

            // If we have enough space between this range and the previous one,
            // we return the start of this range minus the requested size.
            // As each new range is allocated at the end of the available address space,
            // we will tend to always allocate new ranges there as well. In other words,
            // ranges accumulate at the end of the address space.
            if let Some(size_delta) =
                address.checked_sub(self.align_address(prev_end_address, alignment).raw_value())
            {
                let adjust = if alignment > 1 { alignment - 1 } else { 0 };
                if size_delta.raw_value() >= req_size {
                    return Some(
                        self.align_address(address.unchecked_sub(req_size + adjust), alignment),
                    );
                }
            }
        }

        None
    }

    /// Allocates a range of addresses from the managed region. Returns `Some(allocated_address)`
    /// when successful, or `None` if an area of `size` can't be allocated or if alignment isn't
    /// a power of two.
    pub fn allocate(
        &mut self,
        address: Option<GuestAddress>,
        size: GuestUsize,
        align_size: Option<GuestUsize>,
    ) -> Option<GuestAddress> {
        if size == 0 {
            return None;
        }

        let alignment = align_size.unwrap_or(4);
        if !alignment.is_power_of_two() || alignment == 0 {
            return None;
        }

        let new_addr = match address {
            Some(req_address) => match self.available_range(req_address, size, alignment) {
                Ok(addr) => addr,
                Err(_) => {
                    return None;
                }
            },
            None => self.first_available_range(size, alignment)?,
        };

        self.ranges.insert(new_addr, size);

        Some(new_addr)
    }

    /// Free an already allocated address range.
    /// We can only free a range if it matches exactly an already allocated range.
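    ///
    /// # Examples
    ///
    /// A minimal sketch of the free-then-reallocate flow (the
    /// `allocate_address_free_and_realloc` test below exercises the same path):
    /// freeing with the exact size that was allocated makes the range reusable.
    ///
    /// ```
    /// # use vm_allocator::AddressAllocator;
    /// # use vm_memory::GuestAddress;
    /// let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
    /// assert_eq!(pool.allocate(Some(GuestAddress(0x1200)), 0x100, None), Some(GuestAddress(0x1200)));
    /// pool.free(GuestAddress(0x1200), 0x100);
    /// // The exact same range can be allocated again.
    /// assert_eq!(pool.allocate(Some(GuestAddress(0x1200)), 0x100, None), Some(GuestAddress(0x1200)));
    /// ```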
    pub fn free(&mut self, address: GuestAddress, size: GuestUsize) {
        if let Some(&range_size) = self.ranges.get(&address) {
            if size == range_size {
                self.ranges.remove(&address);
            }
        }
    }

    /// Start address of the allocator
    pub fn base(&self) -> GuestAddress {
        self.base
    }

    /// Last address of the allocator
    pub fn end(&self) -> GuestAddress {
        self.end
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new_fails_overflow() {
        assert_eq!(AddressAllocator::new(GuestAddress(u64::MAX), 0x100), None);
    }

    #[test]
    fn new_fails_size_zero() {
        assert_eq!(AddressAllocator::new(GuestAddress(0x1000), 0), None);
    }

    #[test]
    fn allocate_fails_alignment_zero() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1000)), 0x100, Some(0)),
            None
        );
    }

    #[test]
    fn allocate_fails_alignment_non_power_of_two() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1000)), 0x100, Some(200)),
            None
        );
    }

    #[test]
    fn allocate_fails_not_enough_space() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
        assert_eq!(
            pool.allocate(None, 0x800, Some(0x100)),
            Some(GuestAddress(0x1800))
        );
        assert_eq!(pool.allocate(None, 0x900, Some(0x100)), None);
        assert_eq!(
            pool.allocate(None, 0x400, Some(0x100)),
            Some(GuestAddress(0x1400))
        );
    }

    #[test]
    fn allocate_alignment() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
        assert_eq!(
            pool.allocate(None, 0x110, Some(0x100)),
            Some(GuestAddress(0x10e00))
        );
        assert_eq!(
            pool.allocate(None, 0x100, Some(0x100)),
            Some(GuestAddress(0x10d00))
        );
        assert_eq!(
            pool.allocate(None, 0x10, Some(0x100)),
            Some(GuestAddress(0x10c00))
        );
    }

    #[test]
    fn allocate_address() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, None),
            Some(GuestAddress(0x1200))
        );

        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1a00)), 0x100, None),
            Some(GuestAddress(0x1a00))
        );
    }

    #[test]
    fn allocate_address_alignment() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        // Unaligned request
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1210)), 0x800, Some(0x100)),
            None
        );

        // Aligned request
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1b00)), 0x100, Some(0x100)),
            Some(GuestAddress(0x1b00))
        );
    }

    #[test]
    fn allocate_address_not_enough_space() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First range is [0x1200:0x1a00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        // Second range is [0x1c00:0x1e00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1c00)), 0x200, Some(0x100)),
            Some(GuestAddress(0x1c00))
        );

        // There is 0x200 between the first 2 ranges.
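        // The free gap is [0x1a00:0x1c00], so at most 0x100 bytes fit starting at 0x1b00.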
        // We ask for an available address but the range is too big
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1b00)), 0x800, Some(0x100)),
            None
        );

        // We ask for an available address, with a small enough range
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1b00)), 0x100, Some(0x100)),
            Some(GuestAddress(0x1b00))
        );
    }

    #[test]
    fn allocate_address_free_and_realloc() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First range is [0x1200:0x1a00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        pool.free(GuestAddress(0x1200), 0x800);

        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );
    }

    #[test]
    fn allocate_address_free_fail_and_realloc() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First range is [0x1200:0x1a00]
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );

        // We try to free a range smaller than the allocated one.
        pool.free(GuestAddress(0x1200), 0x100);

        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            None
        );
    }

    #[test]
    fn allocate_address_fail_free_and_realloc() {
        let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();

        // First allocation fails
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x2000, Some(0x100)),
            None
        );

        // We try to free a range that was not allocated.
        pool.free(GuestAddress(0x1200), 0x2000);

        // Now we try an allocation that should succeed.
        assert_eq!(
            pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
            Some(GuestAddress(0x1200))
        );
    }
}