// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2023, Microsoft Corporation
//
use crate::igvm::{BootPageAcceptance, StartupMemoryType, HV_PAGE_SIZE};
use log::debug;
use range_map_vec::{Entry, RangeMap};
use thiserror::Error;
use vm_memory::bitmap::AtomicBitmap;
use vm_memory::{
    Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
    GuestMemoryRegion,
};

/// Holds the guest memory layout so that imports can be checked against it,
/// tracks which page ranges have already been accepted, and accumulates the
/// total number of bytes written to guest memory.
pub struct Loader {
    memory: GuestMemoryAtomic<GuestMemoryMmap<AtomicBitmap>>,
    accepted_ranges: RangeMap<u64, BootPageAcceptance>,
    bytes_written: u64,
}

#[derive(Debug)]
pub struct ImportRegion {
    pub page_base: u64,
    pub page_count: u64,
    pub acceptance: BootPageAcceptance,
}

#[derive(Debug, Error)]
pub enum Error {
    #[error("overlaps with existing import region {0:?}")]
    OverlapsExistingRegion(ImportRegion),
    #[error("memory unavailable")]
    MemoryUnavailable,
    #[error("failed to import pages")]
    ImportPagesFailed,
    #[error("invalid vp context memory")]
    InvalidVpContextMemory(&'static str),
    #[error("data larger than imported region")]
    DataTooLarge,
}

impl Loader {
    pub fn new(memory: GuestMemoryAtomic<GuestMemoryMmap<AtomicBitmap>>) -> Loader {
        Loader {
            memory,
            accepted_ranges: RangeMap::new(),
            bytes_written: 0,
        }
    }

    /// Accept a new page range with a given acceptance into the map of accepted ranges.
    pub fn accept_new_range(
        &mut self,
        page_base: u64,
        page_count: u64,
        acceptance: BootPageAcceptance,
    ) -> Result<(), Error> {
        let page_end = page_base + page_count - 1;
        match self.accepted_ranges.entry(page_base..=page_end) {
            Entry::Overlapping(entry) => {
                let &(overlap_start, overlap_end, overlap_acceptance) = entry.get();

                Err(Error::OverlapsExistingRegion(ImportRegion {
                    page_base: overlap_start,
                    page_count: overlap_end - overlap_start + 1,
                    acceptance: overlap_acceptance,
                }))
            }
            Entry::Vacant(entry) => {
                entry.insert(acceptance);
                Ok(())
            }
        }
    }
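    /// Import pages into guest memory, writing `data` (zero-padded up to the
    /// page count) and recording the range as accepted.
    ///
    /// A minimal sketch of the intended call pattern, assuming a
    /// `BootPageAcceptance::Exclusive` variant and a `data` buffer
    /// (hypothetical values for illustration):
    ///
    /// ```ignore
    /// // Import two pages at guest page frame 0x100 from `data`; any
    /// // remainder of the range stays zeroed.
    /// loader.import_pages(0x100, 2, BootPageAcceptance::Exclusive, &data)?;
    /// ```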
    pub fn import_pages(
        &mut self,
        page_base: u64,
        page_count: u64,
        acceptance: BootPageAcceptance,
        data: &[u8],
    ) -> Result<(), Error> {
        // An empty data slice does not mean there are no pages: it means the
        // pages contain nothing but zeros. Guest memory is already zeroed,
        // so skip the write and only account for the pages.
        if data.is_empty() {
            self.bytes_written += page_count * HV_PAGE_SIZE;
            return Ok(());
        }
        // The data must fit within the requested page range.
        if page_count * HV_PAGE_SIZE < data.len() as u64 {
            return Err(Error::DataTooLarge);
        }

        // Track accepted ranges to catch duplicate imports.
        self.accept_new_range(page_base, page_count, acceptance)?;

        let bytes_written = self
            .memory
            .memory()
            .write(data, GuestAddress(page_base * HV_PAGE_SIZE))
            .map_err(|_e| {
                debug!("Importing pages failed due to MemoryError");
                Error::MemoryUnavailable
            })?;

        // A page may be only partially filled, with the remainder of the
        // range being zero padding: the IGVM generation tool only emits the
        // non-zero data for a range. Only that unpadded data is written here,
        // so verify the whole buffer made it into guest memory, but account
        // for the full page range in bytes_written.
        if bytes_written != data.len() {
            return Err(Error::ImportPagesFailed);
        }
        self.bytes_written += page_count * HV_PAGE_SIZE;
        Ok(())
    }

    pub fn verify_startup_memory_available(
        &mut self,
        page_base: u64,
        page_count: u64,
        memory_type: StartupMemoryType,
    ) -> Result<(), Error> {
        if memory_type != StartupMemoryType::Ram {
            return Err(Error::MemoryUnavailable);
        }

        let mut memory_found = false;

        for range in self.memory.memory().iter() {
            // Today, the memory layout only describes normal RAM and MMIO.
            // Thus the requested range must live completely within a single
            // region, since any gap between regions is MMIO.
            let base_address = page_base * HV_PAGE_SIZE;
            let end_address = base_address + (page_count * HV_PAGE_SIZE) - 1;

            if base_address >= range.start_addr().0 && base_address < range.last_addr().0 {
                if end_address > range.last_addr().0 {
                    debug!("startup memory end bigger than the current range");
                    return Err(Error::MemoryUnavailable);
                }

                memory_found = true;
            }
        }

        if memory_found {
            Ok(())
        } else {
            debug!("no valid memory range available for startup memory verify");
            Err(Error::MemoryUnavailable)
        }
    }
}
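
// A minimal usage sketch of the loader as a test. It assumes
// `BootPageAcceptance` has an `Exclusive` variant and that `HV_PAGE_SIZE`
// is 0x1000; adjust to the actual definitions in crate::igvm if they differ.
#[cfg(test)]
mod tests {
    use super::*;

    fn test_memory() -> GuestMemoryAtomic<GuestMemoryMmap<AtomicBitmap>> {
        // A single 64 KiB RAM region starting at guest physical address 0.
        let mmap = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)])
            .expect("failed to create guest memory");
        GuestMemoryAtomic::new(mmap)
    }

    #[test]
    fn overlapping_import_is_rejected() {
        let mut loader = Loader::new(test_memory());

        // Importing pages [0, 4) succeeds and registers the range.
        loader
            .import_pages(0, 4, BootPageAcceptance::Exclusive, &[0xAA; 0x1000])
            .unwrap();

        // A second import touching page 3 overlaps the accepted range and
        // must fail with OverlapsExistingRegion.
        assert!(matches!(
            loader.import_pages(3, 1, BootPageAcceptance::Exclusive, &[0xBB; 16]),
            Err(Error::OverlapsExistingRegion(_))
        ));
    }
}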