xref: /cloud-hypervisor/vmm/src/igvm/loader.rs (revision 19d36c765fdf00be749d95b3e61028bc302d6d73)
1 // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
2 //
3 // Copyright © 2023, Microsoft Corporation
4 //
5 use range_map_vec::{Entry, RangeMap};
6 use thiserror::Error;
7 use vm_memory::bitmap::AtomicBitmap;
8 use vm_memory::{
9     Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
10     GuestMemoryRegion,
11 };
12 
13 use crate::igvm::{BootPageAcceptance, StartupMemoryType, HV_PAGE_SIZE};
14 
/// Tracks the guest memory info/layout while IGVM pages are imported,
/// so each import can be checked against the ranges already accepted
/// (rejecting overlapping imports).
/// Adds up the total bytes written to the guest memory.
pub struct Loader {
    /// Guest memory the IGVM pages are written into.
    memory: GuestMemoryAtomic<GuestMemoryMmap<AtomicBitmap>>,
    /// Already-accepted page ranges (inclusive page-number ranges) with their
    /// acceptance type; used to detect duplicate/overlapping imports.
    accepted_ranges: RangeMap<u64, BootPageAcceptance>,
    /// Running total of bytes imported so far, counted in whole pages
    /// (including any zero padding of partially-filled pages).
    bytes_written: u64,
}
23 
/// A contiguous range of guest pages imported with a single acceptance type.
///
/// Returned inside [`Error::OverlapsExistingRegion`] to describe the
/// previously accepted range that a new import collides with.
#[derive(Debug)]
pub struct ImportRegion {
    /// First page number of the region (in `HV_PAGE_SIZE` units).
    pub page_base: u64,
    /// Number of pages in the region.
    pub page_count: u64,
    /// Acceptance type the pages were imported with.
    pub acceptance: BootPageAcceptance,
}
30 
/// Errors that can occur while loading IGVM content into guest memory.
#[derive(Debug, Error)]
pub enum Error {
    /// The requested page range overlaps a range that was already accepted.
    #[error("overlaps with existing import region {0:?}")]
    OverlapsExistingRegion(ImportRegion),
    /// Guest memory could not satisfy the request (write failure, or the
    /// startup range is not backed by RAM).
    #[error("memory unavailable")]
    MemoryUnavailable,
    /// Fewer bytes were written to guest memory than were supplied.
    #[error("failed to import pages")]
    ImportPagesFailed,
    /// A VP context memory descriptor is invalid.
    // NOTE(review): not constructed in this file; presumably raised by the
    // VP-context handling code — verify at call sites.
    #[error("invalid vp context memory")]
    InvalidVpContextMemory(&'static str),
    /// The supplied data does not fit in the declared page range.
    #[error("data larger than imported region")]
    DataTooLarge,
}
44 
45 impl Loader {
46     pub fn new(memory: GuestMemoryAtomic<GuestMemoryMmap<AtomicBitmap>>) -> Loader {
47         Loader {
48             memory,
49             accepted_ranges: RangeMap::new(),
50             bytes_written: 0,
51         }
52     }
53 
54     /// Accept a new page range with a given acceptance into the map of accepted ranges.
55     pub fn accept_new_range(
56         &mut self,
57         page_base: u64,
58         page_count: u64,
59         acceptance: BootPageAcceptance,
60     ) -> Result<(), Error> {
61         let page_end = page_base + page_count - 1;
62         match self.accepted_ranges.entry(page_base..=page_end) {
63             Entry::Overlapping(entry) => {
64                 let &(overlap_start, overlap_end, overlap_acceptance) = entry.get();
65 
66                 Err(Error::OverlapsExistingRegion(ImportRegion {
67                     page_base: overlap_start,
68                     page_count: overlap_end - overlap_start + 1,
69                     acceptance: overlap_acceptance,
70                 }))
71             }
72             Entry::Vacant(entry) => {
73                 entry.insert(acceptance);
74                 Ok(())
75             }
76         }
77     }
78 
79     pub fn import_pages(
80         &mut self,
81         page_base: u64,
82         page_count: u64,
83         acceptance: BootPageAcceptance,
84         data: &[u8],
85     ) -> Result<(), Error> {
86         // Once we are here at this point, we have a page with
87         // some data or empty, empty does not mean there is no data,
88         // it rather means it's full of zeros. We can skip writing the
89         // data as the guest page is already zeroed. So we return with
90         // updating the bytes_written variable
91         if data.is_empty() {
92             self.bytes_written += page_count * HV_PAGE_SIZE;
93             return Ok(());
94         }
95         // Page count must be larger or equal to data.
96         if page_count * HV_PAGE_SIZE < data.len() as u64 {
97             return Err(Error::DataTooLarge);
98         }
99 
100         // Track accepted ranges for duplicate imports.
101         self.accept_new_range(page_base, page_count, acceptance)?;
102 
103         let bytes_written = self
104             .memory
105             .memory()
106             .write(data, GuestAddress(page_base * HV_PAGE_SIZE))
107             .map_err(|_e| {
108                 debug!("Importing pages failed due to MemoryError");
109                 Error::MemoryUnavailable
110             })?;
111 
112         // A page could be partially filled and the rest of the content is zero.
113         // Our IGVM generation tool only fills data here if there is some data without zeros.
114         // Rest of them are padded. We only write data without padding and compare whether we
115         // complete writing the buffer content. Still it's a full page and update the variable
116         // with length of the page.
117         if bytes_written != data.len() {
118             return Err(Error::ImportPagesFailed);
119         }
120         self.bytes_written += page_count * HV_PAGE_SIZE;
121         Ok(())
122     }
123 
124     pub fn verify_startup_memory_available(
125         &mut self,
126         page_base: u64,
127         page_count: u64,
128         memory_type: StartupMemoryType,
129     ) -> Result<(), Error> {
130         if memory_type != StartupMemoryType::Ram {
131             return Err(Error::MemoryUnavailable);
132         }
133 
134         let mut memory_found = false;
135 
136         for range in self.memory.memory().iter() {
137             // Today, the memory layout only describes normal ram and mmio. Thus the memory
138             // request must live completely within a single range, since any gaps are mmio.
139             let base_address = page_base * HV_PAGE_SIZE;
140             let end_address = base_address + (page_count * HV_PAGE_SIZE) - 1;
141 
142             if base_address >= range.start_addr().0 && base_address < range.last_addr().0 {
143                 if end_address > range.last_addr().0 {
144                     debug!("startup memory end bigger than the current range");
145                     return Err(Error::MemoryUnavailable);
146                 }
147 
148                 memory_found = true;
149             }
150         }
151 
152         if memory_found {
153             Ok(())
154         } else {
155             debug!("no valid memory range available for startup memory verify");
156             Err(Error::MemoryUnavailable)
157         }
158     }
159 }
160