xref: /cloud-hypervisor/vmm/src/igvm/loader.rs (revision 3ce0fef7fd546467398c914dbc74d8542e45cf6f)
1 // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
2 //
3 // Copyright © 2023, Microsoft Corporation
4 //
5 use crate::igvm::{BootPageAcceptance, StartupMemoryType, HV_PAGE_SIZE};
6 use range_map_vec::{Entry, RangeMap};
7 use thiserror::Error;
8 use vm_memory::bitmap::AtomicBitmap;
9 use vm_memory::{
10     Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
11     GuestMemoryRegion,
12 };
13 
/// Structure to hold the guest memory info/layout to check
/// if the memory is accepted within the layout.
/// Adds up the total bytes written to the guest memory.
pub struct Loader {
    // Guest memory the loader writes imported pages into.
    memory: GuestMemoryAtomic<GuestMemoryMmap<AtomicBitmap>>,
    // Page ranges already accepted (keys are page numbers, i.e. guest
    // address / HV_PAGE_SIZE, inclusive), mapped to their acceptance type.
    accepted_ranges: RangeMap<u64, BootPageAcceptance>,
    // Running total of bytes successfully written into guest memory.
    bytes_written: u64,
}
22 
/// A contiguous range of guest pages imported with a single acceptance type.
#[derive(Debug)]
pub struct ImportRegion {
    /// First page number of the region (guest address / HV_PAGE_SIZE).
    pub page_base: u64,
    /// Number of pages in the region.
    pub page_count: u64,
    /// Acceptance the pages were imported with.
    pub acceptance: BootPageAcceptance,
}
29 
/// Errors returned by the IGVM [`Loader`].
#[derive(Debug, Error)]
pub enum Error {
    /// The requested page range overlaps a range that was already accepted.
    #[error("overlaps with existing import region {0:?}")]
    OverlapsExistingRegion(ImportRegion),
    /// The guest memory could not be written, or the requested startup
    /// memory does not fall within a known RAM range.
    #[error("memory unavailable")]
    MemoryUnavailable,
    /// Fewer bytes were written to guest memory than expected.
    #[error("failed to import pages")]
    ImportPagesFailed,
    /// A VP context structure was malformed; the payload names the problem.
    #[error("invalid vp context memory")]
    InvalidVpContextMemory(&'static str),
    /// The supplied data does not fit in the declared page range.
    #[error("data larger than imported region")]
    DataTooLarge,
}
43 
44 impl Loader {
45     pub fn new(memory: GuestMemoryAtomic<GuestMemoryMmap<AtomicBitmap>>) -> Loader {
46         Loader {
47             memory,
48             accepted_ranges: RangeMap::new(),
49             bytes_written: 0,
50         }
51     }
52 
53     /// Accept a new page range with a given acceptance into the map of accepted ranges.
54     pub fn accept_new_range(
55         &mut self,
56         page_base: u64,
57         page_count: u64,
58         acceptance: BootPageAcceptance,
59     ) -> Result<(), Error> {
60         let page_end = page_base + page_count - 1;
61         match self.accepted_ranges.entry(page_base..=page_end) {
62             Entry::Overlapping(entry) => {
63                 let &(overlap_start, overlap_end, overlap_acceptance) = entry.get();
64 
65                 Err(Error::OverlapsExistingRegion(ImportRegion {
66                     page_base: overlap_start,
67                     page_count: overlap_end - overlap_start + 1,
68                     acceptance: overlap_acceptance,
69                 }))
70             }
71             Entry::Vacant(entry) => {
72                 entry.insert(acceptance);
73                 Ok(())
74             }
75         }
76     }
77 
78     pub fn import_pages(
79         &mut self,
80         page_base: u64,
81         page_count: u64,
82         acceptance: BootPageAcceptance,
83         data: &[u8],
84     ) -> Result<(), Error> {
85         // Page count must be larger or equal to data.
86         if page_count * HV_PAGE_SIZE < data.len() as u64 {
87             return Err(Error::DataTooLarge);
88         }
89 
90         // Track accepted ranges for duplicate imports.
91         self.accept_new_range(page_base, page_count, acceptance)?;
92 
93         let bytes_written = self
94             .memory
95             .memory()
96             .write(data, GuestAddress(page_base * HV_PAGE_SIZE))
97             .map_err(|_e| {
98                 debug!("Importing pages failed due to MemoryError");
99                 Error::MemoryUnavailable
100             })?;
101         if bytes_written != (page_count * HV_PAGE_SIZE) as usize {
102             return Err(Error::ImportPagesFailed);
103         }
104         self.bytes_written += bytes_written as u64;
105         Ok(())
106     }
107 
108     pub fn verify_startup_memory_available(
109         &mut self,
110         page_base: u64,
111         page_count: u64,
112         memory_type: StartupMemoryType,
113     ) -> Result<(), Error> {
114         if memory_type != StartupMemoryType::Ram {
115             return Err(Error::MemoryUnavailable);
116         }
117 
118         let mut memory_found = false;
119 
120         for range in self.memory.memory().iter() {
121             // Today, the memory layout only describes normal ram and mmio. Thus the memory
122             // request must live completely within a single range, since any gaps are mmio.
123             let base_address = page_base * HV_PAGE_SIZE;
124             let end_address = base_address + (page_count * HV_PAGE_SIZE) - 1;
125 
126             if base_address >= range.start_addr().0 && base_address < range.last_addr().0 {
127                 if end_address > range.last_addr().0 {
128                     debug!("startup memory end bigger than the current range");
129                     return Err(Error::MemoryUnavailable);
130                 }
131 
132                 memory_found = true;
133             }
134         }
135 
136         if memory_found {
137             Ok(())
138         } else {
139             debug!("no valid memory range available for startup memory verify");
140             Err(Error::MemoryUnavailable)
141         }
142     }
143 }
144