// Copyright 2020 Arm Limited (or its affiliates). All rights reserved.
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

/// Module for the flattened device tree.
pub mod fdt;
/// Layout for this aarch64 system.
pub mod layout;
/// Module for system register definitions.
pub mod regs;
/// Module for loading the UEFI binary.
pub mod uefi;

pub use self::fdt::DeviceInfoForFdt;
use crate::{DeviceType, GuestMemoryMmap, NumaNodes, PciSpaceInfo, RegionType};
use hypervisor::arch::aarch64::gic::Vgic;
use log::{log_enabled, Level};
use std::collections::HashMap;
use std::convert::TryInto;
use std::fmt::Debug;
use std::sync::{Arc, Mutex};
use vm_memory::{Address, GuestAddress, GuestMemory, GuestUsize};

/// Errors thrown while configuring the aarch64 system.
#[derive(Debug)]
pub enum Error {
    /// Failed to create an FDT.
    SetupFdt,

    /// Failed to write the FDT to memory.
    WriteFdtToMemory(fdt::Error),

    /// Failed to create a GIC.
    SetupGic,

    /// Failed to compute the initramfs address.
    InitramfsAddress,

    /// Error configuring the general-purpose registers.
    RegsConfiguration(hypervisor::HypervisorCpuError),

    /// Error configuring the MPIDR register.
    VcpuRegMpidr(hypervisor::HypervisorCpuError),

    /// Error initializing the PMU for a vCPU.
    VcpuInitPmu,
}

impl From<Error> for super::Error {
    fn from(e: Error) -> super::Error {
        super::Error::PlatformSpecific(e)
    }
}

/// Specifies the entry point address where the guest must start
/// executing code.
#[derive(Debug, Copy, Clone)]
pub struct EntryPoint {
    /// Address in guest memory where the guest must start execution.
    pub entry_addr: GuestAddress,
}

/// Configure the specified vCPU and return its MPIDR.
pub fn configure_vcpu(
    vcpu: &Arc<dyn hypervisor::Vcpu>,
    id: u8,
    kernel_entry_point: Option<EntryPoint>,
) -> super::Result<u64> {
    if let Some(kernel_entry_point) = kernel_entry_point {
        vcpu.setup_regs(
            id,
            kernel_entry_point.entry_addr.raw_value(),
            super::layout::FDT_START.raw_value(),
        )
        .map_err(Error::RegsConfiguration)?;
    }

    let mpidr = vcpu
        .get_sys_reg(regs::MPIDR_EL1)
        .map_err(Error::VcpuRegMpidr)?;
    Ok(mpidr)
}

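/// Returns the guest memory regions for this aarch64 layout: the reserved
/// UEFI/GIC range, the 32-bit MMIO space and the PCIe MMCONFIG space below
/// 1 GiB, the RAM starting at `layout::RAM_START`, the 32-bit reserved hole,
/// and, when the requested size does not fit below that hole, the remaining
/// RAM placed above 4 GiB.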
pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, RegionType)> {
    let mut regions = vec![
        // 0 MiB ~ 256 MiB: UEFI, GIC and legacy devices
        (
            GuestAddress(0),
            layout::MEM_32BIT_DEVICES_START.0 as usize,
            RegionType::Reserved,
        ),
        // 256 MiB ~ 768 MiB: MMIO space
        (
            layout::MEM_32BIT_DEVICES_START,
            layout::MEM_32BIT_DEVICES_SIZE as usize,
            RegionType::SubRegion,
        ),
        // 768 MiB ~ 1 GiB: reserved. The leading 256 MiB is for the PCIe MMCONFIG space
        (
            layout::PCI_MMCONFIG_START,
            layout::PCI_MMCONFIG_SIZE as usize,
            RegionType::Reserved,
        ),
    ];

    let ram_32bit_space_size =
        layout::MEM_32BIT_RESERVED_START.unchecked_offset_from(layout::RAM_START);

    // RAM space
    // Case 1: guest memory fits before the gap
    if size as u64 <= ram_32bit_space_size {
        regions.push((layout::RAM_START, size as usize, RegionType::Ram));
    // Case 2: guest memory extends beyond the gap
    } else {
        // Push the memory before the gap
        regions.push((
            layout::RAM_START,
            ram_32bit_space_size as usize,
            RegionType::Ram,
        ));
        // The remaining memory is placed above 4 GiB
        regions.push((
            layout::RAM_64BIT_START,
            (size - ram_32bit_space_size) as usize,
            RegionType::Ram,
        ));
    }

    // Add the 32-bit reserved memory hole as a reserved region
    regions.push((
        layout::MEM_32BIT_RESERVED_START,
        layout::MEM_32BIT_RESERVED_SIZE as usize,
        RegionType::Reserved,
    ));

    regions
}

/// Configures the system. Should be called once per VM before starting the
/// vCPU threads.
#[allow(clippy::too_many_arguments)]
pub fn configure_system<T: DeviceInfoForFdt + Clone + Debug, S: ::std::hash::BuildHasher>(
    guest_mem: &GuestMemoryMmap,
    cmdline: &str,
    vcpu_mpidr: Vec<u64>,
    vcpu_topology: Option<(u8, u8, u8)>,
    device_info: &HashMap<(DeviceType, String), T, S>,
    initrd: &Option<super::InitramfsConfig>,
    pci_space_info: &[PciSpaceInfo],
    virtio_iommu_bdf: Option<u32>,
    gic_device: &Arc<Mutex<dyn Vgic>>,
    numa_nodes: &NumaNodes,
    pmu_supported: bool,
) -> super::Result<()> {
    let fdt_final = fdt::create_fdt(
        guest_mem,
        cmdline,
        vcpu_mpidr,
        vcpu_topology,
        device_info,
        gic_device,
        initrd,
        pci_space_info,
        numa_nodes,
        virtio_iommu_bdf,
        pmu_supported,
    )
    .map_err(|_| Error::SetupFdt)?;

    if log_enabled!(Level::Debug) {
        fdt::print_fdt(&fdt_final);
    }

    fdt::write_fdt_to_memory(fdt_final, guest_mem).map_err(Error::WriteFdtToMemory)?;

    Ok(())
}

/// Returns the memory address where the initramfs could be loaded.
pub fn initramfs_load_addr(
    guest_mem: &GuestMemoryMmap,
    initramfs_size: usize,
) -> super::Result<u64> {
    // Round the initramfs size up to the guest page size.
    let round_to_pagesize = |size| (size + (super::PAGE_SIZE - 1)) & !(super::PAGE_SIZE - 1);
    match guest_mem
        .last_addr()
        .checked_sub(round_to_pagesize(initramfs_size) as u64 - 1)
    {
        Some(offset) => {
            if guest_mem.address_in_range(offset) {
                Ok(offset.raw_value())
            } else {
                Err(super::Error::PlatformSpecific(Error::InitramfsAddress))
            }
        }
        None => Err(super::Error::PlatformSpecific(Error::InitramfsAddress)),
    }
}

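/// Returns the host CPU physical address size in bits, as reported by the
/// hypervisor's IPA limit, falling back to 40 bits when the host kernel
/// cannot report it.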
pub fn get_host_cpu_phys_bits() -> u8 {
    // A temporary hypervisor instance, created only to query the host IPA
    // size and dropped right after the query.
    let hv = hypervisor::new().unwrap();
    let host_cpu_phys_bits = hv.get_host_ipa_limit().try_into().unwrap();
    if host_cpu_phys_bits == 0 {
        // The host kernel does not support `get_host_ipa_limit`;
        // fall back to the default value of 40.
        40
    } else {
        host_cpu_phys_bits
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_arch_memory_regions_dram_2gb() {
        let regions = arch_memory_regions((1usize << 31) as u64); // 2 GiB
        assert_eq!(5, regions.len());
        assert_eq!(layout::RAM_START, regions[3].0);
        assert_eq!(1usize << 31, regions[3].1);
        assert_eq!(RegionType::Ram, regions[3].2);
        assert_eq!(RegionType::Reserved, regions[4].2);
    }

    #[test]
    fn test_arch_memory_regions_dram_4gb() {
        let regions = arch_memory_regions((1usize << 32) as u64); // 4 GiB
        let ram_32bit_space_size =
            layout::MEM_32BIT_RESERVED_START.unchecked_offset_from(layout::RAM_START) as usize;
        assert_eq!(6, regions.len());
        assert_eq!(layout::RAM_START, regions[3].0);
        assert_eq!(ram_32bit_space_size, regions[3].1);
        assert_eq!(RegionType::Ram, regions[3].2);
        assert_eq!(RegionType::Reserved, regions[5].2);
        assert_eq!(RegionType::Ram, regions[4].2);
        assert_eq!((1usize << 32) - ram_32bit_space_size, regions[4].1);
    }
}

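// A small additional check, sketched from the region-splitting logic in
// `arch_memory_regions` above. The module and test below are illustrative
// additions, not part of the original test suite: when the guest memory
// exactly fills the space below the 32-bit reserved hole, all RAM stays in a
// single region and nothing is placed above 4 GiB.
#[cfg(test)]
mod boundary_tests {
    use super::*;

    #[test]
    fn test_arch_memory_regions_dram_fits_32bit_space() {
        let ram_32bit_space_size =
            layout::MEM_32BIT_RESERVED_START.unchecked_offset_from(layout::RAM_START);
        let regions = arch_memory_regions(ram_32bit_space_size);
        // Three fixed regions below 1 GiB, one RAM region, one reserved hole.
        assert_eq!(5, regions.len());
        assert_eq!(layout::RAM_START, regions[3].0);
        assert_eq!(ram_32bit_space_size as usize, regions[3].1);
        assert_eq!(RegionType::Ram, regions[3].2);
        assert_eq!(layout::MEM_32BIT_RESERVED_START, regions[4].0);
        assert_eq!(RegionType::Reserved, regions[4].2);
    }
}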