1 // Copyright 2020 Arm Limited (or its affiliates). All rights reserved. 2 // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 // SPDX-License-Identifier: Apache-2.0 4 // 5 // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. 6 // Use of this source code is governed by a BSD-style license that can be 7 // found in the THIRD-PARTY file. 8 9 use crate::{NumaNodes, PciSpaceInfo}; 10 use byteorder::{BigEndian, ByteOrder}; 11 use hypervisor::arch::aarch64::gic::Vgic; 12 use std::cmp; 13 use std::collections::HashMap; 14 use std::ffi::CStr; 15 use std::fmt::Debug; 16 use std::result; 17 use std::str; 18 use std::sync::{Arc, Mutex}; 19 20 use super::super::DeviceType; 21 use super::super::GuestMemoryMmap; 22 use super::super::InitramfsConfig; 23 use super::layout::{ 24 IRQ_BASE, MEM_32BIT_DEVICES_SIZE, MEM_32BIT_DEVICES_START, MEM_PCI_IO_SIZE, MEM_PCI_IO_START, 25 PCI_HIGH_BASE, PCI_MMIO_CONFIG_SIZE_PER_SEGMENT, 26 }; 27 use std::fs; 28 use std::path::Path; 29 use vm_fdt::{FdtWriter, FdtWriterResult}; 30 use vm_memory::{Address, Bytes, GuestMemory, GuestMemoryError, GuestMemoryRegion}; 31 32 // This is a value for uniquely identifying the FDT node declaring the interrupt controller. 33 const GIC_PHANDLE: u32 = 1; 34 // This is a value for uniquely identifying the FDT node declaring the MSI controller. 35 const MSI_PHANDLE: u32 = 2; 36 // This is a value for uniquely identifying the FDT node containing the clock definition. 37 const CLOCK_PHANDLE: u32 = 3; 38 // This is a value for uniquely identifying the FDT node containing the gpio controller. 39 const GPIO_PHANDLE: u32 = 4; 40 // This is a value for virtio-iommu. Now only one virtio-iommu device is supported. 41 const VIRTIO_IOMMU_PHANDLE: u32 = 5; 42 // NOTE: Keep FIRST_VCPU_PHANDLE the last PHANDLE defined. 43 // This is a value for uniquely identifying the FDT node containing the first vCPU. 44 // The last number of vCPU phandle depends on the number of vCPUs. 45 const FIRST_VCPU_PHANDLE: u32 = 8; 46 47 // This is a value for uniquely identifying the FDT node containing the L2 cache info 48 const L2_CACHE_PHANDLE: u32 = 6; 49 // This is a value for uniquely identifying the FDT node containing the L3 cache info 50 const L3_CACHE_PHANDLE: u32 = 7; 51 // Read the documentation specified when appending the root node to the FDT. 52 const ADDRESS_CELLS: u32 = 0x2; 53 const SIZE_CELLS: u32 = 0x2; 54 55 // As per kvm tool and 56 // https://www.kernel.org/doc/Documentation/devicetree/bindings/interrupt-controller/arm%2Cgic.txt 57 // Look for "The 1st cell..." 58 const GIC_FDT_IRQ_TYPE_SPI: u32 = 0; 59 const GIC_FDT_IRQ_TYPE_PPI: u32 = 1; 60 61 // From https://elixir.bootlin.com/linux/v4.9.62/source/include/dt-bindings/interrupt-controller/irq.h#L17 62 const IRQ_TYPE_EDGE_RISING: u32 = 1; 63 const IRQ_TYPE_LEVEL_HI: u32 = 4; 64 65 // PMU PPI interrupt number 66 pub const AARCH64_PMU_IRQ: u32 = 7; 67 68 // Keys and Buttons 69 // System Power Down 70 const KEY_POWER: u32 = 116; 71 72 /// Trait for devices to be added to the Flattened Device Tree. 73 pub trait DeviceInfoForFdt { 74 /// Returns the address where this device will be loaded. 75 fn addr(&self) -> u64; 76 /// Returns the associated interrupt for this device. 77 fn irq(&self) -> u32; 78 /// Returns the amount of memory that needs to be reserved for this device. 79 fn length(&self) -> u64; 80 } 81 82 /// Errors thrown while configuring the Flattened Device Tree for aarch64. 83 #[derive(Debug)] 84 pub enum Error { 85 /// Failure in writing FDT in memory. 86 WriteFdtToMemory(GuestMemoryError), 87 } 88 type Result<T> = result::Result<T, Error>; 89 90 pub enum CacheLevel { 91 /// L1 data cache 92 L1D = 0, 93 /// L1 instruction cache 94 L1I = 1, 95 /// L2 cache 96 L2 = 2, 97 /// L3 cache 98 L3 = 3, 99 } 100 101 /// NOTE: cache size file directory example, 102 /// "/sys/devices/system/cpu/cpu0/cache/index0/size". 103 pub fn get_cache_size(cache_level: CacheLevel) -> u32 { 104 let mut file_directory: String = "/sys/devices/system/cpu/cpu0/cache".to_string(); 105 match cache_level { 106 CacheLevel::L1D => file_directory += "/index0/size", 107 CacheLevel::L1I => file_directory += "/index1/size", 108 CacheLevel::L2 => file_directory += "/index2/size", 109 CacheLevel::L3 => file_directory += "/index3/size", 110 } 111 112 let file_path = Path::new(&file_directory); 113 if !file_path.exists() { 114 warn!("File: {} does not exist.", file_directory); 115 0 116 } else { 117 info!("File: {} exist.", file_directory); 118 119 let src = fs::read_to_string(file_directory).expect("File not exists or file corrupted."); 120 // The content of the file is as simple as a size, like: "32K" 121 let src = src.trim(); 122 let src_digits: u32 = src[0..src.len() - 1].parse().unwrap(); 123 let src_unit = &src[src.len() - 1..]; 124 125 src_digits 126 * match src_unit { 127 "K" => 1024, 128 "M" => 1024u32.pow(2), 129 "G" => 1024u32.pow(3), 130 _ => 1, 131 } 132 } 133 } 134 135 /// NOTE: coherency_line_size file directory example, 136 /// "/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size". 137 pub fn get_cache_coherency_line_size(cache_level: CacheLevel) -> u32 { 138 let mut file_directory: String = "/sys/devices/system/cpu/cpu0/cache".to_string(); 139 match cache_level { 140 CacheLevel::L1D => file_directory += "/index0/coherency_line_size", 141 CacheLevel::L1I => file_directory += "/index1/coherency_line_size", 142 CacheLevel::L2 => file_directory += "/index2/coherency_line_size", 143 CacheLevel::L3 => file_directory += "/index3/coherency_line_size", 144 } 145 146 let file_path = Path::new(&file_directory); 147 if !file_path.exists() { 148 warn!("File: {} does not exist.", file_directory); 149 0 150 } else { 151 info!("File: {} exist.", file_directory); 152 153 let src = fs::read_to_string(file_directory).expect("File not exists or file corrupted."); 154 src.trim().parse::<u32>().unwrap() 155 } 156 } 157 158 /// NOTE: number_of_sets file directory example, 159 /// "/sys/devices/system/cpu/cpu0/cache/index0/number_of_sets". 160 pub fn get_cache_number_of_sets(cache_level: CacheLevel) -> u32 { 161 let mut file_directory: String = "/sys/devices/system/cpu/cpu0/cache".to_string(); 162 match cache_level { 163 CacheLevel::L1D => file_directory += "/index0/number_of_sets", 164 CacheLevel::L1I => file_directory += "/index1/number_of_sets", 165 CacheLevel::L2 => file_directory += "/index2/number_of_sets", 166 CacheLevel::L3 => file_directory += "/index3/number_of_sets", 167 } 168 169 let file_path = Path::new(&file_directory); 170 if !file_path.exists() { 171 warn!("File: {} does not exist.", file_directory); 172 0 173 } else { 174 info!("File: {} exist.", file_directory); 175 176 let src = fs::read_to_string(file_directory).expect("File not exists or file corrupted."); 177 src.trim().parse::<u32>().unwrap() 178 } 179 } 180 181 /// NOTE: shared_cpu_list file directory example, 182 /// "/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list". 183 pub fn get_cache_shared(cache_level: CacheLevel) -> bool { 184 let mut file_directory: String = "/sys/devices/system/cpu/cpu0/cache".to_string(); 185 let mut result = true; 186 187 match cache_level { 188 CacheLevel::L1D | CacheLevel::L1I => result = false, 189 CacheLevel::L2 => file_directory += "/index2/shared_cpu_list", 190 CacheLevel::L3 => file_directory += "/index3/shared_cpu_list", 191 } 192 193 if !result { 194 return false; 195 } 196 197 let file_path = Path::new(&file_directory); 198 if !file_path.exists() { 199 warn!("File: {} does not exist.", file_directory); 200 result = false; 201 } else { 202 info!("File: {} exist.", file_directory); 203 204 let src = fs::read_to_string(file_directory).expect("File not exists or file corrupted."); 205 let src = src.trim(); 206 if src.is_empty() { 207 result = false; 208 } else { 209 result = src.contains('-') || src.contains(','); 210 } 211 } 212 213 result 214 } 215 216 /// Creates the flattened device tree for this aarch64 VM. 217 #[allow(clippy::too_many_arguments)] 218 pub fn create_fdt<T: DeviceInfoForFdt + Clone + Debug, S: ::std::hash::BuildHasher>( 219 guest_mem: &GuestMemoryMmap, 220 cmdline: &str, 221 vcpu_mpidr: Vec<u64>, 222 vcpu_topology: Option<(u8, u8, u8)>, 223 device_info: &HashMap<(DeviceType, String), T, S>, 224 gic_device: &Arc<Mutex<dyn Vgic>>, 225 initrd: &Option<InitramfsConfig>, 226 pci_space_info: &[PciSpaceInfo], 227 numa_nodes: &NumaNodes, 228 virtio_iommu_bdf: Option<u32>, 229 pmu_supported: bool, 230 ) -> FdtWriterResult<Vec<u8>> { 231 // Allocate stuff necessary for the holding the blob. 232 let mut fdt = FdtWriter::new().unwrap(); 233 234 // For an explanation why these nodes were introduced in the blob take a look at 235 // https://github.com/torvalds/linux/blob/master/Documentation/devicetree/booting-without-of.txt#L845 236 // Look for "Required nodes and properties". 237 238 // Header or the root node as per above mentioned documentation. 239 let root_node = fdt.begin_node("")?; 240 fdt.property_string("compatible", "linux,dummy-virt")?; 241 // For info on #address-cells and size-cells read "Note about cells and address representation" 242 // from the above mentioned txt file. 243 fdt.property_u32("#address-cells", ADDRESS_CELLS)?; 244 fdt.property_u32("#size-cells", SIZE_CELLS)?; 245 // This is not mandatory but we use it to point the root node to the node 246 // containing description of the interrupt controller for this VM. 247 fdt.property_u32("interrupt-parent", GIC_PHANDLE)?; 248 create_cpu_nodes(&mut fdt, &vcpu_mpidr, vcpu_topology, numa_nodes)?; 249 create_memory_node(&mut fdt, guest_mem, numa_nodes)?; 250 create_chosen_node(&mut fdt, cmdline, initrd)?; 251 create_gic_node(&mut fdt, gic_device)?; 252 create_timer_node(&mut fdt)?; 253 if pmu_supported { 254 create_pmu_node(&mut fdt)?; 255 } 256 create_clock_node(&mut fdt)?; 257 create_psci_node(&mut fdt)?; 258 create_devices_node(&mut fdt, device_info)?; 259 create_pci_nodes(&mut fdt, pci_space_info, virtio_iommu_bdf)?; 260 if numa_nodes.len() > 1 { 261 create_distance_map_node(&mut fdt, numa_nodes)?; 262 } 263 264 // End Header node. 265 fdt.end_node(root_node)?; 266 267 let fdt_final = fdt.finish()?; 268 269 Ok(fdt_final) 270 } 271 272 pub fn write_fdt_to_memory(fdt_final: Vec<u8>, guest_mem: &GuestMemoryMmap) -> Result<()> { 273 // Write FDT to memory. 274 guest_mem 275 .write_slice(fdt_final.as_slice(), super::layout::FDT_START) 276 .map_err(Error::WriteFdtToMemory)?; 277 Ok(()) 278 } 279 280 // Following are the auxiliary function for creating the different nodes that we append to our FDT. 281 fn create_cpu_nodes( 282 fdt: &mut FdtWriter, 283 vcpu_mpidr: &[u64], 284 vcpu_topology: Option<(u8, u8, u8)>, 285 numa_nodes: &NumaNodes, 286 ) -> FdtWriterResult<()> { 287 // See https://github.com/torvalds/linux/blob/master/Documentation/devicetree/bindings/arm/cpus.yaml. 288 let cpus_node = fdt.begin_node("cpus")?; 289 fdt.property_u32("#address-cells", 0x1)?; 290 fdt.property_u32("#size-cells", 0x0)?; 291 292 let num_cpus = vcpu_mpidr.len(); 293 let threads_per_core = vcpu_topology.unwrap_or_default().0; 294 let cores_per_package = vcpu_topology.unwrap_or_default().1; 295 let packages = vcpu_topology.unwrap_or_default().2; 296 297 let max_cpus: u32 = (threads_per_core * cores_per_package * packages).into(); 298 299 // Add cache info. 300 // L1 Data Cache Info. 301 let mut l1_d_cache_size: u32 = 0; 302 let mut l1_d_cache_line_size: u32 = 0; 303 let mut l1_d_cache_sets: u32 = 0; 304 305 // L1 Instruction Cache Info. 306 let mut l1_i_cache_size: u32 = 0; 307 let mut l1_i_cache_line_size: u32 = 0; 308 let mut l1_i_cache_sets: u32 = 0; 309 310 // L2 Cache Info. 311 let mut l2_cache_size: u32 = 0; 312 let mut l2_cache_line_size: u32 = 0; 313 let mut l2_cache_sets: u32 = 0; 314 315 // L3 Cache Info. 316 let mut l3_cache_size: u32 = 0; 317 let mut l3_cache_line_size: u32 = 0; 318 let mut l3_cache_sets: u32 = 0; 319 320 // Cache Shared Info. 321 let mut l2_cache_shared: bool = false; 322 let mut l3_cache_shared: bool = false; 323 324 let cache_path = Path::new("/sys/devices/system/cpu/cpu0/cache"); 325 let cache_exist: bool = cache_path.exists(); 326 if !cache_exist { 327 warn!("cache sysfs system does not exist."); 328 } else { 329 info!("cache sysfs system exists."); 330 // L1 Data Cache Info. 331 l1_d_cache_size = get_cache_size(CacheLevel::L1D); 332 l1_d_cache_line_size = get_cache_coherency_line_size(CacheLevel::L1D); 333 l1_d_cache_sets = get_cache_number_of_sets(CacheLevel::L1D); 334 335 // L1 Instruction Cache Info. 336 l1_i_cache_size = get_cache_size(CacheLevel::L1I); 337 l1_i_cache_line_size = get_cache_coherency_line_size(CacheLevel::L1I); 338 l1_i_cache_sets = get_cache_number_of_sets(CacheLevel::L1I); 339 340 // L2 Cache Info. 341 l2_cache_size = get_cache_size(CacheLevel::L2); 342 l2_cache_line_size = get_cache_coherency_line_size(CacheLevel::L2); 343 l2_cache_sets = get_cache_number_of_sets(CacheLevel::L2); 344 345 // L3 Cache Info. 346 l3_cache_size = get_cache_size(CacheLevel::L3); 347 l3_cache_line_size = get_cache_coherency_line_size(CacheLevel::L3); 348 l3_cache_sets = get_cache_number_of_sets(CacheLevel::L3); 349 350 // Cache Shared Info. 351 if l2_cache_size != 0 { 352 l2_cache_shared = get_cache_shared(CacheLevel::L2); 353 } 354 if l3_cache_size != 0 { 355 l3_cache_shared = get_cache_shared(CacheLevel::L3); 356 } 357 } 358 359 for (cpu_id, mpidr) in vcpu_mpidr.iter().enumerate().take(num_cpus) { 360 let cpu_name = format!("cpu@{cpu_id:x}"); 361 let cpu_node = fdt.begin_node(&cpu_name)?; 362 fdt.property_string("device_type", "cpu")?; 363 fdt.property_string("compatible", "arm,arm-v8")?; 364 if num_cpus > 1 { 365 // This is required on armv8 64-bit. See aforementioned documentation. 366 fdt.property_string("enable-method", "psci")?; 367 } 368 // Set the field to first 24 bits of the MPIDR - Multiprocessor Affinity Register. 369 // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0488c/BABHBJCI.html. 370 fdt.property_u32("reg", (mpidr & 0x7FFFFF) as u32)?; 371 fdt.property_u32("phandle", cpu_id as u32 + FIRST_VCPU_PHANDLE)?; 372 373 // Add `numa-node-id` property if there is any numa config. 374 if numa_nodes.len() > 1 { 375 for numa_node_idx in 0..numa_nodes.len() { 376 let numa_node = numa_nodes.get(&(numa_node_idx as u32)); 377 if numa_node.unwrap().cpus.contains(&(cpu_id as u8)) { 378 fdt.property_u32("numa-node-id", numa_node_idx as u32)?; 379 } 380 } 381 } 382 383 if cache_exist && l1_d_cache_size != 0 && l1_i_cache_size != 0 { 384 // Add cache info. 385 fdt.property_u32("d-cache-size", l1_d_cache_size)?; 386 fdt.property_u32("d-cache-line-size", l1_d_cache_line_size)?; 387 fdt.property_u32("d-cache-sets", l1_d_cache_sets)?; 388 389 fdt.property_u32("i-cache-size", l1_i_cache_size)?; 390 fdt.property_u32("i-cache-line-size", l1_i_cache_line_size)?; 391 fdt.property_u32("i-cache-sets", l1_i_cache_sets)?; 392 393 if l2_cache_size != 0 && !l2_cache_shared { 394 fdt.property_u32( 395 "next-level-cache", 396 cpu_id as u32 + max_cpus + FIRST_VCPU_PHANDLE + L2_CACHE_PHANDLE, 397 )?; 398 399 let l2_cache_name = "l2-cache0"; 400 let l2_cache_node = fdt.begin_node(l2_cache_name)?; 401 // PHANDLE is used to mark device node, and PHANDLE is unique. To avoid phandle 402 // conflicts with other device nodes, consider the previous CPU PHANDLE, so the 403 // CPU L2 cache PHANDLE must start from the largest CPU PHANDLE plus 1. 404 fdt.property_u32( 405 "phandle", 406 cpu_id as u32 + max_cpus + FIRST_VCPU_PHANDLE + L2_CACHE_PHANDLE, 407 )?; 408 409 fdt.property_string("compatible", "cache")?; 410 fdt.property_u32("cache-size", l2_cache_size)?; 411 fdt.property_u32("cache-line-size", l2_cache_line_size)?; 412 fdt.property_u32("cache-sets", l2_cache_sets)?; 413 fdt.property_u32("cache-level", 2)?; 414 415 if l3_cache_size != 0 && l3_cache_shared { 416 let package_id: u32 = cpu_id as u32 / cores_per_package as u32; 417 fdt.property_u32( 418 "next-level-cache", 419 package_id 420 + num_cpus as u32 421 + max_cpus 422 + FIRST_VCPU_PHANDLE 423 + L2_CACHE_PHANDLE 424 + L3_CACHE_PHANDLE, 425 )?; 426 } 427 428 fdt.end_node(l2_cache_node)?; 429 } 430 if l2_cache_size != 0 && l2_cache_shared { 431 warn!("L2 cache shared with other cpus"); 432 } 433 } 434 435 fdt.end_node(cpu_node)?; 436 } 437 438 if cache_exist && l3_cache_size != 0 && !l2_cache_shared && l3_cache_shared { 439 let mut i: u32 = 0; 440 while i < packages.into() { 441 let l3_cache_name = "l3-cache0"; 442 let l3_cache_node = fdt.begin_node(l3_cache_name)?; 443 // ARM L3 cache is generally shared within the package (socket), so the 444 // L3 cache node pointed to by the CPU in the package has the same L3 445 // cache PHANDLE. The L3 cache phandle must start from the largest L2 446 // cache PHANDLE plus 1 to avoid duplication. 447 fdt.property_u32( 448 "phandle", 449 i + num_cpus as u32 450 + max_cpus 451 + FIRST_VCPU_PHANDLE 452 + L2_CACHE_PHANDLE 453 + L3_CACHE_PHANDLE, 454 )?; 455 456 fdt.property_string("compatible", "cache")?; 457 fdt.property_null("cache-unified")?; 458 fdt.property_u32("cache-size", l3_cache_size)?; 459 fdt.property_u32("cache-line-size", l3_cache_line_size)?; 460 fdt.property_u32("cache-sets", l3_cache_sets)?; 461 fdt.property_u32("cache-level", 3)?; 462 fdt.end_node(l3_cache_node)?; 463 464 i += 1; 465 } 466 } 467 468 if let Some(topology) = vcpu_topology { 469 let (threads_per_core, cores_per_package, packages) = topology; 470 let cpu_map_node = fdt.begin_node("cpu-map")?; 471 472 // Create device tree nodes with regard of above mapping. 473 for package_idx in 0..packages { 474 let package_name = format!("socket{package_idx:x}"); 475 let package_node = fdt.begin_node(&package_name)?; 476 477 // Cluster is the container of cores, and it is mandatory in the CPU topology. 478 // Add a default "cluster0" in each socket/package. 479 let cluster_node = fdt.begin_node("cluster0")?; 480 481 for core_idx in 0..cores_per_package { 482 let core_name = format!("core{core_idx:x}"); 483 let core_node = fdt.begin_node(&core_name)?; 484 485 for thread_idx in 0..threads_per_core { 486 let thread_name = format!("thread{thread_idx:x}"); 487 let thread_node = fdt.begin_node(&thread_name)?; 488 let cpu_idx = threads_per_core * cores_per_package * package_idx 489 + threads_per_core * core_idx 490 + thread_idx; 491 fdt.property_u32("cpu", cpu_idx as u32 + FIRST_VCPU_PHANDLE)?; 492 fdt.end_node(thread_node)?; 493 } 494 495 fdt.end_node(core_node)?; 496 } 497 fdt.end_node(cluster_node)?; 498 fdt.end_node(package_node)?; 499 } 500 fdt.end_node(cpu_map_node)?; 501 } else { 502 debug!("Boot using device tree, CPU topology is not (correctly) specified"); 503 } 504 505 fdt.end_node(cpus_node)?; 506 507 Ok(()) 508 } 509 510 fn create_memory_node( 511 fdt: &mut FdtWriter, 512 guest_mem: &GuestMemoryMmap, 513 numa_nodes: &NumaNodes, 514 ) -> FdtWriterResult<()> { 515 // See https://github.com/torvalds/linux/blob/58ae0b51506802713aa0e9956d1853ba4c722c98/Documentation/devicetree/bindings/numa.txt 516 // for NUMA setting in memory node. 517 if numa_nodes.len() > 1 { 518 for numa_node_idx in 0..numa_nodes.len() { 519 let numa_node = numa_nodes.get(&(numa_node_idx as u32)); 520 let mut mem_reg_prop: Vec<u64> = Vec::new(); 521 let mut node_memory_addr: u64 = 0; 522 // Each memory zone of numa will have its own memory node, but 523 // different numa nodes should not share same memory zones. 524 for memory_region in numa_node.unwrap().memory_regions.iter() { 525 let memory_region_start_addr: u64 = memory_region.start_addr().raw_value(); 526 let memory_region_size: u64 = memory_region.size() as u64; 527 mem_reg_prop.push(memory_region_start_addr); 528 mem_reg_prop.push(memory_region_size); 529 // Set the node address the first non-zero regison address 530 if node_memory_addr == 0 { 531 node_memory_addr = memory_region_start_addr; 532 } 533 } 534 let memory_node_name = format!("memory@{node_memory_addr:x}"); 535 let memory_node = fdt.begin_node(&memory_node_name)?; 536 fdt.property_string("device_type", "memory")?; 537 fdt.property_array_u64("reg", &mem_reg_prop)?; 538 fdt.property_u32("numa-node-id", numa_node_idx as u32)?; 539 fdt.end_node(memory_node)?; 540 } 541 } else { 542 // Note: memory regions from "GuestMemory" are sorted and non-zero sized. 543 let ram_regions = { 544 let mut ram_regions = Vec::new(); 545 let mut current_start = guest_mem 546 .iter() 547 .next() 548 .map(GuestMemoryRegion::start_addr) 549 .expect("GuestMemory must have one memory region at least") 550 .raw_value(); 551 let mut current_end = current_start; 552 553 for (start, size) in guest_mem 554 .iter() 555 .map(|m| (m.start_addr().raw_value(), m.len())) 556 { 557 if current_end == start { 558 // This zone is continuous with the previous one. 559 current_end += size; 560 } else { 561 ram_regions.push((current_start, current_end)); 562 563 current_start = start; 564 current_end = start + size; 565 } 566 } 567 568 ram_regions.push((current_start, current_end)); 569 570 ram_regions 571 }; 572 573 if ram_regions.len() > 2 { 574 panic!( 575 "There should be up to two non-continuous regions, devidided by the 576 gap at the end of 32bit address space." 577 ); 578 } 579 580 // Create the memory node for memory region before the gap 581 { 582 let (first_region_start, first_region_end) = ram_regions 583 .first() 584 .expect("There should be at last one memory region"); 585 let ram_start = super::layout::RAM_START.raw_value(); 586 let mem_32bit_reserved_start = super::layout::MEM_32BIT_RESERVED_START.raw_value(); 587 588 if !((first_region_start <= &ram_start) 589 && (first_region_end > &ram_start) 590 && (first_region_end <= &mem_32bit_reserved_start)) 591 { 592 panic!( 593 "Unexpected first memory region layout: (start: 0x{:08x}, end: 0x{:08x}). 594 ram_start: 0x{:08x}, mem_32bit_reserved_start: 0x{:08x}", 595 first_region_start, first_region_end, ram_start, mem_32bit_reserved_start 596 ); 597 } 598 599 let mem_size = first_region_end - ram_start; 600 let mem_reg_prop = [ram_start, mem_size]; 601 let memory_node_name = format!("memory@{:x}", ram_start); 602 let memory_node = fdt.begin_node(&memory_node_name)?; 603 fdt.property_string("device_type", "memory")?; 604 fdt.property_array_u64("reg", &mem_reg_prop)?; 605 fdt.end_node(memory_node)?; 606 } 607 608 // Create the memory map entry for memory region after the gap if any 609 if let Some((second_region_start, second_region_end)) = ram_regions.get(1) { 610 let ram_64bit_start = super::layout::RAM_64BIT_START.raw_value(); 611 612 if second_region_start != &ram_64bit_start { 613 panic!( 614 "Unexpected second memory region layout: start: 0x{:08x}, ram_64bit_start: 0x{:08x}", 615 second_region_start, ram_64bit_start 616 ); 617 } 618 619 let mem_size = second_region_end - ram_64bit_start; 620 let mem_reg_prop = [ram_64bit_start, mem_size]; 621 let memory_node_name = format!("memory@{:x}", ram_64bit_start); 622 let memory_node = fdt.begin_node(&memory_node_name)?; 623 fdt.property_string("device_type", "memory")?; 624 fdt.property_array_u64("reg", &mem_reg_prop)?; 625 fdt.end_node(memory_node)?; 626 } 627 } 628 629 Ok(()) 630 } 631 632 fn create_chosen_node( 633 fdt: &mut FdtWriter, 634 cmdline: &str, 635 initrd: &Option<InitramfsConfig>, 636 ) -> FdtWriterResult<()> { 637 let chosen_node = fdt.begin_node("chosen")?; 638 fdt.property_string("bootargs", cmdline)?; 639 640 if let Some(initrd_config) = initrd { 641 let initrd_start = initrd_config.address.raw_value(); 642 let initrd_end = initrd_config.address.raw_value() + initrd_config.size as u64; 643 fdt.property_u64("linux,initrd-start", initrd_start)?; 644 fdt.property_u64("linux,initrd-end", initrd_end)?; 645 } 646 647 fdt.end_node(chosen_node)?; 648 649 Ok(()) 650 } 651 652 fn create_gic_node(fdt: &mut FdtWriter, gic_device: &Arc<Mutex<dyn Vgic>>) -> FdtWriterResult<()> { 653 let gic_reg_prop = gic_device.lock().unwrap().device_properties(); 654 655 let intc_node = fdt.begin_node("intc")?; 656 657 fdt.property_string("compatible", gic_device.lock().unwrap().fdt_compatibility())?; 658 fdt.property_null("interrupt-controller")?; 659 // "interrupt-cells" field specifies the number of cells needed to encode an 660 // interrupt source. The type shall be a <u32> and the value shall be 3 if no PPI affinity description 661 // is required. 662 fdt.property_u32("#interrupt-cells", 3)?; 663 fdt.property_array_u64("reg", &gic_reg_prop)?; 664 fdt.property_u32("phandle", GIC_PHANDLE)?; 665 fdt.property_u32("#address-cells", 2)?; 666 fdt.property_u32("#size-cells", 2)?; 667 fdt.property_null("ranges")?; 668 669 let gic_intr_prop = [ 670 GIC_FDT_IRQ_TYPE_PPI, 671 gic_device.lock().unwrap().fdt_maint_irq(), 672 IRQ_TYPE_LEVEL_HI, 673 ]; 674 fdt.property_array_u32("interrupts", &gic_intr_prop)?; 675 676 if gic_device.lock().unwrap().msi_compatible() { 677 let msic_node = fdt.begin_node("msic")?; 678 fdt.property_string("compatible", gic_device.lock().unwrap().msi_compatibility())?; 679 fdt.property_null("msi-controller")?; 680 fdt.property_u32("phandle", MSI_PHANDLE)?; 681 let msi_reg_prop = gic_device.lock().unwrap().msi_properties(); 682 fdt.property_array_u64("reg", &msi_reg_prop)?; 683 fdt.end_node(msic_node)?; 684 } 685 686 fdt.end_node(intc_node)?; 687 688 Ok(()) 689 } 690 691 fn create_clock_node(fdt: &mut FdtWriter) -> FdtWriterResult<()> { 692 // The Advanced Peripheral Bus (APB) is part of the Advanced Microcontroller Bus Architecture 693 // (AMBA) protocol family. It defines a low-cost interface that is optimized for minimal power 694 // consumption and reduced interface complexity. 695 // PCLK is the clock source and this node defines exactly the clock for the APB. 696 let clock_node = fdt.begin_node("apb-pclk")?; 697 fdt.property_string("compatible", "fixed-clock")?; 698 fdt.property_u32("#clock-cells", 0x0)?; 699 fdt.property_u32("clock-frequency", 24000000)?; 700 fdt.property_string("clock-output-names", "clk24mhz")?; 701 fdt.property_u32("phandle", CLOCK_PHANDLE)?; 702 fdt.end_node(clock_node)?; 703 704 Ok(()) 705 } 706 707 fn create_timer_node(fdt: &mut FdtWriter) -> FdtWriterResult<()> { 708 // See 709 // https://github.com/torvalds/linux/blob/master/Documentation/devicetree/bindings/interrupt-controller/arch_timer.txt 710 // These are fixed interrupt numbers for the timer device. 711 let irqs = [13, 14, 11, 10]; 712 let compatible = "arm,armv8-timer"; 713 714 let mut timer_reg_cells: Vec<u32> = Vec::new(); 715 for &irq in irqs.iter() { 716 timer_reg_cells.push(GIC_FDT_IRQ_TYPE_PPI); 717 timer_reg_cells.push(irq); 718 timer_reg_cells.push(IRQ_TYPE_LEVEL_HI); 719 } 720 721 let timer_node = fdt.begin_node("timer")?; 722 fdt.property_string("compatible", compatible)?; 723 fdt.property_null("always-on")?; 724 fdt.property_array_u32("interrupts", &timer_reg_cells)?; 725 fdt.end_node(timer_node)?; 726 727 Ok(()) 728 } 729 730 fn create_psci_node(fdt: &mut FdtWriter) -> FdtWriterResult<()> { 731 let compatible = "arm,psci-0.2"; 732 let psci_node = fdt.begin_node("psci")?; 733 fdt.property_string("compatible", compatible)?; 734 // Two methods available: hvc and smc. 735 // As per documentation, PSCI calls between a guest and hypervisor may use the HVC conduit instead of SMC. 736 // So, since we are using kvm, we need to use hvc. 737 fdt.property_string("method", "hvc")?; 738 fdt.end_node(psci_node)?; 739 740 Ok(()) 741 } 742 743 fn create_virtio_node<T: DeviceInfoForFdt + Clone + Debug>( 744 fdt: &mut FdtWriter, 745 dev_info: &T, 746 ) -> FdtWriterResult<()> { 747 let device_reg_prop = [dev_info.addr(), dev_info.length()]; 748 let irq = [GIC_FDT_IRQ_TYPE_SPI, dev_info.irq(), IRQ_TYPE_EDGE_RISING]; 749 750 let virtio_node = fdt.begin_node(&format!("virtio_mmio@{:x}", dev_info.addr()))?; 751 fdt.property_string("compatible", "virtio,mmio")?; 752 fdt.property_array_u64("reg", &device_reg_prop)?; 753 fdt.property_array_u32("interrupts", &irq)?; 754 fdt.property_u32("interrupt-parent", GIC_PHANDLE)?; 755 fdt.end_node(virtio_node)?; 756 757 Ok(()) 758 } 759 760 fn create_serial_node<T: DeviceInfoForFdt + Clone + Debug>( 761 fdt: &mut FdtWriter, 762 dev_info: &T, 763 ) -> FdtWriterResult<()> { 764 let compatible = b"arm,pl011\0arm,primecell\0"; 765 let serial_reg_prop = [dev_info.addr(), dev_info.length()]; 766 let irq = [ 767 GIC_FDT_IRQ_TYPE_SPI, 768 dev_info.irq() - IRQ_BASE, 769 IRQ_TYPE_EDGE_RISING, 770 ]; 771 772 let serial_node = fdt.begin_node(&format!("pl011@{:x}", dev_info.addr()))?; 773 fdt.property("compatible", compatible)?; 774 fdt.property_array_u64("reg", &serial_reg_prop)?; 775 fdt.property_u32("clocks", CLOCK_PHANDLE)?; 776 fdt.property_string("clock-names", "apb_pclk")?; 777 fdt.property_array_u32("interrupts", &irq)?; 778 fdt.end_node(serial_node)?; 779 780 Ok(()) 781 } 782 783 fn create_rtc_node<T: DeviceInfoForFdt + Clone + Debug>( 784 fdt: &mut FdtWriter, 785 dev_info: &T, 786 ) -> FdtWriterResult<()> { 787 let compatible = b"arm,pl031\0arm,primecell\0"; 788 let rtc_reg_prop = [dev_info.addr(), dev_info.length()]; 789 let irq = [ 790 GIC_FDT_IRQ_TYPE_SPI, 791 dev_info.irq() - IRQ_BASE, 792 IRQ_TYPE_LEVEL_HI, 793 ]; 794 795 let rtc_node = fdt.begin_node(&format!("rtc@{:x}", dev_info.addr()))?; 796 fdt.property("compatible", compatible)?; 797 fdt.property_array_u64("reg", &rtc_reg_prop)?; 798 fdt.property_array_u32("interrupts", &irq)?; 799 fdt.property_u32("clocks", CLOCK_PHANDLE)?; 800 fdt.property_string("clock-names", "apb_pclk")?; 801 fdt.end_node(rtc_node)?; 802 803 Ok(()) 804 } 805 806 fn create_gpio_node<T: DeviceInfoForFdt + Clone + Debug>( 807 fdt: &mut FdtWriter, 808 dev_info: &T, 809 ) -> FdtWriterResult<()> { 810 // PL061 GPIO controller node 811 let compatible = b"arm,pl061\0arm,primecell\0"; 812 let gpio_reg_prop = [dev_info.addr(), dev_info.length()]; 813 let irq = [ 814 GIC_FDT_IRQ_TYPE_SPI, 815 dev_info.irq() - IRQ_BASE, 816 IRQ_TYPE_EDGE_RISING, 817 ]; 818 819 let gpio_node = fdt.begin_node(&format!("pl061@{:x}", dev_info.addr()))?; 820 fdt.property("compatible", compatible)?; 821 fdt.property_array_u64("reg", &gpio_reg_prop)?; 822 fdt.property_array_u32("interrupts", &irq)?; 823 fdt.property_null("gpio-controller")?; 824 fdt.property_u32("#gpio-cells", 2)?; 825 fdt.property_u32("clocks", CLOCK_PHANDLE)?; 826 fdt.property_string("clock-names", "apb_pclk")?; 827 fdt.property_u32("phandle", GPIO_PHANDLE)?; 828 fdt.end_node(gpio_node)?; 829 830 // gpio-keys node 831 let gpio_keys_node = fdt.begin_node("gpio-keys")?; 832 fdt.property_string("compatible", "gpio-keys")?; 833 fdt.property_u32("#size-cells", 0)?; 834 fdt.property_u32("#address-cells", 1)?; 835 let gpio_keys_poweroff_node = fdt.begin_node("button@1")?; 836 fdt.property_string("label", "GPIO Key Poweroff")?; 837 fdt.property_u32("linux,code", KEY_POWER)?; 838 let gpios = [GPIO_PHANDLE, 3, 0]; 839 fdt.property_array_u32("gpios", &gpios)?; 840 fdt.end_node(gpio_keys_poweroff_node)?; 841 fdt.end_node(gpio_keys_node)?; 842 843 Ok(()) 844 } 845 846 fn create_devices_node<T: DeviceInfoForFdt + Clone + Debug, S: ::std::hash::BuildHasher>( 847 fdt: &mut FdtWriter, 848 dev_info: &HashMap<(DeviceType, String), T, S>, 849 ) -> FdtWriterResult<()> { 850 // Create one temp Vec to store all virtio devices 851 let mut ordered_virtio_device: Vec<&T> = Vec::new(); 852 853 for ((device_type, _device_id), info) in dev_info { 854 match device_type { 855 DeviceType::Gpio => create_gpio_node(fdt, info)?, 856 DeviceType::Rtc => create_rtc_node(fdt, info)?, 857 DeviceType::Serial => create_serial_node(fdt, info)?, 858 DeviceType::Virtio(_) => { 859 ordered_virtio_device.push(info); 860 } 861 } 862 } 863 864 // Sort out virtio devices by address from low to high and insert them into fdt table. 865 ordered_virtio_device.sort_by_key(|&a| a.addr()); 866 // Current address allocation strategy in cloud-hypervisor is: the first created device 867 // will be allocated to higher address. Here we reverse the vector to make sure that 868 // the older created device will appear in front of the newer created device in FDT. 869 ordered_virtio_device.reverse(); 870 for ordered_device_info in ordered_virtio_device.drain(..) { 871 create_virtio_node(fdt, ordered_device_info)?; 872 } 873 874 Ok(()) 875 } 876 877 fn create_pmu_node(fdt: &mut FdtWriter) -> FdtWriterResult<()> { 878 let compatible = "arm,armv8-pmuv3"; 879 let irq = [GIC_FDT_IRQ_TYPE_PPI, AARCH64_PMU_IRQ, IRQ_TYPE_LEVEL_HI]; 880 881 let pmu_node = fdt.begin_node("pmu")?; 882 fdt.property_string("compatible", compatible)?; 883 fdt.property_array_u32("interrupts", &irq)?; 884 fdt.end_node(pmu_node)?; 885 Ok(()) 886 } 887 888 fn create_pci_nodes( 889 fdt: &mut FdtWriter, 890 pci_device_info: &[PciSpaceInfo], 891 virtio_iommu_bdf: Option<u32>, 892 ) -> FdtWriterResult<()> { 893 // Add node for PCIe controller. 894 // See Documentation/devicetree/bindings/pci/host-generic-pci.txt in the kernel 895 // and https://elinux.org/Device_Tree_Usage. 896 // In multiple PCI segments setup, each PCI segment needs a PCI node. 897 for pci_device_info_elem in pci_device_info.iter() { 898 // EDK2 requires the PCIe high space above 4G address. 899 // The actual space in CLH follows the RAM. If the RAM space is small, the PCIe high space 900 // could fall bellow 4G. 901 // Here we cut off PCI device space below 8G in FDT to workaround the EDK2 check. 902 // But the address written in ACPI is not impacted. 903 let (pci_device_base_64bit, pci_device_size_64bit) = 904 if pci_device_info_elem.pci_device_space_start < PCI_HIGH_BASE.raw_value() { 905 ( 906 PCI_HIGH_BASE.raw_value(), 907 pci_device_info_elem.pci_device_space_size 908 - (PCI_HIGH_BASE.raw_value() - pci_device_info_elem.pci_device_space_start), 909 ) 910 } else { 911 ( 912 pci_device_info_elem.pci_device_space_start, 913 pci_device_info_elem.pci_device_space_size, 914 ) 915 }; 916 // There is no specific requirement of the 32bit MMIO range, and 917 // therefore at least we can make these ranges 4K aligned. 918 let pci_device_size_32bit: u64 = 919 MEM_32BIT_DEVICES_SIZE / ((1 << 12) * pci_device_info.len() as u64) * (1 << 12); 920 let pci_device_base_32bit: u64 = MEM_32BIT_DEVICES_START.0 921 + pci_device_size_32bit * pci_device_info_elem.pci_segment_id as u64; 922 923 let ranges = [ 924 // io addresses. Since AArch64 will not use IO address, 925 // we can set the same IO address range for every segment. 926 0x1000000, 927 0_u32, 928 0_u32, 929 (MEM_PCI_IO_START.0 >> 32) as u32, 930 MEM_PCI_IO_START.0 as u32, 931 (MEM_PCI_IO_SIZE >> 32) as u32, 932 MEM_PCI_IO_SIZE as u32, 933 // mmio addresses 934 0x2000000, // (ss = 10: 32-bit memory space) 935 (pci_device_base_32bit >> 32) as u32, // PCI address 936 pci_device_base_32bit as u32, 937 (pci_device_base_32bit >> 32) as u32, // CPU address 938 pci_device_base_32bit as u32, 939 (pci_device_size_32bit >> 32) as u32, // size 940 pci_device_size_32bit as u32, 941 // device addresses 942 0x3000000, // (ss = 11: 64-bit memory space) 943 (pci_device_base_64bit >> 32) as u32, // PCI address 944 pci_device_base_64bit as u32, 945 (pci_device_base_64bit >> 32) as u32, // CPU address 946 pci_device_base_64bit as u32, 947 (pci_device_size_64bit >> 32) as u32, // size 948 pci_device_size_64bit as u32, 949 ]; 950 let bus_range = [0, 0]; // Only bus 0 951 let reg = [ 952 pci_device_info_elem.mmio_config_address, 953 PCI_MMIO_CONFIG_SIZE_PER_SEGMENT, 954 ]; 955 // See kernel document Documentation/devicetree/bindings/pci/pci-msi.txt 956 let msi_map = [ 957 // rid-base: A single cell describing the first RID matched by the entry. 958 0x0, 959 // msi-controller: A single phandle to an MSI controller. 960 MSI_PHANDLE, 961 // msi-base: An msi-specifier describing the msi-specifier produced for the 962 // first RID matched by the entry. 963 (pci_device_info_elem.pci_segment_id as u32) << 8, 964 // length: A single cell describing how many consecutive RIDs are matched 965 // following the rid-base. 966 0x100, 967 ]; 968 969 let pci_node_name = format!("pci@{:x}", pci_device_info_elem.mmio_config_address); 970 let pci_node = fdt.begin_node(&pci_node_name)?; 971 972 fdt.property_string("compatible", "pci-host-ecam-generic")?; 973 fdt.property_string("device_type", "pci")?; 974 fdt.property_array_u32("ranges", &ranges)?; 975 fdt.property_array_u32("bus-range", &bus_range)?; 976 fdt.property_u32( 977 "linux,pci-domain", 978 pci_device_info_elem.pci_segment_id as u32, 979 )?; 980 fdt.property_u32("#address-cells", 3)?; 981 fdt.property_u32("#size-cells", 2)?; 982 fdt.property_array_u64("reg", ®)?; 983 fdt.property_u32("#interrupt-cells", 1)?; 984 fdt.property_null("interrupt-map")?; 985 fdt.property_null("interrupt-map-mask")?; 986 fdt.property_null("dma-coherent")?; 987 fdt.property_array_u32("msi-map", &msi_map)?; 988 fdt.property_u32("msi-parent", MSI_PHANDLE)?; 989 990 if pci_device_info_elem.pci_segment_id == 0 { 991 if let Some(virtio_iommu_bdf) = virtio_iommu_bdf { 992 // See kernel document Documentation/devicetree/bindings/pci/pci-iommu.txt 993 // for 'iommu-map' attribute setting. 994 let iommu_map = [ 995 0_u32, 996 VIRTIO_IOMMU_PHANDLE, 997 0_u32, 998 virtio_iommu_bdf, 999 virtio_iommu_bdf + 1, 1000 VIRTIO_IOMMU_PHANDLE, 1001 virtio_iommu_bdf + 1, 1002 0xffff - virtio_iommu_bdf, 1003 ]; 1004 fdt.property_array_u32("iommu-map", &iommu_map)?; 1005 1006 // See kernel document Documentation/devicetree/bindings/virtio/iommu.txt 1007 // for virtio-iommu node settings. 1008 let virtio_iommu_node_name = format!("virtio_iommu@{virtio_iommu_bdf:x}"); 1009 let virtio_iommu_node = fdt.begin_node(&virtio_iommu_node_name)?; 1010 fdt.property_u32("#iommu-cells", 1)?; 1011 fdt.property_string("compatible", "virtio,pci-iommu")?; 1012 1013 // 'reg' is a five-cell address encoded as 1014 // (phys.hi phys.mid phys.lo size.hi size.lo). phys.hi should contain the 1015 // device's BDF as 0b00000000 bbbbbbbb dddddfff 00000000. The other cells 1016 // should be zero. 1017 let reg = [virtio_iommu_bdf << 8, 0_u32, 0_u32, 0_u32, 0_u32]; 1018 fdt.property_array_u32("reg", ®)?; 1019 fdt.property_u32("phandle", VIRTIO_IOMMU_PHANDLE)?; 1020 1021 fdt.end_node(virtio_iommu_node)?; 1022 } 1023 } 1024 1025 fdt.end_node(pci_node)?; 1026 } 1027 1028 Ok(()) 1029 } 1030 1031 fn create_distance_map_node(fdt: &mut FdtWriter, numa_nodes: &NumaNodes) -> FdtWriterResult<()> { 1032 let distance_map_node = fdt.begin_node("distance-map")?; 1033 fdt.property_string("compatible", "numa-distance-map-v1")?; 1034 // Construct the distance matrix. 1035 // 1. We use the word entry to describe a distance from a node to 1036 // its destination, e.g. 0 -> 1 = 20 is described as <0 1 20>. 1037 // 2. Each entry represents distance from first node to second node. 1038 // The distances are equal in either direction. 1039 // 3. The distance from a node to self (local distance) is represented 1040 // with value 10 and all internode distance should be represented with 1041 // a value greater than 10. 1042 // 4. distance-matrix should have entries in lexicographical ascending 1043 // order of nodes. 1044 let mut distance_matrix = Vec::new(); 1045 for numa_node_idx in 0..numa_nodes.len() { 1046 let numa_node = numa_nodes.get(&(numa_node_idx as u32)); 1047 for dest_numa_node in 0..numa_node.unwrap().distances.len() + 1 { 1048 if numa_node_idx == dest_numa_node { 1049 distance_matrix.push(numa_node_idx as u32); 1050 distance_matrix.push(dest_numa_node as u32); 1051 distance_matrix.push(10_u32); 1052 continue; 1053 } 1054 1055 distance_matrix.push(numa_node_idx as u32); 1056 distance_matrix.push(dest_numa_node as u32); 1057 distance_matrix.push( 1058 *numa_node 1059 .unwrap() 1060 .distances 1061 .get(&(dest_numa_node as u32)) 1062 .unwrap() as u32, 1063 ); 1064 } 1065 } 1066 fdt.property_array_u32("distance-matrix", distance_matrix.as_ref())?; 1067 fdt.end_node(distance_map_node)?; 1068 1069 Ok(()) 1070 } 1071 1072 // Parse the DTB binary and print for debugging 1073 pub fn print_fdt(dtb: &[u8]) { 1074 match fdt_parser::Fdt::new(dtb) { 1075 Ok(fdt) => { 1076 if let Some(root) = fdt.find_node("/") { 1077 debug!("Printing the FDT:"); 1078 print_node(root, 0); 1079 } else { 1080 debug!("Failed to find root node in FDT for debugging."); 1081 } 1082 } 1083 Err(_) => debug!("Failed to parse FDT for debugging."), 1084 } 1085 } 1086 1087 fn print_node(node: fdt_parser::node::FdtNode<'_, '_>, n_spaces: usize) { 1088 debug!("{:indent$}{}/", "", node.name, indent = n_spaces); 1089 for property in node.properties() { 1090 let name = property.name; 1091 1092 // If the property is 'compatible', its value requires special handling. 1093 // The u8 array could contain multiple null-terminated strings. 1094 // We copy the original array and simply replace all 'null' characters with spaces. 1095 let value = if name == "compatible" { 1096 let mut compatible = vec![0u8; 256]; 1097 let handled_value = property 1098 .value 1099 .iter() 1100 .map(|&c| if c == 0 { b' ' } else { c }) 1101 .collect::<Vec<_>>(); 1102 let len = cmp::min(255, handled_value.len()); 1103 compatible[..len].copy_from_slice(&handled_value[..len]); 1104 compatible[..(len + 1)].to_vec() 1105 } else { 1106 property.value.to_vec() 1107 }; 1108 let value = &value; 1109 1110 // Now the value can be either: 1111 // - A null-terminated C string, or 1112 // - Binary data 1113 // We follow a very simple logic to present the value: 1114 // - At first, try to convert it to CStr and print, 1115 // - If failed, print it as u32 array. 1116 let value_result = match CStr::from_bytes_with_nul(value) { 1117 Ok(value_cstr) => match value_cstr.to_str() { 1118 Ok(value_str) => Some(value_str), 1119 Err(_e) => None, 1120 }, 1121 Err(_e) => None, 1122 }; 1123 1124 if let Some(value_str) = value_result { 1125 debug!( 1126 "{:indent$}{} : {:#?}", 1127 "", 1128 name, 1129 value_str, 1130 indent = (n_spaces + 2) 1131 ); 1132 } else { 1133 let mut array = Vec::with_capacity(256); 1134 array.resize(value.len() / 4, 0u32); 1135 BigEndian::read_u32_into(value, &mut array); 1136 debug!( 1137 "{:indent$}{} : {:X?}", 1138 "", 1139 name, 1140 array, 1141 indent = (n_spaces + 2) 1142 ); 1143 }; 1144 } 1145 1146 // Print children nodes if there is any 1147 for child in node.children() { 1148 print_node(child, n_spaces + 2); 1149 } 1150 } 1151