// Copyright 2020 Arm Limited (or its affiliates). All rights reserved.
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.

use std::collections::HashMap;
use std::ffi::CStr;
use std::fmt::Debug;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::{cmp, fs, result, str};

use byteorder::{BigEndian, ByteOrder};
use hypervisor::arch::aarch64::gic::Vgic;
use thiserror::Error;
use vm_fdt::{FdtWriter, FdtWriterResult};
use vm_memory::{Address, Bytes, GuestMemory, GuestMemoryError, GuestMemoryRegion};

use super::super::{DeviceType, GuestMemoryMmap, InitramfsConfig};
use super::layout::{
    IRQ_BASE, MEM_32BIT_DEVICES_SIZE, MEM_32BIT_DEVICES_START, MEM_PCI_IO_SIZE, MEM_PCI_IO_START,
    PCI_HIGH_BASE, PCI_MMIO_CONFIG_SIZE_PER_SEGMENT,
};
use crate::{NumaNodes, PciSpaceInfo};

// This is a value for uniquely identifying the FDT node declaring the interrupt controller.
const GIC_PHANDLE: u32 = 1;
// This is a value for uniquely identifying the FDT node declaring the MSI controller.
const MSI_PHANDLE: u32 = 2;
// This is a value for uniquely identifying the FDT node containing the clock definition.
const CLOCK_PHANDLE: u32 = 3;
// This is a value for uniquely identifying the FDT node containing the gpio controller.
const GPIO_PHANDLE: u32 = 4;
// This is a value for virtio-iommu. Currently only one virtio-iommu device is supported.
const VIRTIO_IOMMU_PHANDLE: u32 = 5;
// NOTE: Keep FIRST_VCPU_PHANDLE the last PHANDLE defined.
// This is a value for uniquely identifying the FDT node containing the first vCPU.
// The phandle of the last vCPU depends on the number of vCPUs.
const FIRST_VCPU_PHANDLE: u32 = 8;

// This is a value for uniquely identifying the FDT node containing the L2 cache info.
const L2_CACHE_PHANDLE: u32 = 6;
// This is a value for uniquely identifying the FDT node containing the L3 cache info.
const L3_CACHE_PHANDLE: u32 = 7;

// Read the documentation specified when appending the root node to the FDT.
const ADDRESS_CELLS: u32 = 0x2;
const SIZE_CELLS: u32 = 0x2;

// As per kvm tool and
// https://www.kernel.org/doc/Documentation/devicetree/bindings/interrupt-controller/arm%2Cgic.txt
// Look for "The 1st cell...".
const GIC_FDT_IRQ_TYPE_SPI: u32 = 0;
const GIC_FDT_IRQ_TYPE_PPI: u32 = 1;

// From https://elixir.bootlin.com/linux/v4.9.62/source/include/dt-bindings/interrupt-controller/irq.h#L17
const IRQ_TYPE_EDGE_RISING: u32 = 1;
const IRQ_TYPE_LEVEL_HI: u32 = 4;

// PMU PPI interrupt number.
pub const AARCH64_PMU_IRQ: u32 = 7;

// Keys and Buttons
// System Power Down
const KEY_POWER: u32 = 116;

/// Trait for devices to be added to the Flattened Device Tree.
pub trait DeviceInfoForFdt {
    /// Returns the address where this device will be loaded.
    fn addr(&self) -> u64;
    /// Returns the associated interrupt for this device.
    fn irq(&self) -> u32;
    /// Returns the amount of memory that needs to be reserved for this device.
    fn length(&self) -> u64;
}

/// Errors thrown while configuring the Flattened Device Tree for aarch64.
#[derive(Debug, Error)]
pub enum Error {
    /// Failure in writing FDT in memory.
    #[error("Failure in writing FDT in memory: {0}")]
    WriteFdtToMemory(GuestMemoryError),
}

type Result<T> = result::Result<T, Error>;

pub enum CacheLevel {
    /// L1 data cache
    L1D = 0,
    /// L1 instruction cache
    L1I = 1,
    /// L2 cache
    L2 = 2,
    /// L3 cache
    L3 = 3,
}

/// NOTE: cache size file path example,
/// "/sys/devices/system/cpu/cpu0/cache/index0/size".
pub fn get_cache_size(cache_level: CacheLevel) -> u32 {
    let mut file_directory: String = "/sys/devices/system/cpu/cpu0/cache".to_string();
    match cache_level {
        CacheLevel::L1D => file_directory += "/index0/size",
        CacheLevel::L1I => file_directory += "/index1/size",
        CacheLevel::L2 => file_directory += "/index2/size",
        CacheLevel::L3 => file_directory += "/index3/size",
    }

    let file_path = Path::new(&file_directory);
    if !file_path.exists() {
        warn!("File: {} does not exist.", file_directory);
        0
    } else {
        info!("File: {} exists.", file_directory);

        let src =
            fs::read_to_string(file_directory).expect("File does not exist or is corrupted.");
        // The content of the file is as simple as a size, like: "32K".
        let src = src.trim();
        let src_digits: u32 = src[0..src.len() - 1].parse().unwrap();
        let src_unit = &src[src.len() - 1..];

        src_digits
            * match src_unit {
                "K" => 1024,
                "M" => 1024u32.pow(2),
                "G" => 1024u32.pow(3),
                _ => 1,
            }
    }
}

/// NOTE: coherency_line_size file path example,
/// "/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size".
pub fn get_cache_coherency_line_size(cache_level: CacheLevel) -> u32 {
    let mut file_directory: String = "/sys/devices/system/cpu/cpu0/cache".to_string();
    match cache_level {
        CacheLevel::L1D => file_directory += "/index0/coherency_line_size",
        CacheLevel::L1I => file_directory += "/index1/coherency_line_size",
        CacheLevel::L2 => file_directory += "/index2/coherency_line_size",
        CacheLevel::L3 => file_directory += "/index3/coherency_line_size",
    }

    let file_path = Path::new(&file_directory);
    if !file_path.exists() {
        warn!("File: {} does not exist.", file_directory);
        0
    } else {
        info!("File: {} exists.", file_directory);

        let src =
            fs::read_to_string(file_directory).expect("File does not exist or is corrupted.");
        src.trim().parse::<u32>().unwrap()
    }
}

/// NOTE: number_of_sets file path example,
/// "/sys/devices/system/cpu/cpu0/cache/index0/number_of_sets".
pub fn get_cache_number_of_sets(cache_level: CacheLevel) -> u32 {
    let mut file_directory: String = "/sys/devices/system/cpu/cpu0/cache".to_string();
    match cache_level {
        CacheLevel::L1D => file_directory += "/index0/number_of_sets",
        CacheLevel::L1I => file_directory += "/index1/number_of_sets",
        CacheLevel::L2 => file_directory += "/index2/number_of_sets",
        CacheLevel::L3 => file_directory += "/index3/number_of_sets",
    }

    let file_path = Path::new(&file_directory);
    if !file_path.exists() {
        warn!("File: {} does not exist.", file_directory);
        0
    } else {
        info!("File: {} exists.", file_directory);

        let src =
            fs::read_to_string(file_directory).expect("File does not exist or is corrupted.");
        src.trim().parse::<u32>().unwrap()
    }
}

/// NOTE: shared_cpu_list file path example,
/// "/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list".
pub fn get_cache_shared(cache_level: CacheLevel) -> bool {
    let mut file_directory: String = "/sys/devices/system/cpu/cpu0/cache".to_string();
    let mut result = true;

    match cache_level {
        CacheLevel::L1D | CacheLevel::L1I => result = false,
        CacheLevel::L2 => file_directory += "/index2/shared_cpu_list",
        CacheLevel::L3 => file_directory += "/index3/shared_cpu_list",
    }

    if !result {
        return false;
    }

    let file_path = Path::new(&file_directory);
    if !file_path.exists() {
        warn!("File: {} does not exist.", file_directory);
        result = false;
    } else {
        info!("File: {} exists.", file_directory);

        let src =
            fs::read_to_string(file_directory).expect("File does not exist or is corrupted.");
        let src = src.trim();
        if src.is_empty() {
            result = false;
        } else {
            result = src.contains('-') || src.contains(',');
        }
    }

    result
}

/// Creates the flattened device tree for this aarch64 VM.
#[allow(clippy::too_many_arguments)]
pub fn create_fdt<T: DeviceInfoForFdt + Clone + Debug, S: ::std::hash::BuildHasher>(
    guest_mem: &GuestMemoryMmap,
    cmdline: &str,
    vcpu_mpidr: Vec<u64>,
    vcpu_topology: Option<(u8, u8, u8)>,
    device_info: &HashMap<(DeviceType, String), T, S>,
    gic_device: &Arc<Mutex<dyn Vgic>>,
    initrd: &Option<InitramfsConfig>,
    pci_space_info: &[PciSpaceInfo],
    numa_nodes: &NumaNodes,
    virtio_iommu_bdf: Option<u32>,
    pmu_supported: bool,
) -> FdtWriterResult<Vec<u8>> {
    // Allocate everything necessary for holding the blob.
    let mut fdt = FdtWriter::new().unwrap();

    // For an explanation of why these nodes were introduced in the blob, take a look at
    // https://github.com/torvalds/linux/blob/master/Documentation/devicetree/booting-without-of.txt#L845
    // Look for "Required nodes and properties".

    // Header or the root node as per the above-mentioned documentation.
    let root_node = fdt.begin_node("")?;
    fdt.property_string("compatible", "linux,dummy-virt")?;
    // For info on #address-cells and #size-cells, read "Note about cells and address representation"
    // from the above-mentioned txt file.
    fdt.property_u32("#address-cells", ADDRESS_CELLS)?;
    fdt.property_u32("#size-cells", SIZE_CELLS)?;
    // This is not mandatory but we use it to point the root node to the node
    // containing the description of the interrupt controller for this VM.
    fdt.property_u32("interrupt-parent", GIC_PHANDLE)?;
    create_cpu_nodes(&mut fdt, &vcpu_mpidr, vcpu_topology, numa_nodes)?;
    create_memory_node(&mut fdt, guest_mem, numa_nodes)?;
    create_chosen_node(&mut fdt, cmdline, initrd)?;
    create_gic_node(&mut fdt, gic_device)?;
    create_timer_node(&mut fdt)?;
    if pmu_supported {
        create_pmu_node(&mut fdt)?;
    }
    create_clock_node(&mut fdt)?;
    create_psci_node(&mut fdt)?;
    create_devices_node(&mut fdt, device_info)?;
    create_pci_nodes(&mut fdt, pci_space_info, virtio_iommu_bdf)?;
    if numa_nodes.len() > 1 {
        create_distance_map_node(&mut fdt, numa_nodes)?;
    }

    // End Header node.
    fdt.end_node(root_node)?;

    let fdt_final = fdt.finish()?;

    Ok(fdt_final)
}

pub fn write_fdt_to_memory(fdt_final: Vec<u8>, guest_mem: &GuestMemoryMmap) -> Result<()> {
    // Write FDT to memory.
    guest_mem
        .write_slice(fdt_final.as_slice(), super::layout::FDT_START)
        .map_err(Error::WriteFdtToMemory)?;
    Ok(())
}

// Following are the auxiliary functions for creating the different nodes that we append to our FDT.
fn create_cpu_nodes(
    fdt: &mut FdtWriter,
    vcpu_mpidr: &[u64],
    vcpu_topology: Option<(u8, u8, u8)>,
    numa_nodes: &NumaNodes,
) -> FdtWriterResult<()> {
    // See https://github.com/torvalds/linux/blob/master/Documentation/devicetree/bindings/arm/cpus.yaml.
    let cpus_node = fdt.begin_node("cpus")?;
    fdt.property_u32("#address-cells", 0x1)?;
    fdt.property_u32("#size-cells", 0x0)?;

    let num_cpus = vcpu_mpidr.len();
    let (threads_per_core, cores_per_package, packages) = vcpu_topology.unwrap_or((1, 1, 1));
    let max_cpus: u32 = (threads_per_core * cores_per_package * packages).into();

    // Add cache info.
    // L1 Data Cache Info.
    let mut l1_d_cache_size: u32 = 0;
    let mut l1_d_cache_line_size: u32 = 0;
    let mut l1_d_cache_sets: u32 = 0;

    // L1 Instruction Cache Info.
    let mut l1_i_cache_size: u32 = 0;
    let mut l1_i_cache_line_size: u32 = 0;
    let mut l1_i_cache_sets: u32 = 0;

    // L2 Cache Info.
    let mut l2_cache_size: u32 = 0;
    let mut l2_cache_line_size: u32 = 0;
    let mut l2_cache_sets: u32 = 0;

    // L3 Cache Info.
    let mut l3_cache_size: u32 = 0;
    let mut l3_cache_line_size: u32 = 0;
    let mut l3_cache_sets: u32 = 0;

    // Cache Shared Info.
    let mut l2_cache_shared: bool = false;
    let mut l3_cache_shared: bool = false;

    let cache_path = Path::new("/sys/devices/system/cpu/cpu0/cache");
    let cache_exist: bool = cache_path.exists();
    if !cache_exist {
        warn!("cache sysfs directory does not exist.");
    } else {
        info!("cache sysfs directory exists.");
        // L1 Data Cache Info.
        l1_d_cache_size = get_cache_size(CacheLevel::L1D);
        l1_d_cache_line_size = get_cache_coherency_line_size(CacheLevel::L1D);
        l1_d_cache_sets = get_cache_number_of_sets(CacheLevel::L1D);

        // L1 Instruction Cache Info.
        l1_i_cache_size = get_cache_size(CacheLevel::L1I);
        l1_i_cache_line_size = get_cache_coherency_line_size(CacheLevel::L1I);
        l1_i_cache_sets = get_cache_number_of_sets(CacheLevel::L1I);

        // L2 Cache Info.
        l2_cache_size = get_cache_size(CacheLevel::L2);
        l2_cache_line_size = get_cache_coherency_line_size(CacheLevel::L2);
        l2_cache_sets = get_cache_number_of_sets(CacheLevel::L2);

        // L3 Cache Info.
        l3_cache_size = get_cache_size(CacheLevel::L3);
        l3_cache_line_size = get_cache_coherency_line_size(CacheLevel::L3);
        l3_cache_sets = get_cache_number_of_sets(CacheLevel::L3);

        // Cache Shared Info.
        if l2_cache_size != 0 {
            l2_cache_shared = get_cache_shared(CacheLevel::L2);
        }
        if l3_cache_size != 0 {
            l3_cache_shared = get_cache_shared(CacheLevel::L3);
        }
    }

    for (cpu_id, mpidr) in vcpu_mpidr.iter().enumerate().take(num_cpus) {
        let cpu_name = format!("cpu@{cpu_id:x}");
        let cpu_node = fdt.begin_node(&cpu_name)?;
        fdt.property_string("device_type", "cpu")?;
        fdt.property_string("compatible", "arm,arm-v8")?;
        if num_cpus > 1 {
            // This is required on armv8 64-bit. See aforementioned documentation.
            fdt.property_string("enable-method", "psci")?;
        }
        // Set the field to the first 24 bits of the MPIDR - Multiprocessor Affinity Register.
        // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0488c/BABHBJCI.html.
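        // Illustrative example (not part of the original comments): an MPIDR with
        // Aff2 = 0x01, Aff1 = 0x00, Aff0 = 0x03 masks down to reg = 0x10003.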
        fdt.property_u32("reg", (mpidr & 0x7FFFFF) as u32)?;
        fdt.property_u32("phandle", cpu_id as u32 + FIRST_VCPU_PHANDLE)?;

        // Add the `numa-node-id` property if there is any NUMA config.
        if numa_nodes.len() > 1 {
            for numa_node_idx in 0..numa_nodes.len() {
                let numa_node = numa_nodes.get(&(numa_node_idx as u32));
                if numa_node.unwrap().cpus.contains(&(cpu_id as u8)) {
                    fdt.property_u32("numa-node-id", numa_node_idx as u32)?;
                }
            }
        }

        if cache_exist && l1_d_cache_size != 0 && l1_i_cache_size != 0 {
            // Add cache info.
            fdt.property_u32("d-cache-size", l1_d_cache_size)?;
            fdt.property_u32("d-cache-line-size", l1_d_cache_line_size)?;
            fdt.property_u32("d-cache-sets", l1_d_cache_sets)?;

            fdt.property_u32("i-cache-size", l1_i_cache_size)?;
            fdt.property_u32("i-cache-line-size", l1_i_cache_line_size)?;
            fdt.property_u32("i-cache-sets", l1_i_cache_sets)?;

            if l2_cache_size != 0 && !l2_cache_shared {
                fdt.property_u32(
                    "next-level-cache",
                    cpu_id as u32 + max_cpus + FIRST_VCPU_PHANDLE + L2_CACHE_PHANDLE,
                )?;

                let l2_cache_name = "l2-cache0";
                let l2_cache_node = fdt.begin_node(l2_cache_name)?;
                // A phandle uniquely marks a device node. To avoid phandle conflicts
                // with other device nodes, take the previously assigned CPU phandles
                // into account: the CPU L2 cache phandles must start from the largest
                // CPU phandle plus 1.
                fdt.property_u32(
                    "phandle",
                    cpu_id as u32 + max_cpus + FIRST_VCPU_PHANDLE + L2_CACHE_PHANDLE,
                )?;

                fdt.property_string("compatible", "cache")?;
                fdt.property_u32("cache-size", l2_cache_size)?;
                fdt.property_u32("cache-line-size", l2_cache_line_size)?;
                fdt.property_u32("cache-sets", l2_cache_sets)?;
                fdt.property_u32("cache-level", 2)?;

                if l3_cache_size != 0 && l3_cache_shared {
                    let package_id: u32 = cpu_id as u32 / cores_per_package as u32;
                    fdt.property_u32(
                        "next-level-cache",
                        package_id
                            + num_cpus as u32
                            + max_cpus
                            + FIRST_VCPU_PHANDLE
                            + L2_CACHE_PHANDLE
                            + L3_CACHE_PHANDLE,
                    )?;
                }

                fdt.end_node(l2_cache_node)?;
            }
            if l2_cache_size != 0 && l2_cache_shared {
                warn!("L2 cache shared with other cpus");
            }
        }

        fdt.end_node(cpu_node)?;
    }

    if cache_exist && l3_cache_size != 0 && !l2_cache_shared && l3_cache_shared {
        let mut i: u32 = 0;
        while i < packages.into() {
            let l3_cache_name = "l3-cache0";
            let l3_cache_node = fdt.begin_node(l3_cache_name)?;
            // The ARM L3 cache is generally shared within the package (socket), so the
            // L3 cache nodes pointed to by the CPUs in a package share the same L3
            // cache phandle. The L3 cache phandles must start from the largest L2
            // cache phandle plus 1 to avoid duplication.
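            // Illustrative worked example (not part of the original comments): with 4 vCPUs
            // (num_cpus = max_cpus = 4) and one package, the first L3 phandle below is
            // 0 + 4 + 4 + 8 + 6 + 7 = 29, above both the CPU phandles (8..=11) and the
            // per-CPU L2 phandles (18..=21), so no collision occurs.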
            fdt.property_u32(
                "phandle",
                i + num_cpus as u32
                    + max_cpus
                    + FIRST_VCPU_PHANDLE
                    + L2_CACHE_PHANDLE
                    + L3_CACHE_PHANDLE,
            )?;

            fdt.property_string("compatible", "cache")?;
            fdt.property_null("cache-unified")?;
            fdt.property_u32("cache-size", l3_cache_size)?;
            fdt.property_u32("cache-line-size", l3_cache_line_size)?;
            fdt.property_u32("cache-sets", l3_cache_sets)?;
            fdt.property_u32("cache-level", 3)?;
            fdt.end_node(l3_cache_node)?;

            i += 1;
        }
    }

    if let Some(topology) = vcpu_topology {
        let (threads_per_core, cores_per_package, packages) = topology;
        let cpu_map_node = fdt.begin_node("cpu-map")?;

        // Create device tree nodes with regard to the above mapping.
        for package_idx in 0..packages {
            let package_name = format!("socket{package_idx:x}");
            let package_node = fdt.begin_node(&package_name)?;

            // A cluster is the container of cores, and it is mandatory in the CPU topology.
            // Add a default "cluster0" in each socket/package.
            let cluster_node = fdt.begin_node("cluster0")?;

            for core_idx in 0..cores_per_package {
                let core_name = format!("core{core_idx:x}");
                let core_node = fdt.begin_node(&core_name)?;

                for thread_idx in 0..threads_per_core {
                    let thread_name = format!("thread{thread_idx:x}");
                    let thread_node = fdt.begin_node(&thread_name)?;
                    let cpu_idx = threads_per_core * cores_per_package * package_idx
                        + threads_per_core * core_idx
                        + thread_idx;
                    fdt.property_u32("cpu", cpu_idx as u32 + FIRST_VCPU_PHANDLE)?;
                    fdt.end_node(thread_node)?;
                }

                fdt.end_node(core_node)?;
            }
            fdt.end_node(cluster_node)?;
            fdt.end_node(package_node)?;
        }
        fdt.end_node(cpu_map_node)?;
    } else {
        debug!("Boot using device tree, CPU topology is not (correctly) specified");
    }

    fdt.end_node(cpus_node)?;

    Ok(())
}

fn create_memory_node(
    fdt: &mut FdtWriter,
    guest_mem: &GuestMemoryMmap,
    numa_nodes: &NumaNodes,
) -> FdtWriterResult<()> {
    // See https://github.com/torvalds/linux/blob/58ae0b51506802713aa0e9956d1853ba4c722c98/Documentation/devicetree/bindings/numa.txt
    // for the NUMA setting in the memory node.
    if numa_nodes.len() > 1 {
        for numa_node_idx in 0..numa_nodes.len() {
            let numa_node = numa_nodes.get(&(numa_node_idx as u32));
            let mut mem_reg_prop: Vec<u64> = Vec::new();
            let mut node_memory_addr: u64 = 0;
            // Each NUMA memory zone will have its own memory node, but
            // different NUMA nodes should not share the same memory zones.
            for memory_region in numa_node.unwrap().memory_regions.iter() {
                let memory_region_start_addr: u64 = memory_region.start_addr().raw_value();
                let memory_region_size: u64 = memory_region.size() as u64;
                mem_reg_prop.push(memory_region_start_addr);
                mem_reg_prop.push(memory_region_size);
                // Set the node address to the first non-zero region address.
                if node_memory_addr == 0 {
                    node_memory_addr = memory_region_start_addr;
                }
            }
            let memory_node_name = format!("memory@{node_memory_addr:x}");
            let memory_node = fdt.begin_node(&memory_node_name)?;
            fdt.property_string("device_type", "memory")?;
            fdt.property_array_u64("reg", &mem_reg_prop)?;
            fdt.property_u32("numa-node-id", numa_node_idx as u32)?;
            fdt.end_node(memory_node)?;
        }
    } else {
        // Note: memory regions from "GuestMemory" are sorted and non-zero sized.
        let ram_regions = {
            let mut ram_regions = Vec::new();
            let mut current_start = guest_mem
                .iter()
                .next()
                .map(GuestMemoryRegion::start_addr)
                .expect("GuestMemory must have one memory region at least")
                .raw_value();
            let mut current_end = current_start;

            for (start, size) in guest_mem
                .iter()
                .map(|m| (m.start_addr().raw_value(), m.len()))
            {
                if current_end == start {
                    // This zone is continuous with the previous one.
                    current_end += size;
                } else {
                    ram_regions.push((current_start, current_end));

                    current_start = start;
                    current_end = start + size;
                }
            }

            ram_regions.push((current_start, current_end));

            ram_regions
        };

        if ram_regions.len() > 2 {
            panic!(
                "There should be up to two non-continuous regions, divided by the
                gap at the end of 32bit address space."
            );
        }

        // Create the memory node for the memory region before the gap.
        {
            let (first_region_start, first_region_end) = ram_regions
                .first()
                .expect("There should be at least one memory region");
            let ram_start = super::layout::RAM_START.raw_value();
            let mem_32bit_reserved_start = super::layout::MEM_32BIT_RESERVED_START.raw_value();

            if !((first_region_start <= &ram_start)
                && (first_region_end > &ram_start)
                && (first_region_end <= &mem_32bit_reserved_start))
            {
                panic!(
                    "Unexpected first memory region layout: (start: 0x{:08x}, end: 0x{:08x}).
                    ram_start: 0x{:08x}, mem_32bit_reserved_start: 0x{:08x}",
                    first_region_start, first_region_end, ram_start, mem_32bit_reserved_start
                );
            }

            let mem_size = first_region_end - ram_start;
            let mem_reg_prop = [ram_start, mem_size];
            let memory_node_name = format!("memory@{:x}", ram_start);
            let memory_node = fdt.begin_node(&memory_node_name)?;
            fdt.property_string("device_type", "memory")?;
            fdt.property_array_u64("reg", &mem_reg_prop)?;
            fdt.end_node(memory_node)?;
        }

        // Create the memory map entry for the memory region after the gap, if any.
        if let Some((second_region_start, second_region_end)) = ram_regions.get(1) {
            let ram_64bit_start = super::layout::RAM_64BIT_START.raw_value();

            if second_region_start != &ram_64bit_start {
                panic!(
                    "Unexpected second memory region layout: start: 0x{:08x}, ram_64bit_start: 0x{:08x}",
                    second_region_start, ram_64bit_start
                );
            }

            let mem_size = second_region_end - ram_64bit_start;
            let mem_reg_prop = [ram_64bit_start, mem_size];
            let memory_node_name = format!("memory@{:x}", ram_64bit_start);
            let memory_node = fdt.begin_node(&memory_node_name)?;
            fdt.property_string("device_type", "memory")?;
            fdt.property_array_u64("reg", &mem_reg_prop)?;
            fdt.end_node(memory_node)?;
        }
    }

    Ok(())
}

fn create_chosen_node(
    fdt: &mut FdtWriter,
    cmdline: &str,
    initrd: &Option<InitramfsConfig>,
) -> FdtWriterResult<()> {
    let chosen_node = fdt.begin_node("chosen")?;
    fdt.property_string("bootargs", cmdline)?;

    if let Some(initrd_config) = initrd {
        let initrd_start = initrd_config.address.raw_value();
        let initrd_end = initrd_config.address.raw_value() + initrd_config.size as u64;
        fdt.property_u64("linux,initrd-start", initrd_start)?;
        fdt.property_u64("linux,initrd-end", initrd_end)?;
    }

    fdt.end_node(chosen_node)?;

    Ok(())
}

fn create_gic_node(fdt: &mut FdtWriter, gic_device: &Arc<Mutex<dyn Vgic>>) -> FdtWriterResult<()> {
    let gic_reg_prop = gic_device.lock().unwrap().device_properties();

    let intc_node = fdt.begin_node("intc")?;

    fdt.property_string("compatible", gic_device.lock().unwrap().fdt_compatibility())?;
    fdt.property_null("interrupt-controller")?;
    // The "#interrupt-cells" property specifies the number of cells needed to encode an
    // interrupt source. The type shall be a <u32> and the value shall be 3 if no PPI
    // affinity description is required.
    fdt.property_u32("#interrupt-cells", 3)?;
    fdt.property_array_u64("reg", &gic_reg_prop)?;
    fdt.property_u32("phandle", GIC_PHANDLE)?;
    fdt.property_u32("#address-cells", 2)?;
    fdt.property_u32("#size-cells", 2)?;
    fdt.property_null("ranges")?;

    let gic_intr_prop = [
        GIC_FDT_IRQ_TYPE_PPI,
        gic_device.lock().unwrap().fdt_maint_irq(),
        IRQ_TYPE_LEVEL_HI,
    ];
    fdt.property_array_u32("interrupts", &gic_intr_prop)?;

    if gic_device.lock().unwrap().msi_compatible() {
        let msic_node = fdt.begin_node("msic")?;
        fdt.property_string("compatible", gic_device.lock().unwrap().msi_compatibility())?;
        fdt.property_null("msi-controller")?;
        fdt.property_u32("phandle", MSI_PHANDLE)?;
        let msi_reg_prop = gic_device.lock().unwrap().msi_properties();
        fdt.property_array_u64("reg", &msi_reg_prop)?;
        fdt.end_node(msic_node)?;
    }

    fdt.end_node(intc_node)?;

    Ok(())
}

fn create_clock_node(fdt: &mut FdtWriter) -> FdtWriterResult<()> {
    // The Advanced Peripheral Bus (APB) is part of the Advanced Microcontroller Bus Architecture
    // (AMBA) protocol family. It defines a low-cost interface that is optimized for minimal power
    // consumption and reduced interface complexity.
    // PCLK is the clock source and this node defines exactly the clock for the APB.
    let clock_node = fdt.begin_node("apb-pclk")?;
    fdt.property_string("compatible", "fixed-clock")?;
    fdt.property_u32("#clock-cells", 0x0)?;
    fdt.property_u32("clock-frequency", 24000000)?;
    fdt.property_string("clock-output-names", "clk24mhz")?;
    fdt.property_u32("phandle", CLOCK_PHANDLE)?;
    fdt.end_node(clock_node)?;

    Ok(())
}

fn create_timer_node(fdt: &mut FdtWriter) -> FdtWriterResult<()> {
    // See
    // https://github.com/torvalds/linux/blob/master/Documentation/devicetree/bindings/interrupt-controller/arch_timer.txt
    // These are fixed interrupt numbers for the timer device.
    let irqs = [13, 14, 11, 10];
    let compatible = "arm,armv8-timer";

    let mut timer_reg_cells: Vec<u32> = Vec::new();
    for &irq in irqs.iter() {
        timer_reg_cells.push(GIC_FDT_IRQ_TYPE_PPI);
        timer_reg_cells.push(irq);
        timer_reg_cells.push(IRQ_TYPE_LEVEL_HI);
    }

    let timer_node = fdt.begin_node("timer")?;
    fdt.property_string("compatible", compatible)?;
    fdt.property_null("always-on")?;
    fdt.property_array_u32("interrupts", &timer_reg_cells)?;
    fdt.end_node(timer_node)?;

    Ok(())
}

fn create_psci_node(fdt: &mut FdtWriter) -> FdtWriterResult<()> {
    let compatible = "arm,psci-0.2";
    let psci_node = fdt.begin_node("psci")?;
    fdt.property_string("compatible", compatible)?;
    // Two methods are available: hvc and smc.
    // As per the documentation, PSCI calls between a guest and hypervisor may use the
    // HVC conduit instead of SMC.
    // So, since we are using KVM, we need to use hvc.
    fdt.property_string("method", "hvc")?;
    fdt.end_node(psci_node)?;

    Ok(())
}

fn create_virtio_node<T: DeviceInfoForFdt + Clone + Debug>(
    fdt: &mut FdtWriter,
    dev_info: &T,
) -> FdtWriterResult<()> {
    let device_reg_prop = [dev_info.addr(), dev_info.length()];
    let irq = [GIC_FDT_IRQ_TYPE_SPI, dev_info.irq(), IRQ_TYPE_EDGE_RISING];

    let virtio_node = fdt.begin_node(&format!("virtio_mmio@{:x}", dev_info.addr()))?;
    fdt.property_string("compatible", "virtio,mmio")?;
    fdt.property_array_u64("reg", &device_reg_prop)?;
    fdt.property_array_u32("interrupts", &irq)?;
    fdt.property_u32("interrupt-parent", GIC_PHANDLE)?;
    fdt.end_node(virtio_node)?;

    Ok(())
}

fn create_serial_node<T: DeviceInfoForFdt + Clone + Debug>(
    fdt: &mut FdtWriter,
    dev_info: &T,
) -> FdtWriterResult<()> {
    let compatible = b"arm,pl011\0arm,primecell\0";
    let serial_reg_prop = [dev_info.addr(), dev_info.length()];
    let irq = [
        GIC_FDT_IRQ_TYPE_SPI,
        dev_info.irq() - IRQ_BASE,
        IRQ_TYPE_EDGE_RISING,
    ];

    let serial_node = fdt.begin_node(&format!("pl011@{:x}", dev_info.addr()))?;
    fdt.property("compatible", compatible)?;
    fdt.property_array_u64("reg", &serial_reg_prop)?;
    fdt.property_u32("clocks", CLOCK_PHANDLE)?;
    fdt.property_string("clock-names", "apb_pclk")?;
    fdt.property_array_u32("interrupts", &irq)?;
    fdt.end_node(serial_node)?;

    Ok(())
}

fn create_rtc_node<T: DeviceInfoForFdt + Clone + Debug>(
    fdt: &mut FdtWriter,
    dev_info: &T,
) -> FdtWriterResult<()> {
    let compatible = b"arm,pl031\0arm,primecell\0";
    let rtc_reg_prop = [dev_info.addr(), dev_info.length()];
    let irq = [
        GIC_FDT_IRQ_TYPE_SPI,
        dev_info.irq() - IRQ_BASE,
        IRQ_TYPE_LEVEL_HI,
    ];

    let rtc_node = fdt.begin_node(&format!("rtc@{:x}", dev_info.addr()))?;
    fdt.property("compatible", compatible)?;
    fdt.property_array_u64("reg", &rtc_reg_prop)?;
    fdt.property_array_u32("interrupts", &irq)?;
    fdt.property_u32("clocks", CLOCK_PHANDLE)?;
    fdt.property_string("clock-names", "apb_pclk")?;
    fdt.end_node(rtc_node)?;

    Ok(())
}

fn create_gpio_node<T: DeviceInfoForFdt + Clone + Debug>(
    fdt: &mut FdtWriter,
    dev_info: &T,
) -> FdtWriterResult<()> {
    // PL061 GPIO controller node.
    let compatible = b"arm,pl061\0arm,primecell\0";
    let gpio_reg_prop = [dev_info.addr(), dev_info.length()];
    let irq = [
        GIC_FDT_IRQ_TYPE_SPI,
        dev_info.irq() - IRQ_BASE,
        IRQ_TYPE_EDGE_RISING,
    ];

    let gpio_node = fdt.begin_node(&format!("pl061@{:x}", dev_info.addr()))?;
    fdt.property("compatible", compatible)?;
    fdt.property_array_u64("reg", &gpio_reg_prop)?;
    fdt.property_array_u32("interrupts", &irq)?;
    fdt.property_null("gpio-controller")?;
    fdt.property_u32("#gpio-cells", 2)?;
    fdt.property_u32("clocks", CLOCK_PHANDLE)?;
    fdt.property_string("clock-names", "apb_pclk")?;
    fdt.property_u32("phandle", GPIO_PHANDLE)?;
    fdt.end_node(gpio_node)?;

    // gpio-keys node
    let gpio_keys_node = fdt.begin_node("gpio-keys")?;
    fdt.property_string("compatible", "gpio-keys")?;
    fdt.property_u32("#size-cells", 0)?;
    fdt.property_u32("#address-cells", 1)?;
    let gpio_keys_poweroff_node = fdt.begin_node("button@1")?;
    fdt.property_string("label", "GPIO Key Poweroff")?;
    fdt.property_u32("linux,code", KEY_POWER)?;
    let gpios = [GPIO_PHANDLE, 3, 0];
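    // Illustrative note (assumption based on the standard GPIO binding with
    // #gpio-cells = 2): the specifier above reads as (phandle, line, flags),
    // i.e. GPIO line 3 of the PL061 with flags 0 (active high).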
    fdt.property_array_u32("gpios", &gpios)?;
    fdt.end_node(gpio_keys_poweroff_node)?;
    fdt.end_node(gpio_keys_node)?;

    Ok(())
}

fn create_devices_node<T: DeviceInfoForFdt + Clone + Debug, S: ::std::hash::BuildHasher>(
    fdt: &mut FdtWriter,
    dev_info: &HashMap<(DeviceType, String), T, S>,
) -> FdtWriterResult<()> {
    // Create one temporary Vec to store all virtio devices.
    let mut ordered_virtio_device: Vec<&T> = Vec::new();

    for ((device_type, _device_id), info) in dev_info {
        match device_type {
            DeviceType::Gpio => create_gpio_node(fdt, info)?,
            DeviceType::Rtc => create_rtc_node(fdt, info)?,
            DeviceType::Serial => create_serial_node(fdt, info)?,
            DeviceType::Virtio(_) => {
                ordered_virtio_device.push(info);
            }
        }
    }

    // Sort the virtio devices by address from low to high and insert them into the FDT.
    ordered_virtio_device.sort_by_key(|&a| a.addr());
    // The current address allocation strategy in cloud-hypervisor is that the first created
    // device is allocated the highest address. Here we reverse the vector to make sure that
    // devices created earlier appear before devices created later in the FDT.
    ordered_virtio_device.reverse();
    for ordered_device_info in ordered_virtio_device.drain(..) {
        create_virtio_node(fdt, ordered_device_info)?;
    }

    Ok(())
}

fn create_pmu_node(fdt: &mut FdtWriter) -> FdtWriterResult<()> {
    let compatible = "arm,armv8-pmuv3";
    let irq = [GIC_FDT_IRQ_TYPE_PPI, AARCH64_PMU_IRQ, IRQ_TYPE_LEVEL_HI];

    let pmu_node = fdt.begin_node("pmu")?;
    fdt.property_string("compatible", compatible)?;
    fdt.property_array_u32("interrupts", &irq)?;
    fdt.end_node(pmu_node)?;
    Ok(())
}

fn create_pci_nodes(
    fdt: &mut FdtWriter,
    pci_device_info: &[PciSpaceInfo],
    virtio_iommu_bdf: Option<u32>,
) -> FdtWriterResult<()> {
    // Add a node for the PCIe controller.
    // See Documentation/devicetree/bindings/pci/host-generic-pci.txt in the kernel
    // and https://elinux.org/Device_Tree_Usage.
    // In a multiple PCI segments setup, each PCI segment needs a PCI node.
    for pci_device_info_elem in pci_device_info.iter() {
        // EDK2 requires the PCIe high space to be above the 4G address.
        // The actual space in CLH follows the RAM. If the RAM space is small, the PCIe high
        // space could fall below 4G.
        // Here we cut off the PCI device space below 8G in the FDT as a workaround for the
        // EDK2 check. The address written in ACPI is not impacted.
        let (pci_device_base_64bit, pci_device_size_64bit) =
            if pci_device_info_elem.pci_device_space_start < PCI_HIGH_BASE.raw_value() {
                (
                    PCI_HIGH_BASE.raw_value(),
                    pci_device_info_elem.pci_device_space_size
                        - (PCI_HIGH_BASE.raw_value() - pci_device_info_elem.pci_device_space_start),
                )
            } else {
                (
                    pci_device_info_elem.pci_device_space_start,
                    pci_device_info_elem.pci_device_space_size,
                )
            };
        // There is no specific requirement on the 32bit MMIO range, so
        // at least we can make these ranges 4K aligned.
        let pci_device_size_32bit: u64 =
            MEM_32BIT_DEVICES_SIZE / ((1 << 12) * pci_device_info.len() as u64) * (1 << 12);
        let pci_device_base_32bit: u64 = MEM_32BIT_DEVICES_START.0
            + pci_device_size_32bit * pci_device_info_elem.pci_segment_id as u64;

        let ranges = [
            // io addresses. Since AArch64 will not use IO addresses,
            // we can set the same IO address range for every segment.
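            // Illustrative note (not in the original comments): with #address-cells = 3
            // on this PCI node and #address-cells = 2 / #size-cells = 2 on the root node,
            // each "ranges" entry is 7 cells: a 3-cell PCI address (phys.hi phys.mid
            // phys.lo), a 2-cell CPU address and a 2-cell size. phys.hi carries the
            // space code ("ss" bits) annotated below.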
            0x1000000,
            0_u32,
            0_u32,
            (MEM_PCI_IO_START.0 >> 32) as u32,
            MEM_PCI_IO_START.0 as u32,
            (MEM_PCI_IO_SIZE >> 32) as u32,
            MEM_PCI_IO_SIZE as u32,
            // mmio addresses
            0x2000000,                            // (ss = 10: 32-bit memory space)
            (pci_device_base_32bit >> 32) as u32, // PCI address
            pci_device_base_32bit as u32,
            (pci_device_base_32bit >> 32) as u32, // CPU address
            pci_device_base_32bit as u32,
            (pci_device_size_32bit >> 32) as u32, // size
            pci_device_size_32bit as u32,
            // device addresses
            0x3000000,                            // (ss = 11: 64-bit memory space)
            (pci_device_base_64bit >> 32) as u32, // PCI address
            pci_device_base_64bit as u32,
            (pci_device_base_64bit >> 32) as u32, // CPU address
            pci_device_base_64bit as u32,
            (pci_device_size_64bit >> 32) as u32, // size
            pci_device_size_64bit as u32,
        ];
        let bus_range = [0, 0]; // Only bus 0
        let reg = [
            pci_device_info_elem.mmio_config_address,
            PCI_MMIO_CONFIG_SIZE_PER_SEGMENT,
        ];
        // See kernel document Documentation/devicetree/bindings/pci/pci-msi.txt
        let msi_map = [
            // rid-base: A single cell describing the first RID matched by the entry.
            0x0,
            // msi-controller: A single phandle to an MSI controller.
            MSI_PHANDLE,
            // msi-base: An msi-specifier describing the msi-specifier produced for the
            // first RID matched by the entry.
            (pci_device_info_elem.pci_segment_id as u32) << 8,
            // length: A single cell describing how many consecutive RIDs are matched
            // following the rid-base.
            0x100,
        ];

        let pci_node_name = format!("pci@{:x}", pci_device_info_elem.mmio_config_address);
        let pci_node = fdt.begin_node(&pci_node_name)?;

        fdt.property_string("compatible", "pci-host-ecam-generic")?;
        fdt.property_string("device_type", "pci")?;
        fdt.property_array_u32("ranges", &ranges)?;
        fdt.property_array_u32("bus-range", &bus_range)?;
        fdt.property_u32(
            "linux,pci-domain",
            pci_device_info_elem.pci_segment_id as u32,
        )?;
        fdt.property_u32("#address-cells", 3)?;
        fdt.property_u32("#size-cells", 2)?;
        fdt.property_array_u64("reg", &reg)?;
        fdt.property_u32("#interrupt-cells", 1)?;
        fdt.property_null("interrupt-map")?;
        fdt.property_null("interrupt-map-mask")?;
        fdt.property_null("dma-coherent")?;
        fdt.property_array_u32("msi-map", &msi_map)?;
        fdt.property_u32("msi-parent", MSI_PHANDLE)?;

        if pci_device_info_elem.pci_segment_id == 0 {
            if let Some(virtio_iommu_bdf) = virtio_iommu_bdf {
                // See kernel document Documentation/devicetree/bindings/pci/pci-iommu.txt
                // for the 'iommu-map' attribute setting.
                let iommu_map = [
                    0_u32,
                    VIRTIO_IOMMU_PHANDLE,
                    0_u32,
                    virtio_iommu_bdf,
                    virtio_iommu_bdf + 1,
                    VIRTIO_IOMMU_PHANDLE,
                    virtio_iommu_bdf + 1,
                    0xffff - virtio_iommu_bdf,
                ];
                fdt.property_array_u32("iommu-map", &iommu_map)?;

                // See kernel document Documentation/devicetree/bindings/virtio/iommu.txt
                // for virtio-iommu node settings.
                let virtio_iommu_node_name = format!("virtio_iommu@{virtio_iommu_bdf:x}");
                let virtio_iommu_node = fdt.begin_node(&virtio_iommu_node_name)?;
                fdt.property_u32("#iommu-cells", 1)?;
                fdt.property_string("compatible", "virtio,pci-iommu")?;

                // 'reg' is a five-cell address encoded as
                // (phys.hi phys.mid phys.lo size.hi size.lo). phys.hi should contain the
                // device's BDF as 0b00000000 bbbbbbbb dddddfff 00000000. The other cells
                // should be zero.
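                // Illustrative example (not in the original comments): for a device at
                // 00:01.0, the BDF value is 1, so phys.hi = 1 << 8 = 0x0000_0100.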
                let reg = [virtio_iommu_bdf << 8, 0_u32, 0_u32, 0_u32, 0_u32];
                fdt.property_array_u32("reg", &reg)?;
                fdt.property_u32("phandle", VIRTIO_IOMMU_PHANDLE)?;

                fdt.end_node(virtio_iommu_node)?;
            }
        }

        fdt.end_node(pci_node)?;
    }

    Ok(())
}

fn create_distance_map_node(fdt: &mut FdtWriter, numa_nodes: &NumaNodes) -> FdtWriterResult<()> {
    let distance_map_node = fdt.begin_node("distance-map")?;
    fdt.property_string("compatible", "numa-distance-map-v1")?;
    // Construct the distance matrix.
    // 1. We use the word 'entry' to describe a distance from a node to
    //    its destination, e.g. 0 -> 1 = 20 is described as <0 1 20>.
    // 2. Each entry represents the distance from the first node to the second node.
    //    The distances are equal in either direction.
    // 3. The distance from a node to itself (local distance) is represented
    //    with the value 10 and all internode distances should be represented with
    //    a value greater than 10.
    // 4. distance-matrix should have entries in lexicographical ascending
    //    order of nodes.
    let mut distance_matrix = Vec::new();
    for numa_node_idx in 0..numa_nodes.len() {
        let numa_node = numa_nodes.get(&(numa_node_idx as u32));
        for dest_numa_node in 0..numa_node.unwrap().distances.len() + 1 {
            if numa_node_idx == dest_numa_node {
                distance_matrix.push(numa_node_idx as u32);
                distance_matrix.push(dest_numa_node as u32);
                distance_matrix.push(10_u32);
                continue;
            }

            distance_matrix.push(numa_node_idx as u32);
            distance_matrix.push(dest_numa_node as u32);
            distance_matrix.push(
                *numa_node
                    .unwrap()
                    .distances
                    .get(&(dest_numa_node as u32))
                    .unwrap() as u32,
            );
        }
    }
    fdt.property_array_u32("distance-matrix", distance_matrix.as_ref())?;
    fdt.end_node(distance_map_node)?;

    Ok(())
}

// Parse the DTB binary and print it for debugging.
pub fn print_fdt(dtb: &[u8]) {
    match fdt_parser::Fdt::new(dtb) {
        Ok(fdt) => {
            if let Some(root) = fdt.find_node("/") {
                debug!("Printing the FDT:");
                print_node(root, 0);
            } else {
                debug!("Failed to find root node in FDT for debugging.");
            }
        }
        Err(_) => debug!("Failed to parse FDT for debugging."),
    }
}

fn print_node(node: fdt_parser::node::FdtNode<'_, '_>, n_spaces: usize) {
    debug!("{:indent$}{}/", "", node.name, indent = n_spaces);
    for property in node.properties() {
        let name = property.name;

        // If the property is 'compatible', its value requires special handling.
        // The u8 array could contain multiple null-terminated strings.
        // We copy the original array and simply replace all 'null' characters with spaces.
        let value = if name == "compatible" {
            let mut compatible = vec![0u8; 256];
            let handled_value = property
                .value
                .iter()
                .map(|&c| if c == 0 { b' ' } else { c })
                .collect::<Vec<_>>();
            let len = cmp::min(255, handled_value.len());
            compatible[..len].copy_from_slice(&handled_value[..len]);
            compatible[..(len + 1)].to_vec()
        } else {
            property.value.to_vec()
        };
        let value = &value;

        // Now the value can be either:
        // - A null-terminated C string, or
        // - Binary data.
        // We follow a very simple logic to present the value:
        // - First, try to convert it to a CStr and print it,
        // - If that fails, print it as a u32 array.
        let value_result = match CStr::from_bytes_with_nul(value) {
            Ok(value_cstr) => match value_cstr.to_str() {
                Ok(value_str) => Some(value_str),
                Err(_e) => None,
            },
            Err(_e) => None,
        };

        if let Some(value_str) = value_result {
            debug!(
                "{:indent$}{} : {:#?}",
                "",
                name,
                value_str,
                indent = (n_spaces + 2)
            );
        } else {
            let mut array = Vec::with_capacity(256);
            array.resize(value.len() / 4, 0u32);
            BigEndian::read_u32_into(value, &mut array);
            debug!(
                "{:indent$}{} : {:X?}",
                "",
                name,
                array,
                indent = (n_spaces + 2)
            );
        };
    }

    // Print child nodes, if any.
    for child in node.children() {
        print_node(child, n_spaces + 2);
    }
}
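
#[cfg(test)]
mod tests {
    use super::*;

    // Minimal illustrative sketch (not part of the original module): build a tiny FDT
    // blob with FdtWriter and feed it to print_fdt() to exercise the debug parser.
    // The node and property values below are arbitrary test data.
    #[test]
    fn test_print_fdt_on_minimal_blob() {
        let mut fdt = FdtWriter::new().unwrap();
        let root_node = fdt.begin_node("").unwrap();
        fdt.property_string("compatible", "linux,dummy-virt").unwrap();
        fdt.property_u32("#address-cells", ADDRESS_CELLS).unwrap();
        fdt.property_u32("#size-cells", SIZE_CELLS).unwrap();
        fdt.end_node(root_node).unwrap();
        let blob = fdt.finish().unwrap();

        // print_fdt() only emits debug! logs; the call simply must not panic.
        print_fdt(&blob);
    }
}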