// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::undocumented_unsafe_blocks)]
// When enabling the `mshv` feature, we skip a number of tests and
// therefore have known dead code. This annotation silences dead-code
// related warnings so that our quality workflow passes.
#![allow(dead_code)]

extern crate test_infra;

use net_util::MacAddr;
use std::collections::HashMap;
use std::fs;
use std::io;
use std::io::BufRead;
use std::io::Read;
use std::io::Seek;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::Receiver;
use std::sync::Mutex;
use std::thread;
use test_infra::*;
use vmm_sys_util::{tempdir::TempDir, tempfile::TempFile};
use wait_timeout::ChildExt;

// Constant taken from the VMM crate.
const MAX_NUM_PCI_SEGMENTS: u16 = 96;

#[cfg(target_arch = "x86_64")]
mod x86_64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
    pub const JAMMY_NVIDIA_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-nvidia.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20230119-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw";
    pub const OVMF_NAME: &str = "CLOUDHV.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
}

#[cfg(target_arch = "x86_64")]
use x86_64::*;

#[cfg(target_arch = "aarch64")]
mod aarch64 {
    pub const BIONIC_IMAGE_NAME: &str = "bionic-server-cloudimg-arm64.raw";
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
    pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw";
    pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
    pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true";
}

#[cfg(target_arch = "aarch64")]
use aarch64::*;

const DIRECT_KERNEL_BOOT_CMDLINE: &str =
    "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1";

const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server";

// This enum exists to make it more convenient to
// implement tests for both the D-Bus and REST APIs.
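// Concretely, the two variants translate into the command-line arguments
// assembled by guest_args() below:
//   REST:  --api-socket <path>
//   D-Bus: --dbus-service-name <name> --dbus-object-path <path>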
78 enum TargetApi { 79 // API socket 80 HttpApi(String), 81 // well known service name, object path 82 DBusApi(String, String), 83 } 84 85 impl TargetApi { 86 fn new_http_api(tmp_dir: &TempDir) -> Self { 87 Self::HttpApi(temp_api_path(tmp_dir)) 88 } 89 90 fn new_dbus_api(tmp_dir: &TempDir) -> Self { 91 // `tmp_dir` is in the form of "/tmp/chXXXXXX" 92 // and we take the `chXXXXXX` part as a unique identifier for the guest 93 let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap(); 94 95 Self::DBusApi( 96 format!("org.cloudhypervisor.{id}"), 97 format!("/org/cloudhypervisor/{id}"), 98 ) 99 } 100 101 fn guest_args(&self) -> Vec<&str> { 102 match self { 103 TargetApi::HttpApi(api_socket) => { 104 vec!["--api-socket", api_socket.as_str()] 105 } 106 TargetApi::DBusApi(service_name, object_path) => { 107 vec![ 108 "--dbus-service-name", 109 service_name.as_str(), 110 "--dbus-object-path", 111 object_path.as_str(), 112 ] 113 } 114 } 115 } 116 117 fn remote_args(&self) -> Vec<&str> { 118 // `guest_args` and `remote_args` are consistent with each other 119 self.guest_args() 120 } 121 122 fn remote_command(&self, command: &str, arg: Option<&str>) -> bool { 123 let mut cmd = Command::new(clh_command("ch-remote")); 124 cmd.args(self.remote_args()); 125 cmd.arg(command); 126 127 if let Some(arg) = arg { 128 cmd.arg(arg); 129 } 130 131 let output = cmd.output().unwrap(); 132 if output.status.success() { 133 true 134 } else { 135 eprintln!("Error running ch-remote command: {:?}", &cmd); 136 let stderr = String::from_utf8_lossy(&output.stderr); 137 eprintln!("stderr: {stderr}"); 138 false 139 } 140 } 141 } 142 143 // Start cloud-hypervisor with no VM parameters, only the API server running. 144 // From the API: Create a VM, boot it and check that it looks as expected. 145 fn _test_api_create_boot(target_api: TargetApi, guest: Guest) { 146 let mut child = GuestCommand::new(&guest) 147 .args(target_api.guest_args()) 148 .capture_output() 149 .spawn() 150 .unwrap(); 151 152 thread::sleep(std::time::Duration::new(1, 0)); 153 154 // Verify API server is running 155 assert!(target_api.remote_command("ping", None)); 156 157 // Create the VM first 158 let cpu_count: u8 = 4; 159 let request_body = guest.api_create_body( 160 cpu_count, 161 direct_kernel_boot_path().to_str().unwrap(), 162 DIRECT_KERNEL_BOOT_CMDLINE, 163 ); 164 165 let temp_config_path = guest.tmp_dir.as_path().join("config"); 166 std::fs::write(&temp_config_path, request_body).unwrap(); 167 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 168 169 assert!(target_api.remote_command("create", Some(create_config),)); 170 171 // Then boot it 172 assert!(target_api.remote_command("boot", None)); 173 thread::sleep(std::time::Duration::new(20, 0)); 174 175 let r = std::panic::catch_unwind(|| { 176 // Check that the VM booted as expected 177 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 178 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 179 }); 180 181 let _ = child.kill(); 182 let output = child.wait_with_output().unwrap(); 183 184 handle_child_output(r, &output); 185 } 186 187 // Start cloud-hypervisor with no VM parameters, only the API server running. 
188 // From the API: Create a VM, boot it and check it can be shutdown and then 189 // booted again 190 fn _test_api_shutdown(target_api: TargetApi, guest: Guest) { 191 let mut child = GuestCommand::new(&guest) 192 .args(target_api.guest_args()) 193 .capture_output() 194 .spawn() 195 .unwrap(); 196 197 thread::sleep(std::time::Duration::new(1, 0)); 198 199 // Verify API server is running 200 assert!(target_api.remote_command("ping", None)); 201 202 // Create the VM first 203 let cpu_count: u8 = 4; 204 let request_body = guest.api_create_body( 205 cpu_count, 206 direct_kernel_boot_path().to_str().unwrap(), 207 DIRECT_KERNEL_BOOT_CMDLINE, 208 ); 209 210 let temp_config_path = guest.tmp_dir.as_path().join("config"); 211 std::fs::write(&temp_config_path, request_body).unwrap(); 212 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 213 214 let r = std::panic::catch_unwind(|| { 215 assert!(target_api.remote_command("create", Some(create_config))); 216 217 // Then boot it 218 assert!(target_api.remote_command("boot", None)); 219 220 guest.wait_vm_boot(None).unwrap(); 221 222 // Check that the VM booted as expected 223 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 224 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 225 226 // Sync and shutdown without powering off to prevent filesystem 227 // corruption. 228 guest.ssh_command("sync").unwrap(); 229 guest.ssh_command("sudo shutdown -H now").unwrap(); 230 231 // Wait for the guest to be fully shutdown 232 thread::sleep(std::time::Duration::new(20, 0)); 233 234 // Then shut it down 235 assert!(target_api.remote_command("shutdown", None)); 236 237 // Then boot it again 238 assert!(target_api.remote_command("boot", None)); 239 240 guest.wait_vm_boot(None).unwrap(); 241 242 // Check that the VM booted as expected 243 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 244 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 245 }); 246 247 let _ = child.kill(); 248 let output = child.wait_with_output().unwrap(); 249 250 handle_child_output(r, &output); 251 } 252 253 // Start cloud-hypervisor with no VM parameters, only the API server running. 254 // From the API: Create a VM, boot it and check it can be deleted and then recreated 255 // booted again. 
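// For reference (illustrative only, not exercised verbatim here), when the
// HTTP API is used the ch-remote sub-commands below roughly map onto:
//   PUT /api/v1/vm.create   (JSON config as request body)
//   PUT /api/v1/vm.boot
//   PUT /api/v1/vm.delete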
256 fn _test_api_delete(target_api: TargetApi, guest: Guest) { 257 let mut child = GuestCommand::new(&guest) 258 .args(target_api.guest_args()) 259 .capture_output() 260 .spawn() 261 .unwrap(); 262 263 thread::sleep(std::time::Duration::new(1, 0)); 264 265 // Verify API server is running 266 assert!(target_api.remote_command("ping", None)); 267 268 // Create the VM first 269 let cpu_count: u8 = 4; 270 let request_body = guest.api_create_body( 271 cpu_count, 272 direct_kernel_boot_path().to_str().unwrap(), 273 DIRECT_KERNEL_BOOT_CMDLINE, 274 ); 275 let temp_config_path = guest.tmp_dir.as_path().join("config"); 276 std::fs::write(&temp_config_path, request_body).unwrap(); 277 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 278 279 let r = std::panic::catch_unwind(|| { 280 assert!(target_api.remote_command("create", Some(create_config))); 281 282 // Then boot it 283 assert!(target_api.remote_command("boot", None)); 284 285 guest.wait_vm_boot(None).unwrap(); 286 287 // Check that the VM booted as expected 288 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 289 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 290 291 // Sync and shutdown without powering off to prevent filesystem 292 // corruption. 293 guest.ssh_command("sync").unwrap(); 294 guest.ssh_command("sudo shutdown -H now").unwrap(); 295 296 // Wait for the guest to be fully shutdown 297 thread::sleep(std::time::Duration::new(20, 0)); 298 299 // Then delete it 300 assert!(target_api.remote_command("delete", None)); 301 302 assert!(target_api.remote_command("create", Some(create_config))); 303 304 // Then boot it again 305 assert!(target_api.remote_command("boot", None)); 306 307 guest.wait_vm_boot(None).unwrap(); 308 309 // Check that the VM booted as expected 310 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 311 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 312 }); 313 314 let _ = child.kill(); 315 let output = child.wait_with_output().unwrap(); 316 317 handle_child_output(r, &output); 318 } 319 320 // Start cloud-hypervisor with no VM parameters, only the API server running. 321 // From the API: Create a VM, boot it and check that it looks as expected. 322 // Then we pause the VM, check that it's no longer available. 323 // Finally we resume the VM and check that it's available. 
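// The same cycle can also be driven manually with ch-remote, e.g. (the socket
// path below is only an example):
//   ch-remote --api-socket /tmp/cloud-hypervisor.sock pause
//   ch-remote --api-socket /tmp/cloud-hypervisor.sock resume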
324 fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) { 325 let mut child = GuestCommand::new(&guest) 326 .args(target_api.guest_args()) 327 .capture_output() 328 .spawn() 329 .unwrap(); 330 331 thread::sleep(std::time::Duration::new(1, 0)); 332 333 // Verify API server is running 334 assert!(target_api.remote_command("ping", None)); 335 336 // Create the VM first 337 let cpu_count: u8 = 4; 338 let request_body = guest.api_create_body( 339 cpu_count, 340 direct_kernel_boot_path().to_str().unwrap(), 341 DIRECT_KERNEL_BOOT_CMDLINE, 342 ); 343 344 let temp_config_path = guest.tmp_dir.as_path().join("config"); 345 std::fs::write(&temp_config_path, request_body).unwrap(); 346 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 347 348 assert!(target_api.remote_command("create", Some(create_config))); 349 350 // Then boot it 351 assert!(target_api.remote_command("boot", None)); 352 thread::sleep(std::time::Duration::new(20, 0)); 353 354 let r = std::panic::catch_unwind(|| { 355 // Check that the VM booted as expected 356 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 357 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 358 359 // We now pause the VM 360 assert!(target_api.remote_command("pause", None)); 361 362 // Check pausing again fails 363 assert!(!target_api.remote_command("pause", None)); 364 365 thread::sleep(std::time::Duration::new(2, 0)); 366 367 // SSH into the VM should fail 368 assert!(ssh_command_ip( 369 "grep -c processor /proc/cpuinfo", 370 &guest.network.guest_ip, 371 2, 372 5 373 ) 374 .is_err()); 375 376 // Resume the VM 377 assert!(target_api.remote_command("resume", None)); 378 379 // Check resuming again fails 380 assert!(!target_api.remote_command("resume", None)); 381 382 thread::sleep(std::time::Duration::new(2, 0)); 383 384 // Now we should be able to SSH back in and get the right number of CPUs 385 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 386 }); 387 388 let _ = child.kill(); 389 let output = child.wait_with_output().unwrap(); 390 391 handle_child_output(r, &output); 392 } 393 394 fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) { 395 let mut workload_path = dirs::home_dir().unwrap(); 396 workload_path.push("workloads"); 397 398 let mut virtiofsd_path = workload_path; 399 virtiofsd_path.push("virtiofsd"); 400 let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap()); 401 402 let virtiofsd_socket_path = 403 String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap()); 404 405 // Start the daemon 406 let child = Command::new(virtiofsd_path.as_str()) 407 .args(["--shared-dir", shared_dir]) 408 .args(["--socket-path", virtiofsd_socket_path.as_str()]) 409 .args(["--cache", "never"]) 410 .spawn() 411 .unwrap(); 412 413 thread::sleep(std::time::Duration::new(10, 0)); 414 415 (child, virtiofsd_socket_path) 416 } 417 418 fn prepare_vubd( 419 tmp_dir: &TempDir, 420 blk_img: &str, 421 num_queues: usize, 422 rdonly: bool, 423 direct: bool, 424 ) -> (std::process::Child, String) { 425 let mut workload_path = dirs::home_dir().unwrap(); 426 workload_path.push("workloads"); 427 428 let mut blk_file_path = workload_path; 429 blk_file_path.push(blk_img); 430 let blk_file_path = String::from(blk_file_path.to_str().unwrap()); 431 432 let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap()); 433 434 // Start the daemon 435 let child = Command::new(clh_command("vhost_user_block")) 436 .args([ 437 
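            // The backend string assembled below points the daemon at the test
            // image and the vhost-user socket, and forwards the caller's
            // num_queues/readonly/direct settings.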
"--block-backend", 438 format!( 439 "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}" 440 ) 441 .as_str(), 442 ]) 443 .spawn() 444 .unwrap(); 445 446 thread::sleep(std::time::Duration::new(10, 0)); 447 448 (child, vubd_socket_path) 449 } 450 451 fn temp_vsock_path(tmp_dir: &TempDir) -> String { 452 String::from(tmp_dir.as_path().join("vsock").to_str().unwrap()) 453 } 454 455 fn temp_api_path(tmp_dir: &TempDir) -> String { 456 String::from( 457 tmp_dir 458 .as_path() 459 .join("cloud-hypervisor.sock") 460 .to_str() 461 .unwrap(), 462 ) 463 } 464 465 fn temp_event_monitor_path(tmp_dir: &TempDir) -> String { 466 String::from(tmp_dir.as_path().join("event.json").to_str().unwrap()) 467 } 468 469 // Creates the directory and returns the path. 470 fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String { 471 let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap()); 472 std::fs::create_dir(&snapshot_dir).unwrap(); 473 snapshot_dir 474 } 475 476 fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String { 477 let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap()); 478 vmcore_file 479 } 480 481 // Creates the path for direct kernel boot and return the path. 482 // For x86_64, this function returns the vmlinux kernel path. 483 // For AArch64, this function returns the PE kernel path. 484 fn direct_kernel_boot_path() -> PathBuf { 485 let mut workload_path = dirs::home_dir().unwrap(); 486 workload_path.push("workloads"); 487 488 let mut kernel_path = workload_path; 489 #[cfg(target_arch = "x86_64")] 490 kernel_path.push("vmlinux"); 491 #[cfg(target_arch = "aarch64")] 492 kernel_path.push("Image"); 493 494 kernel_path 495 } 496 497 fn edk2_path() -> PathBuf { 498 let mut workload_path = dirs::home_dir().unwrap(); 499 workload_path.push("workloads"); 500 let mut edk2_path = workload_path; 501 edk2_path.push(OVMF_NAME); 502 503 edk2_path 504 } 505 506 fn cloud_hypervisor_release_path() -> String { 507 let mut workload_path = dirs::home_dir().unwrap(); 508 workload_path.push("workloads"); 509 510 let mut ch_release_path = workload_path; 511 #[cfg(target_arch = "x86_64")] 512 ch_release_path.push("cloud-hypervisor-static"); 513 #[cfg(target_arch = "aarch64")] 514 ch_release_path.push("cloud-hypervisor-static-aarch64"); 515 516 ch_release_path.into_os_string().into_string().unwrap() 517 } 518 519 fn prepare_vhost_user_net_daemon( 520 tmp_dir: &TempDir, 521 ip: &str, 522 tap: Option<&str>, 523 mtu: Option<u16>, 524 num_queues: usize, 525 client_mode: bool, 526 ) -> (std::process::Command, String) { 527 let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap()); 528 529 // Start the daemon 530 let mut net_params = format!( 531 "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}" 532 ); 533 534 if let Some(tap) = tap { 535 net_params.push_str(format!(",tap={tap}").as_str()); 536 } 537 538 if let Some(mtu) = mtu { 539 net_params.push_str(format!(",mtu={mtu}").as_str()); 540 } 541 542 let mut command = Command::new(clh_command("vhost_user_net")); 543 command.args(["--net-backend", net_params.as_str()]); 544 545 (command, vunet_socket_path) 546 } 547 548 fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) { 549 let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap()); 550 let swtpm_socket_path = String::from( 551 tmp_dir 552 .as_path() 553 .join("swtpm") 554 
.join("swtpm.sock") 555 .to_str() 556 .unwrap(), 557 ); 558 std::fs::create_dir(&swtpm_tpm_dir).unwrap(); 559 560 let mut swtpm_command = Command::new("swtpm"); 561 let swtpm_args = [ 562 "socket", 563 "--tpmstate", 564 &format!("dir={swtpm_tpm_dir}"), 565 "--ctrl", 566 &format!("type=unixio,path={swtpm_socket_path}"), 567 "--flags", 568 "startup-clear", 569 "--tpm2", 570 ]; 571 swtpm_command.args(swtpm_args); 572 573 (swtpm_command, swtpm_socket_path) 574 } 575 576 fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool { 577 let mut cmd = Command::new(clh_command("ch-remote")); 578 cmd.args(["--api-socket", api_socket, command]); 579 580 if let Some(arg) = arg { 581 cmd.arg(arg); 582 } 583 let output = cmd.output().unwrap(); 584 if output.status.success() { 585 true 586 } else { 587 eprintln!("Error running ch-remote command: {:?}", &cmd); 588 let stderr = String::from_utf8_lossy(&output.stderr); 589 eprintln!("stderr: {stderr}"); 590 false 591 } 592 } 593 594 fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) { 595 let mut cmd = Command::new(clh_command("ch-remote")); 596 cmd.args(["--api-socket", api_socket, command]); 597 598 if let Some(arg) = arg { 599 cmd.arg(arg); 600 } 601 602 let output = cmd.output().expect("Failed to launch ch-remote"); 603 604 (output.status.success(), output.stdout) 605 } 606 607 fn resize_command( 608 api_socket: &str, 609 desired_vcpus: Option<u8>, 610 desired_ram: Option<usize>, 611 desired_balloon: Option<usize>, 612 event_file: Option<&str>, 613 ) -> bool { 614 let mut cmd = Command::new(clh_command("ch-remote")); 615 cmd.args(["--api-socket", api_socket, "resize"]); 616 617 if let Some(desired_vcpus) = desired_vcpus { 618 cmd.args(["--cpus", &format!("{desired_vcpus}")]); 619 } 620 621 if let Some(desired_ram) = desired_ram { 622 cmd.args(["--memory", &format!("{desired_ram}")]); 623 } 624 625 if let Some(desired_balloon) = desired_balloon { 626 cmd.args(["--balloon", &format!("{desired_balloon}")]); 627 } 628 629 let ret = cmd.status().expect("Failed to launch ch-remote").success(); 630 631 if let Some(event_path) = event_file { 632 let latest_events = [ 633 &MetaEvent { 634 event: "resizing".to_string(), 635 device_id: None, 636 }, 637 &MetaEvent { 638 event: "resized".to_string(), 639 device_id: None, 640 }, 641 ]; 642 assert!(check_latest_events_exact(&latest_events, event_path)); 643 } 644 645 ret 646 } 647 648 fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool { 649 let mut cmd = Command::new(clh_command("ch-remote")); 650 cmd.args([ 651 "--api-socket", 652 api_socket, 653 "resize-zone", 654 "--id", 655 id, 656 "--size", 657 desired_size, 658 ]); 659 660 cmd.status().expect("Failed to launch ch-remote").success() 661 } 662 663 // setup OVS-DPDK bridge and ports 664 fn setup_ovs_dpdk() { 665 // setup OVS-DPDK 666 assert!(exec_host_command_status("service openvswitch-switch start").success()); 667 assert!(exec_host_command_status("ovs-vsctl init").success()); 668 assert!( 669 exec_host_command_status("ovs-vsctl set Open_vSwitch . 
other_config:dpdk-init=true") 670 .success() 671 ); 672 assert!(exec_host_command_status("service openvswitch-switch restart").success()); 673 674 // Create OVS-DPDK bridge and ports 675 assert!(exec_host_command_status( 676 "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev", 677 ) 678 .success()); 679 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 680 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success()); 681 assert!(exec_host_command_status("ip link set up dev ovsbr0").success()); 682 assert!(exec_host_command_status("service openvswitch-switch restart").success()); 683 } 684 fn cleanup_ovs_dpdk() { 685 assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success()); 686 exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2"); 687 } 688 // Setup two guests and ensure they are connected through ovs-dpdk 689 fn setup_ovs_dpdk_guests( 690 guest1: &Guest, 691 guest2: &Guest, 692 api_socket: &str, 693 release_binary: bool, 694 ) -> (Child, Child) { 695 setup_ovs_dpdk(); 696 697 let clh_path = if !release_binary { 698 clh_command("cloud-hypervisor") 699 } else { 700 cloud_hypervisor_release_path() 701 }; 702 703 let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path) 704 .args(["--cpus", "boot=2"]) 705 .args(["--memory", "size=0,shared=on"]) 706 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"]) 707 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 708 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 709 .default_disks() 710 .args(["--net", guest1.default_net_string().as_str(), "--net", "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"]) 711 .capture_output() 712 .spawn() 713 .unwrap(); 714 715 #[cfg(target_arch = "x86_64")] 716 let guest_net_iface = "ens5"; 717 #[cfg(target_arch = "aarch64")] 718 let guest_net_iface = "enp0s5"; 719 720 let r = std::panic::catch_unwind(|| { 721 guest1.wait_vm_boot(None).unwrap(); 722 723 guest1 724 .ssh_command(&format!( 725 "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}" 726 )) 727 .unwrap(); 728 guest1 729 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 730 .unwrap(); 731 732 let guest_ip = guest1.network.guest_ip.clone(); 733 thread::spawn(move || { 734 ssh_command_ip( 735 "nc -l 12345", 736 &guest_ip, 737 DEFAULT_SSH_RETRIES, 738 DEFAULT_SSH_TIMEOUT, 739 ) 740 .unwrap(); 741 }); 742 }); 743 if r.is_err() { 744 cleanup_ovs_dpdk(); 745 746 let _ = child1.kill(); 747 let output = child1.wait_with_output().unwrap(); 748 handle_child_output(r, &output); 749 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 750 } 751 752 let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path) 753 .args(["--api-socket", api_socket]) 754 .args(["--cpus", "boot=2"]) 755 .args(["--memory", "size=0,shared=on"]) 756 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"]) 757 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 758 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 759 .default_disks() 760 .args(["--net", guest2.default_net_string().as_str(), "--net", 
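            // The second --net entry below attaches guest2 to the OVS-DPDK port
            // created by setup_ovs_dpdk(): with vhost_mode=server the VMM owns
            // the /tmp/dpdkvhostclient2 socket that the dpdkvhostuserclient
            // port connects to.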
"vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"]) 761 .capture_output() 762 .spawn() 763 .unwrap(); 764 765 let r = std::panic::catch_unwind(|| { 766 guest2.wait_vm_boot(None).unwrap(); 767 768 guest2 769 .ssh_command(&format!( 770 "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}" 771 )) 772 .unwrap(); 773 guest2 774 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 775 .unwrap(); 776 777 // Check the connection works properly between the two VMs 778 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 779 }); 780 if r.is_err() { 781 cleanup_ovs_dpdk(); 782 783 let _ = child1.kill(); 784 let _ = child2.kill(); 785 let output = child2.wait_with_output().unwrap(); 786 handle_child_output(r, &output); 787 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 788 } 789 790 (child1, child2) 791 } 792 793 enum FwType { 794 Ovmf, 795 RustHypervisorFirmware, 796 } 797 798 fn fw_path(_fw_type: FwType) -> String { 799 let mut workload_path = dirs::home_dir().unwrap(); 800 workload_path.push("workloads"); 801 802 let mut fw_path = workload_path; 803 #[cfg(target_arch = "aarch64")] 804 fw_path.push("CLOUDHV_EFI.fd"); 805 #[cfg(target_arch = "x86_64")] 806 { 807 match _fw_type { 808 FwType::Ovmf => fw_path.push(OVMF_NAME), 809 FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"), 810 } 811 } 812 813 fw_path.to_str().unwrap().to_string() 814 } 815 816 struct MetaEvent { 817 event: String, 818 device_id: Option<String>, 819 } 820 821 impl MetaEvent { 822 pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool { 823 let mut matched = false; 824 if v["event"].as_str().unwrap() == self.event { 825 if let Some(device_id) = &self.device_id { 826 if v["properties"]["id"].as_str().unwrap() == device_id { 827 matched = true 828 } 829 } else { 830 matched = true; 831 } 832 } 833 matched 834 } 835 } 836 837 // Parse the event_monitor file based on the format that each event 838 // is followed by a double newline 839 fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> { 840 let content = fs::read(event_file).unwrap(); 841 let mut ret = Vec::new(); 842 for entry in String::from_utf8_lossy(&content) 843 .trim() 844 .split("\n\n") 845 .collect::<Vec<&str>>() 846 { 847 ret.push(serde_json::from_str(entry).unwrap()); 848 } 849 850 ret 851 } 852 853 // Return true if all events from the input 'expected_events' are matched sequentially 854 // with events from the 'event_file' 855 fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool { 856 let json_events = parse_event_file(event_file); 857 let len = expected_events.len(); 858 let mut idx = 0; 859 for e in &json_events { 860 if idx == len { 861 break; 862 } 863 if expected_events[idx].match_with_json_event(e) { 864 idx += 1; 865 } 866 } 867 868 idx == len 869 } 870 871 // Return true if all events from the input 'expected_events' are matched exactly 872 // with events from the 'event_file' 873 fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool { 874 let json_events = parse_event_file(event_file); 875 assert!(expected_events.len() <= json_events.len()); 876 let json_events = &json_events[..expected_events.len()]; 877 878 for (idx, e) in json_events.iter().enumerate() { 879 if !expected_events[idx].match_with_json_event(e) { 880 return false; 881 } 882 } 883 884 true 885 } 886 887 // Return true if events from the input 'expected_events' are matched exactly 
888 // with the most recent events from the 'event_file' 889 fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool { 890 let json_events = parse_event_file(event_file); 891 assert!(latest_events.len() <= json_events.len()); 892 let json_events = &json_events[(json_events.len() - latest_events.len())..]; 893 894 for (idx, e) in json_events.iter().enumerate() { 895 if !latest_events[idx].match_with_json_event(e) { 896 return false; 897 } 898 } 899 900 true 901 } 902 903 fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) { 904 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 905 let guest = Guest::new(Box::new(focal)); 906 let total_vcpus = threads_per_core * cores_per_package * packages; 907 let direct_kernel_boot_path = direct_kernel_boot_path(); 908 let mut kernel_path = direct_kernel_boot_path.to_str().unwrap(); 909 let fw_path = fw_path(FwType::RustHypervisorFirmware); 910 if use_fw { 911 kernel_path = fw_path.as_str(); 912 } 913 914 let mut child = GuestCommand::new(&guest) 915 .args([ 916 "--cpus", 917 &format!( 918 "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}" 919 ), 920 ]) 921 .args(["--memory", "size=512M"]) 922 .args(["--kernel", kernel_path]) 923 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 924 .default_disks() 925 .default_net() 926 .capture_output() 927 .spawn() 928 .unwrap(); 929 930 let r = std::panic::catch_unwind(|| { 931 guest.wait_vm_boot(None).unwrap(); 932 assert_eq!( 933 guest.get_cpu_count().unwrap_or_default(), 934 u32::from(total_vcpus) 935 ); 936 assert_eq!( 937 guest 938 .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"") 939 .unwrap() 940 .trim() 941 .parse::<u8>() 942 .unwrap_or(0), 943 threads_per_core 944 ); 945 946 assert_eq!( 947 guest 948 .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"") 949 .unwrap() 950 .trim() 951 .parse::<u8>() 952 .unwrap_or(0), 953 cores_per_package 954 ); 955 956 assert_eq!( 957 guest 958 .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"") 959 .unwrap() 960 .trim() 961 .parse::<u8>() 962 .unwrap_or(0), 963 packages 964 ); 965 }); 966 967 let _ = child.kill(); 968 let output = child.wait_with_output().unwrap(); 969 970 handle_child_output(r, &output); 971 } 972 973 #[allow(unused_variables)] 974 fn _test_guest_numa_nodes(acpi: bool) { 975 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 976 let guest = Guest::new(Box::new(focal)); 977 let api_socket = temp_api_path(&guest.tmp_dir); 978 #[cfg(target_arch = "x86_64")] 979 let kernel_path = direct_kernel_boot_path(); 980 #[cfg(target_arch = "aarch64")] 981 let kernel_path = if acpi { 982 edk2_path() 983 } else { 984 direct_kernel_boot_path() 985 }; 986 987 let mut child = GuestCommand::new(&guest) 988 .args(["--cpus", "boot=6,max=12"]) 989 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 990 .args([ 991 "--memory-zone", 992 "id=mem0,size=1G,hotplug_size=3G", 993 "--memory-zone", 994 "id=mem1,size=2G,hotplug_size=3G", 995 "--memory-zone", 996 "id=mem2,size=3G,hotplug_size=3G", 997 ]) 998 .args([ 999 "--numa", 1000 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 1001 "--numa", 1002 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 1003 "--numa", 1004 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 1005 ]) 1006 .args(["--kernel", kernel_path.to_str().unwrap()]) 1007 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE]) 1008 .args(["--api-socket", &api_socket]) 1009 .capture_output() 1010 .default_disks() 1011 .default_net() 1012 .spawn() 1013 .unwrap(); 1014 1015 let r = std::panic::catch_unwind(|| { 1016 guest.wait_vm_boot(None).unwrap(); 1017 1018 guest.check_numa_common( 1019 Some(&[960_000, 1_920_000, 2_880_000]), 1020 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 1021 Some(&["10 15 20", "20 10 25", "25 30 10"]), 1022 ); 1023 1024 // AArch64 currently does not support hotplug, and therefore we only 1025 // test hotplug-related function on x86_64 here. 1026 #[cfg(target_arch = "x86_64")] 1027 { 1028 guest.enable_memory_hotplug(); 1029 1030 // Resize every memory zone and check each associated NUMA node 1031 // has been assigned the right amount of memory. 1032 resize_zone_command(&api_socket, "mem0", "4G"); 1033 resize_zone_command(&api_socket, "mem1", "4G"); 1034 resize_zone_command(&api_socket, "mem2", "4G"); 1035 // Resize to the maximum amount of CPUs and check each NUMA 1036 // node has been assigned the right CPUs set. 1037 resize_command(&api_socket, Some(12), None, None, None); 1038 thread::sleep(std::time::Duration::new(5, 0)); 1039 1040 guest.check_numa_common( 1041 Some(&[3_840_000, 3_840_000, 3_840_000]), 1042 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 1043 None, 1044 ); 1045 } 1046 }); 1047 1048 let _ = child.kill(); 1049 let output = child.wait_with_output().unwrap(); 1050 1051 handle_child_output(r, &output); 1052 } 1053 1054 #[allow(unused_variables)] 1055 fn _test_power_button(acpi: bool) { 1056 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1057 let guest = Guest::new(Box::new(focal)); 1058 let mut cmd = GuestCommand::new(&guest); 1059 let api_socket = temp_api_path(&guest.tmp_dir); 1060 1061 #[cfg(target_arch = "x86_64")] 1062 let kernel_path = direct_kernel_boot_path(); 1063 #[cfg(target_arch = "aarch64")] 1064 let kernel_path = if acpi { 1065 edk2_path() 1066 } else { 1067 direct_kernel_boot_path() 1068 }; 1069 1070 cmd.args(["--cpus", "boot=1"]) 1071 .args(["--memory", "size=512M"]) 1072 .args(["--kernel", kernel_path.to_str().unwrap()]) 1073 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1074 .capture_output() 1075 .default_disks() 1076 .default_net() 1077 .args(["--api-socket", &api_socket]); 1078 1079 let child = cmd.spawn().unwrap(); 1080 1081 let r = std::panic::catch_unwind(|| { 1082 guest.wait_vm_boot(None).unwrap(); 1083 assert!(remote_command(&api_socket, "power-button", None)); 1084 }); 1085 1086 let output = child.wait_with_output().unwrap(); 1087 assert!(output.status.success()); 1088 handle_child_output(r, &output); 1089 } 1090 1091 type PrepareNetDaemon = dyn Fn( 1092 &TempDir, 1093 &str, 1094 Option<&str>, 1095 Option<u16>, 1096 usize, 1097 bool, 1098 ) -> (std::process::Command, String); 1099 1100 fn test_vhost_user_net( 1101 tap: Option<&str>, 1102 num_queues: usize, 1103 prepare_daemon: &PrepareNetDaemon, 1104 generate_host_mac: bool, 1105 client_mode_daemon: bool, 1106 ) { 1107 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1108 let guest = Guest::new(Box::new(focal)); 1109 let api_socket = temp_api_path(&guest.tmp_dir); 1110 1111 let kernel_path = direct_kernel_boot_path(); 1112 1113 let host_mac = if generate_host_mac { 1114 Some(MacAddr::local_random()) 1115 } else { 1116 None 1117 }; 1118 1119 let mtu = Some(3000); 1120 1121 let (mut daemon_command, vunet_socket_path) = prepare_daemon( 1122 &guest.tmp_dir, 1123 &guest.network.host_ip, 1124 tap, 1125 mtu, 1126 num_queues, 
        client_mode_daemon,
    );

    let net_params = format!(
        "vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000",
        guest.network.guest_mac,
        vunet_socket_path,
        num_queues,
        if let Some(host_mac) = host_mac {
            format!(",host_mac={host_mac}")
        } else {
            "".to_owned()
        },
        if client_mode_daemon {
            "server"
        } else {
            "client"
        },
    );

    let mut ch_command = GuestCommand::new(&guest);
    ch_command
        .args(["--cpus", format!("boot={}", num_queues / 2).as_str()])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", net_params.as_str()])
        .args(["--api-socket", &api_socket])
        .capture_output();

    let mut daemon_child: std::process::Child;
    let mut child: std::process::Child;

    if client_mode_daemon {
        child = ch_command.spawn().unwrap();
        // Make sure the VMM is waiting for the backend to connect
        thread::sleep(std::time::Duration::new(10, 0));
        daemon_child = daemon_command.spawn().unwrap();
    } else {
        daemon_child = daemon_command.spawn().unwrap();
        // Make sure the backend is waiting for the VMM to connect
        thread::sleep(std::time::Duration::new(10, 0));
        child = ch_command.spawn().unwrap();
    }

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if let Some(tap_name) = tap {
            let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}"));
            assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
        }

        if let Some(host_mac) = host_mac {
            let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}"));
            assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
        }

        #[cfg(target_arch = "aarch64")]
        let iface = "enp0s4";
        #[cfg(target_arch = "x86_64")]
        let iface = "ens4";

        assert_eq!(
            guest
                .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
                .unwrap()
                .trim(),
            "3000"
        );

        // 1 network interface + default localhost ==> 2 interfaces
        // It's important to note that this test fully exercises the
        // vhost-user-net implementation and the associated backend since
        // the only --net interface it defines is the vhost-user one. That
        // means all the ssh communication in this test happens through the
        // network interface backed by vhost-user-net.
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // The following pci devices will appear on guest with PCI-MSI
        // interrupt vectors assigned.
        // 1 virtio-console with 3 vectors: config, Rx, Tx
        // 1 virtio-blk     with 2 vectors: config, Request
        // 1 virtio-blk     with 2 vectors: config, Request
        // 1 virtio-rng     with 2 vectors: config, Request
        // Since virtio-net has 2 queue pairs, its vectors are as follows:
        // 1 virtio-net     with 5 vectors: config, Rx (2), Tx (2)
        // Based on the above, the total number of vectors should be 14.
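        // Worked out: 3 (console) + 2 + 2 (two blk) + 2 (rng) = 9 fixed vectors,
        // plus 1 config vector and num_queues queue vectors for virtio-net.
        // With num_queues = 4 (the 2 queue pairs assumed above) that is
        // 9 + 1 + 4 = 14, i.e. the 10 + num_queues asserted below.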
1224 #[cfg(target_arch = "x86_64")] 1225 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 1226 #[cfg(target_arch = "aarch64")] 1227 let grep_cmd = "grep -c ITS-MSI /proc/interrupts"; 1228 assert_eq!( 1229 guest 1230 .ssh_command(grep_cmd) 1231 .unwrap() 1232 .trim() 1233 .parse::<u32>() 1234 .unwrap_or_default(), 1235 10 + (num_queues as u32) 1236 ); 1237 1238 // ACPI feature is needed. 1239 #[cfg(target_arch = "x86_64")] 1240 { 1241 guest.enable_memory_hotplug(); 1242 1243 // Add RAM to the VM 1244 let desired_ram = 1024 << 20; 1245 resize_command(&api_socket, None, Some(desired_ram), None, None); 1246 1247 thread::sleep(std::time::Duration::new(10, 0)); 1248 1249 // Here by simply checking the size (through ssh), we validate 1250 // the connection is still working, which means vhost-user-net 1251 // keeps working after the resize. 1252 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1253 } 1254 }); 1255 1256 let _ = child.kill(); 1257 let output = child.wait_with_output().unwrap(); 1258 1259 thread::sleep(std::time::Duration::new(5, 0)); 1260 let _ = daemon_child.kill(); 1261 let _ = daemon_child.wait(); 1262 1263 handle_child_output(r, &output); 1264 } 1265 1266 type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String); 1267 1268 fn test_vhost_user_blk( 1269 num_queues: usize, 1270 readonly: bool, 1271 direct: bool, 1272 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>, 1273 ) { 1274 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1275 let guest = Guest::new(Box::new(focal)); 1276 let api_socket = temp_api_path(&guest.tmp_dir); 1277 1278 let kernel_path = direct_kernel_boot_path(); 1279 1280 let (blk_params, daemon_child) = { 1281 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap(); 1282 // Start the daemon 1283 let (daemon_child, vubd_socket_path) = 1284 prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct); 1285 1286 ( 1287 format!( 1288 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128", 1289 ), 1290 Some(daemon_child), 1291 ) 1292 }; 1293 1294 let mut child = GuestCommand::new(&guest) 1295 .args(["--cpus", format!("boot={num_queues}").as_str()]) 1296 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1297 .args(["--kernel", kernel_path.to_str().unwrap()]) 1298 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1299 .args([ 1300 "--disk", 1301 format!( 1302 "path={}", 1303 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 1304 ) 1305 .as_str(), 1306 "--disk", 1307 format!( 1308 "path={}", 1309 guest.disk_config.disk(DiskType::CloudInit).unwrap() 1310 ) 1311 .as_str(), 1312 "--disk", 1313 blk_params.as_str(), 1314 ]) 1315 .default_net() 1316 .args(["--api-socket", &api_socket]) 1317 .capture_output() 1318 .spawn() 1319 .unwrap(); 1320 1321 let r = std::panic::catch_unwind(|| { 1322 guest.wait_vm_boot(None).unwrap(); 1323 1324 // Check both if /dev/vdc exists and if the block size is 16M. 1325 assert_eq!( 1326 guest 1327 .ssh_command("lsblk | grep vdc | grep -c 16M") 1328 .unwrap() 1329 .trim() 1330 .parse::<u32>() 1331 .unwrap_or_default(), 1332 1 1333 ); 1334 1335 // Check if this block is RO or RW. 1336 assert_eq!( 1337 guest 1338 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 1339 .unwrap() 1340 .trim() 1341 .parse::<u32>() 1342 .unwrap_or_default(), 1343 readonly as u32 1344 ); 1345 1346 // Check if the number of queues in /sys/block/vdc/mq matches the 1347 // expected num_queues. 
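        // Each blk-mq hardware queue appears as a numbered sub-directory, e.g.
        // /sys/block/vdc/mq/0 and /sys/block/vdc/mq/1 for num_queues=2, so
        // counting directories yields the queue count.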
1348 assert_eq!( 1349 guest 1350 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 1351 .unwrap() 1352 .trim() 1353 .parse::<u32>() 1354 .unwrap_or_default(), 1355 num_queues as u32 1356 ); 1357 1358 // Mount the device 1359 let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" }; 1360 guest.ssh_command("mkdir mount_image").unwrap(); 1361 guest 1362 .ssh_command( 1363 format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(), 1364 ) 1365 .unwrap(); 1366 1367 // Check the content of the block device. The file "foo" should 1368 // contain "bar". 1369 assert_eq!( 1370 guest.ssh_command("cat mount_image/foo").unwrap().trim(), 1371 "bar" 1372 ); 1373 1374 // ACPI feature is needed. 1375 #[cfg(target_arch = "x86_64")] 1376 { 1377 guest.enable_memory_hotplug(); 1378 1379 // Add RAM to the VM 1380 let desired_ram = 1024 << 20; 1381 resize_command(&api_socket, None, Some(desired_ram), None, None); 1382 1383 thread::sleep(std::time::Duration::new(10, 0)); 1384 1385 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1386 1387 // Check again the content of the block device after the resize 1388 // has been performed. 1389 assert_eq!( 1390 guest.ssh_command("cat mount_image/foo").unwrap().trim(), 1391 "bar" 1392 ); 1393 } 1394 1395 // Unmount the device 1396 guest.ssh_command("sudo umount /dev/vdc").unwrap(); 1397 guest.ssh_command("rm -r mount_image").unwrap(); 1398 }); 1399 1400 let _ = child.kill(); 1401 let output = child.wait_with_output().unwrap(); 1402 1403 if let Some(mut daemon_child) = daemon_child { 1404 thread::sleep(std::time::Duration::new(5, 0)); 1405 let _ = daemon_child.kill(); 1406 let _ = daemon_child.wait(); 1407 } 1408 1409 handle_child_output(r, &output); 1410 } 1411 1412 fn test_boot_from_vhost_user_blk( 1413 num_queues: usize, 1414 readonly: bool, 1415 direct: bool, 1416 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>, 1417 ) { 1418 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1419 let guest = Guest::new(Box::new(focal)); 1420 1421 let kernel_path = direct_kernel_boot_path(); 1422 1423 let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap(); 1424 1425 let (blk_boot_params, daemon_child) = { 1426 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap(); 1427 // Start the daemon 1428 let (daemon_child, vubd_socket_path) = prepare_daemon( 1429 &guest.tmp_dir, 1430 disk_path.as_str(), 1431 num_queues, 1432 readonly, 1433 direct, 1434 ); 1435 1436 ( 1437 format!( 1438 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128", 1439 ), 1440 Some(daemon_child), 1441 ) 1442 }; 1443 1444 let mut child = GuestCommand::new(&guest) 1445 .args(["--cpus", format!("boot={num_queues}").as_str()]) 1446 .args(["--memory", "size=512M,shared=on"]) 1447 .args(["--kernel", kernel_path.to_str().unwrap()]) 1448 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1449 .args([ 1450 "--disk", 1451 blk_boot_params.as_str(), 1452 "--disk", 1453 format!( 1454 "path={}", 1455 guest.disk_config.disk(DiskType::CloudInit).unwrap() 1456 ) 1457 .as_str(), 1458 ]) 1459 .default_net() 1460 .capture_output() 1461 .spawn() 1462 .unwrap(); 1463 1464 let r = std::panic::catch_unwind(|| { 1465 guest.wait_vm_boot(None).unwrap(); 1466 1467 // Just check the VM booted correctly. 
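        // The vCPU count should match num_queues since the VM was started with
        // --cpus boot={num_queues}; likewise more than 480_000 kB of the
        // configured 512M of RAM should be visible once the kernel has taken
        // its own reservations.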
1468 assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32); 1469 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 1470 }); 1471 let _ = child.kill(); 1472 let output = child.wait_with_output().unwrap(); 1473 1474 if let Some(mut daemon_child) = daemon_child { 1475 thread::sleep(std::time::Duration::new(5, 0)); 1476 let _ = daemon_child.kill(); 1477 let _ = daemon_child.wait(); 1478 } 1479 1480 handle_child_output(r, &output); 1481 } 1482 1483 fn _test_virtio_fs( 1484 prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String), 1485 hotplug: bool, 1486 pci_segment: Option<u16>, 1487 ) { 1488 #[cfg(target_arch = "aarch64")] 1489 let focal_image = if hotplug { 1490 FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string() 1491 } else { 1492 FOCAL_IMAGE_NAME.to_string() 1493 }; 1494 #[cfg(target_arch = "x86_64")] 1495 let focal_image = FOCAL_IMAGE_NAME.to_string(); 1496 let focal = UbuntuDiskConfig::new(focal_image); 1497 let guest = Guest::new(Box::new(focal)); 1498 let api_socket = temp_api_path(&guest.tmp_dir); 1499 1500 let mut workload_path = dirs::home_dir().unwrap(); 1501 workload_path.push("workloads"); 1502 1503 let mut shared_dir = workload_path; 1504 shared_dir.push("shared_dir"); 1505 1506 #[cfg(target_arch = "x86_64")] 1507 let kernel_path = direct_kernel_boot_path(); 1508 #[cfg(target_arch = "aarch64")] 1509 let kernel_path = if hotplug { 1510 edk2_path() 1511 } else { 1512 direct_kernel_boot_path() 1513 }; 1514 1515 let (mut daemon_child, virtiofsd_socket_path) = 1516 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap()); 1517 1518 let mut guest_command = GuestCommand::new(&guest); 1519 guest_command 1520 .args(["--cpus", "boot=1"]) 1521 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1522 .args(["--kernel", kernel_path.to_str().unwrap()]) 1523 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1524 .default_disks() 1525 .default_net() 1526 .args(["--api-socket", &api_socket]); 1527 if pci_segment.is_some() { 1528 guest_command.args([ 1529 "--platform", 1530 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 1531 ]); 1532 } 1533 1534 let fs_params = format!( 1535 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}", 1536 virtiofsd_socket_path, 1537 if let Some(pci_segment) = pci_segment { 1538 format!(",pci_segment={pci_segment}") 1539 } else { 1540 "".to_owned() 1541 } 1542 ); 1543 1544 if !hotplug { 1545 guest_command.args(["--fs", fs_params.as_str()]); 1546 } 1547 1548 let mut child = guest_command.capture_output().spawn().unwrap(); 1549 1550 let r = std::panic::catch_unwind(|| { 1551 guest.wait_vm_boot(None).unwrap(); 1552 1553 if hotplug { 1554 // Add fs to the VM 1555 let (cmd_success, cmd_output) = 1556 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params)); 1557 assert!(cmd_success); 1558 1559 if let Some(pci_segment) = pci_segment { 1560 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 1561 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 1562 ))); 1563 } else { 1564 assert!(String::from_utf8_lossy(&cmd_output) 1565 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}")); 1566 } 1567 1568 thread::sleep(std::time::Duration::new(10, 0)); 1569 } 1570 1571 // Mount shared directory through virtio_fs filesystem 1572 guest 1573 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/") 1574 .unwrap(); 1575 1576 // Check file1 exists and its content is "foo" 1577 assert_eq!( 1578 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1579 "foo" 1580 ); 1581 
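        // The shared_dir fixture under ~/workloads is expected to be
        // pre-populated on the host: file1 containing "foo", file3 containing
        // "bar", and no file2. The checks above and below rely on that layout.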
// Check file2 does not exist 1582 guest 1583 .ssh_command("[ ! -f 'mount_dir/file2' ] || true") 1584 .unwrap(); 1585 1586 // Check file3 exists and its content is "bar" 1587 assert_eq!( 1588 guest.ssh_command("cat mount_dir/file3").unwrap().trim(), 1589 "bar" 1590 ); 1591 1592 // ACPI feature is needed. 1593 #[cfg(target_arch = "x86_64")] 1594 { 1595 guest.enable_memory_hotplug(); 1596 1597 // Add RAM to the VM 1598 let desired_ram = 1024 << 20; 1599 resize_command(&api_socket, None, Some(desired_ram), None, None); 1600 1601 thread::sleep(std::time::Duration::new(30, 0)); 1602 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1603 1604 // After the resize, check again that file1 exists and its 1605 // content is "foo". 1606 assert_eq!( 1607 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1608 "foo" 1609 ); 1610 } 1611 1612 if hotplug { 1613 // Remove from VM 1614 guest.ssh_command("sudo umount mount_dir").unwrap(); 1615 assert!(remote_command(&api_socket, "remove-device", Some("myfs0"))); 1616 } 1617 }); 1618 1619 let (r, hotplug_daemon_child) = if r.is_ok() && hotplug { 1620 thread::sleep(std::time::Duration::new(10, 0)); 1621 let (daemon_child, virtiofsd_socket_path) = 1622 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap()); 1623 1624 let r = std::panic::catch_unwind(|| { 1625 thread::sleep(std::time::Duration::new(10, 0)); 1626 let fs_params = format!( 1627 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}", 1628 virtiofsd_socket_path, 1629 if let Some(pci_segment) = pci_segment { 1630 format!(",pci_segment={pci_segment}") 1631 } else { 1632 "".to_owned() 1633 } 1634 ); 1635 1636 // Add back and check it works 1637 let (cmd_success, cmd_output) = 1638 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params)); 1639 assert!(cmd_success); 1640 if let Some(pci_segment) = pci_segment { 1641 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 1642 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 1643 ))); 1644 } else { 1645 assert!(String::from_utf8_lossy(&cmd_output) 1646 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}")); 1647 } 1648 1649 thread::sleep(std::time::Duration::new(10, 0)); 1650 // Mount shared directory through virtio_fs filesystem 1651 guest 1652 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/") 1653 .unwrap(); 1654 1655 // Check file1 exists and its content is "foo" 1656 assert_eq!( 1657 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1658 "foo" 1659 ); 1660 }); 1661 1662 (r, Some(daemon_child)) 1663 } else { 1664 (r, None) 1665 }; 1666 1667 let _ = child.kill(); 1668 let output = child.wait_with_output().unwrap(); 1669 1670 let _ = daemon_child.kill(); 1671 let _ = daemon_child.wait(); 1672 1673 if let Some(mut daemon_child) = hotplug_daemon_child { 1674 let _ = daemon_child.kill(); 1675 let _ = daemon_child.wait(); 1676 } 1677 1678 handle_child_output(r, &output); 1679 } 1680 1681 fn test_virtio_pmem(discard_writes: bool, specify_size: bool) { 1682 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1683 let guest = Guest::new(Box::new(focal)); 1684 1685 let kernel_path = direct_kernel_boot_path(); 1686 1687 let pmem_temp_file = TempFile::new().unwrap(); 1688 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 1689 1690 std::process::Command::new("mkfs.ext4") 1691 .arg(pmem_temp_file.as_path()) 1692 .output() 1693 .expect("Expect creating disk image to succeed"); 1694 1695 let mut child = GuestCommand::new(&guest) 1696 .args(["--cpus", 
"boot=1"]) 1697 .args(["--memory", "size=512M"]) 1698 .args(["--kernel", kernel_path.to_str().unwrap()]) 1699 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1700 .default_disks() 1701 .default_net() 1702 .args([ 1703 "--pmem", 1704 format!( 1705 "file={}{}{}", 1706 pmem_temp_file.as_path().to_str().unwrap(), 1707 if specify_size { ",size=128M" } else { "" }, 1708 if discard_writes { 1709 ",discard_writes=on" 1710 } else { 1711 "" 1712 } 1713 ) 1714 .as_str(), 1715 ]) 1716 .capture_output() 1717 .spawn() 1718 .unwrap(); 1719 1720 let r = std::panic::catch_unwind(|| { 1721 guest.wait_vm_boot(None).unwrap(); 1722 1723 // Check for the presence of /dev/pmem0 1724 assert_eq!( 1725 guest.ssh_command("ls /dev/pmem0").unwrap().trim(), 1726 "/dev/pmem0" 1727 ); 1728 1729 // Check changes persist after reboot 1730 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1731 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 1732 guest 1733 .ssh_command("echo test123 | sudo tee /mnt/test") 1734 .unwrap(); 1735 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 1736 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 1737 1738 guest.reboot_linux(0, None); 1739 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1740 assert_eq!( 1741 guest 1742 .ssh_command("sudo cat /mnt/test || true") 1743 .unwrap() 1744 .trim(), 1745 if discard_writes { "" } else { "test123" } 1746 ); 1747 }); 1748 1749 let _ = child.kill(); 1750 let output = child.wait_with_output().unwrap(); 1751 1752 handle_child_output(r, &output); 1753 } 1754 1755 fn get_fd_count(pid: u32) -> usize { 1756 fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count() 1757 } 1758 1759 fn _test_virtio_vsock(hotplug: bool) { 1760 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1761 let guest = Guest::new(Box::new(focal)); 1762 1763 #[cfg(target_arch = "x86_64")] 1764 let kernel_path = direct_kernel_boot_path(); 1765 #[cfg(target_arch = "aarch64")] 1766 let kernel_path = if hotplug { 1767 edk2_path() 1768 } else { 1769 direct_kernel_boot_path() 1770 }; 1771 1772 let socket = temp_vsock_path(&guest.tmp_dir); 1773 let api_socket = temp_api_path(&guest.tmp_dir); 1774 1775 let mut cmd = GuestCommand::new(&guest); 1776 cmd.args(["--api-socket", &api_socket]); 1777 cmd.args(["--cpus", "boot=1"]); 1778 cmd.args(["--memory", "size=512M"]); 1779 cmd.args(["--kernel", kernel_path.to_str().unwrap()]); 1780 cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]); 1781 cmd.default_disks(); 1782 cmd.default_net(); 1783 1784 if !hotplug { 1785 cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]); 1786 } 1787 1788 let mut child = cmd.capture_output().spawn().unwrap(); 1789 1790 let r = std::panic::catch_unwind(|| { 1791 guest.wait_vm_boot(None).unwrap(); 1792 1793 if hotplug { 1794 let (cmd_success, cmd_output) = remote_command_w_output( 1795 &api_socket, 1796 "add-vsock", 1797 Some(format!("cid=3,socket={socket},id=test0").as_str()), 1798 ); 1799 assert!(cmd_success); 1800 assert!(String::from_utf8_lossy(&cmd_output) 1801 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 1802 thread::sleep(std::time::Duration::new(10, 0)); 1803 // Check adding a second one fails 1804 assert!(!remote_command( 1805 &api_socket, 1806 "add-vsock", 1807 Some("cid=1234,socket=/tmp/fail") 1808 )); 1809 } 1810 1811 // Validate vsock works as expected. 1812 guest.check_vsock(socket.as_str()); 1813 guest.reboot_linux(0, None); 1814 // Validate vsock still works after a reboot. 
1815 guest.check_vsock(socket.as_str()); 1816 1817 if hotplug { 1818 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 1819 } 1820 }); 1821 1822 let _ = child.kill(); 1823 let output = child.wait_with_output().unwrap(); 1824 1825 handle_child_output(r, &output); 1826 } 1827 1828 fn get_ksm_pages_shared() -> u32 { 1829 fs::read_to_string("/sys/kernel/mm/ksm/pages_shared") 1830 .unwrap() 1831 .trim() 1832 .parse::<u32>() 1833 .unwrap() 1834 } 1835 1836 fn test_memory_mergeable(mergeable: bool) { 1837 let memory_param = if mergeable { 1838 "mergeable=on" 1839 } else { 1840 "mergeable=off" 1841 }; 1842 1843 // We are assuming the rest of the system in our CI is not using mergeable memeory 1844 let ksm_ps_init = get_ksm_pages_shared(); 1845 assert!(ksm_ps_init == 0); 1846 1847 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1848 let guest1 = Guest::new(Box::new(focal1)); 1849 let mut child1 = GuestCommand::new(&guest1) 1850 .args(["--cpus", "boot=1"]) 1851 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1852 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1853 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1854 .default_disks() 1855 .args(["--net", guest1.default_net_string().as_str()]) 1856 .args(["--serial", "tty", "--console", "off"]) 1857 .capture_output() 1858 .spawn() 1859 .unwrap(); 1860 1861 let r = std::panic::catch_unwind(|| { 1862 guest1.wait_vm_boot(None).unwrap(); 1863 }); 1864 if r.is_err() { 1865 let _ = child1.kill(); 1866 let output = child1.wait_with_output().unwrap(); 1867 handle_child_output(r, &output); 1868 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 1869 } 1870 1871 let ksm_ps_guest1 = get_ksm_pages_shared(); 1872 1873 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1874 let guest2 = Guest::new(Box::new(focal2)); 1875 let mut child2 = GuestCommand::new(&guest2) 1876 .args(["--cpus", "boot=1"]) 1877 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1878 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1879 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1880 .default_disks() 1881 .args(["--net", guest2.default_net_string().as_str()]) 1882 .args(["--serial", "tty", "--console", "off"]) 1883 .capture_output() 1884 .spawn() 1885 .unwrap(); 1886 1887 let r = std::panic::catch_unwind(|| { 1888 guest2.wait_vm_boot(None).unwrap(); 1889 let ksm_ps_guest2 = get_ksm_pages_shared(); 1890 1891 if mergeable { 1892 println!( 1893 "ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'" 1894 ); 1895 // We are expecting the number of shared pages to increase as the number of VM increases 1896 assert!(ksm_ps_guest1 < ksm_ps_guest2); 1897 } else { 1898 assert!(ksm_ps_guest1 == 0); 1899 assert!(ksm_ps_guest2 == 0); 1900 } 1901 }); 1902 1903 let _ = child1.kill(); 1904 let _ = child2.kill(); 1905 1906 let output = child1.wait_with_output().unwrap(); 1907 child2.wait().unwrap(); 1908 1909 handle_child_output(r, &output); 1910 } 1911 1912 fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> { 1913 let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap(); 1914 let reader = io::BufReader::new(smaps); 1915 1916 let mut skip_map: bool = false; 1917 let mut region_name: String = "".to_string(); 1918 let mut region_maps = HashMap::new(); 1919 for line in reader.lines() { 1920 let l = line.unwrap(); 1921 1922 if l.contains('-') { 1923 let 
values: Vec<&str> = l.split_whitespace().collect(); 1924 region_name = values.last().unwrap().trim().to_string(); 1925 if region_name == "0" { 1926 region_name = "anonymous".to_string() 1927 } 1928 } 1929 1930 // Each section begins with something that looks like: 1931 // Size: 2184 kB 1932 if l.starts_with("Size:") { 1933 let values: Vec<&str> = l.split_whitespace().collect(); 1934 let map_size = values[1].parse::<u32>().unwrap(); 1935 // We skip the assigned guest RAM map, its RSS is only 1936 // dependent on the guest actual memory usage. 1937 // Everything else can be added to the VMM overhead. 1938 skip_map = map_size >= guest_memory_size; 1939 continue; 1940 } 1941 1942 // If this is a map we're taking into account, then we only 1943 // count the RSS. The sum of all counted RSS is the VMM overhead. 1944 if !skip_map && l.starts_with("Rss:") { 1945 let values: Vec<&str> = l.split_whitespace().collect(); 1946 let value = values[1].trim().parse::<u32>().unwrap(); 1947 *region_maps.entry(region_name.clone()).or_insert(0) += value; 1948 } 1949 } 1950 1951 region_maps 1952 } 1953 1954 fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 { 1955 let mut total = 0; 1956 1957 for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) { 1958 eprintln!("{region_name}: {value}"); 1959 total += value; 1960 } 1961 1962 total 1963 } 1964 1965 fn process_rss_kib(pid: u32) -> usize { 1966 let command = format!("ps -q {pid} -o rss="); 1967 let rss = exec_host_command_output(&command); 1968 String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap() 1969 } 1970 1971 // 10MB is our maximum accepted overhead. 1972 const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024; 1973 1974 #[derive(PartialEq, Eq, PartialOrd)] 1975 struct Counters { 1976 rx_bytes: u64, 1977 rx_frames: u64, 1978 tx_bytes: u64, 1979 tx_frames: u64, 1980 read_bytes: u64, 1981 write_bytes: u64, 1982 read_ops: u64, 1983 write_ops: u64, 1984 } 1985 1986 fn get_counters(api_socket: &str) -> Counters { 1987 // Get counters 1988 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None); 1989 assert!(cmd_success); 1990 1991 let counters: HashMap<&str, HashMap<&str, u64>> = 1992 serde_json::from_slice(&cmd_output).unwrap_or_default(); 1993 1994 let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap(); 1995 let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap(); 1996 let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap(); 1997 let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap(); 1998 1999 let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap(); 2000 let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap(); 2001 let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap(); 2002 let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap(); 2003 2004 Counters { 2005 rx_bytes, 2006 rx_frames, 2007 tx_bytes, 2008 tx_frames, 2009 read_bytes, 2010 write_bytes, 2011 read_ops, 2012 write_ops, 2013 } 2014 } 2015 2016 fn pty_read(mut pty: std::fs::File) -> Receiver<String> { 2017 let (tx, rx) = mpsc::channel::<String>(); 2018 thread::spawn(move || loop { 2019 thread::sleep(std::time::Duration::new(1, 0)); 2020 let mut buf = [0; 512]; 2021 match pty.read(&mut buf) { 2022 Ok(_) => { 2023 let output = std::str::from_utf8(&buf).unwrap().to_string(); 2024 match tx.send(output) { 2025 Ok(_) => (), 2026 Err(_) => break, 2027 } 2028 } 2029 Err(_) => break, 2030 } 2031 }); 2032 rx 2033 
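    // (Design note: the reader thread funnels output through an mpsc channel
    // so callers can poll with `try_recv()` and give up after a bounded number
    // of empty iterations instead of blocking forever on a quiet pty; see the
    // consuming loop in test_pty_interaction further down for how that is used.)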
} 2034 2035 fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf { 2036 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2037 assert!(cmd_success); 2038 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2039 assert_eq!("Pty", info["config"][pty_type]["mode"]); 2040 PathBuf::from( 2041 info["config"][pty_type]["file"] 2042 .as_str() 2043 .expect("Missing pty path"), 2044 ) 2045 } 2046 2047 // VFIO test network setup. 2048 // We reserve a different IP class for it: 172.18.0.0/24. 2049 #[cfg(target_arch = "x86_64")] 2050 fn setup_vfio_network_interfaces() { 2051 // 'vfio-br0' 2052 assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success()); 2053 assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success()); 2054 assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success()); 2055 // 'vfio-tap0' 2056 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success()); 2057 assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success()); 2058 assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success()); 2059 // 'vfio-tap1' 2060 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success()); 2061 assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success()); 2062 assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success()); 2063 // 'vfio-tap2' 2064 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success()); 2065 assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success()); 2066 assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success()); 2067 // 'vfio-tap3' 2068 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success()); 2069 assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success()); 2070 assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success()); 2071 } 2072 2073 // Tear VFIO test network down 2074 #[cfg(target_arch = "x86_64")] 2075 fn cleanup_vfio_network_interfaces() { 2076 assert!(exec_host_command_status("sudo ip link del vfio-br0").success()); 2077 assert!(exec_host_command_status("sudo ip link del vfio-tap0").success()); 2078 assert!(exec_host_command_status("sudo ip link del vfio-tap1").success()); 2079 assert!(exec_host_command_status("sudo ip link del vfio-tap2").success()); 2080 assert!(exec_host_command_status("sudo ip link del vfio-tap3").success()); 2081 } 2082 2083 fn balloon_size(api_socket: &str) -> u64 { 2084 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2085 assert!(cmd_success); 2086 2087 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2088 let total_mem = &info["config"]["memory"]["size"] 2089 .to_string() 2090 .parse::<u64>() 2091 .unwrap(); 2092 let actual_mem = &info["memory_actual_size"] 2093 .to_string() 2094 .parse::<u64>() 2095 .unwrap(); 2096 total_mem - actual_mem 2097 } 2098 2099 // This test validates that it can find the virtio-iommu device at first. 2100 // It also verifies that both disks and the network card are attached to 2101 // the virtual IOMMU by looking at /sys/kernel/iommu_groups directory. 
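// (The presence check relies on the standard virtio PCI IDs: 0x1af4 is the
// virtio vendor ID and 0x1057 is 0x1040 + 23, i.e. the modern PCI device ID
// for virtio device type 23, the IOMMU. That pair is what
// does_device_vendor_pair_match("0x1057", "0x1af4") matches on below.)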
2102 // The last interesting part of this test is that it exercises the network 2103 // interface attached to the virtual IOMMU since this is the one used to 2104 // send all commands through SSH. 2105 fn _test_virtio_iommu(acpi: bool) { 2106 // Virtio-iommu support is ready in recent kernel (v5.14). But the kernel in 2107 // Focal image is still old. 2108 // So if ACPI is enabled on AArch64, we use a modified Focal image in which 2109 // the kernel binary has been updated. 2110 #[cfg(target_arch = "aarch64")] 2111 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 2112 #[cfg(target_arch = "x86_64")] 2113 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2114 let focal = UbuntuDiskConfig::new(focal_image); 2115 let guest = Guest::new(Box::new(focal)); 2116 2117 #[cfg(target_arch = "x86_64")] 2118 let kernel_path = direct_kernel_boot_path(); 2119 #[cfg(target_arch = "aarch64")] 2120 let kernel_path = if acpi { 2121 edk2_path() 2122 } else { 2123 direct_kernel_boot_path() 2124 }; 2125 2126 let mut child = GuestCommand::new(&guest) 2127 .args(["--cpus", "boot=1"]) 2128 .args(["--memory", "size=512M"]) 2129 .args(["--kernel", kernel_path.to_str().unwrap()]) 2130 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2131 .args([ 2132 "--disk", 2133 format!( 2134 "path={},iommu=on", 2135 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2136 ) 2137 .as_str(), 2138 "--disk", 2139 format!( 2140 "path={},iommu=on", 2141 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2142 ) 2143 .as_str(), 2144 ]) 2145 .args(["--net", guest.default_net_string_w_iommu().as_str()]) 2146 .capture_output() 2147 .spawn() 2148 .unwrap(); 2149 2150 let r = std::panic::catch_unwind(|| { 2151 guest.wait_vm_boot(None).unwrap(); 2152 2153 // Verify the virtio-iommu device is present. 2154 assert!(guest 2155 .does_device_vendor_pair_match("0x1057", "0x1af4") 2156 .unwrap_or_default()); 2157 2158 // On AArch64, if the guest system boots from FDT, the behavior of IOMMU is a bit 2159 // different with ACPI. 2160 // All devices on the PCI bus will be attached to the virtual IOMMU, except the 2161 // virtio-iommu device itself. So these devices will all be added to IOMMU groups, 2162 // and appear under folder '/sys/kernel/iommu_groups/'. 2163 // The result is, in the case of FDT, IOMMU group '0' contains "0000:00:01.0" 2164 // which is the console. The first disk "0000:00:02.0" is in group '1'. 2165 // While on ACPI, console device is not attached to IOMMU. So the IOMMU group '0' 2166 // contains "0000:00:02.0" which is the first disk. 2167 // 2168 // Verify the iommu group of the first disk. 2169 let iommu_group = !acpi as i32; 2170 assert_eq!( 2171 guest 2172 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2173 .unwrap() 2174 .trim(), 2175 "0000:00:02.0" 2176 ); 2177 2178 // Verify the iommu group of the second disk. 2179 let iommu_group = if acpi { 1 } else { 2 }; 2180 assert_eq!( 2181 guest 2182 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2183 .unwrap() 2184 .trim(), 2185 "0000:00:03.0" 2186 ); 2187 2188 // Verify the iommu group of the network card. 
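        // To recap the mapping exercised by the assertions in this block
        // (derived from the FDT/ACPI explanation above, not an extra check):
        //
        //   device          BDF             group (FDT)   group (ACPI)
        //   console         0000:00:01.0    0             not attached
        //   first disk      0000:00:02.0    1             0
        //   second disk     0000:00:03.0    2             1
        //   network card    0000:00:04.0    3             2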
2189 let iommu_group = if acpi { 2 } else { 3 }; 2190 assert_eq!( 2191 guest 2192 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2193 .unwrap() 2194 .trim(), 2195 "0000:00:04.0" 2196 ); 2197 }); 2198 2199 let _ = child.kill(); 2200 let output = child.wait_with_output().unwrap(); 2201 2202 handle_child_output(r, &output); 2203 } 2204 2205 fn get_reboot_count(guest: &Guest) -> u32 { 2206 guest 2207 .ssh_command("sudo last | grep -c reboot") 2208 .unwrap() 2209 .trim() 2210 .parse::<u32>() 2211 .unwrap_or_default() 2212 } 2213 2214 fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) { 2215 // Check for PCI device 2216 assert!(guest 2217 .does_device_vendor_pair_match("0x1063", "0x1af4") 2218 .unwrap_or_default()); 2219 2220 // Enable systemd watchdog 2221 guest 2222 .ssh_command(&format!( 2223 "echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf" 2224 )) 2225 .unwrap(); 2226 } 2227 2228 mod common_parallel { 2229 use std::{fs::OpenOptions, io::SeekFrom}; 2230 2231 use crate::*; 2232 2233 #[test] 2234 #[cfg(target_arch = "x86_64")] 2235 fn test_focal_hypervisor_fw() { 2236 test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME) 2237 } 2238 2239 #[test] 2240 #[cfg(target_arch = "x86_64")] 2241 fn test_focal_ovmf() { 2242 test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME) 2243 } 2244 2245 #[cfg(target_arch = "x86_64")] 2246 fn test_simple_launch(fw_path: String, disk_path: &str) { 2247 let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string())); 2248 let guest = Guest::new(disk_config); 2249 let event_path = temp_event_monitor_path(&guest.tmp_dir); 2250 2251 let mut child = GuestCommand::new(&guest) 2252 .args(["--cpus", "boot=1"]) 2253 .args(["--memory", "size=512M"]) 2254 .args(["--kernel", fw_path.as_str()]) 2255 .default_disks() 2256 .default_net() 2257 .args(["--serial", "tty", "--console", "off"]) 2258 .args(["--event-monitor", format!("path={event_path}").as_str()]) 2259 .capture_output() 2260 .spawn() 2261 .unwrap(); 2262 2263 let r = std::panic::catch_unwind(|| { 2264 guest.wait_vm_boot(Some(120)).unwrap(); 2265 2266 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2267 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2268 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 2269 2270 let expected_sequential_events = [ 2271 &MetaEvent { 2272 event: "starting".to_string(), 2273 device_id: None, 2274 }, 2275 &MetaEvent { 2276 event: "booting".to_string(), 2277 device_id: None, 2278 }, 2279 &MetaEvent { 2280 event: "booted".to_string(), 2281 device_id: None, 2282 }, 2283 &MetaEvent { 2284 event: "activated".to_string(), 2285 device_id: Some("_disk0".to_string()), 2286 }, 2287 &MetaEvent { 2288 event: "reset".to_string(), 2289 device_id: Some("_disk0".to_string()), 2290 }, 2291 ]; 2292 assert!(check_sequential_events( 2293 &expected_sequential_events, 2294 &event_path 2295 )); 2296 2297 // It's been observed on the Bionic image that udev and snapd 2298 // services can cause some delay in the VM's shutdown. Disabling 2299 // them improves the reliability of this test. 
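            // (The four ssh_command calls below intentionally discard their
            // results with `let _ =`: disabling/stopping udev and snapd is
            // best-effort and must not fail the test on its own; only the
            // subsequent poweroff and the event-sequence checks are asserted on.)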
2300 let _ = guest.ssh_command("sudo systemctl disable udev"); 2301 let _ = guest.ssh_command("sudo systemctl stop udev"); 2302 let _ = guest.ssh_command("sudo systemctl disable snapd"); 2303 let _ = guest.ssh_command("sudo systemctl stop snapd"); 2304 2305 guest.ssh_command("sudo poweroff").unwrap(); 2306 thread::sleep(std::time::Duration::new(20, 0)); 2307 let latest_events = [ 2308 &MetaEvent { 2309 event: "shutdown".to_string(), 2310 device_id: None, 2311 }, 2312 &MetaEvent { 2313 event: "deleted".to_string(), 2314 device_id: None, 2315 }, 2316 &MetaEvent { 2317 event: "shutdown".to_string(), 2318 device_id: None, 2319 }, 2320 ]; 2321 assert!(check_latest_events_exact(&latest_events, &event_path)); 2322 }); 2323 2324 let _ = child.kill(); 2325 let output = child.wait_with_output().unwrap(); 2326 2327 handle_child_output(r, &output); 2328 } 2329 2330 #[test] 2331 fn test_multi_cpu() { 2332 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 2333 let jammy = UbuntuDiskConfig::new(jammy_image); 2334 let guest = Guest::new(Box::new(jammy)); 2335 2336 let mut cmd = GuestCommand::new(&guest); 2337 cmd.args(["--cpus", "boot=2,max=4"]) 2338 .args(["--memory", "size=512M"]) 2339 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2340 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2341 .capture_output() 2342 .default_disks() 2343 .default_net(); 2344 2345 let mut child = cmd.spawn().unwrap(); 2346 2347 let r = std::panic::catch_unwind(|| { 2348 guest.wait_vm_boot(Some(120)).unwrap(); 2349 2350 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 2351 2352 assert_eq!( 2353 guest 2354 .ssh_command( 2355 r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""# 2356 ) 2357 .unwrap() 2358 .trim(), 2359 "smp: Brought up 1 node, 2 CPUs" 2360 ); 2361 }); 2362 2363 let _ = child.kill(); 2364 let output = child.wait_with_output().unwrap(); 2365 2366 handle_child_output(r, &output); 2367 } 2368 2369 #[test] 2370 fn test_cpu_topology_421() { 2371 test_cpu_topology(4, 2, 1, false); 2372 } 2373 2374 #[test] 2375 fn test_cpu_topology_142() { 2376 test_cpu_topology(1, 4, 2, false); 2377 } 2378 2379 #[test] 2380 fn test_cpu_topology_262() { 2381 test_cpu_topology(2, 6, 2, false); 2382 } 2383 2384 #[test] 2385 #[cfg(target_arch = "x86_64")] 2386 #[cfg(not(feature = "mshv"))] 2387 fn test_cpu_physical_bits() { 2388 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2389 let guest = Guest::new(Box::new(focal)); 2390 let max_phys_bits: u8 = 36; 2391 let mut child = GuestCommand::new(&guest) 2392 .args(["--cpus", &format!("max_phys_bits={max_phys_bits}")]) 2393 .args(["--memory", "size=512M"]) 2394 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2395 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2396 .default_disks() 2397 .default_net() 2398 .capture_output() 2399 .spawn() 2400 .unwrap(); 2401 2402 let r = std::panic::catch_unwind(|| { 2403 guest.wait_vm_boot(None).unwrap(); 2404 2405 assert!( 2406 guest 2407 .ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"") 2408 .unwrap() 2409 .trim() 2410 .parse::<u8>() 2411 .unwrap_or(max_phys_bits + 1) <= max_phys_bits, 2412 ); 2413 }); 2414 2415 let _ = child.kill(); 2416 let output = child.wait_with_output().unwrap(); 2417 2418 handle_child_output(r, &output); 2419 } 2420 2421 #[test] 2422 fn test_cpu_affinity() { 2423 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2424 let guest = Guest::new(Box::new(focal)); 2425 2426 // We need the host to 
have at least 4 CPUs if we want to be able 2427 // to run this test. 2428 let host_cpus_count = exec_host_command_output("nproc"); 2429 assert!( 2430 String::from_utf8_lossy(&host_cpus_count.stdout) 2431 .trim() 2432 .parse::<u8>() 2433 .unwrap_or(0) 2434 >= 4 2435 ); 2436 2437 let mut child = GuestCommand::new(&guest) 2438 .args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"]) 2439 .args(["--memory", "size=512M"]) 2440 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2441 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2442 .default_disks() 2443 .default_net() 2444 .capture_output() 2445 .spawn() 2446 .unwrap(); 2447 2448 let r = std::panic::catch_unwind(|| { 2449 guest.wait_vm_boot(None).unwrap(); 2450 let pid = child.id(); 2451 let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2452 assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2"); 2453 let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2454 assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3"); 2455 }); 2456 2457 let _ = child.kill(); 2458 let output = child.wait_with_output().unwrap(); 2459 2460 handle_child_output(r, &output); 2461 } 2462 2463 #[test] 2464 #[cfg(not(feature = "mshv"))] 2465 fn test_large_vm() { 2466 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2467 let guest = Guest::new(Box::new(focal)); 2468 let mut cmd = GuestCommand::new(&guest); 2469 cmd.args(["--cpus", "boot=48"]) 2470 .args(["--memory", "size=5120M"]) 2471 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2472 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2473 .args(["--serial", "tty"]) 2474 .args(["--console", "off"]) 2475 .capture_output() 2476 .default_disks() 2477 .default_net(); 2478 2479 let mut child = cmd.spawn().unwrap(); 2480 2481 guest.wait_vm_boot(None).unwrap(); 2482 2483 let r = std::panic::catch_unwind(|| { 2484 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48); 2485 assert_eq!( 2486 guest 2487 .ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"") 2488 .unwrap() 2489 .trim(), 2490 "0-47" 2491 ); 2492 2493 assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000); 2494 }); 2495 2496 let _ = child.kill(); 2497 let output = child.wait_with_output().unwrap(); 2498 2499 handle_child_output(r, &output); 2500 } 2501 2502 #[test] 2503 #[cfg(not(feature = "mshv"))] 2504 fn test_huge_memory() { 2505 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2506 let guest = Guest::new(Box::new(focal)); 2507 let mut cmd = GuestCommand::new(&guest); 2508 cmd.args(["--cpus", "boot=1"]) 2509 .args(["--memory", "size=128G"]) 2510 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2511 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2512 .capture_output() 2513 .default_disks() 2514 .default_net(); 2515 2516 let mut child = cmd.spawn().unwrap(); 2517 2518 guest.wait_vm_boot(Some(120)).unwrap(); 2519 2520 let r = std::panic::catch_unwind(|| { 2521 assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000); 2522 }); 2523 2524 let _ = child.kill(); 2525 let output = child.wait_with_output().unwrap(); 2526 2527 handle_child_output(r, &output); 2528 } 2529 2530 #[test] 2531 fn test_power_button() { 2532 _test_power_button(false); 2533 } 2534 2535 #[test] 2536 #[cfg(not(feature = "mshv"))] 
2537 fn test_user_defined_memory_regions() { 2538 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2539 let guest = Guest::new(Box::new(focal)); 2540 let api_socket = temp_api_path(&guest.tmp_dir); 2541 2542 let kernel_path = direct_kernel_boot_path(); 2543 2544 let mut child = GuestCommand::new(&guest) 2545 .args(["--cpus", "boot=1"]) 2546 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 2547 .args([ 2548 "--memory-zone", 2549 "id=mem0,size=1G,hotplug_size=2G", 2550 "--memory-zone", 2551 "id=mem1,size=1G,shared=on", 2552 "--memory-zone", 2553 "id=mem2,size=1G,host_numa_node=0,hotplug_size=2G", 2554 ]) 2555 .args(["--kernel", kernel_path.to_str().unwrap()]) 2556 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2557 .args(["--api-socket", &api_socket]) 2558 .capture_output() 2559 .default_disks() 2560 .default_net() 2561 .spawn() 2562 .unwrap(); 2563 2564 let r = std::panic::catch_unwind(|| { 2565 guest.wait_vm_boot(None).unwrap(); 2566 2567 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 2568 2569 guest.enable_memory_hotplug(); 2570 2571 resize_zone_command(&api_socket, "mem0", "3G"); 2572 thread::sleep(std::time::Duration::new(5, 0)); 2573 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2574 resize_zone_command(&api_socket, "mem2", "3G"); 2575 thread::sleep(std::time::Duration::new(5, 0)); 2576 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 2577 resize_zone_command(&api_socket, "mem0", "2G"); 2578 thread::sleep(std::time::Duration::new(5, 0)); 2579 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 2580 resize_zone_command(&api_socket, "mem2", "2G"); 2581 thread::sleep(std::time::Duration::new(5, 0)); 2582 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2583 2584 guest.reboot_linux(0, None); 2585 2586 // Check the amount of RAM after reboot 2587 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2588 assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000); 2589 2590 // Check if we can still resize down to the initial 'boot'size 2591 resize_zone_command(&api_socket, "mem0", "1G"); 2592 thread::sleep(std::time::Duration::new(5, 0)); 2593 assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000); 2594 resize_zone_command(&api_socket, "mem2", "1G"); 2595 thread::sleep(std::time::Duration::new(5, 0)); 2596 assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000); 2597 }); 2598 2599 let _ = child.kill(); 2600 let output = child.wait_with_output().unwrap(); 2601 2602 handle_child_output(r, &output); 2603 } 2604 2605 #[test] 2606 #[cfg(not(feature = "mshv"))] 2607 fn test_guest_numa_nodes() { 2608 _test_guest_numa_nodes(false); 2609 } 2610 2611 #[test] 2612 #[cfg(target_arch = "x86_64")] 2613 fn test_iommu_segments() { 2614 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2615 let focal = UbuntuDiskConfig::new(focal_image); 2616 let guest = Guest::new(Box::new(focal)); 2617 2618 // Prepare another disk file for the virtio-disk device 2619 let test_disk_path = String::from( 2620 guest 2621 .tmp_dir 2622 .as_path() 2623 .join("test-disk.raw") 2624 .to_str() 2625 .unwrap(), 2626 ); 2627 assert!( 2628 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2629 ); 2630 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2631 2632 let api_socket = temp_api_path(&guest.tmp_dir); 2633 let mut cmd = GuestCommand::new(&guest); 2634 2635 cmd.args(["--cpus", "boot=1"]) 2636 
.args(["--api-socket", &api_socket]) 2637 .args(["--memory", "size=512M"]) 2638 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2639 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2640 .args([ 2641 "--platform", 2642 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"), 2643 ]) 2644 .default_disks() 2645 .capture_output() 2646 .default_net(); 2647 2648 let mut child = cmd.spawn().unwrap(); 2649 2650 guest.wait_vm_boot(None).unwrap(); 2651 2652 let r = std::panic::catch_unwind(|| { 2653 let (cmd_success, cmd_output) = remote_command_w_output( 2654 &api_socket, 2655 "add-disk", 2656 Some( 2657 format!( 2658 "path={},id=test0,pci_segment=1,iommu=on", 2659 test_disk_path.as_str() 2660 ) 2661 .as_str(), 2662 ), 2663 ); 2664 assert!(cmd_success); 2665 assert!(String::from_utf8_lossy(&cmd_output) 2666 .contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}")); 2667 2668 // Check IOMMU setup 2669 assert!(guest 2670 .does_device_vendor_pair_match("0x1057", "0x1af4") 2671 .unwrap_or_default()); 2672 assert_eq!( 2673 guest 2674 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 2675 .unwrap() 2676 .trim(), 2677 "0001:00:01.0" 2678 ); 2679 }); 2680 2681 let _ = child.kill(); 2682 let output = child.wait_with_output().unwrap(); 2683 2684 handle_child_output(r, &output); 2685 } 2686 2687 #[test] 2688 fn test_pci_msi() { 2689 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2690 let guest = Guest::new(Box::new(focal)); 2691 let mut cmd = GuestCommand::new(&guest); 2692 cmd.args(["--cpus", "boot=1"]) 2693 .args(["--memory", "size=512M"]) 2694 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2695 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2696 .capture_output() 2697 .default_disks() 2698 .default_net(); 2699 2700 let mut child = cmd.spawn().unwrap(); 2701 2702 guest.wait_vm_boot(None).unwrap(); 2703 2704 #[cfg(target_arch = "x86_64")] 2705 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 2706 #[cfg(target_arch = "aarch64")] 2707 let grep_cmd = "grep -c ITS-MSI /proc/interrupts"; 2708 2709 let r = std::panic::catch_unwind(|| { 2710 assert_eq!( 2711 guest 2712 .ssh_command(grep_cmd) 2713 .unwrap() 2714 .trim() 2715 .parse::<u32>() 2716 .unwrap_or_default(), 2717 12 2718 ); 2719 }); 2720 2721 let _ = child.kill(); 2722 let output = child.wait_with_output().unwrap(); 2723 2724 handle_child_output(r, &output); 2725 } 2726 2727 #[test] 2728 fn test_virtio_net_ctrl_queue() { 2729 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2730 let guest = Guest::new(Box::new(focal)); 2731 let mut cmd = GuestCommand::new(&guest); 2732 cmd.args(["--cpus", "boot=1"]) 2733 .args(["--memory", "size=512M"]) 2734 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2735 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2736 .args(["--net", guest.default_net_string_w_mtu(3000).as_str()]) 2737 .capture_output() 2738 .default_disks(); 2739 2740 let mut child = cmd.spawn().unwrap(); 2741 2742 guest.wait_vm_boot(None).unwrap(); 2743 2744 #[cfg(target_arch = "aarch64")] 2745 let iface = "enp0s4"; 2746 #[cfg(target_arch = "x86_64")] 2747 let iface = "ens4"; 2748 2749 let r = std::panic::catch_unwind(|| { 2750 assert_eq!( 2751 guest 2752 .ssh_command( 2753 format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str() 2754 ) 2755 .unwrap() 2756 .trim(), 2757 "success" 2758 ); 2759 assert_eq!( 2760 guest 2761 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str()) 2762 .unwrap() 2763 .trim(), 2764 "3000" 2765 ); 
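            // (For reference, the `ethtool -K {iface} rx-gro-hw off` call above
            // is what actually exercises the control queue: the guest driver
            // translates it into a VIRTIO_NET_CTRL_GUEST_OFFLOADS command on the
            // ctrl virtqueue, so the "success" echo implies the device processed
            // a ctrl-queue request. A similar, purely illustrative check (not run
            // here) would be toggling promiscuous mode with
            // `sudo ip link set {iface} promisc on`, which goes through
            // VIRTIO_NET_CTRL_RX.)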
2766 }); 2767 2768 let _ = child.kill(); 2769 let output = child.wait_with_output().unwrap(); 2770 2771 handle_child_output(r, &output); 2772 } 2773 2774 #[test] 2775 #[cfg(not(feature = "mshv"))] 2776 fn test_pci_multiple_segments() { 2777 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2778 let guest = Guest::new(Box::new(focal)); 2779 2780 // Prepare another disk file for the virtio-disk device 2781 let test_disk_path = String::from( 2782 guest 2783 .tmp_dir 2784 .as_path() 2785 .join("test-disk.raw") 2786 .to_str() 2787 .unwrap(), 2788 ); 2789 assert!( 2790 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2791 ); 2792 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2793 2794 let mut cmd = GuestCommand::new(&guest); 2795 cmd.args(["--cpus", "boot=1"]) 2796 .args(["--memory", "size=512M"]) 2797 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2798 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2799 .args([ 2800 "--platform", 2801 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 2802 ]) 2803 .args([ 2804 "--disk", 2805 format!( 2806 "path={}", 2807 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2808 ) 2809 .as_str(), 2810 "--disk", 2811 format!( 2812 "path={}", 2813 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2814 ) 2815 .as_str(), 2816 "--disk", 2817 format!("path={test_disk_path},pci_segment=15").as_str(), 2818 ]) 2819 .capture_output() 2820 .default_net(); 2821 2822 let mut child = cmd.spawn().unwrap(); 2823 2824 guest.wait_vm_boot(None).unwrap(); 2825 2826 let grep_cmd = "lspci | grep \"Host bridge\" | wc -l"; 2827 2828 let r = std::panic::catch_unwind(|| { 2829 // There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest. 2830 assert_eq!( 2831 guest 2832 .ssh_command(grep_cmd) 2833 .unwrap() 2834 .trim() 2835 .parse::<u16>() 2836 .unwrap_or_default(), 2837 MAX_NUM_PCI_SEGMENTS 2838 ); 2839 2840 // Check both if /dev/vdc exists and if the block size is 4M. 2841 assert_eq!( 2842 guest 2843 .ssh_command("lsblk | grep vdc | grep -c 4M") 2844 .unwrap() 2845 .trim() 2846 .parse::<u32>() 2847 .unwrap_or_default(), 2848 1 2849 ); 2850 2851 // Mount the device. 2852 guest.ssh_command("mkdir mount_image").unwrap(); 2853 guest 2854 .ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/") 2855 .unwrap(); 2856 // Grant all users with write permission. 2857 guest.ssh_command("sudo chmod a+w mount_image/").unwrap(); 2858 2859 // Write something to the device. 2860 guest 2861 .ssh_command("sudo echo \"bar\" >> mount_image/foo") 2862 .unwrap(); 2863 2864 // Check the content of the block device. The file "foo" should 2865 // contain "bar". 
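            // (Note on the write above: the `>>` redirection is performed by the
            // non-root ssh user's shell rather than by sudo, which is why the
            // mount point was made world-writable with `chmod a+w` beforehand.)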
2866 assert_eq!( 2867 guest 2868 .ssh_command("sudo cat mount_image/foo") 2869 .unwrap() 2870 .trim(), 2871 "bar" 2872 ); 2873 }); 2874 2875 let _ = child.kill(); 2876 let output = child.wait_with_output().unwrap(); 2877 2878 handle_child_output(r, &output); 2879 } 2880 2881 #[test] 2882 fn test_direct_kernel_boot() { 2883 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2884 let guest = Guest::new(Box::new(focal)); 2885 2886 let kernel_path = direct_kernel_boot_path(); 2887 2888 let mut child = GuestCommand::new(&guest) 2889 .args(["--cpus", "boot=1"]) 2890 .args(["--memory", "size=512M"]) 2891 .args(["--kernel", kernel_path.to_str().unwrap()]) 2892 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2893 .default_disks() 2894 .default_net() 2895 .capture_output() 2896 .spawn() 2897 .unwrap(); 2898 2899 let r = std::panic::catch_unwind(|| { 2900 guest.wait_vm_boot(None).unwrap(); 2901 2902 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2903 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2904 2905 let grep_cmd = if cfg!(target_arch = "x86_64") { 2906 "grep -c PCI-MSI /proc/interrupts" 2907 } else { 2908 "grep -c ITS-MSI /proc/interrupts" 2909 }; 2910 assert_eq!( 2911 guest 2912 .ssh_command(grep_cmd) 2913 .unwrap() 2914 .trim() 2915 .parse::<u32>() 2916 .unwrap_or_default(), 2917 12 2918 ); 2919 }); 2920 2921 let _ = child.kill(); 2922 let output = child.wait_with_output().unwrap(); 2923 2924 handle_child_output(r, &output); 2925 } 2926 2927 fn _test_virtio_block(image_name: &str, disable_io_uring: bool) { 2928 let focal = UbuntuDiskConfig::new(image_name.to_string()); 2929 let guest = Guest::new(Box::new(focal)); 2930 2931 let mut workload_path = dirs::home_dir().unwrap(); 2932 workload_path.push("workloads"); 2933 2934 let mut blk_file_path = workload_path; 2935 blk_file_path.push("blk.img"); 2936 2937 let kernel_path = direct_kernel_boot_path(); 2938 2939 let mut cloud_child = GuestCommand::new(&guest) 2940 .args(["--cpus", "boot=4"]) 2941 .args(["--memory", "size=512M,shared=on"]) 2942 .args(["--kernel", kernel_path.to_str().unwrap()]) 2943 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2944 .args([ 2945 "--disk", 2946 format!( 2947 "path={}", 2948 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2949 ) 2950 .as_str(), 2951 "--disk", 2952 format!( 2953 "path={}", 2954 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2955 ) 2956 .as_str(), 2957 "--disk", 2958 format!( 2959 "path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={}", 2960 blk_file_path.to_str().unwrap(), 2961 disable_io_uring 2962 ) 2963 .as_str(), 2964 ]) 2965 .default_net() 2966 .capture_output() 2967 .spawn() 2968 .unwrap(); 2969 2970 let r = std::panic::catch_unwind(|| { 2971 guest.wait_vm_boot(None).unwrap(); 2972 2973 // Check both if /dev/vdc exists and if the block size is 16M. 2974 assert_eq!( 2975 guest 2976 .ssh_command("lsblk | grep vdc | grep -c 16M") 2977 .unwrap() 2978 .trim() 2979 .parse::<u32>() 2980 .unwrap_or_default(), 2981 1 2982 ); 2983 2984 // Check both if /dev/vdc exists and if this block is RO. 2985 assert_eq!( 2986 guest 2987 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 2988 .unwrap() 2989 .trim() 2990 .parse::<u32>() 2991 .unwrap_or_default(), 2992 1 2993 ); 2994 2995 // Check if the number of queues is 4. 
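            // (With `num_queues=4` on the third --disk above, the guest blk-mq
            // driver creates one hardware context per queue, visible as
            // /sys/block/vdc/mq/0 through /sys/block/vdc/mq/3; counting those
            // directories is therefore a direct readout of the negotiated
            // queue count.)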
2996 assert_eq!( 2997 guest 2998 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 2999 .unwrap() 3000 .trim() 3001 .parse::<u32>() 3002 .unwrap_or_default(), 3003 4 3004 ); 3005 }); 3006 3007 let _ = cloud_child.kill(); 3008 let output = cloud_child.wait_with_output().unwrap(); 3009 3010 handle_child_output(r, &output); 3011 } 3012 3013 #[test] 3014 fn test_virtio_block() { 3015 _test_virtio_block(FOCAL_IMAGE_NAME, false) 3016 } 3017 3018 #[test] 3019 fn test_virtio_block_disable_io_uring() { 3020 _test_virtio_block(FOCAL_IMAGE_NAME, true) 3021 } 3022 3023 #[test] 3024 fn test_virtio_block_qcow2() { 3025 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false) 3026 } 3027 3028 #[test] 3029 fn test_virtio_block_vhd() { 3030 let mut workload_path = dirs::home_dir().unwrap(); 3031 workload_path.push("workloads"); 3032 3033 let mut raw_file_path = workload_path.clone(); 3034 let mut vhd_file_path = workload_path; 3035 raw_file_path.push(FOCAL_IMAGE_NAME); 3036 vhd_file_path.push(FOCAL_IMAGE_NAME_VHD); 3037 3038 // Generate VHD file from RAW file 3039 std::process::Command::new("qemu-img") 3040 .arg("convert") 3041 .arg("-p") 3042 .args(["-f", "raw"]) 3043 .args(["-O", "vpc"]) 3044 .args(["-o", "subformat=fixed"]) 3045 .arg(raw_file_path.to_str().unwrap()) 3046 .arg(vhd_file_path.to_str().unwrap()) 3047 .output() 3048 .expect("Expect generating VHD image from RAW image"); 3049 3050 _test_virtio_block(FOCAL_IMAGE_NAME_VHD, false) 3051 } 3052 3053 #[test] 3054 fn test_virtio_block_vhdx() { 3055 let mut workload_path = dirs::home_dir().unwrap(); 3056 workload_path.push("workloads"); 3057 3058 let mut raw_file_path = workload_path.clone(); 3059 let mut vhdx_file_path = workload_path; 3060 raw_file_path.push(FOCAL_IMAGE_NAME); 3061 vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX); 3062 3063 // Generate dynamic VHDX file from RAW file 3064 std::process::Command::new("qemu-img") 3065 .arg("convert") 3066 .arg("-p") 3067 .args(["-f", "raw"]) 3068 .args(["-O", "vhdx"]) 3069 .arg(raw_file_path.to_str().unwrap()) 3070 .arg(vhdx_file_path.to_str().unwrap()) 3071 .output() 3072 .expect("Expect generating dynamic VHDx image from RAW image"); 3073 3074 _test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false) 3075 } 3076 3077 #[test] 3078 fn test_virtio_block_dynamic_vhdx_expand() { 3079 const VIRTUAL_DISK_SIZE: u64 = 100 << 20; 3080 const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20; 3081 const FULL_VHDX_FILE_SIZE: u64 = 112 << 20; 3082 const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx"; 3083 3084 let mut workload_path = dirs::home_dir().unwrap(); 3085 workload_path.push("workloads"); 3086 3087 let mut vhdx_file_path = workload_path; 3088 vhdx_file_path.push(DYNAMIC_VHDX_NAME); 3089 let vhdx_path = vhdx_file_path.to_str().unwrap(); 3090 3091 // Generate a 100 MiB dynamic VHDX file 3092 std::process::Command::new("qemu-img") 3093 .arg("create") 3094 .args(["-f", "vhdx"]) 3095 .arg(vhdx_path) 3096 .arg(VIRTUAL_DISK_SIZE.to_string()) 3097 .output() 3098 .expect("Expect generating dynamic VHDx image from RAW image"); 3099 3100 // Check if the size matches with empty VHDx file size 3101 assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE); 3102 3103 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3104 let guest = Guest::new(Box::new(focal)); 3105 let kernel_path = direct_kernel_boot_path(); 3106 3107 let mut cloud_child = GuestCommand::new(&guest) 3108 .args(["--cpus", "boot=1"]) 3109 .args(["--memory", "size=512M"]) 3110 .args(["--kernel", kernel_path.to_str().unwrap()]) 3111 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE]) 3112 .args([ 3113 "--disk", 3114 format!( 3115 "path={}", 3116 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3117 ) 3118 .as_str(), 3119 "--disk", 3120 format!( 3121 "path={}", 3122 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3123 ) 3124 .as_str(), 3125 "--disk", 3126 format!("path={vhdx_path}").as_str(), 3127 ]) 3128 .default_net() 3129 .capture_output() 3130 .spawn() 3131 .unwrap(); 3132 3133 let r = std::panic::catch_unwind(|| { 3134 guest.wait_vm_boot(None).unwrap(); 3135 3136 // Check both if /dev/vdc exists and if the block size is 100 MiB. 3137 assert_eq!( 3138 guest 3139 .ssh_command("lsblk | grep vdc | grep -c 100M") 3140 .unwrap() 3141 .trim() 3142 .parse::<u32>() 3143 .unwrap_or_default(), 3144 1 3145 ); 3146 3147 // Write 100 MB of data to the VHDx disk 3148 guest 3149 .ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100") 3150 .unwrap(); 3151 }); 3152 3153 // Check if the size matches with expected expanded VHDx file size 3154 assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE); 3155 3156 let _ = cloud_child.kill(); 3157 let output = cloud_child.wait_with_output().unwrap(); 3158 3159 handle_child_output(r, &output); 3160 } 3161 3162 fn vhdx_image_size(disk_name: &str) -> u64 { 3163 std::fs::File::open(disk_name) 3164 .unwrap() 3165 .seek(SeekFrom::End(0)) 3166 .unwrap() 3167 } 3168 3169 #[test] 3170 fn test_virtio_block_direct_and_firmware() { 3171 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3172 let guest = Guest::new(Box::new(focal)); 3173 3174 // The OS disk must be copied to a location that is not backed by 3175 // tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails 3176 // with EINVAL because tmpfs doesn't support this flag. 3177 let mut workloads_path = dirs::home_dir().unwrap(); 3178 workloads_path.push("workloads"); 3179 let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap(); 3180 let mut os_path = os_dir.as_path().to_path_buf(); 3181 os_path.push("osdisk.img"); 3182 rate_limited_copy( 3183 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3184 os_path.as_path(), 3185 ) 3186 .expect("copying of OS disk failed"); 3187 3188 let mut child = GuestCommand::new(&guest) 3189 .args(["--cpus", "boot=1"]) 3190 .args(["--memory", "size=512M"]) 3191 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 3192 .args([ 3193 "--disk", 3194 format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(), 3195 "--disk", 3196 format!( 3197 "path={}", 3198 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3199 ) 3200 .as_str(), 3201 ]) 3202 .default_net() 3203 .capture_output() 3204 .spawn() 3205 .unwrap(); 3206 3207 let r = std::panic::catch_unwind(|| { 3208 guest.wait_vm_boot(Some(120)).unwrap(); 3209 }); 3210 3211 let _ = child.kill(); 3212 let output = child.wait_with_output().unwrap(); 3213 3214 handle_child_output(r, &output); 3215 } 3216 3217 #[test] 3218 fn test_vhost_user_net_default() { 3219 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false) 3220 } 3221 3222 #[test] 3223 fn test_vhost_user_net_named_tap() { 3224 test_vhost_user_net( 3225 Some("mytap0"), 3226 2, 3227 &prepare_vhost_user_net_daemon, 3228 false, 3229 false, 3230 ) 3231 } 3232 3233 #[test] 3234 fn test_vhost_user_net_existing_tap() { 3235 test_vhost_user_net( 3236 Some("vunet-tap0"), 3237 2, 3238 &prepare_vhost_user_net_daemon, 3239 false, 3240 false, 3241 ) 3242 } 3243 3244 #[test] 3245 fn test_vhost_user_net_multiple_queues() { 3246 
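        // Judging by the sibling call sites in this module, the arguments are
        // (tap_name, num_queues, daemon_setup, host_mac, client_mode); this
        // variant keeps an anonymous tap and only raises the queue count to 4.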
test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false) 3247 } 3248 3249 #[test] 3250 fn test_vhost_user_net_tap_multiple_queues() { 3251 test_vhost_user_net( 3252 Some("vunet-tap1"), 3253 4, 3254 &prepare_vhost_user_net_daemon, 3255 false, 3256 false, 3257 ) 3258 } 3259 3260 #[test] 3261 fn test_vhost_user_net_host_mac() { 3262 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false) 3263 } 3264 3265 #[test] 3266 fn test_vhost_user_net_client_mode() { 3267 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true) 3268 } 3269 3270 #[test] 3271 fn test_vhost_user_blk_default() { 3272 test_vhost_user_blk(2, false, false, Some(&prepare_vubd)) 3273 } 3274 3275 #[test] 3276 fn test_vhost_user_blk_readonly() { 3277 test_vhost_user_blk(1, true, false, Some(&prepare_vubd)) 3278 } 3279 3280 #[test] 3281 fn test_vhost_user_blk_direct() { 3282 test_vhost_user_blk(1, false, true, Some(&prepare_vubd)) 3283 } 3284 3285 #[test] 3286 fn test_boot_from_vhost_user_blk_default() { 3287 test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd)) 3288 } 3289 3290 #[test] 3291 #[cfg(target_arch = "x86_64")] 3292 fn test_split_irqchip() { 3293 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3294 let guest = Guest::new(Box::new(focal)); 3295 3296 let mut child = GuestCommand::new(&guest) 3297 .args(["--cpus", "boot=1"]) 3298 .args(["--memory", "size=512M"]) 3299 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3300 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3301 .default_disks() 3302 .default_net() 3303 .capture_output() 3304 .spawn() 3305 .unwrap(); 3306 3307 let r = std::panic::catch_unwind(|| { 3308 guest.wait_vm_boot(None).unwrap(); 3309 3310 assert_eq!( 3311 guest 3312 .ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true") 3313 .unwrap() 3314 .trim() 3315 .parse::<u32>() 3316 .unwrap_or(1), 3317 0 3318 ); 3319 assert_eq!( 3320 guest 3321 .ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true") 3322 .unwrap() 3323 .trim() 3324 .parse::<u32>() 3325 .unwrap_or(1), 3326 0 3327 ); 3328 }); 3329 3330 let _ = child.kill(); 3331 let output = child.wait_with_output().unwrap(); 3332 3333 handle_child_output(r, &output); 3334 } 3335 3336 #[test] 3337 #[cfg(target_arch = "x86_64")] 3338 fn test_dmi_serial_number() { 3339 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3340 let guest = Guest::new(Box::new(focal)); 3341 3342 let mut child = GuestCommand::new(&guest) 3343 .args(["--cpus", "boot=1"]) 3344 .args(["--memory", "size=512M"]) 3345 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3346 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3347 .args(["--platform", "serial_number=a=b;c=d"]) 3348 .default_disks() 3349 .default_net() 3350 .capture_output() 3351 .spawn() 3352 .unwrap(); 3353 3354 let r = std::panic::catch_unwind(|| { 3355 guest.wait_vm_boot(None).unwrap(); 3356 3357 assert_eq!( 3358 guest 3359 .ssh_command("sudo cat /sys/class/dmi/id/product_serial") 3360 .unwrap() 3361 .trim(), 3362 "a=b;c=d" 3363 ); 3364 }); 3365 3366 let _ = child.kill(); 3367 let output = child.wait_with_output().unwrap(); 3368 3369 handle_child_output(r, &output); 3370 } 3371 3372 #[test] 3373 #[cfg(target_arch = "x86_64")] 3374 fn test_dmi_uuid() { 3375 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3376 let guest = Guest::new(Box::new(focal)); 3377 3378 let mut child = GuestCommand::new(&guest) 3379 .args(["--cpus", "boot=1"]) 3380 .args(["--memory", "size=512M"]) 
3381 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3382 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3383 .args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"]) 3384 .default_disks() 3385 .default_net() 3386 .capture_output() 3387 .spawn() 3388 .unwrap(); 3389 3390 let r = std::panic::catch_unwind(|| { 3391 guest.wait_vm_boot(None).unwrap(); 3392 3393 assert_eq!( 3394 guest 3395 .ssh_command("sudo cat /sys/class/dmi/id/product_uuid") 3396 .unwrap() 3397 .trim(), 3398 "1e8aa28a-435d-4027-87f4-40dceff1fa0a" 3399 ); 3400 }); 3401 3402 let _ = child.kill(); 3403 let output = child.wait_with_output().unwrap(); 3404 3405 handle_child_output(r, &output); 3406 } 3407 3408 #[test] 3409 #[cfg(target_arch = "x86_64")] 3410 fn test_dmi_oem_strings() { 3411 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3412 let guest = Guest::new(Box::new(focal)); 3413 3414 let s1 = "io.systemd.credential:xx=yy"; 3415 let s2 = "This is a test string"; 3416 3417 let oem_strings = format!("oem_strings=[{s1},{s2}]"); 3418 3419 let mut child = GuestCommand::new(&guest) 3420 .args(["--cpus", "boot=1"]) 3421 .args(["--memory", "size=512M"]) 3422 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3423 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3424 .args(["--platform", &oem_strings]) 3425 .default_disks() 3426 .default_net() 3427 .capture_output() 3428 .spawn() 3429 .unwrap(); 3430 3431 let r = std::panic::catch_unwind(|| { 3432 guest.wait_vm_boot(None).unwrap(); 3433 3434 assert_eq!( 3435 guest 3436 .ssh_command("sudo dmidecode --oem-string count") 3437 .unwrap() 3438 .trim(), 3439 "2" 3440 ); 3441 3442 assert_eq!( 3443 guest 3444 .ssh_command("sudo dmidecode --oem-string 1") 3445 .unwrap() 3446 .trim(), 3447 s1 3448 ); 3449 3450 assert_eq!( 3451 guest 3452 .ssh_command("sudo dmidecode --oem-string 2") 3453 .unwrap() 3454 .trim(), 3455 s2 3456 ); 3457 }); 3458 3459 let _ = child.kill(); 3460 let output = child.wait_with_output().unwrap(); 3461 3462 handle_child_output(r, &output); 3463 } 3464 3465 #[test] 3466 fn test_virtio_fs() { 3467 _test_virtio_fs(&prepare_virtiofsd, false, None) 3468 } 3469 3470 #[test] 3471 fn test_virtio_fs_hotplug() { 3472 _test_virtio_fs(&prepare_virtiofsd, true, None) 3473 } 3474 3475 #[test] 3476 #[cfg(not(feature = "mshv"))] 3477 fn test_virtio_fs_multi_segment_hotplug() { 3478 _test_virtio_fs(&prepare_virtiofsd, true, Some(15)) 3479 } 3480 3481 #[test] 3482 #[cfg(not(feature = "mshv"))] 3483 fn test_virtio_fs_multi_segment() { 3484 _test_virtio_fs(&prepare_virtiofsd, false, Some(15)) 3485 } 3486 3487 #[test] 3488 fn test_virtio_pmem_persist_writes() { 3489 test_virtio_pmem(false, false) 3490 } 3491 3492 #[test] 3493 fn test_virtio_pmem_discard_writes() { 3494 test_virtio_pmem(true, false) 3495 } 3496 3497 #[test] 3498 fn test_virtio_pmem_with_size() { 3499 test_virtio_pmem(true, true) 3500 } 3501 3502 #[test] 3503 fn test_boot_from_virtio_pmem() { 3504 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3505 let guest = Guest::new(Box::new(focal)); 3506 3507 let kernel_path = direct_kernel_boot_path(); 3508 3509 let mut child = GuestCommand::new(&guest) 3510 .args(["--cpus", "boot=1"]) 3511 .args(["--memory", "size=512M"]) 3512 .args(["--kernel", kernel_path.to_str().unwrap()]) 3513 .args([ 3514 "--disk", 3515 format!( 3516 "path={}", 3517 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3518 ) 3519 .as_str(), 3520 ]) 3521 .default_net() 3522 .args([ 3523 "--pmem", 3524 format!( 3525 "file={},size={}", 
3526 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3527 fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) 3528 .unwrap() 3529 .len() 3530 ) 3531 .as_str(), 3532 ]) 3533 .args([ 3534 "--cmdline", 3535 DIRECT_KERNEL_BOOT_CMDLINE 3536 .replace("vda1", "pmem0p1") 3537 .as_str(), 3538 ]) 3539 .capture_output() 3540 .spawn() 3541 .unwrap(); 3542 3543 let r = std::panic::catch_unwind(|| { 3544 guest.wait_vm_boot(None).unwrap(); 3545 3546 // Simple checks to validate the VM booted properly 3547 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3548 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3549 }); 3550 3551 let _ = child.kill(); 3552 let output = child.wait_with_output().unwrap(); 3553 3554 handle_child_output(r, &output); 3555 } 3556 3557 #[test] 3558 fn test_multiple_network_interfaces() { 3559 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3560 let guest = Guest::new(Box::new(focal)); 3561 3562 let kernel_path = direct_kernel_boot_path(); 3563 3564 let mut child = GuestCommand::new(&guest) 3565 .args(["--cpus", "boot=1"]) 3566 .args(["--memory", "size=512M"]) 3567 .args(["--kernel", kernel_path.to_str().unwrap()]) 3568 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3569 .default_disks() 3570 .args([ 3571 "--net", 3572 guest.default_net_string().as_str(), 3573 "--net", 3574 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 3575 "--net", 3576 "tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 3577 ]) 3578 .capture_output() 3579 .spawn() 3580 .unwrap(); 3581 3582 let r = std::panic::catch_unwind(|| { 3583 guest.wait_vm_boot(None).unwrap(); 3584 3585 let tap_count = exec_host_command_output("ip link | grep -c mytap1"); 3586 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 3587 3588 // 3 network interfaces + default localhost ==> 4 interfaces 3589 assert_eq!( 3590 guest 3591 .ssh_command("ip -o link | wc -l") 3592 .unwrap() 3593 .trim() 3594 .parse::<u32>() 3595 .unwrap_or_default(), 3596 4 3597 ); 3598 }); 3599 3600 let _ = child.kill(); 3601 let output = child.wait_with_output().unwrap(); 3602 3603 handle_child_output(r, &output); 3604 } 3605 3606 #[test] 3607 #[cfg(target_arch = "aarch64")] 3608 fn test_pmu_on() { 3609 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3610 let guest = Guest::new(Box::new(focal)); 3611 let mut child = GuestCommand::new(&guest) 3612 .args(["--cpus", "boot=1"]) 3613 .args(["--memory", "size=512M"]) 3614 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3615 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3616 .default_disks() 3617 .default_net() 3618 .capture_output() 3619 .spawn() 3620 .unwrap(); 3621 3622 let r = std::panic::catch_unwind(|| { 3623 guest.wait_vm_boot(None).unwrap(); 3624 3625 // Test that PMU exists. 
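            // (A complementary manual check, not run here, would be something
            // like `perf stat -e cycles -- true` inside the guest and confirming
            // the cycle counter is not reported as "<not supported>", since a
            // usable vPMU is what this interrupt count is meant to demonstrate.)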
3626 assert_eq!( 3627 guest 3628 .ssh_command(GREP_PMU_IRQ_CMD) 3629 .unwrap() 3630 .trim() 3631 .parse::<u32>() 3632 .unwrap_or_default(), 3633 1 3634 ); 3635 }); 3636 3637 let _ = child.kill(); 3638 let output = child.wait_with_output().unwrap(); 3639 3640 handle_child_output(r, &output); 3641 } 3642 3643 #[test] 3644 fn test_serial_off() { 3645 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3646 let guest = Guest::new(Box::new(focal)); 3647 let mut child = GuestCommand::new(&guest) 3648 .args(["--cpus", "boot=1"]) 3649 .args(["--memory", "size=512M"]) 3650 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3651 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3652 .default_disks() 3653 .default_net() 3654 .args(["--serial", "off"]) 3655 .capture_output() 3656 .spawn() 3657 .unwrap(); 3658 3659 let r = std::panic::catch_unwind(|| { 3660 guest.wait_vm_boot(None).unwrap(); 3661 3662 // Test that there is no ttyS0 3663 assert_eq!( 3664 guest 3665 .ssh_command(GREP_SERIAL_IRQ_CMD) 3666 .unwrap() 3667 .trim() 3668 .parse::<u32>() 3669 .unwrap_or(1), 3670 0 3671 ); 3672 }); 3673 3674 let _ = child.kill(); 3675 let output = child.wait_with_output().unwrap(); 3676 3677 handle_child_output(r, &output); 3678 } 3679 3680 #[test] 3681 fn test_serial_null() { 3682 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3683 let guest = Guest::new(Box::new(focal)); 3684 let mut cmd = GuestCommand::new(&guest); 3685 #[cfg(target_arch = "x86_64")] 3686 let console_str: &str = "console=ttyS0"; 3687 #[cfg(target_arch = "aarch64")] 3688 let console_str: &str = "console=ttyAMA0"; 3689 3690 cmd.args(["--cpus", "boot=1"]) 3691 .args(["--memory", "size=512M"]) 3692 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3693 .args([ 3694 "--cmdline", 3695 DIRECT_KERNEL_BOOT_CMDLINE 3696 .replace("console=hvc0 ", console_str) 3697 .as_str(), 3698 ]) 3699 .default_disks() 3700 .default_net() 3701 .args(["--serial", "null"]) 3702 .args(["--console", "off"]) 3703 .capture_output(); 3704 3705 let mut child = cmd.spawn().unwrap(); 3706 3707 let r = std::panic::catch_unwind(|| { 3708 guest.wait_vm_boot(None).unwrap(); 3709 3710 // Test that there is a ttyS0 3711 assert_eq!( 3712 guest 3713 .ssh_command(GREP_SERIAL_IRQ_CMD) 3714 .unwrap() 3715 .trim() 3716 .parse::<u32>() 3717 .unwrap_or_default(), 3718 1 3719 ); 3720 }); 3721 3722 let _ = child.kill(); 3723 let output = child.wait_with_output().unwrap(); 3724 handle_child_output(r, &output); 3725 3726 let r = std::panic::catch_unwind(|| { 3727 assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 3728 }); 3729 3730 handle_child_output(r, &output); 3731 } 3732 3733 #[test] 3734 fn test_serial_tty() { 3735 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3736 let guest = Guest::new(Box::new(focal)); 3737 3738 let kernel_path = direct_kernel_boot_path(); 3739 3740 #[cfg(target_arch = "x86_64")] 3741 let console_str: &str = "console=ttyS0"; 3742 #[cfg(target_arch = "aarch64")] 3743 let console_str: &str = "console=ttyAMA0"; 3744 3745 let mut child = GuestCommand::new(&guest) 3746 .args(["--cpus", "boot=1"]) 3747 .args(["--memory", "size=512M"]) 3748 .args(["--kernel", kernel_path.to_str().unwrap()]) 3749 .args([ 3750 "--cmdline", 3751 DIRECT_KERNEL_BOOT_CMDLINE 3752 .replace("console=hvc0 ", console_str) 3753 .as_str(), 3754 ]) 3755 .default_disks() 3756 .default_net() 3757 .args(["--serial", "tty"]) 3758 .args(["--console", "off"]) 3759 .capture_output() 3760 .spawn() 3761 
.unwrap(); 3762 3763 let r = std::panic::catch_unwind(|| { 3764 guest.wait_vm_boot(None).unwrap(); 3765 3766 // Test that there is a ttyS0 3767 assert_eq!( 3768 guest 3769 .ssh_command(GREP_SERIAL_IRQ_CMD) 3770 .unwrap() 3771 .trim() 3772 .parse::<u32>() 3773 .unwrap_or_default(), 3774 1 3775 ); 3776 }); 3777 3778 // This sleep is needed to wait for the login prompt 3779 thread::sleep(std::time::Duration::new(2, 0)); 3780 3781 let _ = child.kill(); 3782 let output = child.wait_with_output().unwrap(); 3783 handle_child_output(r, &output); 3784 3785 let r = std::panic::catch_unwind(|| { 3786 assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 3787 }); 3788 3789 handle_child_output(r, &output); 3790 } 3791 3792 #[test] 3793 fn test_serial_file() { 3794 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3795 let guest = Guest::new(Box::new(focal)); 3796 3797 let serial_path = guest.tmp_dir.as_path().join("/tmp/serial-output"); 3798 #[cfg(target_arch = "x86_64")] 3799 let console_str: &str = "console=ttyS0"; 3800 #[cfg(target_arch = "aarch64")] 3801 let console_str: &str = "console=ttyAMA0"; 3802 3803 let mut child = GuestCommand::new(&guest) 3804 .args(["--cpus", "boot=1"]) 3805 .args(["--memory", "size=512M"]) 3806 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3807 .args([ 3808 "--cmdline", 3809 DIRECT_KERNEL_BOOT_CMDLINE 3810 .replace("console=hvc0 ", console_str) 3811 .as_str(), 3812 ]) 3813 .default_disks() 3814 .default_net() 3815 .args([ 3816 "--serial", 3817 format!("file={}", serial_path.to_str().unwrap()).as_str(), 3818 ]) 3819 .capture_output() 3820 .spawn() 3821 .unwrap(); 3822 3823 let r = std::panic::catch_unwind(|| { 3824 guest.wait_vm_boot(None).unwrap(); 3825 3826 // Test that there is a ttyS0 3827 assert_eq!( 3828 guest 3829 .ssh_command(GREP_SERIAL_IRQ_CMD) 3830 .unwrap() 3831 .trim() 3832 .parse::<u32>() 3833 .unwrap_or_default(), 3834 1 3835 ); 3836 3837 guest.ssh_command("sudo shutdown -h now").unwrap(); 3838 }); 3839 3840 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 3841 let _ = child.kill(); 3842 let output = child.wait_with_output().unwrap(); 3843 handle_child_output(r, &output); 3844 3845 let r = std::panic::catch_unwind(|| { 3846 // Check that the cloud-hypervisor binary actually terminated 3847 assert!(output.status.success()); 3848 3849 // Do this check after shutdown of the VM as an easy way to ensure 3850 // all writes are flushed to disk 3851 let mut f = std::fs::File::open(serial_path).unwrap(); 3852 let mut buf = String::new(); 3853 f.read_to_string(&mut buf).unwrap(); 3854 assert!(buf.contains(CONSOLE_TEST_STRING)); 3855 }); 3856 3857 handle_child_output(r, &output); 3858 } 3859 3860 #[test] 3861 fn test_pty_interaction() { 3862 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3863 let guest = Guest::new(Box::new(focal)); 3864 let api_socket = temp_api_path(&guest.tmp_dir); 3865 let serial_option = if cfg!(target_arch = "x86_64") { 3866 " console=ttyS0" 3867 } else { 3868 " console=ttyAMA0" 3869 }; 3870 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 3871 3872 let mut child = GuestCommand::new(&guest) 3873 .args(["--cpus", "boot=1"]) 3874 .args(["--memory", "size=512M"]) 3875 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3876 .args(["--cmdline", &cmdline]) 3877 .default_disks() 3878 .default_net() 3879 .args(["--serial", "null"]) 3880 .args(["--console", "pty"]) 3881 .args(["--api-socket", &api_socket]) 3882 .spawn() 3883 
.unwrap(); 3884 3885 let r = std::panic::catch_unwind(|| { 3886 guest.wait_vm_boot(None).unwrap(); 3887 // Get pty fd for console 3888 let console_path = get_pty_path(&api_socket, "console"); 3889 // TODO: Get serial pty test working 3890 let mut cf = std::fs::OpenOptions::new() 3891 .write(true) 3892 .read(true) 3893 .open(console_path) 3894 .unwrap(); 3895 3896 // Some dumb sleeps but we don't want to write 3897 // before the console is up and we don't want 3898 // to try and write the next line before the 3899 // login process is ready. 3900 thread::sleep(std::time::Duration::new(5, 0)); 3901 assert_eq!(cf.write(b"cloud\n").unwrap(), 6); 3902 thread::sleep(std::time::Duration::new(2, 0)); 3903 assert_eq!(cf.write(b"cloud123\n").unwrap(), 9); 3904 thread::sleep(std::time::Duration::new(2, 0)); 3905 assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22); 3906 thread::sleep(std::time::Duration::new(2, 0)); 3907 3908 // read pty and ensure they have a login shell 3909 // some fairly hacky workarounds to avoid looping 3910 // forever in case the channel is blocked getting output 3911 let ptyc = pty_read(cf); 3912 let mut empty = 0; 3913 let mut prev = String::new(); 3914 loop { 3915 thread::sleep(std::time::Duration::new(2, 0)); 3916 match ptyc.try_recv() { 3917 Ok(line) => { 3918 empty = 0; 3919 prev = prev + &line; 3920 if prev.contains("test_pty_console") { 3921 break; 3922 } 3923 } 3924 Err(mpsc::TryRecvError::Empty) => { 3925 empty += 1; 3926 assert!(empty <= 5, "No login on pty"); 3927 } 3928 _ => panic!("No login on pty"), 3929 } 3930 } 3931 3932 guest.ssh_command("sudo shutdown -h now").unwrap(); 3933 }); 3934 3935 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 3936 let _ = child.kill(); 3937 let output = child.wait_with_output().unwrap(); 3938 handle_child_output(r, &output); 3939 3940 let r = std::panic::catch_unwind(|| { 3941 // Check that the cloud-hypervisor binary actually terminated 3942 assert!(output.status.success()) 3943 }); 3944 handle_child_output(r, &output); 3945 } 3946 3947 #[test] 3948 fn test_virtio_console() { 3949 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3950 let guest = Guest::new(Box::new(focal)); 3951 3952 let kernel_path = direct_kernel_boot_path(); 3953 3954 let mut child = GuestCommand::new(&guest) 3955 .args(["--cpus", "boot=1"]) 3956 .args(["--memory", "size=512M"]) 3957 .args(["--kernel", kernel_path.to_str().unwrap()]) 3958 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3959 .default_disks() 3960 .default_net() 3961 .args(["--console", "tty"]) 3962 .args(["--serial", "null"]) 3963 .capture_output() 3964 .spawn() 3965 .unwrap(); 3966 3967 let text = String::from("On a branch floating down river a cricket, singing."); 3968 let cmd = format!("echo {text} | sudo tee /dev/hvc0"); 3969 3970 let r = std::panic::catch_unwind(|| { 3971 guest.wait_vm_boot(None).unwrap(); 3972 3973 assert!(guest 3974 .does_device_vendor_pair_match("0x1043", "0x1af4") 3975 .unwrap_or_default()); 3976 3977 guest.ssh_command(&cmd).unwrap(); 3978 }); 3979 3980 let _ = child.kill(); 3981 let output = child.wait_with_output().unwrap(); 3982 handle_child_output(r, &output); 3983 3984 let r = std::panic::catch_unwind(|| { 3985 assert!(String::from_utf8_lossy(&output.stdout).contains(&text)); 3986 }); 3987 3988 handle_child_output(r, &output); 3989 } 3990 3991 #[test] 3992 fn test_console_file() { 3993 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3994 let guest = Guest::new(Box::new(focal)); 3995 3996 let console_path = 
guest.tmp_dir.as_path().join("/tmp/console-output"); 3997 let mut child = GuestCommand::new(&guest) 3998 .args(["--cpus", "boot=1"]) 3999 .args(["--memory", "size=512M"]) 4000 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4001 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4002 .default_disks() 4003 .default_net() 4004 .args([ 4005 "--console", 4006 format!("file={}", console_path.to_str().unwrap()).as_str(), 4007 ]) 4008 .capture_output() 4009 .spawn() 4010 .unwrap(); 4011 4012 guest.wait_vm_boot(None).unwrap(); 4013 4014 guest.ssh_command("sudo shutdown -h now").unwrap(); 4015 4016 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4017 let _ = child.kill(); 4018 let output = child.wait_with_output().unwrap(); 4019 4020 let r = std::panic::catch_unwind(|| { 4021 // Check that the cloud-hypervisor binary actually terminated 4022 assert!(output.status.success()); 4023 4024 // Do this check after shutdown of the VM as an easy way to ensure 4025 // all writes are flushed to disk 4026 let mut f = std::fs::File::open(console_path).unwrap(); 4027 let mut buf = String::new(); 4028 f.read_to_string(&mut buf).unwrap(); 4029 4030 if !buf.contains(CONSOLE_TEST_STRING) { 4031 eprintln!( 4032 "\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ====" 4033 ); 4034 } 4035 assert!(buf.contains(CONSOLE_TEST_STRING)); 4036 }); 4037 4038 handle_child_output(r, &output); 4039 } 4040 4041 #[test] 4042 #[cfg(target_arch = "x86_64")] 4043 #[cfg(not(feature = "mshv"))] 4044 #[ignore = "See #4324"] 4045 // The VFIO integration test starts cloud-hypervisor guest with 3 TAP 4046 // backed networking interfaces, bound through a simple bridge on the host. 4047 // So if the nested cloud-hypervisor succeeds in getting a directly 4048 // assigned interface from its cloud-hypervisor host, we should be able to 4049 // ssh into it, and verify that it's running with the right kernel command 4050 // line (We tag the command line from cloud-hypervisor for that purpose). 4051 // The third device is added to validate that hotplug works correctly since 4052 // it is being added to the L2 VM through hotplugging mechanism. 4053 // Also, we pass-through a virtio-blk device to the L2 VM to test the 32-bit 4054 // vfio device support 4055 fn test_vfio() { 4056 setup_vfio_network_interfaces(); 4057 4058 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4059 let guest = Guest::new_from_ip_range(Box::new(focal), "172.18", 0); 4060 4061 let mut workload_path = dirs::home_dir().unwrap(); 4062 workload_path.push("workloads"); 4063 4064 let kernel_path = direct_kernel_boot_path(); 4065 4066 let mut vfio_path = workload_path.clone(); 4067 vfio_path.push("vfio"); 4068 4069 let mut cloud_init_vfio_base_path = vfio_path.clone(); 4070 cloud_init_vfio_base_path.push("cloudinit.img"); 4071 4072 // We copy our cloudinit into the vfio mount point, for the nested 4073 // cloud-hypervisor guest to use. 
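// Note: rate_limited_copy() is a test_infra helper which, as its name
// suggests, throttles the copy so that parallel test runs do not saturate
// host disk I/O while duplicating the cloud-init image.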
4074 rate_limited_copy( 4075 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 4076 &cloud_init_vfio_base_path, 4077 ) 4078 .expect("copying of cloud-init disk failed"); 4079 4080 let mut vfio_disk_path = workload_path.clone(); 4081 vfio_disk_path.push("vfio.img"); 4082 4083 // Create the vfio disk image 4084 let output = Command::new("mkfs.ext4") 4085 .arg("-d") 4086 .arg(vfio_path.to_str().unwrap()) 4087 .arg(vfio_disk_path.to_str().unwrap()) 4088 .arg("2g") 4089 .output() 4090 .unwrap(); 4091 if !output.status.success() { 4092 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 4093 panic!("mkfs.ext4 command generated an error"); 4094 } 4095 4096 let mut blk_file_path = workload_path; 4097 blk_file_path.push("blk.img"); 4098 4099 let vfio_tap0 = "vfio-tap0"; 4100 let vfio_tap1 = "vfio-tap1"; 4101 let vfio_tap2 = "vfio-tap2"; 4102 let vfio_tap3 = "vfio-tap3"; 4103 4104 let mut child = GuestCommand::new(&guest) 4105 .args(["--cpus", "boot=4"]) 4106 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 4107 .args(["--kernel", kernel_path.to_str().unwrap()]) 4108 .args([ 4109 "--disk", 4110 format!( 4111 "path={}", 4112 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 4113 ) 4114 .as_str(), 4115 "--disk", 4116 format!( 4117 "path={}", 4118 guest.disk_config.disk(DiskType::CloudInit).unwrap() 4119 ) 4120 .as_str(), 4121 "--disk", 4122 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 4123 "--disk", 4124 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 4125 ]) 4126 .args([ 4127 "--cmdline", 4128 format!( 4129 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 4130 ) 4131 .as_str(), 4132 ]) 4133 .args([ 4134 "--net", 4135 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 4136 "--net", 4137 format!( 4138 "tap={},mac={},iommu=on", 4139 vfio_tap1, guest.network.l2_guest_mac1 4140 ) 4141 .as_str(), 4142 "--net", 4143 format!( 4144 "tap={},mac={},iommu=on", 4145 vfio_tap2, guest.network.l2_guest_mac2 4146 ) 4147 .as_str(), 4148 "--net", 4149 format!( 4150 "tap={},mac={},iommu=on", 4151 vfio_tap3, guest.network.l2_guest_mac3 4152 ) 4153 .as_str(), 4154 ]) 4155 .capture_output() 4156 .spawn() 4157 .unwrap(); 4158 4159 thread::sleep(std::time::Duration::new(30, 0)); 4160 4161 let r = std::panic::catch_unwind(|| { 4162 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 4163 thread::sleep(std::time::Duration::new(120, 0)); 4164 4165 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 4166 // added to its kernel command line. 4167 // Let's ssh into it and verify that it's there. If it is it means 4168 // we're in the right guest (The L2 one) because the QEMU L1 guest 4169 // does not have this command line tag. 4170 assert_eq!( 4171 guest 4172 .ssh_command_l2_1("grep -c VFIOTAG /proc/cmdline") 4173 .unwrap() 4174 .trim() 4175 .parse::<u32>() 4176 .unwrap_or_default(), 4177 1 4178 ); 4179 4180 // Let's also verify from the second virtio-net device passed to 4181 // the L2 VM. 4182 assert_eq!( 4183 guest 4184 .ssh_command_l2_2("grep -c VFIOTAG /proc/cmdline") 4185 .unwrap() 4186 .trim() 4187 .parse::<u32>() 4188 .unwrap_or_default(), 4189 1 4190 ); 4191 4192 // Check the amount of PCI devices appearing in L2 VM. 
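// The count of 8 is the baseline for the L2 guest's PCI topology (its
// default devices plus the VFIO devices assigned from L1); the hotplug
// steps below check that it grows to 9 and then drops back to 8.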
4193 assert_eq!( 4194 guest 4195 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4196 .unwrap() 4197 .trim() 4198 .parse::<u32>() 4199 .unwrap_or_default(), 4200 8, 4201 ); 4202 4203 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 4204 assert_eq!( 4205 guest 4206 .ssh_command_l2_1("lsblk | grep vdc | grep -c 16M") 4207 .unwrap() 4208 .trim() 4209 .parse::<u32>() 4210 .unwrap_or_default(), 4211 1 4212 ); 4213 4214 // Hotplug an extra virtio-net device through L2 VM. 4215 guest 4216 .ssh_command_l1( 4217 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 4218 ) 4219 .unwrap(); 4220 guest 4221 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 4222 .unwrap(); 4223 let vfio_hotplug_output = guest 4224 .ssh_command_l1( 4225 "sudo /mnt/ch-remote \ 4226 --api-socket /tmp/ch_api.sock \ 4227 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 4228 ) 4229 .unwrap(); 4230 assert!(vfio_hotplug_output.contains("{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}")); 4231 4232 thread::sleep(std::time::Duration::new(10, 0)); 4233 4234 // Let's also verify from the third virtio-net device passed to 4235 // the L2 VM. This third device has been hotplugged through the L2 4236 // VM, so this is our way to validate hotplug works for VFIO PCI. 4237 assert_eq!( 4238 guest 4239 .ssh_command_l2_3("grep -c VFIOTAG /proc/cmdline") 4240 .unwrap() 4241 .trim() 4242 .parse::<u32>() 4243 .unwrap_or_default(), 4244 1 4245 ); 4246 4247 // Check the amount of PCI devices appearing in L2 VM. 4248 // There should be one more device than before, raising the count 4249 // up to 9 PCI devices. 4250 assert_eq!( 4251 guest 4252 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4253 .unwrap() 4254 .trim() 4255 .parse::<u32>() 4256 .unwrap_or_default(), 4257 9, 4258 ); 4259 4260 // Let's now verify that we can correctly remove the virtio-net 4261 // device through the "remove-device" command responsible for 4262 // unplugging VFIO devices. 4263 guest 4264 .ssh_command_l1( 4265 "sudo /mnt/ch-remote \ 4266 --api-socket /tmp/ch_api.sock \ 4267 remove-device vfio123", 4268 ) 4269 .unwrap(); 4270 thread::sleep(std::time::Duration::new(10, 0)); 4271 4272 // Check the amount of PCI devices appearing in L2 VM is back down 4273 // to 8 devices. 4274 assert_eq!( 4275 guest 4276 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4277 .unwrap() 4278 .trim() 4279 .parse::<u32>() 4280 .unwrap_or_default(), 4281 8, 4282 ); 4283 4284 // Perform memory hotplug in L2 and validate the memory is showing 4285 // up as expected. In order to check, we will use the virtio-net 4286 // device already passed through L2 as a VFIO device, this will 4287 // verify that VFIO devices are functional with memory hotplug. 
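// The resize request is issued from the L1 guest through ch-remote against
// the nested VMM's API socket, while the memory checks reach the L2 guest
// over its VFIO-assigned virtio-net interface.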
4288 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 4289 guest 4290 .ssh_command_l2_1( 4291 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 4292 ) 4293 .unwrap(); 4294 guest 4295 .ssh_command_l1( 4296 "sudo /mnt/ch-remote \ 4297 --api-socket /tmp/ch_api.sock \ 4298 resize --memory=1073741824", 4299 ) 4300 .unwrap(); 4301 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 4302 }); 4303 4304 let _ = child.kill(); 4305 let output = child.wait_with_output().unwrap(); 4306 4307 cleanup_vfio_network_interfaces(); 4308 4309 handle_child_output(r, &output); 4310 } 4311 4312 #[test] 4313 fn test_direct_kernel_boot_noacpi() { 4314 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4315 let guest = Guest::new(Box::new(focal)); 4316 4317 let kernel_path = direct_kernel_boot_path(); 4318 4319 let mut child = GuestCommand::new(&guest) 4320 .args(["--cpus", "boot=1"]) 4321 .args(["--memory", "size=512M"]) 4322 .args(["--kernel", kernel_path.to_str().unwrap()]) 4323 .args([ 4324 "--cmdline", 4325 format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(), 4326 ]) 4327 .default_disks() 4328 .default_net() 4329 .capture_output() 4330 .spawn() 4331 .unwrap(); 4332 4333 let r = std::panic::catch_unwind(|| { 4334 guest.wait_vm_boot(None).unwrap(); 4335 4336 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 4337 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4338 }); 4339 4340 let _ = child.kill(); 4341 let output = child.wait_with_output().unwrap(); 4342 4343 handle_child_output(r, &output); 4344 } 4345 4346 #[test] 4347 fn test_virtio_vsock() { 4348 _test_virtio_vsock(false) 4349 } 4350 4351 #[test] 4352 fn test_virtio_vsock_hotplug() { 4353 _test_virtio_vsock(true); 4354 } 4355 4356 #[test] 4357 fn test_api_http_shutdown() { 4358 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4359 let guest = Guest::new(Box::new(focal)); 4360 4361 _test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest) 4362 } 4363 4364 #[test] 4365 fn test_api_http_delete() { 4366 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4367 let guest = Guest::new(Box::new(focal)); 4368 4369 _test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest); 4370 } 4371 4372 #[test] 4373 fn test_api_http_pause_resume() { 4374 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4375 let guest = Guest::new(Box::new(focal)); 4376 4377 _test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest) 4378 } 4379 4380 #[test] 4381 fn test_api_http_create_boot() { 4382 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4383 let guest = Guest::new(Box::new(focal)); 4384 4385 _test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest) 4386 } 4387 4388 #[test] 4389 fn test_virtio_iommu() { 4390 _test_virtio_iommu(cfg!(target_arch = "x86_64")) 4391 } 4392 4393 #[test] 4394 // We cannot force the software running in the guest to reprogram the BAR 4395 // with some different addresses, but we have a reliable way of testing it 4396 // with a standard Linux kernel. 4397 // By removing a device from the PCI tree, and then rescanning the tree, 4398 // Linux consistently chooses to reorganize the PCI device BARs to other 4399 // locations in the guest address space. 4400 // This test creates a dedicated PCI network device to be checked as being 4401 // properly probed first, then removing it, and adding it again by doing a 4402 // rescan. 
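// The check itself compares the first BAR value read from
// /sys/bus/pci/devices/0000:00:05.0/resource before the remove and after
// the rescan; the two addresses are expected to differ.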
fn test_pci_bar_reprogramming() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = edk2_path();

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args([
            "--net",
            guest.default_net_string().as_str(),
            "--net",
            "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
        ])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // 2 network interfaces + default localhost ==> 3 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            3
        );

        let init_bar_addr = guest
            .ssh_command(
                "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
            )
            .unwrap();

        // Remove the PCI device
        guest
            .ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove")
            .unwrap();

        // Only 1 network interface left + default localhost ==> 2 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // Rescan the PCI bus so the removed device is rediscovered
        guest
            .ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan")
            .unwrap();

        // Back to 2 network interfaces + default localhost ==> 3 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            3
        );

        let new_bar_addr = guest
            .ssh_command(
                "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
            )
            .unwrap();

        // Let's compare the BAR addresses for our virtio-net device.
        // They should be different as we expect the BAR reprogramming
        // to have happened.
4489 assert_ne!(init_bar_addr, new_bar_addr); 4490 }); 4491 4492 let _ = child.kill(); 4493 let output = child.wait_with_output().unwrap(); 4494 4495 handle_child_output(r, &output); 4496 } 4497 4498 #[test] 4499 fn test_memory_mergeable_off() { 4500 test_memory_mergeable(false) 4501 } 4502 4503 #[test] 4504 #[cfg(target_arch = "x86_64")] 4505 fn test_cpu_hotplug() { 4506 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4507 let guest = Guest::new(Box::new(focal)); 4508 let api_socket = temp_api_path(&guest.tmp_dir); 4509 4510 let kernel_path = direct_kernel_boot_path(); 4511 4512 let mut child = GuestCommand::new(&guest) 4513 .args(["--cpus", "boot=2,max=4"]) 4514 .args(["--memory", "size=512M"]) 4515 .args(["--kernel", kernel_path.to_str().unwrap()]) 4516 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4517 .default_disks() 4518 .default_net() 4519 .args(["--api-socket", &api_socket]) 4520 .capture_output() 4521 .spawn() 4522 .unwrap(); 4523 4524 let r = std::panic::catch_unwind(|| { 4525 guest.wait_vm_boot(None).unwrap(); 4526 4527 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4528 4529 // Resize the VM 4530 let desired_vcpus = 4; 4531 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4532 4533 guest 4534 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4535 .unwrap(); 4536 guest 4537 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4538 .unwrap(); 4539 thread::sleep(std::time::Duration::new(10, 0)); 4540 assert_eq!( 4541 guest.get_cpu_count().unwrap_or_default(), 4542 u32::from(desired_vcpus) 4543 ); 4544 4545 guest.reboot_linux(0, None); 4546 4547 assert_eq!( 4548 guest.get_cpu_count().unwrap_or_default(), 4549 u32::from(desired_vcpus) 4550 ); 4551 4552 // Resize the VM 4553 let desired_vcpus = 2; 4554 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4555 4556 thread::sleep(std::time::Duration::new(10, 0)); 4557 assert_eq!( 4558 guest.get_cpu_count().unwrap_or_default(), 4559 u32::from(desired_vcpus) 4560 ); 4561 4562 // Resize the VM back up to 4 4563 let desired_vcpus = 4; 4564 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4565 4566 guest 4567 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4568 .unwrap(); 4569 guest 4570 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4571 .unwrap(); 4572 thread::sleep(std::time::Duration::new(10, 0)); 4573 assert_eq!( 4574 guest.get_cpu_count().unwrap_or_default(), 4575 u32::from(desired_vcpus) 4576 ); 4577 }); 4578 4579 let _ = child.kill(); 4580 let output = child.wait_with_output().unwrap(); 4581 4582 handle_child_output(r, &output); 4583 } 4584 4585 #[test] 4586 fn test_memory_hotplug() { 4587 #[cfg(target_arch = "aarch64")] 4588 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 4589 #[cfg(target_arch = "x86_64")] 4590 let focal_image = FOCAL_IMAGE_NAME.to_string(); 4591 let focal = UbuntuDiskConfig::new(focal_image); 4592 let guest = Guest::new(Box::new(focal)); 4593 let api_socket = temp_api_path(&guest.tmp_dir); 4594 4595 #[cfg(target_arch = "aarch64")] 4596 let kernel_path = edk2_path(); 4597 #[cfg(target_arch = "x86_64")] 4598 let kernel_path = direct_kernel_boot_path(); 4599 4600 let mut child = GuestCommand::new(&guest) 4601 .args(["--cpus", "boot=2,max=4"]) 4602 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4603 .args(["--kernel", kernel_path.to_str().unwrap()]) 4604 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4605 .default_disks() 4606 .default_net() 
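// A balloon device is created up front (size=0, i.e. nothing reclaimed yet)
// so that the balloon resize requests later in this test have a device to act on.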
4607 .args(["--balloon", "size=0"]) 4608 .args(["--api-socket", &api_socket]) 4609 .capture_output() 4610 .spawn() 4611 .unwrap(); 4612 4613 let r = std::panic::catch_unwind(|| { 4614 guest.wait_vm_boot(None).unwrap(); 4615 4616 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4617 4618 guest.enable_memory_hotplug(); 4619 4620 // Add RAM to the VM 4621 let desired_ram = 1024 << 20; 4622 resize_command(&api_socket, None, Some(desired_ram), None, None); 4623 4624 thread::sleep(std::time::Duration::new(10, 0)); 4625 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4626 4627 // Use balloon to remove RAM from the VM 4628 let desired_balloon = 512 << 20; 4629 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4630 4631 thread::sleep(std::time::Duration::new(10, 0)); 4632 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4633 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4634 4635 guest.reboot_linux(0, None); 4636 4637 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4638 4639 // Use balloon add RAM to the VM 4640 let desired_balloon = 0; 4641 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4642 4643 thread::sleep(std::time::Duration::new(10, 0)); 4644 4645 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4646 4647 guest.enable_memory_hotplug(); 4648 4649 // Add RAM to the VM 4650 let desired_ram = 2048 << 20; 4651 resize_command(&api_socket, None, Some(desired_ram), None, None); 4652 4653 thread::sleep(std::time::Duration::new(10, 0)); 4654 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4655 4656 // Remove RAM to the VM (only applies after reboot) 4657 let desired_ram = 1024 << 20; 4658 resize_command(&api_socket, None, Some(desired_ram), None, None); 4659 4660 guest.reboot_linux(1, None); 4661 4662 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4663 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4664 }); 4665 4666 let _ = child.kill(); 4667 let output = child.wait_with_output().unwrap(); 4668 4669 handle_child_output(r, &output); 4670 } 4671 4672 #[test] 4673 #[cfg(not(feature = "mshv"))] 4674 fn test_virtio_mem() { 4675 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4676 let guest = Guest::new(Box::new(focal)); 4677 let api_socket = temp_api_path(&guest.tmp_dir); 4678 4679 let kernel_path = direct_kernel_boot_path(); 4680 4681 let mut child = GuestCommand::new(&guest) 4682 .args(["--cpus", "boot=2,max=4"]) 4683 .args([ 4684 "--memory", 4685 "size=512M,hotplug_method=virtio-mem,hotplug_size=8192M", 4686 ]) 4687 .args(["--kernel", kernel_path.to_str().unwrap()]) 4688 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4689 .default_disks() 4690 .default_net() 4691 .args(["--api-socket", &api_socket]) 4692 .capture_output() 4693 .spawn() 4694 .unwrap(); 4695 4696 let r = std::panic::catch_unwind(|| { 4697 guest.wait_vm_boot(None).unwrap(); 4698 4699 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4700 4701 guest.enable_memory_hotplug(); 4702 4703 // Add RAM to the VM 4704 let desired_ram = 1024 << 20; 4705 resize_command(&api_socket, None, Some(desired_ram), None, None); 4706 4707 thread::sleep(std::time::Duration::new(10, 0)); 4708 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4709 4710 // Add RAM to the VM 4711 let desired_ram = 2048 << 20; 4712 resize_command(&api_socket, None, Some(desired_ram), None, None); 4713 4714 
thread::sleep(std::time::Duration::new(10, 0)); 4715 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4716 4717 // Remove RAM from the VM 4718 let desired_ram = 1024 << 20; 4719 resize_command(&api_socket, None, Some(desired_ram), None, None); 4720 4721 thread::sleep(std::time::Duration::new(10, 0)); 4722 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4723 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4724 4725 guest.reboot_linux(0, None); 4726 4727 // Check the amount of memory after reboot is 1GiB 4728 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4729 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4730 4731 // Check we can still resize to 512MiB 4732 let desired_ram = 512 << 20; 4733 resize_command(&api_socket, None, Some(desired_ram), None, None); 4734 thread::sleep(std::time::Duration::new(10, 0)); 4735 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4736 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4737 }); 4738 4739 let _ = child.kill(); 4740 let output = child.wait_with_output().unwrap(); 4741 4742 handle_child_output(r, &output); 4743 } 4744 4745 #[test] 4746 #[cfg(target_arch = "x86_64")] 4747 #[cfg(not(feature = "mshv"))] 4748 // Test both vCPU and memory resizing together 4749 fn test_resize() { 4750 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4751 let guest = Guest::new(Box::new(focal)); 4752 let api_socket = temp_api_path(&guest.tmp_dir); 4753 4754 let kernel_path = direct_kernel_boot_path(); 4755 4756 let mut child = GuestCommand::new(&guest) 4757 .args(["--cpus", "boot=2,max=4"]) 4758 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4759 .args(["--kernel", kernel_path.to_str().unwrap()]) 4760 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4761 .default_disks() 4762 .default_net() 4763 .args(["--api-socket", &api_socket]) 4764 .capture_output() 4765 .spawn() 4766 .unwrap(); 4767 4768 let r = std::panic::catch_unwind(|| { 4769 guest.wait_vm_boot(None).unwrap(); 4770 4771 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4772 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4773 4774 guest.enable_memory_hotplug(); 4775 4776 // Resize the VM 4777 let desired_vcpus = 4; 4778 let desired_ram = 1024 << 20; 4779 resize_command( 4780 &api_socket, 4781 Some(desired_vcpus), 4782 Some(desired_ram), 4783 None, 4784 None, 4785 ); 4786 4787 guest 4788 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4789 .unwrap(); 4790 guest 4791 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4792 .unwrap(); 4793 thread::sleep(std::time::Duration::new(10, 0)); 4794 assert_eq!( 4795 guest.get_cpu_count().unwrap_or_default(), 4796 u32::from(desired_vcpus) 4797 ); 4798 4799 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4800 }); 4801 4802 let _ = child.kill(); 4803 let output = child.wait_with_output().unwrap(); 4804 4805 handle_child_output(r, &output); 4806 } 4807 4808 #[test] 4809 fn test_memory_overhead() { 4810 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4811 let guest = Guest::new(Box::new(focal)); 4812 4813 let kernel_path = direct_kernel_boot_path(); 4814 4815 let guest_memory_size_kb = 512 * 1024; 4816 4817 let mut child = GuestCommand::new(&guest) 4818 .args(["--cpus", "boot=1"]) 4819 .args(["--memory", format!("size={guest_memory_size_kb}K").as_str()]) 4820 .args(["--kernel", kernel_path.to_str().unwrap()]) 4821 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE]) 4822 .default_disks() 4823 .capture_output() 4824 .spawn() 4825 .unwrap(); 4826 4827 thread::sleep(std::time::Duration::new(20, 0)); 4828 4829 let r = std::panic::catch_unwind(|| { 4830 let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb); 4831 eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}"); 4832 assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB); 4833 }); 4834 4835 let _ = child.kill(); 4836 let output = child.wait_with_output().unwrap(); 4837 4838 handle_child_output(r, &output); 4839 } 4840 4841 #[test] 4842 fn test_disk_hotplug() { 4843 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4844 let guest = Guest::new(Box::new(focal)); 4845 4846 #[cfg(target_arch = "x86_64")] 4847 let kernel_path = direct_kernel_boot_path(); 4848 #[cfg(target_arch = "aarch64")] 4849 let kernel_path = edk2_path(); 4850 4851 let api_socket = temp_api_path(&guest.tmp_dir); 4852 4853 let mut child = GuestCommand::new(&guest) 4854 .args(["--api-socket", &api_socket]) 4855 .args(["--cpus", "boot=1"]) 4856 .args(["--memory", "size=512M"]) 4857 .args(["--kernel", kernel_path.to_str().unwrap()]) 4858 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4859 .default_disks() 4860 .default_net() 4861 .capture_output() 4862 .spawn() 4863 .unwrap(); 4864 4865 let r = std::panic::catch_unwind(|| { 4866 guest.wait_vm_boot(None).unwrap(); 4867 4868 // Check /dev/vdc is not there 4869 assert_eq!( 4870 guest 4871 .ssh_command("lsblk | grep -c vdc.*16M || true") 4872 .unwrap() 4873 .trim() 4874 .parse::<u32>() 4875 .unwrap_or(1), 4876 0 4877 ); 4878 4879 // Now let's add the extra disk. 4880 let mut blk_file_path = dirs::home_dir().unwrap(); 4881 blk_file_path.push("workloads"); 4882 blk_file_path.push("blk.img"); 4883 let (cmd_success, cmd_output) = remote_command_w_output( 4884 &api_socket, 4885 "add-disk", 4886 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 4887 ); 4888 assert!(cmd_success); 4889 assert!(String::from_utf8_lossy(&cmd_output) 4890 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 4891 4892 thread::sleep(std::time::Duration::new(10, 0)); 4893 4894 // Check that /dev/vdc exists and the block size is 16M. 4895 assert_eq!( 4896 guest 4897 .ssh_command("lsblk | grep vdc | grep -c 16M") 4898 .unwrap() 4899 .trim() 4900 .parse::<u32>() 4901 .unwrap_or_default(), 4902 1 4903 ); 4904 // And check the block device can be read. 4905 guest 4906 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 4907 .unwrap(); 4908 4909 // Let's remove it the extra disk. 4910 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 4911 thread::sleep(std::time::Duration::new(5, 0)); 4912 // And check /dev/vdc is not there 4913 assert_eq!( 4914 guest 4915 .ssh_command("lsblk | grep -c vdc.*16M || true") 4916 .unwrap() 4917 .trim() 4918 .parse::<u32>() 4919 .unwrap_or(1), 4920 0 4921 ); 4922 4923 // And add it back to validate unplug did work correctly. 4924 let (cmd_success, cmd_output) = remote_command_w_output( 4925 &api_socket, 4926 "add-disk", 4927 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 4928 ); 4929 assert!(cmd_success); 4930 assert!(String::from_utf8_lossy(&cmd_output) 4931 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 4932 4933 thread::sleep(std::time::Duration::new(10, 0)); 4934 4935 // Check that /dev/vdc exists and the block size is 16M. 
4936 assert_eq!( 4937 guest 4938 .ssh_command("lsblk | grep vdc | grep -c 16M") 4939 .unwrap() 4940 .trim() 4941 .parse::<u32>() 4942 .unwrap_or_default(), 4943 1 4944 ); 4945 // And check the block device can be read. 4946 guest 4947 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 4948 .unwrap(); 4949 4950 // Reboot the VM. 4951 guest.reboot_linux(0, None); 4952 4953 // Check still there after reboot 4954 assert_eq!( 4955 guest 4956 .ssh_command("lsblk | grep vdc | grep -c 16M") 4957 .unwrap() 4958 .trim() 4959 .parse::<u32>() 4960 .unwrap_or_default(), 4961 1 4962 ); 4963 4964 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 4965 4966 thread::sleep(std::time::Duration::new(20, 0)); 4967 4968 // Check device has gone away 4969 assert_eq!( 4970 guest 4971 .ssh_command("lsblk | grep -c vdc.*16M || true") 4972 .unwrap() 4973 .trim() 4974 .parse::<u32>() 4975 .unwrap_or(1), 4976 0 4977 ); 4978 4979 guest.reboot_linux(1, None); 4980 4981 // Check device still absent 4982 assert_eq!( 4983 guest 4984 .ssh_command("lsblk | grep -c vdc.*16M || true") 4985 .unwrap() 4986 .trim() 4987 .parse::<u32>() 4988 .unwrap_or(1), 4989 0 4990 ); 4991 }); 4992 4993 let _ = child.kill(); 4994 let output = child.wait_with_output().unwrap(); 4995 4996 handle_child_output(r, &output); 4997 } 4998 4999 #[allow(clippy::useless_conversion)] 5000 fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String { 5001 const LOOP_CONFIGURE: u64 = 0x4c0a; 5002 const LOOP_CTL_GET_FREE: u64 = 0x4c82; 5003 const LOOP_CTL_PATH: &str = "/dev/loop-control"; 5004 const LOOP_DEVICE_PREFIX: &str = "/dev/loop"; 5005 5006 #[repr(C)] 5007 struct LoopInfo64 { 5008 lo_device: u64, 5009 lo_inode: u64, 5010 lo_rdevice: u64, 5011 lo_offset: u64, 5012 lo_sizelimit: u64, 5013 lo_number: u32, 5014 lo_encrypt_type: u32, 5015 lo_encrypt_key_size: u32, 5016 lo_flags: u32, 5017 lo_file_name: [u8; 64], 5018 lo_crypt_name: [u8; 64], 5019 lo_encrypt_key: [u8; 32], 5020 lo_init: [u64; 2], 5021 } 5022 5023 impl Default for LoopInfo64 { 5024 fn default() -> Self { 5025 LoopInfo64 { 5026 lo_device: 0, 5027 lo_inode: 0, 5028 lo_rdevice: 0, 5029 lo_offset: 0, 5030 lo_sizelimit: 0, 5031 lo_number: 0, 5032 lo_encrypt_type: 0, 5033 lo_encrypt_key_size: 0, 5034 lo_flags: 0, 5035 lo_file_name: [0; 64], 5036 lo_crypt_name: [0; 64], 5037 lo_encrypt_key: [0; 32], 5038 lo_init: [0; 2], 5039 } 5040 } 5041 } 5042 5043 #[derive(Default)] 5044 #[repr(C)] 5045 struct LoopConfig { 5046 fd: u32, 5047 block_size: u32, 5048 info: LoopInfo64, 5049 _reserved: [u64; 8], 5050 } 5051 5052 // Open loop-control device 5053 let loop_ctl_file = OpenOptions::new() 5054 .read(true) 5055 .write(true) 5056 .open(LOOP_CTL_PATH) 5057 .unwrap(); 5058 5059 // Request a free loop device 5060 let loop_device_number = unsafe { 5061 libc::ioctl( 5062 loop_ctl_file.as_raw_fd(), 5063 LOOP_CTL_GET_FREE.try_into().unwrap(), 5064 ) 5065 }; 5066 if loop_device_number < 0 { 5067 panic!("Couldn't find a free loop device"); 5068 } 5069 5070 // Create loop device path 5071 let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}"); 5072 5073 // Open loop device 5074 let loop_device_file = OpenOptions::new() 5075 .read(true) 5076 .write(true) 5077 .open(&loop_device_path) 5078 .unwrap(); 5079 5080 // Open backing file 5081 let backing_file = OpenOptions::new() 5082 .read(true) 5083 .write(true) 5084 .open(backing_file_path) 5085 .unwrap(); 5086 5087 let loop_config = LoopConfig { 5088 fd: 
backing_file.as_raw_fd() as u32, 5089 block_size, 5090 ..Default::default() 5091 }; 5092 5093 for i in 0..num_retries { 5094 let ret = unsafe { 5095 libc::ioctl( 5096 loop_device_file.as_raw_fd(), 5097 LOOP_CONFIGURE.try_into().unwrap(), 5098 &loop_config, 5099 ) 5100 }; 5101 if ret != 0 { 5102 if i < num_retries - 1 { 5103 println!( 5104 "Iteration {}: Failed to configure the loop device {}: {}", 5105 i, 5106 loop_device_path, 5107 std::io::Error::last_os_error() 5108 ); 5109 } else { 5110 panic!( 5111 "Failed {} times trying to configure the loop device {}: {}", 5112 num_retries, 5113 loop_device_path, 5114 std::io::Error::last_os_error() 5115 ); 5116 } 5117 } else { 5118 break; 5119 } 5120 5121 // Wait for a bit before retrying 5122 thread::sleep(std::time::Duration::new(5, 0)); 5123 } 5124 5125 loop_device_path 5126 } 5127 5128 #[test] 5129 fn test_virtio_block_topology() { 5130 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5131 let guest = Guest::new(Box::new(focal)); 5132 5133 let kernel_path = direct_kernel_boot_path(); 5134 let test_disk_path = guest.tmp_dir.as_path().join("test.img"); 5135 5136 let output = exec_host_command_output( 5137 format!( 5138 "qemu-img create -f raw {} 16M", 5139 test_disk_path.to_str().unwrap() 5140 ) 5141 .as_str(), 5142 ); 5143 if !output.status.success() { 5144 let stdout = String::from_utf8_lossy(&output.stdout); 5145 let stderr = String::from_utf8_lossy(&output.stderr); 5146 panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}"); 5147 } 5148 5149 let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5); 5150 5151 let mut child = GuestCommand::new(&guest) 5152 .args(["--cpus", "boot=1"]) 5153 .args(["--memory", "size=512M"]) 5154 .args(["--kernel", kernel_path.to_str().unwrap()]) 5155 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5156 .args([ 5157 "--disk", 5158 format!( 5159 "path={}", 5160 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5161 ) 5162 .as_str(), 5163 "--disk", 5164 format!( 5165 "path={}", 5166 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5167 ) 5168 .as_str(), 5169 "--disk", 5170 format!("path={}", &loop_dev).as_str(), 5171 ]) 5172 .default_net() 5173 .capture_output() 5174 .spawn() 5175 .unwrap(); 5176 5177 let r = std::panic::catch_unwind(|| { 5178 guest.wait_vm_boot(None).unwrap(); 5179 5180 // MIN-IO column 5181 assert_eq!( 5182 guest 5183 .ssh_command("lsblk -t| grep vdc | awk '{print $3}'") 5184 .unwrap() 5185 .trim() 5186 .parse::<u32>() 5187 .unwrap_or_default(), 5188 4096 5189 ); 5190 // PHY-SEC column 5191 assert_eq!( 5192 guest 5193 .ssh_command("lsblk -t| grep vdc | awk '{print $5}'") 5194 .unwrap() 5195 .trim() 5196 .parse::<u32>() 5197 .unwrap_or_default(), 5198 4096 5199 ); 5200 // LOG-SEC column 5201 assert_eq!( 5202 guest 5203 .ssh_command("lsblk -t| grep vdc | awk '{print $6}'") 5204 .unwrap() 5205 .trim() 5206 .parse::<u32>() 5207 .unwrap_or_default(), 5208 4096 5209 ); 5210 }); 5211 5212 let _ = child.kill(); 5213 let output = child.wait_with_output().unwrap(); 5214 5215 handle_child_output(r, &output); 5216 5217 Command::new("losetup") 5218 .args(["-d", &loop_dev]) 5219 .output() 5220 .expect("loop device not found"); 5221 } 5222 5223 #[test] 5224 fn test_virtio_balloon_deflate_on_oom() { 5225 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5226 let guest = Guest::new(Box::new(focal)); 5227 5228 let kernel_path = direct_kernel_boot_path(); 5229 5230 let api_socket = temp_api_path(&guest.tmp_dir); 5231 5232 //Let's start a 
// 4G guest with the balloon occupying 2G of memory
    let mut child = GuestCommand::new(&guest)
        .args(["--api-socket", &api_socket])
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=4G"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--balloon", "size=2G,deflate_on_oom=on"])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Wait for balloon memory's initialization and check its size.
        // The virtio-balloon driver might take a few seconds to report the
        // balloon effective size back to the VMM.
        thread::sleep(std::time::Duration::new(20, 0));

        let orig_balloon = balloon_size(&api_socket);
        println!("The original balloon memory size is {orig_balloon} bytes");
        assert!(orig_balloon == 2147483648);

        // Two steps to verify if the 'deflate_on_oom' parameter works.
        // 1st: run a command to trigger an OOM in the guest.
        guest
            .ssh_command("echo f | sudo tee /proc/sysrq-trigger")
            .unwrap();

        // Give some time for the OOM to happen in the guest and be reported
        // back to the host.
        thread::sleep(std::time::Duration::new(20, 0));

        // 2nd: check the balloon size to verify it has been automatically deflated
        let deflated_balloon = balloon_size(&api_socket);
        println!("After deflating, balloon memory size is {deflated_balloon} bytes");
        // Verify the balloon size deflated
        assert!(deflated_balloon < 2147483648);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
#[cfg(not(feature = "mshv"))]
fn test_virtio_balloon_free_page_reporting() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    // Let's start a 4G guest with free page reporting enabled on a balloon
    // device of initial size 0.
    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=4G"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--balloon", "size=0,free_page_reporting=on"])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let pid = child.id();
    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check the initial RSS is less than 1GiB
        let rss = process_rss_kib(pid);
        println!("RSS {rss} < 1048576");
        assert!(rss < 1048576);

        // Spawn a command inside the guest to consume 2GiB of RAM for 60
        // seconds
        let guest_ip = guest.network.guest_ip.clone();
        thread::spawn(move || {
            ssh_command_ip(
                "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60",
                &guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap();
        });

        // Wait for 50 seconds to make sure the stress command is consuming
        // the expected amount of memory.
5324 thread::sleep(std::time::Duration::new(50, 0)); 5325 let rss = process_rss_kib(pid); 5326 println!("RSS {rss} >= 2097152"); 5327 assert!(rss >= 2097152); 5328 5329 // Wait for an extra minute to make sure the stress command has 5330 // completed and that the guest reported the free pages to the VMM 5331 // through the virtio-balloon device. We expect the RSS to be under 5332 // 2GiB. 5333 thread::sleep(std::time::Duration::new(60, 0)); 5334 let rss = process_rss_kib(pid); 5335 println!("RSS {rss} < 2097152"); 5336 assert!(rss < 2097152); 5337 }); 5338 5339 let _ = child.kill(); 5340 let output = child.wait_with_output().unwrap(); 5341 5342 handle_child_output(r, &output); 5343 } 5344 5345 #[test] 5346 fn test_pmem_hotplug() { 5347 _test_pmem_hotplug(None) 5348 } 5349 5350 #[test] 5351 fn test_pmem_multi_segment_hotplug() { 5352 _test_pmem_hotplug(Some(15)) 5353 } 5354 5355 fn _test_pmem_hotplug(pci_segment: Option<u16>) { 5356 #[cfg(target_arch = "aarch64")] 5357 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 5358 #[cfg(target_arch = "x86_64")] 5359 let focal_image = FOCAL_IMAGE_NAME.to_string(); 5360 let focal = UbuntuDiskConfig::new(focal_image); 5361 let guest = Guest::new(Box::new(focal)); 5362 5363 #[cfg(target_arch = "x86_64")] 5364 let kernel_path = direct_kernel_boot_path(); 5365 #[cfg(target_arch = "aarch64")] 5366 let kernel_path = edk2_path(); 5367 5368 let api_socket = temp_api_path(&guest.tmp_dir); 5369 5370 let mut cmd = GuestCommand::new(&guest); 5371 5372 cmd.args(["--api-socket", &api_socket]) 5373 .args(["--cpus", "boot=1"]) 5374 .args(["--memory", "size=512M"]) 5375 .args(["--kernel", kernel_path.to_str().unwrap()]) 5376 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5377 .default_disks() 5378 .default_net() 5379 .capture_output(); 5380 5381 if pci_segment.is_some() { 5382 cmd.args([ 5383 "--platform", 5384 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5385 ]); 5386 } 5387 5388 let mut child = cmd.spawn().unwrap(); 5389 5390 let r = std::panic::catch_unwind(|| { 5391 guest.wait_vm_boot(None).unwrap(); 5392 5393 // Check /dev/pmem0 is not there 5394 assert_eq!( 5395 guest 5396 .ssh_command("lsblk | grep -c pmem0 || true") 5397 .unwrap() 5398 .trim() 5399 .parse::<u32>() 5400 .unwrap_or(1), 5401 0 5402 ); 5403 5404 let pmem_temp_file = TempFile::new().unwrap(); 5405 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 5406 let (cmd_success, cmd_output) = remote_command_w_output( 5407 &api_socket, 5408 "add-pmem", 5409 Some(&format!( 5410 "file={},id=test0{}", 5411 pmem_temp_file.as_path().to_str().unwrap(), 5412 if let Some(pci_segment) = pci_segment { 5413 format!(",pci_segment={pci_segment}") 5414 } else { 5415 "".to_owned() 5416 } 5417 )), 5418 ); 5419 assert!(cmd_success); 5420 if let Some(pci_segment) = pci_segment { 5421 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5422 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5423 ))); 5424 } else { 5425 assert!(String::from_utf8_lossy(&cmd_output) 5426 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5427 } 5428 5429 // Check that /dev/pmem0 exists and the block size is 128M 5430 assert_eq!( 5431 guest 5432 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5433 .unwrap() 5434 .trim() 5435 .parse::<u32>() 5436 .unwrap_or_default(), 5437 1 5438 ); 5439 5440 guest.reboot_linux(0, None); 5441 5442 // Check still there after reboot 5443 assert_eq!( 5444 guest 5445 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5446 .unwrap() 5447 .trim() 5448 
.parse::<u32>() 5449 .unwrap_or_default(), 5450 1 5451 ); 5452 5453 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5454 5455 thread::sleep(std::time::Duration::new(20, 0)); 5456 5457 // Check device has gone away 5458 assert_eq!( 5459 guest 5460 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5461 .unwrap() 5462 .trim() 5463 .parse::<u32>() 5464 .unwrap_or(1), 5465 0 5466 ); 5467 5468 guest.reboot_linux(1, None); 5469 5470 // Check still absent after reboot 5471 assert_eq!( 5472 guest 5473 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5474 .unwrap() 5475 .trim() 5476 .parse::<u32>() 5477 .unwrap_or(1), 5478 0 5479 ); 5480 }); 5481 5482 let _ = child.kill(); 5483 let output = child.wait_with_output().unwrap(); 5484 5485 handle_child_output(r, &output); 5486 } 5487 5488 #[test] 5489 fn test_net_hotplug() { 5490 _test_net_hotplug(None) 5491 } 5492 5493 #[test] 5494 fn test_net_multi_segment_hotplug() { 5495 _test_net_hotplug(Some(15)) 5496 } 5497 5498 fn _test_net_hotplug(pci_segment: Option<u16>) { 5499 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5500 let guest = Guest::new(Box::new(focal)); 5501 5502 #[cfg(target_arch = "x86_64")] 5503 let kernel_path = direct_kernel_boot_path(); 5504 #[cfg(target_arch = "aarch64")] 5505 let kernel_path = edk2_path(); 5506 5507 let api_socket = temp_api_path(&guest.tmp_dir); 5508 5509 // Boot without network 5510 let mut cmd = GuestCommand::new(&guest); 5511 5512 cmd.args(["--api-socket", &api_socket]) 5513 .args(["--cpus", "boot=1"]) 5514 .args(["--memory", "size=512M"]) 5515 .args(["--kernel", kernel_path.to_str().unwrap()]) 5516 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5517 .default_disks() 5518 .capture_output(); 5519 5520 if pci_segment.is_some() { 5521 cmd.args([ 5522 "--platform", 5523 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5524 ]); 5525 } 5526 5527 let mut child = cmd.spawn().unwrap(); 5528 5529 thread::sleep(std::time::Duration::new(20, 0)); 5530 5531 let r = std::panic::catch_unwind(|| { 5532 // Add network 5533 let (cmd_success, cmd_output) = remote_command_w_output( 5534 &api_socket, 5535 "add-net", 5536 Some( 5537 format!( 5538 "{}{},id=test0", 5539 guest.default_net_string(), 5540 if let Some(pci_segment) = pci_segment { 5541 format!(",pci_segment={pci_segment}") 5542 } else { 5543 "".to_owned() 5544 } 5545 ) 5546 .as_str(), 5547 ), 5548 ); 5549 assert!(cmd_success); 5550 5551 if let Some(pci_segment) = pci_segment { 5552 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5553 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5554 ))); 5555 } else { 5556 assert!(String::from_utf8_lossy(&cmd_output) 5557 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}")); 5558 } 5559 5560 thread::sleep(std::time::Duration::new(5, 0)); 5561 5562 // 1 network interfaces + default localhost ==> 2 interfaces 5563 assert_eq!( 5564 guest 5565 .ssh_command("ip -o link | wc -l") 5566 .unwrap() 5567 .trim() 5568 .parse::<u32>() 5569 .unwrap_or_default(), 5570 2 5571 ); 5572 5573 // Remove network 5574 assert!(remote_command(&api_socket, "remove-device", Some("test0"),)); 5575 thread::sleep(std::time::Duration::new(5, 0)); 5576 5577 let (cmd_success, cmd_output) = remote_command_w_output( 5578 &api_socket, 5579 "add-net", 5580 Some( 5581 format!( 5582 "{}{},id=test1", 5583 guest.default_net_string(), 5584 if let Some(pci_segment) = pci_segment { 5585 format!(",pci_segment={pci_segment}") 5586 } else { 5587 "".to_owned() 5588 } 5589 ) 5590 .as_str(), 5591 ), 5592 ); 
5593 assert!(cmd_success); 5594 5595 if let Some(pci_segment) = pci_segment { 5596 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5597 "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5598 ))); 5599 } else { 5600 assert!(String::from_utf8_lossy(&cmd_output) 5601 .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}")); 5602 } 5603 5604 thread::sleep(std::time::Duration::new(5, 0)); 5605 5606 // 1 network interfaces + default localhost ==> 2 interfaces 5607 assert_eq!( 5608 guest 5609 .ssh_command("ip -o link | wc -l") 5610 .unwrap() 5611 .trim() 5612 .parse::<u32>() 5613 .unwrap_or_default(), 5614 2 5615 ); 5616 5617 guest.reboot_linux(0, None); 5618 5619 // Check still there after reboot 5620 // 1 network interfaces + default localhost ==> 2 interfaces 5621 assert_eq!( 5622 guest 5623 .ssh_command("ip -o link | wc -l") 5624 .unwrap() 5625 .trim() 5626 .parse::<u32>() 5627 .unwrap_or_default(), 5628 2 5629 ); 5630 }); 5631 5632 let _ = child.kill(); 5633 let output = child.wait_with_output().unwrap(); 5634 5635 handle_child_output(r, &output); 5636 } 5637 5638 #[test] 5639 fn test_initramfs() { 5640 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5641 let guest = Guest::new(Box::new(focal)); 5642 let mut workload_path = dirs::home_dir().unwrap(); 5643 workload_path.push("workloads"); 5644 5645 #[cfg(target_arch = "x86_64")] 5646 let mut kernels = vec![direct_kernel_boot_path()]; 5647 #[cfg(target_arch = "aarch64")] 5648 let kernels = vec![direct_kernel_boot_path()]; 5649 5650 #[cfg(target_arch = "x86_64")] 5651 { 5652 let mut pvh_kernel_path = workload_path.clone(); 5653 pvh_kernel_path.push("vmlinux"); 5654 kernels.push(pvh_kernel_path); 5655 } 5656 5657 let mut initramfs_path = workload_path; 5658 initramfs_path.push("alpine_initramfs.img"); 5659 5660 let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg"); 5661 let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}"); 5662 5663 kernels.iter().for_each(|k_path| { 5664 let mut child = GuestCommand::new(&guest) 5665 .args(["--kernel", k_path.to_str().unwrap()]) 5666 .args(["--initramfs", initramfs_path.to_str().unwrap()]) 5667 .args(["--cmdline", &cmdline]) 5668 .capture_output() 5669 .spawn() 5670 .unwrap(); 5671 5672 thread::sleep(std::time::Duration::new(20, 0)); 5673 5674 let _ = child.kill(); 5675 let output = child.wait_with_output().unwrap(); 5676 5677 let r = std::panic::catch_unwind(|| { 5678 let s = String::from_utf8_lossy(&output.stdout); 5679 5680 assert_ne!(s.lines().position(|line| line == test_string), None); 5681 }); 5682 5683 handle_child_output(r, &output); 5684 }); 5685 } 5686 5687 // One thing to note about this test. The virtio-net device is heavily used 5688 // through each ssh command. There's no need to perform a dedicated test to 5689 // verify the migration went well for virtio-net. 
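// The snapshot/restore flow exercised below is: boot the source VM,
// optionally resize it through virtio-mem and the balloon, pause it,
// snapshot it to a directory, then start a second VMM that restores from
// that directory and resumes the VM.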
5690 #[test] 5691 #[cfg(not(feature = "mshv"))] 5692 fn test_snapshot_restore_hotplug_virtiomem() { 5693 _test_snapshot_restore(true); 5694 } 5695 5696 #[test] 5697 fn test_snapshot_restore_basic() { 5698 _test_snapshot_restore(false); 5699 } 5700 5701 fn _test_snapshot_restore(use_hotplug: bool) { 5702 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5703 let guest = Guest::new(Box::new(focal)); 5704 let kernel_path = direct_kernel_boot_path(); 5705 5706 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 5707 5708 let net_id = "net123"; 5709 let net_params = format!( 5710 "id={},tap=,mac={},ip={},mask=255.255.255.0", 5711 net_id, guest.network.guest_mac, guest.network.host_ip 5712 ); 5713 let mut mem_params = "size=4G"; 5714 5715 if use_hotplug { 5716 mem_params = "size=4G,hotplug_method=virtio-mem,hotplug_size=32G" 5717 } 5718 5719 let cloudinit_params = format!( 5720 "path={},iommu=on", 5721 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5722 ); 5723 5724 let socket = temp_vsock_path(&guest.tmp_dir); 5725 let event_path = temp_event_monitor_path(&guest.tmp_dir); 5726 5727 let mut child = GuestCommand::new(&guest) 5728 .args(["--api-socket", &api_socket_source]) 5729 .args(["--event-monitor", format!("path={event_path}").as_str()]) 5730 .args(["--cpus", "boot=4"]) 5731 .args(["--memory", mem_params]) 5732 .args(["--balloon", "size=0"]) 5733 .args(["--kernel", kernel_path.to_str().unwrap()]) 5734 .args([ 5735 "--disk", 5736 format!( 5737 "path={}", 5738 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5739 ) 5740 .as_str(), 5741 "--disk", 5742 cloudinit_params.as_str(), 5743 ]) 5744 .args(["--net", net_params.as_str()]) 5745 .args(["--vsock", format!("cid=3,socket={socket}").as_str()]) 5746 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5747 .capture_output() 5748 .spawn() 5749 .unwrap(); 5750 5751 let console_text = String::from("On a branch floating down river a cricket, singing."); 5752 // Create the snapshot directory 5753 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 5754 5755 let r = std::panic::catch_unwind(|| { 5756 guest.wait_vm_boot(None).unwrap(); 5757 5758 // Check the number of vCPUs 5759 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 5760 // Check the guest RAM 5761 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 5762 if use_hotplug { 5763 // Increase guest RAM with virtio-mem 5764 resize_command( 5765 &api_socket_source, 5766 None, 5767 Some(6 << 30), 5768 None, 5769 Some(&event_path), 5770 ); 5771 thread::sleep(std::time::Duration::new(5, 0)); 5772 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 5773 // Use balloon to remove RAM from the VM 5774 resize_command( 5775 &api_socket_source, 5776 None, 5777 None, 5778 Some(1 << 30), 5779 Some(&event_path), 5780 ); 5781 thread::sleep(std::time::Duration::new(5, 0)); 5782 let total_memory = guest.get_total_memory().unwrap_or_default(); 5783 assert!(total_memory > 4_800_000); 5784 assert!(total_memory < 5_760_000); 5785 } 5786 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 5787 guest.check_devices_common(Some(&socket), Some(&console_text), None); 5788 5789 // x86_64: We check that removing and adding back the virtio-net device 5790 // does not break the snapshot/restore support for virtio-pci. 5791 // This is an important thing to test as the hotplug will 5792 // trigger a PCI BAR reprogramming, which is a good way of 5793 // checking if the stored resources are correctly restored. 
5794 // Unplug the virtio-net device 5795 // AArch64: Device hotplug is currently not supported, skipping here. 5796 #[cfg(target_arch = "x86_64")] 5797 { 5798 assert!(remote_command( 5799 &api_socket_source, 5800 "remove-device", 5801 Some(net_id), 5802 )); 5803 thread::sleep(std::time::Duration::new(10, 0)); 5804 let latest_events = [&MetaEvent { 5805 event: "device-removed".to_string(), 5806 device_id: Some(net_id.to_string()), 5807 }]; 5808 assert!(check_latest_events_exact(&latest_events, &event_path)); 5809 5810 // Plug the virtio-net device again 5811 assert!(remote_command( 5812 &api_socket_source, 5813 "add-net", 5814 Some(net_params.as_str()), 5815 )); 5816 thread::sleep(std::time::Duration::new(10, 0)); 5817 } 5818 5819 // Pause the VM 5820 assert!(remote_command(&api_socket_source, "pause", None)); 5821 let latest_events = [ 5822 &MetaEvent { 5823 event: "pausing".to_string(), 5824 device_id: None, 5825 }, 5826 &MetaEvent { 5827 event: "paused".to_string(), 5828 device_id: None, 5829 }, 5830 ]; 5831 assert!(check_latest_events_exact(&latest_events, &event_path)); 5832 5833 // Take a snapshot from the VM 5834 assert!(remote_command( 5835 &api_socket_source, 5836 "snapshot", 5837 Some(format!("file://{snapshot_dir}").as_str()), 5838 )); 5839 5840 // Wait to make sure the snapshot is completed 5841 thread::sleep(std::time::Duration::new(10, 0)); 5842 5843 let latest_events = [ 5844 &MetaEvent { 5845 event: "snapshotting".to_string(), 5846 device_id: None, 5847 }, 5848 &MetaEvent { 5849 event: "snapshotted".to_string(), 5850 device_id: None, 5851 }, 5852 ]; 5853 assert!(check_latest_events_exact(&latest_events, &event_path)); 5854 }); 5855 5856 // Shutdown the source VM and check console output 5857 let _ = child.kill(); 5858 let output = child.wait_with_output().unwrap(); 5859 handle_child_output(r, &output); 5860 5861 let r = std::panic::catch_unwind(|| { 5862 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 5863 }); 5864 5865 handle_child_output(r, &output); 5866 5867 // Remove the vsock socket file. 
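// The restored VM re-creates the vsock backend at the same socket path, and
// a stale socket file left behind by the source VM could prevent it from
// binding, so it is deleted here before the restore.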
5868 Command::new("rm") 5869 .arg("-f") 5870 .arg(socket.as_str()) 5871 .output() 5872 .unwrap(); 5873 5874 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 5875 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 5876 5877 // Restore the VM from the snapshot 5878 let mut child = GuestCommand::new(&guest) 5879 .args(["--api-socket", &api_socket_restored]) 5880 .args([ 5881 "--event-monitor", 5882 format!("path={event_path_restored}").as_str(), 5883 ]) 5884 .args([ 5885 "--restore", 5886 format!("source_url=file://{snapshot_dir}").as_str(), 5887 ]) 5888 .capture_output() 5889 .spawn() 5890 .unwrap(); 5891 5892 // Wait for the VM to be restored 5893 thread::sleep(std::time::Duration::new(10, 0)); 5894 let expected_events = [ 5895 &MetaEvent { 5896 event: "starting".to_string(), 5897 device_id: None, 5898 }, 5899 &MetaEvent { 5900 event: "activated".to_string(), 5901 device_id: Some("__console".to_string()), 5902 }, 5903 &MetaEvent { 5904 event: "activated".to_string(), 5905 device_id: Some("__rng".to_string()), 5906 }, 5907 &MetaEvent { 5908 event: "restoring".to_string(), 5909 device_id: None, 5910 }, 5911 ]; 5912 assert!(check_sequential_events( 5913 &expected_events, 5914 &event_path_restored 5915 )); 5916 let latest_events = [&MetaEvent { 5917 event: "restored".to_string(), 5918 device_id: None, 5919 }]; 5920 assert!(check_latest_events_exact( 5921 &latest_events, 5922 &event_path_restored 5923 )); 5924 5925 let r = std::panic::catch_unwind(|| { 5926 // Resume the VM 5927 assert!(remote_command(&api_socket_restored, "resume", None)); 5928 let latest_events = [ 5929 &MetaEvent { 5930 event: "resuming".to_string(), 5931 device_id: None, 5932 }, 5933 &MetaEvent { 5934 event: "resumed".to_string(), 5935 device_id: None, 5936 }, 5937 ]; 5938 assert!(check_latest_events_exact( 5939 &latest_events, 5940 &event_path_restored 5941 )); 5942 5943 // Perform same checks to validate VM has been properly restored 5944 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 5945 let total_memory = guest.get_total_memory().unwrap_or_default(); 5946 if !use_hotplug { 5947 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 5948 } else { 5949 assert!(total_memory > 4_800_000); 5950 assert!(total_memory < 5_760_000); 5951 // Deflate balloon to restore entire RAM to the VM 5952 resize_command(&api_socket_restored, None, None, Some(0), None); 5953 thread::sleep(std::time::Duration::new(5, 0)); 5954 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 5955 // Decrease guest RAM with virtio-mem 5956 resize_command(&api_socket_restored, None, Some(5 << 30), None, None); 5957 thread::sleep(std::time::Duration::new(5, 0)); 5958 let total_memory = guest.get_total_memory().unwrap_or_default(); 5959 assert!(total_memory > 4_800_000); 5960 assert!(total_memory < 5_760_000); 5961 } 5962 5963 guest.check_devices_common(Some(&socket), Some(&console_text), None); 5964 }); 5965 // Shutdown the target VM and check console output 5966 let _ = child.kill(); 5967 let output = child.wait_with_output().unwrap(); 5968 handle_child_output(r, &output); 5969 5970 let r = std::panic::catch_unwind(|| { 5971 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 5972 }); 5973 5974 handle_child_output(r, &output); 5975 } 5976 5977 #[test] 5978 fn test_counters() { 5979 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5980 let guest = Guest::new(Box::new(focal)); 5981 let api_socket = temp_api_path(&guest.tmp_dir); 
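        // The test boots a minimal guest, samples the VMM counters over the API
        // socket, generates some block and network I/O (the `dd` write plus the
        // SSH traffic itself), and then verifies that the second sample is
        // strictly greater than the first. The `>` comparison relies on the
        // ordering implemented by the counters helper in this test suite,
        // presumably element-wise across the reported counters.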
5982 5983 let mut cmd = GuestCommand::new(&guest); 5984 cmd.args(["--cpus", "boot=1"]) 5985 .args(["--memory", "size=512M"]) 5986 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 5987 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5988 .default_disks() 5989 .args(["--net", guest.default_net_string().as_str()]) 5990 .args(["--api-socket", &api_socket]) 5991 .capture_output(); 5992 5993 let mut child = cmd.spawn().unwrap(); 5994 5995 let r = std::panic::catch_unwind(|| { 5996 guest.wait_vm_boot(None).unwrap(); 5997 5998 let orig_counters = get_counters(&api_socket); 5999 guest 6000 .ssh_command("dd if=/dev/zero of=test count=8 bs=1M") 6001 .unwrap(); 6002 6003 let new_counters = get_counters(&api_socket); 6004 6005 // Check that all the counters have increased 6006 assert!(new_counters > orig_counters); 6007 }); 6008 6009 let _ = child.kill(); 6010 let output = child.wait_with_output().unwrap(); 6011 6012 handle_child_output(r, &output); 6013 } 6014 6015 #[test] 6016 #[cfg(feature = "guest_debug")] 6017 fn test_coredump() { 6018 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6019 let guest = Guest::new(Box::new(focal)); 6020 let api_socket = temp_api_path(&guest.tmp_dir); 6021 6022 let mut cmd = GuestCommand::new(&guest); 6023 cmd.args(["--cpus", "boot=4"]) 6024 .args(["--memory", "size=4G"]) 6025 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6026 .default_disks() 6027 .args(["--net", guest.default_net_string().as_str()]) 6028 .args(["--api-socket", &api_socket]) 6029 .capture_output(); 6030 6031 let mut child = cmd.spawn().unwrap(); 6032 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6033 6034 let r = std::panic::catch_unwind(|| { 6035 guest.wait_vm_boot(None).unwrap(); 6036 6037 assert!(remote_command(&api_socket, "pause", None)); 6038 6039 assert!(remote_command( 6040 &api_socket, 6041 "coredump", 6042 Some(format!("file://{vmcore_file}").as_str()), 6043 )); 6044 6045 // The number of CORE notes should equal the number of vCPUs 6046 let readelf_core_num_cmd = 6047 format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l"); 6048 let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd); 6049 assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4"); 6050 6051 // The number of QEMU notes should equal the number of vCPUs 6052 let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l"); 6053 let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd); 6054 assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4"); 6055 }); 6056 6057 let _ = child.kill(); 6058 let output = child.wait_with_output().unwrap(); 6059 6060 handle_child_output(r, &output); 6061 } 6062 6063 #[test] 6064 fn test_watchdog() { 6065 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6066 let guest = Guest::new(Box::new(focal)); 6067 let api_socket = temp_api_path(&guest.tmp_dir); 6068 6069 let kernel_path = direct_kernel_boot_path(); 6070 6071 let mut cmd = GuestCommand::new(&guest); 6072 cmd.args(["--cpus", "boot=1"]) 6073 .args(["--memory", "size=512M"]) 6074 .args(["--kernel", kernel_path.to_str().unwrap()]) 6075 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6076 .default_disks() 6077 .args(["--net", guest.default_net_string().as_str()]) 6078 .args(["--watchdog"]) 6079 .args(["--api-socket", &api_socket]) 6080 .capture_output(); 6081 6082 let mut child = cmd.spawn().unwrap(); 6083 6084 let r = std::panic::catch_unwind(|| { 6085 guest.wait_vm_boot(None).unwrap(); 6086 6087 let
mut expected_reboot_count = 1; 6088 6089 // Enable the watchdog with a 15s timeout 6090 enable_guest_watchdog(&guest, 15); 6091 6092 // Reboot and check that systemd has activated the watchdog 6093 guest.ssh_command("sudo reboot").unwrap(); 6094 guest.wait_vm_boot(None).unwrap(); 6095 expected_reboot_count += 1; 6096 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6097 assert_eq!( 6098 guest 6099 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 6100 .unwrap() 6101 .trim() 6102 .parse::<u32>() 6103 .unwrap_or_default(), 6104 2 6105 ); 6106 6107 // Allow some normal time to elapse to check we don't get spurious reboots 6108 thread::sleep(std::time::Duration::new(40, 0)); 6109 // Check no reboot 6110 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6111 6112 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 6113 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 6114 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 6115 guest.wait_vm_boot(Some(50)).unwrap(); 6116 // Check a reboot is triggerred by the watchdog 6117 expected_reboot_count += 1; 6118 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6119 6120 #[cfg(target_arch = "x86_64")] 6121 { 6122 // Now pause the VM and remain offline for 30s 6123 assert!(remote_command(&api_socket, "pause", None)); 6124 thread::sleep(std::time::Duration::new(30, 0)); 6125 assert!(remote_command(&api_socket, "resume", None)); 6126 6127 // Check no reboot 6128 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6129 } 6130 }); 6131 6132 let _ = child.kill(); 6133 let output = child.wait_with_output().unwrap(); 6134 6135 handle_child_output(r, &output); 6136 } 6137 6138 #[test] 6139 fn test_tap_from_fd() { 6140 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6141 let guest = Guest::new(Box::new(focal)); 6142 let kernel_path = direct_kernel_boot_path(); 6143 6144 // Create a TAP interface with multi-queue enabled 6145 let num_queue_pairs: usize = 2; 6146 6147 use std::str::FromStr; 6148 let taps = net_util::open_tap( 6149 Some("chtap0"), 6150 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 6151 None, 6152 &mut None, 6153 None, 6154 num_queue_pairs, 6155 Some(libc::O_RDWR | libc::O_NONBLOCK), 6156 ) 6157 .unwrap(); 6158 6159 let mut child = GuestCommand::new(&guest) 6160 .args(["--cpus", &format!("boot={num_queue_pairs}")]) 6161 .args(["--memory", "size=512M"]) 6162 .args(["--kernel", kernel_path.to_str().unwrap()]) 6163 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6164 .default_disks() 6165 .args([ 6166 "--net", 6167 &format!( 6168 "fd=[{},{}],mac={},num_queues={}", 6169 taps[0].as_raw_fd(), 6170 taps[1].as_raw_fd(), 6171 guest.network.guest_mac, 6172 num_queue_pairs * 2 6173 ), 6174 ]) 6175 .capture_output() 6176 .spawn() 6177 .unwrap(); 6178 6179 let r = std::panic::catch_unwind(|| { 6180 guest.wait_vm_boot(None).unwrap(); 6181 6182 assert_eq!( 6183 guest 6184 .ssh_command("ip -o link | wc -l") 6185 .unwrap() 6186 .trim() 6187 .parse::<u32>() 6188 .unwrap_or_default(), 6189 2 6190 ); 6191 6192 guest.reboot_linux(0, None); 6193 6194 assert_eq!( 6195 guest 6196 .ssh_command("ip -o link | wc -l") 6197 .unwrap() 6198 .trim() 6199 .parse::<u32>() 6200 .unwrap_or_default(), 6201 2 6202 ); 6203 }); 6204 6205 let _ = child.kill(); 6206 let output = child.wait_with_output().unwrap(); 6207 
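        // As in the other tests, the child is killed first, its captured
        // stdout/stderr are collected, and handle_child_output() is then
        // expected to re-raise any panic caught by catch_unwind so that
        // assertion failures surface together with the VMM output.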
6208 handle_child_output(r, &output); 6209 } 6210 6211 // By design, a guest VM won't be able to connect to the host 6212 // machine when using a macvtap network interface (while it can 6213 // communicate externally). As a workaround, this integration 6214 // test creates two macvtap interfaces in 'bridge' mode on the 6215 // same physical net interface, one for the guest and one for 6216 // the host. With additional setup on the IP address and the 6217 // routing table, this enables communication between the 6218 // guest VM and the host machine. 6219 // Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail 6220 fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) { 6221 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6222 let guest = Guest::new(Box::new(focal)); 6223 let api_socket = temp_api_path(&guest.tmp_dir); 6224 6225 #[cfg(target_arch = "x86_64")] 6226 let kernel_path = direct_kernel_boot_path(); 6227 #[cfg(target_arch = "aarch64")] 6228 let kernel_path = edk2_path(); 6229 6230 let phy_net = "eth0"; 6231 6232 // Create a macvtap interface for the guest VM to use 6233 assert!(exec_host_command_status(&format!( 6234 "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mode bridge" 6235 )) 6236 .success()); 6237 assert!(exec_host_command_status(&format!( 6238 "sudo ip link set {} address {} up", 6239 guest_macvtap_name, guest.network.guest_mac 6240 )) 6241 .success()); 6242 assert!( 6243 exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success() 6244 ); 6245 6246 let tap_index = 6247 fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap(); 6248 let tap_device = format!("/dev/tap{}", tap_index.trim()); 6249 6250 assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success()); 6251 6252 let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap(); 6253 let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6254 assert!(tap_fd1 > 0); 6255 let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6256 assert!(tap_fd2 > 0); 6257 6258 // Create a macvtap on the same physical net interface for 6259 // the host machine to use 6260 assert!(exec_host_command_status(&format!( 6261 "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mode bridge" 6262 )) 6263 .success()); 6264 // Use default mask "255.255.255.0" 6265 assert!(exec_host_command_status(&format!( 6266 "sudo ip address add {}/24 dev {}", 6267 guest.network.host_ip, host_macvtap_name 6268 )) 6269 .success()); 6270 assert!( 6271 exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up")) 6272 .success() 6273 ); 6274 6275 let mut guest_command = GuestCommand::new(&guest); 6276 guest_command 6277 .args(["--cpus", "boot=2"]) 6278 .args(["--memory", "size=512M"]) 6279 .args(["--kernel", kernel_path.to_str().unwrap()]) 6280 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6281 .default_disks() 6282 .args(["--api-socket", &api_socket]); 6283 6284 let net_params = format!( 6285 "fd=[{},{}],mac={},num_queues=4", 6286 tap_fd1, tap_fd2, guest.network.guest_mac 6287 ); 6288 6289 if !hotplug { 6290 guest_command.args(["--net", &net_params]); 6291 } 6292 6293 let mut child = guest_command.capture_output().spawn().unwrap(); 6294 6295 if hotplug { 6296 // Give the VMM process some time to start listening on the API 6297 // socket.
This is the only requirement to avoid the following 6298 // call to ch-remote from failing. 6299 thread::sleep(std::time::Duration::new(10, 0)); 6300 // Hotplug the virtio-net device 6301 let (cmd_success, cmd_output) = 6302 remote_command_w_output(&api_socket, "add-net", Some(&net_params)); 6303 assert!(cmd_success); 6304 #[cfg(target_arch = "x86_64")] 6305 assert!(String::from_utf8_lossy(&cmd_output) 6306 .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}")); 6307 #[cfg(target_arch = "aarch64")] 6308 assert!(String::from_utf8_lossy(&cmd_output) 6309 .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}")); 6310 } 6311 6312 // The functional connectivity provided by the virtio-net device 6313 // gets tested through wait_vm_boot() as it expects to receive a 6314 // HTTP request, and through the SSH command as well. 6315 let r = std::panic::catch_unwind(|| { 6316 guest.wait_vm_boot(None).unwrap(); 6317 6318 assert_eq!( 6319 guest 6320 .ssh_command("ip -o link | wc -l") 6321 .unwrap() 6322 .trim() 6323 .parse::<u32>() 6324 .unwrap_or_default(), 6325 2 6326 ); 6327 6328 guest.reboot_linux(0, None); 6329 6330 assert_eq!( 6331 guest 6332 .ssh_command("ip -o link | wc -l") 6333 .unwrap() 6334 .trim() 6335 .parse::<u32>() 6336 .unwrap_or_default(), 6337 2 6338 ); 6339 }); 6340 6341 let _ = child.kill(); 6342 6343 exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}")); 6344 exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}")); 6345 6346 let output = child.wait_with_output().unwrap(); 6347 6348 handle_child_output(r, &output); 6349 } 6350 6351 #[test] 6352 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6353 fn test_macvtap() { 6354 _test_macvtap(false, "guestmacvtap0", "hostmacvtap0") 6355 } 6356 6357 #[test] 6358 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6359 fn test_macvtap_hotplug() { 6360 _test_macvtap(true, "guestmacvtap1", "hostmacvtap1") 6361 } 6362 6363 #[test] 6364 #[cfg(not(feature = "mshv"))] 6365 fn test_ovs_dpdk() { 6366 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6367 let guest1 = Guest::new(Box::new(focal1)); 6368 6369 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6370 let guest2 = Guest::new(Box::new(focal2)); 6371 let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir)); 6372 6373 let (mut child1, mut child2) = 6374 setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false); 6375 6376 // Create the snapshot directory 6377 let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir); 6378 6379 let r = std::panic::catch_unwind(|| { 6380 // Remove one of the two ports from the OVS bridge 6381 assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success()); 6382 6383 // Spawn a new netcat listener in the first VM 6384 let guest_ip = guest1.network.guest_ip.clone(); 6385 thread::spawn(move || { 6386 ssh_command_ip( 6387 "nc -l 12345", 6388 &guest_ip, 6389 DEFAULT_SSH_RETRIES, 6390 DEFAULT_SSH_TIMEOUT, 6391 ) 6392 .unwrap(); 6393 }); 6394 6395 // Wait for the server to be listening 6396 thread::sleep(std::time::Duration::new(5, 0)); 6397 6398 // Check the connection fails this time 6399 assert!(guest2.ssh_command("nc -vz 172.100.0.1 12345").is_err()); 6400 6401 // Add the OVS port back 6402 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 6403 6404 // And finally check the connection is functional 
again 6405 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6406 6407 // Pause the VM 6408 assert!(remote_command(&api_socket_source, "pause", None)); 6409 6410 // Take a snapshot from the VM 6411 assert!(remote_command( 6412 &api_socket_source, 6413 "snapshot", 6414 Some(format!("file://{snapshot_dir}").as_str()), 6415 )); 6416 6417 // Wait to make sure the snapshot is completed 6418 thread::sleep(std::time::Duration::new(10, 0)); 6419 }); 6420 6421 // Shutdown the source VM 6422 let _ = child2.kill(); 6423 let output = child2.wait_with_output().unwrap(); 6424 handle_child_output(r, &output); 6425 6426 // Remove the vhost-user socket file. 6427 Command::new("rm") 6428 .arg("-f") 6429 .arg("/tmp/dpdkvhostclient2") 6430 .output() 6431 .unwrap(); 6432 6433 let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir)); 6434 // Restore the VM from the snapshot 6435 let mut child2 = GuestCommand::new(&guest2) 6436 .args(["--api-socket", &api_socket_restored]) 6437 .args([ 6438 "--restore", 6439 format!("source_url=file://{snapshot_dir}").as_str(), 6440 ]) 6441 .capture_output() 6442 .spawn() 6443 .unwrap(); 6444 6445 // Wait for the VM to be restored 6446 thread::sleep(std::time::Duration::new(10, 0)); 6447 6448 let r = std::panic::catch_unwind(|| { 6449 // Resume the VM 6450 assert!(remote_command(&api_socket_restored, "resume", None)); 6451 6452 // Spawn a new netcat listener in the first VM 6453 let guest_ip = guest1.network.guest_ip.clone(); 6454 thread::spawn(move || { 6455 ssh_command_ip( 6456 "nc -l 12345", 6457 &guest_ip, 6458 DEFAULT_SSH_RETRIES, 6459 DEFAULT_SSH_TIMEOUT, 6460 ) 6461 .unwrap(); 6462 }); 6463 6464 // Wait for the server to be listening 6465 thread::sleep(std::time::Duration::new(5, 0)); 6466 6467 // And check the connection is still functional after restore 6468 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6469 }); 6470 6471 let _ = child1.kill(); 6472 let _ = child2.kill(); 6473 6474 let output = child1.wait_with_output().unwrap(); 6475 child2.wait().unwrap(); 6476 6477 cleanup_ovs_dpdk(); 6478 6479 handle_child_output(r, &output); 6480 } 6481 6482 fn setup_spdk_nvme(nvme_dir: &std::path::Path) { 6483 cleanup_spdk_nvme(); 6484 6485 assert!(exec_host_command_status(&format!( 6486 "mkdir -p {}", 6487 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6488 )) 6489 .success()); 6490 assert!(exec_host_command_status(&format!( 6491 "truncate {} -s 128M", 6492 nvme_dir.join("test-disk.raw").to_str().unwrap() 6493 )) 6494 .success()); 6495 assert!(exec_host_command_status(&format!( 6496 "mkfs.ext4 {}", 6497 nvme_dir.join("test-disk.raw").to_str().unwrap() 6498 )) 6499 .success()); 6500 6501 // Start the SPDK nvmf_tgt daemon to present NVMe device as a VFIO user device 6502 Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt") 6503 .args(["-i", "0", "-m", "0x1"]) 6504 .spawn() 6505 .unwrap(); 6506 thread::sleep(std::time::Duration::new(2, 0)); 6507 6508 assert!(exec_host_command_status( 6509 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER" 6510 ) 6511 .success()); 6512 assert!(exec_host_command_status(&format!( 6513 "/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512", 6514 nvme_dir.join("test-disk.raw").to_str().unwrap() 6515 )) 6516 .success()); 6517 assert!(exec_host_command_status( 6518 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test" 6519 ) 6520 .success()); 6521 assert!(exec_host_command_status( 6522 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns 
nqn.2019-07.io.spdk:cnode test" 6523 ) 6524 .success()); 6525 assert!(exec_host_command_status(&format!( 6526 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0", 6527 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6528 )) 6529 .success()); 6530 } 6531 6532 fn cleanup_spdk_nvme() { 6533 exec_host_command_status("pkill -f nvmf_tgt"); 6534 } 6535 6536 #[test] 6537 fn test_vfio_user() { 6538 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 6539 let jammy = UbuntuDiskConfig::new(jammy_image); 6540 let guest = Guest::new(Box::new(jammy)); 6541 6542 let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user"); 6543 setup_spdk_nvme(spdk_nvme_dir.as_path()); 6544 6545 let api_socket = temp_api_path(&guest.tmp_dir); 6546 let mut child = GuestCommand::new(&guest) 6547 .args(["--api-socket", &api_socket]) 6548 .args(["--cpus", "boot=1"]) 6549 .args(["--memory", "size=512M,shared=on,hugepages=on"]) 6550 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6551 .args(["--serial", "tty", "--console", "off"]) 6552 .default_disks() 6553 .default_net() 6554 .capture_output() 6555 .spawn() 6556 .unwrap(); 6557 6558 let r = std::panic::catch_unwind(|| { 6559 guest.wait_vm_boot(None).unwrap(); 6560 6561 // Hotplug the SPDK-NVMe device to the VM 6562 let (cmd_success, cmd_output) = remote_command_w_output( 6563 &api_socket, 6564 "add-user-device", 6565 Some(&format!( 6566 "socket={},id=vfio_user0", 6567 spdk_nvme_dir 6568 .as_path() 6569 .join("nvme-vfio-user/cntrl") 6570 .to_str() 6571 .unwrap(), 6572 )), 6573 ); 6574 assert!(cmd_success); 6575 assert!(String::from_utf8_lossy(&cmd_output) 6576 .contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}")); 6577 6578 thread::sleep(std::time::Duration::new(10, 0)); 6579 6580 // Check both if /dev/nvme exists and if the block size is 128M. 6581 assert_eq!( 6582 guest 6583 .ssh_command("lsblk | grep nvme0n1 | grep -c 128M") 6584 .unwrap() 6585 .trim() 6586 .parse::<u32>() 6587 .unwrap_or_default(), 6588 1 6589 ); 6590 6591 // Check changes persist after reboot 6592 assert_eq!( 6593 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6594 "" 6595 ); 6596 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 6597 guest 6598 .ssh_command("echo test123 | sudo tee /mnt/test") 6599 .unwrap(); 6600 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 6601 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 6602 6603 guest.reboot_linux(0, None); 6604 assert_eq!( 6605 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6606 "" 6607 ); 6608 assert_eq!( 6609 guest.ssh_command("sudo cat /mnt/test").unwrap().trim(), 6610 "test123" 6611 ); 6612 }); 6613 6614 cleanup_spdk_nvme(); 6615 6616 let _ = child.kill(); 6617 let output = child.wait_with_output().unwrap(); 6618 6619 handle_child_output(r, &output); 6620 } 6621 6622 #[test] 6623 #[cfg(target_arch = "x86_64")] 6624 fn test_vdpa_block() { 6625 // Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded. 
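        // (Provisioning is host-specific; typically the simulator comes from the
        // in-kernel vDPA block simulator, loaded with something like
        // `modprobe vdpa_sim_blk`, with the /dev/vhost-vdpa-* nodes created
        // through the `vdpa` management tool. Hence the early return below when
        // the module is absent.)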
6626 if !exec_host_command_status("lsmod | grep vdpa_sim_blk").success() { 6627 return; 6628 } 6629 6630 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6631 let guest = Guest::new(Box::new(focal)); 6632 let api_socket = temp_api_path(&guest.tmp_dir); 6633 6634 let kernel_path = direct_kernel_boot_path(); 6635 6636 let mut child = GuestCommand::new(&guest) 6637 .args(["--cpus", "boot=2"]) 6638 .args(["--memory", "size=512M,hugepages=on"]) 6639 .args(["--kernel", kernel_path.to_str().unwrap()]) 6640 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6641 .default_disks() 6642 .default_net() 6643 .args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"]) 6644 .args(["--platform", "num_pci_segments=2,iommu_segments=1"]) 6645 .args(["--api-socket", &api_socket]) 6646 .capture_output() 6647 .spawn() 6648 .unwrap(); 6649 6650 let r = std::panic::catch_unwind(|| { 6651 guest.wait_vm_boot(None).unwrap(); 6652 6653 // Check both that /dev/vdc exists and that its size is 128M. 6654 assert_eq!( 6655 guest 6656 .ssh_command("lsblk | grep vdc | grep -c 128M") 6657 .unwrap() 6658 .trim() 6659 .parse::<u32>() 6660 .unwrap_or_default(), 6661 1 6662 ); 6663 6664 // Write to the block device and check its content afterwards. 6665 // The vdpa-sim-blk device should let us read back what we previously wrote. 6666 guest 6667 .ssh_command("sudo bash -c 'echo foobar > /dev/vdc'") 6668 .unwrap(); 6669 assert_eq!( 6670 guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(), 6671 "foobar" 6672 ); 6673 6674 // Hotplug an extra vDPA block device behind the vIOMMU 6675 // by adding a new vDPA device to the VM 6676 let (cmd_success, cmd_output) = remote_command_w_output( 6677 &api_socket, 6678 "add-vdpa", 6679 Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"), 6680 ); 6681 assert!(cmd_success); 6682 assert!(String::from_utf8_lossy(&cmd_output) 6683 .contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}")); 6684 6685 thread::sleep(std::time::Duration::new(10, 0)); 6686 6687 // Check IOMMU setup 6688 assert!(guest 6689 .does_device_vendor_pair_match("0x1057", "0x1af4") 6690 .unwrap_or_default()); 6691 assert_eq!( 6692 guest 6693 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 6694 .unwrap() 6695 .trim(), 6696 "0001:00:01.0" 6697 ); 6698 6699 // Check both that /dev/vdd exists and that its size is 128M. 6700 assert_eq!( 6701 guest 6702 .ssh_command("lsblk | grep vdd | grep -c 128M") 6703 .unwrap() 6704 .trim() 6705 .parse::<u32>() 6706 .unwrap_or_default(), 6707 1 6708 ); 6709 6710 // Write some content to the block device we've just plugged. 6711 guest 6712 .ssh_command("sudo bash -c 'echo foobar > /dev/vdd'") 6713 .unwrap(); 6714 6715 // Check we can read the content back.
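            // (vdpa_sim_blk is a memory-backed simulator, so the data written
            // above is only expected to persist for the lifetime of the
            // simulated device, which is enough for this read-back check.)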
6716 assert_eq!( 6717 guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(), 6718 "foobar" 6719 ); 6720 6721 // Unplug the device 6722 let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0")); 6723 assert!(cmd_success); 6724 thread::sleep(std::time::Duration::new(10, 0)); 6725 6726 // Check /dev/vdd doesn't exist anymore 6727 assert_eq!( 6728 guest 6729 .ssh_command("lsblk | grep -c vdd || true") 6730 .unwrap() 6731 .trim() 6732 .parse::<u32>() 6733 .unwrap_or(1), 6734 0 6735 ); 6736 }); 6737 6738 let _ = child.kill(); 6739 let output = child.wait_with_output().unwrap(); 6740 6741 handle_child_output(r, &output); 6742 } 6743 6744 #[test] 6745 #[cfg(target_arch = "x86_64")] 6746 fn test_vdpa_net() { 6747 // Before trying to run the test, verify the vdpa_sim_net module is correctly loaded. 6748 if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() { 6749 return; 6750 } 6751 6752 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6753 let guest = Guest::new(Box::new(focal)); 6754 6755 let kernel_path = direct_kernel_boot_path(); 6756 6757 let mut child = GuestCommand::new(&guest) 6758 .args(["--cpus", "boot=2"]) 6759 .args(["--memory", "size=512M,hugepages=on"]) 6760 .args(["--kernel", kernel_path.to_str().unwrap()]) 6761 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6762 .default_disks() 6763 .default_net() 6764 .args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"]) 6765 .capture_output() 6766 .spawn() 6767 .unwrap(); 6768 6769 let r = std::panic::catch_unwind(|| { 6770 guest.wait_vm_boot(None).unwrap(); 6771 6772 // Check we can find network interface related to vDPA device 6773 assert_eq!( 6774 guest 6775 .ssh_command("ip -o link | grep -c ens6") 6776 .unwrap() 6777 .trim() 6778 .parse::<u32>() 6779 .unwrap_or(0), 6780 1 6781 ); 6782 6783 guest 6784 .ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6") 6785 .unwrap(); 6786 guest.ssh_command("sudo ip link set up dev ens6").unwrap(); 6787 6788 // Check there is no packet yet on both TX/RX of the network interface 6789 assert_eq!( 6790 guest 6791 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'") 6792 .unwrap() 6793 .trim() 6794 .parse::<u32>() 6795 .unwrap_or(0), 6796 2 6797 ); 6798 6799 // Send 6 packets with ping command 6800 guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap(); 6801 6802 // Check we can find 6 packets on both TX/RX of the network interface 6803 assert_eq!( 6804 guest 6805 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'") 6806 .unwrap() 6807 .trim() 6808 .parse::<u32>() 6809 .unwrap_or(0), 6810 2 6811 ); 6812 6813 // No need to check for hotplug as we already tested it through 6814 // test_vdpa_block() 6815 }); 6816 6817 let _ = child.kill(); 6818 let output = child.wait_with_output().unwrap(); 6819 6820 handle_child_output(r, &output); 6821 } 6822 6823 #[test] 6824 #[cfg(target_arch = "x86_64")] 6825 fn test_tpm() { 6826 let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 6827 let guest = Guest::new(Box::new(focal)); 6828 6829 let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir); 6830 6831 let mut guest_cmd = GuestCommand::new(&guest); 6832 guest_cmd 6833 .args(["--cpus", "boot=1"]) 6834 .args(["--memory", "size=512M"]) 6835 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6836 .args(["--tpm", &format!("socket={swtpm_socket_path}")]) 6837 .capture_output() 6838 .default_disks() 6839 .default_net(); 6840 6841 // Start swtpm daemon 6842 let mut 
swtpm_child = swtpm_command.spawn().unwrap(); 6843 thread::sleep(std::time::Duration::new(10, 0)); 6844 let mut child = guest_cmd.spawn().unwrap(); 6845 let r = std::panic::catch_unwind(|| { 6846 guest.wait_vm_boot(None).unwrap(); 6847 assert_eq!( 6848 guest.ssh_command("ls /dev/tpm0").unwrap().trim(), 6849 "/dev/tpm0" 6850 ); 6851 guest.ssh_command("sudo tpm2_selftest -f").unwrap(); 6852 guest 6853 .ssh_command("echo 'hello' > /tmp/checksum_test; ") 6854 .unwrap(); 6855 guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap(); 6856 }); 6857 6858 let _ = swtpm_child.kill(); 6859 let _d_out = swtpm_child.wait_with_output().unwrap(); 6860 6861 let _ = child.kill(); 6862 let output = child.wait_with_output().unwrap(); 6863 6864 handle_child_output(r, &output); 6865 } 6866 } 6867 6868 mod dbus_api { 6869 use crate::*; 6870 6871 // Start cloud-hypervisor with no VM parameters, running both the HTTP 6872 // and DBus APIs. Alternate calls to the external APIs (HTTP and DBus) 6873 // to create a VM, boot it, and verify that it can be shut down and then 6874 // booted again. 6875 #[test] 6876 fn test_api_dbus_and_http_interleaved() { 6877 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6878 let guest = Guest::new(Box::new(focal)); 6879 let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir); 6880 let http_api = TargetApi::new_http_api(&guest.tmp_dir); 6881 6882 let mut child = GuestCommand::new(&guest) 6883 .args(dbus_api.guest_args()) 6884 .args(http_api.guest_args()) 6885 .capture_output() 6886 .spawn() 6887 .unwrap(); 6888 6889 thread::sleep(std::time::Duration::new(1, 0)); 6890 6891 // Verify API servers are running 6892 assert!(dbus_api.remote_command("ping", None)); 6893 assert!(http_api.remote_command("ping", None)); 6894 6895 // Create the VM first 6896 let cpu_count: u8 = 4; 6897 let request_body = guest.api_create_body( 6898 cpu_count, 6899 direct_kernel_boot_path().to_str().unwrap(), 6900 DIRECT_KERNEL_BOOT_CMDLINE, 6901 ); 6902 6903 let temp_config_path = guest.tmp_dir.as_path().join("config"); 6904 std::fs::write(&temp_config_path, request_body).unwrap(); 6905 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 6906 6907 let r = std::panic::catch_unwind(|| { 6908 // Create the VM 6909 assert!(dbus_api.remote_command("create", Some(create_config),)); 6910 6911 // Then boot it 6912 assert!(http_api.remote_command("boot", None)); 6913 guest.wait_vm_boot(None).unwrap(); 6914 6915 // Check that the VM booted as expected 6916 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 6917 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 6918 6919 // Sync and shutdown without powering off to prevent filesystem 6920 // corruption. 
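            // `shutdown -H` halts the guest kernel without powering off the
            // virtual machine, so the VMM instance stays alive and the same VM
            // can then be shut down and booted again purely through the
            // external APIs exercised below.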
6921 guest.ssh_command("sync").unwrap(); 6922 guest.ssh_command("sudo shutdown -H now").unwrap(); 6923 6924 // Wait for the guest to be fully shutdown 6925 thread::sleep(std::time::Duration::new(20, 0)); 6926 6927 // Then shutdown the VM 6928 assert!(dbus_api.remote_command("shutdown", None)); 6929 6930 // Then boot it again 6931 assert!(http_api.remote_command("boot", None)); 6932 guest.wait_vm_boot(None).unwrap(); 6933 6934 // Check that the VM booted as expected 6935 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 6936 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 6937 }); 6938 6939 let _ = child.kill(); 6940 let output = child.wait_with_output().unwrap(); 6941 6942 handle_child_output(r, &output); 6943 } 6944 6945 #[test] 6946 fn test_api_dbus_create_boot() { 6947 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6948 let guest = Guest::new(Box::new(focal)); 6949 6950 _test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 6951 } 6952 6953 #[test] 6954 fn test_api_dbus_shutdown() { 6955 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6956 let guest = Guest::new(Box::new(focal)); 6957 6958 _test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 6959 } 6960 6961 #[test] 6962 fn test_api_dbus_delete() { 6963 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6964 let guest = Guest::new(Box::new(focal)); 6965 6966 _test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest); 6967 } 6968 6969 #[test] 6970 fn test_api_dbus_pause_resume() { 6971 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6972 let guest = Guest::new(Box::new(focal)); 6973 6974 _test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 6975 } 6976 } 6977 6978 mod common_sequential { 6979 #[cfg(not(feature = "mshv"))] 6980 use crate::*; 6981 6982 #[test] 6983 #[cfg(not(feature = "mshv"))] 6984 fn test_memory_mergeable_on() { 6985 test_memory_mergeable(true) 6986 } 6987 } 6988 6989 mod windows { 6990 use crate::*; 6991 use once_cell::sync::Lazy; 6992 6993 static NEXT_DISK_ID: Lazy<Mutex<u8>> = Lazy::new(|| Mutex::new(1)); 6994 6995 struct WindowsGuest { 6996 guest: Guest, 6997 auth: PasswordAuth, 6998 } 6999 7000 trait FsType { 7001 const FS_FAT: u8; 7002 const FS_NTFS: u8; 7003 } 7004 impl FsType for WindowsGuest { 7005 const FS_FAT: u8 = 0; 7006 const FS_NTFS: u8 = 1; 7007 } 7008 7009 impl WindowsGuest { 7010 fn new() -> Self { 7011 let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string()); 7012 let guest = Guest::new(Box::new(disk)); 7013 let auth = PasswordAuth { 7014 username: String::from("administrator"), 7015 password: String::from("Admin123"), 7016 }; 7017 7018 WindowsGuest { guest, auth } 7019 } 7020 7021 fn guest(&self) -> &Guest { 7022 &self.guest 7023 } 7024 7025 fn ssh_cmd(&self, cmd: &str) -> String { 7026 ssh_command_ip_with_auth( 7027 cmd, 7028 &self.auth, 7029 &self.guest.network.guest_ip, 7030 DEFAULT_SSH_RETRIES, 7031 DEFAULT_SSH_TIMEOUT, 7032 ) 7033 .unwrap() 7034 } 7035 7036 fn cpu_count(&self) -> u8 { 7037 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"") 7038 .trim() 7039 .parse::<u8>() 7040 .unwrap_or(0) 7041 } 7042 7043 fn ram_size(&self) -> usize { 7044 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"") 7045 .trim() 7046 .parse::<usize>() 7047 .unwrap_or(0) 7048 } 7049 7050 fn netdev_count(&self) -> u8 { 7051 self.ssh_cmd("powershell -Command 
\"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"") 7052 .trim() 7053 .parse::<u8>() 7054 .unwrap_or(0) 7055 } 7056 7057 fn disk_count(&self) -> u8 { 7058 self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"") 7059 .trim() 7060 .parse::<u8>() 7061 .unwrap_or(0) 7062 } 7063 7064 fn reboot(&self) { 7065 let _ = self.ssh_cmd("shutdown /r /t 0"); 7066 } 7067 7068 fn shutdown(&self) { 7069 let _ = self.ssh_cmd("shutdown /s /t 0"); 7070 } 7071 7072 fn run_dnsmasq(&self) -> std::process::Child { 7073 let listen_address = format!("--listen-address={}", self.guest.network.host_ip); 7074 let dhcp_host = format!( 7075 "--dhcp-host={},{}", 7076 self.guest.network.guest_mac, self.guest.network.guest_ip 7077 ); 7078 let dhcp_range = format!( 7079 "--dhcp-range=eth,{},{}", 7080 self.guest.network.guest_ip, self.guest.network.guest_ip 7081 ); 7082 7083 Command::new("dnsmasq") 7084 .arg("--no-daemon") 7085 .arg("--log-queries") 7086 .arg(listen_address.as_str()) 7087 .arg("--except-interface=lo") 7088 .arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet. 7089 .arg("--conf-file=/dev/null") 7090 .arg(dhcp_host.as_str()) 7091 .arg(dhcp_range.as_str()) 7092 .spawn() 7093 .unwrap() 7094 } 7095 7096 // TODO Cleanup image file explicitly after test, if there's some space issues. 7097 fn disk_new(&self, fs: u8, sz: usize) -> String { 7098 let mut guard = NEXT_DISK_ID.lock().unwrap(); 7099 let id = *guard; 7100 *guard = id + 1; 7101 7102 let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw")); 7103 let _ = fs::remove_file(&img); 7104 7105 // Create an image file 7106 let out = Command::new("qemu-img") 7107 .args([ 7108 "create", 7109 "-f", 7110 "raw", 7111 img.to_str().unwrap(), 7112 format!("{sz}m").as_str(), 7113 ]) 7114 .output() 7115 .expect("qemu-img command failed") 7116 .stdout; 7117 println!("{out:?}"); 7118 7119 // Associate image to a loop device 7120 let out = Command::new("losetup") 7121 .args(["--show", "-f", img.to_str().unwrap()]) 7122 .output() 7123 .expect("failed to create loop device") 7124 .stdout; 7125 let _tmp = String::from_utf8_lossy(&out); 7126 let loop_dev = _tmp.trim(); 7127 println!("{out:?}"); 7128 7129 // Create a partition table 7130 // echo 'type=7' | sudo sfdisk "${LOOP}" 7131 let mut child = Command::new("sfdisk") 7132 .args([loop_dev]) 7133 .stdin(Stdio::piped()) 7134 .spawn() 7135 .unwrap(); 7136 let stdin = child.stdin.as_mut().expect("failed to open stdin"); 7137 stdin 7138 .write_all("type=7".as_bytes()) 7139 .expect("failed to write stdin"); 7140 let out = child.wait_with_output().expect("sfdisk failed").stdout; 7141 println!("{out:?}"); 7142 7143 // Disengage the loop device 7144 let out = Command::new("losetup") 7145 .args(["-d", loop_dev]) 7146 .output() 7147 .expect("loop device not found") 7148 .stdout; 7149 println!("{out:?}"); 7150 7151 // Re-associate loop device pointing to the partition only 7152 let out = Command::new("losetup") 7153 .args([ 7154 "--show", 7155 "--offset", 7156 (512 * 2048).to_string().as_str(), 7157 "-f", 7158 img.to_str().unwrap(), 7159 ]) 7160 .output() 7161 .expect("failed to create loop device") 7162 .stdout; 7163 let _tmp = String::from_utf8_lossy(&out); 7164 let loop_dev = _tmp.trim(); 7165 println!("{out:?}"); 7166 7167 // Create filesystem. 
7168 let fs_cmd = match fs { 7169 WindowsGuest::FS_FAT => "mkfs.msdos", 7170 WindowsGuest::FS_NTFS => "mkfs.ntfs", 7171 _ => panic!("Unknown filesystem type '{fs}'"), 7172 }; 7173 let out = Command::new(fs_cmd) 7174 .args([&loop_dev]) 7175 .output() 7176 .unwrap_or_else(|_| panic!("{fs_cmd} failed")) 7177 .stdout; 7178 println!("{out:?}"); 7179 7180 // Disengage the loop device 7181 let out = Command::new("losetup") 7182 .args(["-d", loop_dev]) 7183 .output() 7184 .unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found")) 7185 .stdout; 7186 println!("{out:?}"); 7187 7188 img.to_str().unwrap().to_string() 7189 } 7190 7191 fn disks_set_rw(&self) { 7192 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\""); 7193 } 7194 7195 fn disks_online(&self) { 7196 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\""); 7197 } 7198 7199 fn disk_file_put(&self, fname: &str, data: &str) { 7200 let _ = self.ssh_cmd(&format!( 7201 "powershell -Command \"'{data}' | Set-Content -Path {fname}\"" 7202 )); 7203 } 7204 7205 fn disk_file_read(&self, fname: &str) -> String { 7206 self.ssh_cmd(&format!( 7207 "powershell -Command \"Get-Content -Path {fname}\"" 7208 )) 7209 } 7210 7211 fn wait_for_boot(&self) -> bool { 7212 let cmd = "dir /b c:\\ | find \"Windows\""; 7213 let tmo_max = 180; 7214 // The timeout increase by n*1+n*2+n*3+..., therefore the initial 7215 // interval must be small. 7216 let tmo_int = 2; 7217 let out = ssh_command_ip_with_auth( 7218 cmd, 7219 &self.auth, 7220 &self.guest.network.guest_ip, 7221 { 7222 let mut ret = 1; 7223 let mut tmo_acc = 0; 7224 loop { 7225 tmo_acc += tmo_int * ret; 7226 if tmo_acc >= tmo_max { 7227 break; 7228 } 7229 ret += 1; 7230 } 7231 ret 7232 }, 7233 tmo_int, 7234 ) 7235 .unwrap(); 7236 7237 if "Windows" == out.trim() { 7238 return true; 7239 } 7240 7241 false 7242 } 7243 } 7244 7245 fn vcpu_threads_count(pid: u32) -> u8 { 7246 // ps -T -p 12345 | grep vcpu | wc -l 7247 let out = Command::new("ps") 7248 .args(["-T", "-p", format!("{pid}").as_str()]) 7249 .output() 7250 .expect("ps command failed") 7251 .stdout; 7252 return String::from_utf8_lossy(&out).matches("vcpu").count() as u8; 7253 } 7254 7255 fn netdev_ctrl_threads_count(pid: u32) -> u8 { 7256 // ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l 7257 let out = Command::new("ps") 7258 .args(["-T", "-p", format!("{pid}").as_str()]) 7259 .output() 7260 .expect("ps command failed") 7261 .stdout; 7262 let mut n = 0; 7263 String::from_utf8_lossy(&out) 7264 .split_whitespace() 7265 .for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl 7266 n 7267 } 7268 7269 fn disk_ctrl_threads_count(pid: u32) -> u8 { 7270 // ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l 7271 let out = Command::new("ps") 7272 .args(["-T", "-p", format!("{pid}").as_str()]) 7273 .output() 7274 .expect("ps command failed") 7275 .stdout; 7276 let mut n = 0; 7277 String::from_utf8_lossy(&out) 7278 .split_whitespace() 7279 .for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // _disk0_q0, don't care about multiple queues as they're related to the same hdd 7280 n 7281 } 7282 7283 #[test] 7284 fn test_windows_guest() { 7285 let windows_guest = WindowsGuest::new(); 7286 7287 let mut child = GuestCommand::new(windows_guest.guest()) 7288 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7289 .args(["--memory", "size=4G"]) 7290 .args(["--kernel", edk2_path().to_str().unwrap()]) 
7291 .args(["--serial", "tty"]) 7292 .args(["--console", "off"]) 7293 .default_disks() 7294 .default_net() 7295 .capture_output() 7296 .spawn() 7297 .unwrap(); 7298 7299 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7300 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7301 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7302 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7303 7304 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7305 7306 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7307 7308 let r = std::panic::catch_unwind(|| { 7309 // Wait to make sure Windows boots up 7310 assert!(windows_guest.wait_for_boot()); 7311 7312 windows_guest.shutdown(); 7313 }); 7314 7315 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7316 let _ = child.kill(); 7317 let output = child.wait_with_output().unwrap(); 7318 7319 let _ = child_dnsmasq.kill(); 7320 let _ = child_dnsmasq.wait(); 7321 7322 handle_child_output(r, &output); 7323 } 7324 7325 #[test] 7326 fn test_windows_guest_multiple_queues() { 7327 let windows_guest = WindowsGuest::new(); 7328 7329 let mut ovmf_path = dirs::home_dir().unwrap(); 7330 ovmf_path.push("workloads"); 7331 ovmf_path.push(OVMF_NAME); 7332 7333 let mut child = GuestCommand::new(windows_guest.guest()) 7334 .args(["--cpus", "boot=4,kvm_hyperv=on"]) 7335 .args(["--memory", "size=4G"]) 7336 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7337 .args(["--serial", "tty"]) 7338 .args(["--console", "off"]) 7339 .args([ 7340 "--disk", 7341 format!( 7342 "path={},num_queues=4", 7343 windows_guest 7344 .guest() 7345 .disk_config 7346 .disk(DiskType::OperatingSystem) 7347 .unwrap() 7348 ) 7349 .as_str(), 7350 ]) 7351 .args([ 7352 "--net", 7353 format!( 7354 "tap=,mac={},ip={},mask=255.255.255.0,num_queues=8", 7355 windows_guest.guest().network.guest_mac, 7356 windows_guest.guest().network.host_ip 7357 ) 7358 .as_str(), 7359 ]) 7360 .capture_output() 7361 .spawn() 7362 .unwrap(); 7363 7364 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7365 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7366 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7367 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7368 7369 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7370 7371 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7372 7373 let r = std::panic::catch_unwind(|| { 7374 // Wait to make sure Windows boots up 7375 assert!(windows_guest.wait_for_boot()); 7376 7377 windows_guest.shutdown(); 7378 }); 7379 7380 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7381 let _ = child.kill(); 7382 let output = child.wait_with_output().unwrap(); 7383 7384 let _ = child_dnsmasq.kill(); 7385 let _ = child_dnsmasq.wait(); 7386 7387 handle_child_output(r, &output); 7388 } 7389 7390 #[test] 7391 #[cfg(not(feature = "mshv"))] 7392 #[ignore = "See #4327"] 7393 fn test_windows_guest_snapshot_restore() { 7394 let windows_guest = WindowsGuest::new(); 7395 7396 let mut ovmf_path = dirs::home_dir().unwrap(); 7397 ovmf_path.push("workloads"); 7398 ovmf_path.push(OVMF_NAME); 7399 7400 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7401 let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir)); 7402 7403 let mut child = GuestCommand::new(windows_guest.guest()) 7404 .args(["--api-socket", &api_socket_source]) 7405 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7406 .args(["--memory", "size=4G"]) 7407 .args(["--kernel", 
ovmf_path.to_str().unwrap()]) 7408 .args(["--serial", "tty"]) 7409 .args(["--console", "off"]) 7410 .default_disks() 7411 .default_net() 7412 .capture_output() 7413 .spawn() 7414 .unwrap(); 7415 7416 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7417 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7418 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7419 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7420 7421 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7422 7423 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7424 7425 // Wait to make sure Windows boots up 7426 assert!(windows_guest.wait_for_boot()); 7427 7428 let snapshot_dir = temp_snapshot_dir_path(&tmp_dir); 7429 7430 // Pause the VM 7431 assert!(remote_command(&api_socket_source, "pause", None)); 7432 7433 // Take a snapshot from the VM 7434 assert!(remote_command( 7435 &api_socket_source, 7436 "snapshot", 7437 Some(format!("file://{snapshot_dir}").as_str()), 7438 )); 7439 7440 // Wait to make sure the snapshot is completed 7441 thread::sleep(std::time::Duration::new(30, 0)); 7442 7443 let _ = child.kill(); 7444 child.wait().unwrap(); 7445 7446 let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir)); 7447 7448 // Restore the VM from the snapshot 7449 let mut child = GuestCommand::new(windows_guest.guest()) 7450 .args(["--api-socket", &api_socket_restored]) 7451 .args([ 7452 "--restore", 7453 format!("source_url=file://{snapshot_dir}").as_str(), 7454 ]) 7455 .capture_output() 7456 .spawn() 7457 .unwrap(); 7458 7459 // Wait for the VM to be restored 7460 thread::sleep(std::time::Duration::new(20, 0)); 7461 7462 let r = std::panic::catch_unwind(|| { 7463 // Resume the VM 7464 assert!(remote_command(&api_socket_restored, "resume", None)); 7465 7466 windows_guest.shutdown(); 7467 }); 7468 7469 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7470 let _ = child.kill(); 7471 let output = child.wait_with_output().unwrap(); 7472 7473 let _ = child_dnsmasq.kill(); 7474 let _ = child_dnsmasq.wait(); 7475 7476 handle_child_output(r, &output); 7477 } 7478 7479 #[test] 7480 #[cfg(not(feature = "mshv"))] 7481 #[cfg(not(target_arch = "aarch64"))] 7482 fn test_windows_guest_cpu_hotplug() { 7483 let windows_guest = WindowsGuest::new(); 7484 7485 let mut ovmf_path = dirs::home_dir().unwrap(); 7486 ovmf_path.push("workloads"); 7487 ovmf_path.push(OVMF_NAME); 7488 7489 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7490 let api_socket = temp_api_path(&tmp_dir); 7491 7492 let mut child = GuestCommand::new(windows_guest.guest()) 7493 .args(["--api-socket", &api_socket]) 7494 .args(["--cpus", "boot=2,max=8,kvm_hyperv=on"]) 7495 .args(["--memory", "size=4G"]) 7496 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7497 .args(["--serial", "tty"]) 7498 .args(["--console", "off"]) 7499 .default_disks() 7500 .default_net() 7501 .capture_output() 7502 .spawn() 7503 .unwrap(); 7504 7505 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7506 7507 let r = std::panic::catch_unwind(|| { 7508 // Wait to make sure Windows boots up 7509 assert!(windows_guest.wait_for_boot()); 7510 7511 let vcpu_num = 2; 7512 // Check the initial number of CPUs the guest sees 7513 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7514 // Check the initial number of vcpu threads in the CH process 7515 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7516 7517 let vcpu_num = 6; 7518 // Hotplug some CPUs 7519 resize_command(&api_socket, Some(vcpu_num), None, None, 
None); 7520 // Wait to make sure CPUs are added 7521 thread::sleep(std::time::Duration::new(10, 0)); 7522 // Check the guest sees the correct number 7523 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7524 // Check the CH process has the correct number of vcpu threads 7525 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7526 7527 let vcpu_num = 4; 7528 // Remove some CPUs. Note that Windows doesn't support hot-remove. 7529 resize_command(&api_socket, Some(vcpu_num), None, None, None); 7530 // Wait to make sure CPUs are removed 7531 thread::sleep(std::time::Duration::new(10, 0)); 7532 // Reboot to let Windows catch up 7533 windows_guest.reboot(); 7534 // Wait to make sure Windows completely rebooted 7535 thread::sleep(std::time::Duration::new(60, 0)); 7536 // Check the guest sees the correct number 7537 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7538 // Check the CH process has the correct number of vcpu threads 7539 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7540 7541 windows_guest.shutdown(); 7542 }); 7543 7544 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7545 let _ = child.kill(); 7546 let output = child.wait_with_output().unwrap(); 7547 7548 let _ = child_dnsmasq.kill(); 7549 let _ = child_dnsmasq.wait(); 7550 7551 handle_child_output(r, &output); 7552 } 7553 7554 #[test] 7555 #[cfg(not(feature = "mshv"))] 7556 #[cfg(not(target_arch = "aarch64"))] 7557 fn test_windows_guest_ram_hotplug() { 7558 let windows_guest = WindowsGuest::new(); 7559 7560 let mut ovmf_path = dirs::home_dir().unwrap(); 7561 ovmf_path.push("workloads"); 7562 ovmf_path.push(OVMF_NAME); 7563 7564 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7565 let api_socket = temp_api_path(&tmp_dir); 7566 7567 let mut child = GuestCommand::new(windows_guest.guest()) 7568 .args(["--api-socket", &api_socket]) 7569 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7570 .args(["--memory", "size=2G,hotplug_size=5G"]) 7571 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7572 .args(["--serial", "tty"]) 7573 .args(["--console", "off"]) 7574 .default_disks() 7575 .default_net() 7576 .capture_output() 7577 .spawn() 7578 .unwrap(); 7579 7580 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7581 7582 let r = std::panic::catch_unwind(|| { 7583 // Wait to make sure Windows boots up 7584 assert!(windows_guest.wait_for_boot()); 7585 7586 let ram_size = 2 * 1024 * 1024 * 1024; 7587 // Check the initial number of RAM the guest sees 7588 let current_ram_size = windows_guest.ram_size(); 7589 // This size seems to be reserved by the system and thus the 7590 // reported amount differs by this constant value. 7591 let reserved_ram_size = ram_size - current_ram_size; 7592 // Verify that there's not more than 4mb constant diff wasted 7593 // by the reserved ram. 7594 assert!(reserved_ram_size < 4 * 1024 * 1024); 7595 7596 let ram_size = 4 * 1024 * 1024 * 1024; 7597 // Hotplug some RAM 7598 resize_command(&api_socket, None, Some(ram_size), None, None); 7599 // Wait to make sure RAM has been added 7600 thread::sleep(std::time::Duration::new(10, 0)); 7601 // Check the guest sees the correct number 7602 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 7603 7604 let ram_size = 3 * 1024 * 1024 * 1024; 7605 // Unplug some RAM. Note that hot-remove most likely won't work. 
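            // The test therefore only checks that the guest converges on the
            // requested size after the reboot performed below, rather than
            // expecting the memory to disappear immediately.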
7606 resize_command(&api_socket, None, Some(ram_size), None, None); 7607 // Wait to make sure RAM has been added 7608 thread::sleep(std::time::Duration::new(10, 0)); 7609 // Reboot to let Windows catch up 7610 windows_guest.reboot(); 7611 // Wait to make sure guest completely rebooted 7612 thread::sleep(std::time::Duration::new(60, 0)); 7613 // Check the guest sees the correct number 7614 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 7615 7616 windows_guest.shutdown(); 7617 }); 7618 7619 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7620 let _ = child.kill(); 7621 let output = child.wait_with_output().unwrap(); 7622 7623 let _ = child_dnsmasq.kill(); 7624 let _ = child_dnsmasq.wait(); 7625 7626 handle_child_output(r, &output); 7627 } 7628 7629 #[test] 7630 #[cfg(not(feature = "mshv"))] 7631 fn test_windows_guest_netdev_hotplug() { 7632 let windows_guest = WindowsGuest::new(); 7633 7634 let mut ovmf_path = dirs::home_dir().unwrap(); 7635 ovmf_path.push("workloads"); 7636 ovmf_path.push(OVMF_NAME); 7637 7638 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7639 let api_socket = temp_api_path(&tmp_dir); 7640 7641 let mut child = GuestCommand::new(windows_guest.guest()) 7642 .args(["--api-socket", &api_socket]) 7643 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7644 .args(["--memory", "size=4G"]) 7645 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7646 .args(["--serial", "tty"]) 7647 .args(["--console", "off"]) 7648 .default_disks() 7649 .default_net() 7650 .capture_output() 7651 .spawn() 7652 .unwrap(); 7653 7654 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7655 7656 let r = std::panic::catch_unwind(|| { 7657 // Wait to make sure Windows boots up 7658 assert!(windows_guest.wait_for_boot()); 7659 7660 // Initially present network device 7661 let netdev_num = 1; 7662 assert_eq!(windows_guest.netdev_count(), netdev_num); 7663 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 7664 7665 // Hotplug network device 7666 let (cmd_success, cmd_output) = remote_command_w_output( 7667 &api_socket, 7668 "add-net", 7669 Some(windows_guest.guest().default_net_string().as_str()), 7670 ); 7671 assert!(cmd_success); 7672 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\"")); 7673 thread::sleep(std::time::Duration::new(5, 0)); 7674 // Verify the device is on the system 7675 let netdev_num = 2; 7676 assert_eq!(windows_guest.netdev_count(), netdev_num); 7677 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 7678 7679 // Remove network device 7680 let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2")); 7681 assert!(cmd_success); 7682 thread::sleep(std::time::Duration::new(5, 0)); 7683 // Verify the device has been removed 7684 let netdev_num = 1; 7685 assert_eq!(windows_guest.netdev_count(), netdev_num); 7686 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 7687 7688 windows_guest.shutdown(); 7689 }); 7690 7691 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7692 let _ = child.kill(); 7693 let output = child.wait_with_output().unwrap(); 7694 7695 let _ = child_dnsmasq.kill(); 7696 let _ = child_dnsmasq.wait(); 7697 7698 handle_child_output(r, &output); 7699 } 7700 7701 #[test] 7702 #[cfg(not(feature = "mshv"))] 7703 #[cfg(not(target_arch = "aarch64"))] 7704 fn test_windows_guest_disk_hotplug() { 7705 let windows_guest = WindowsGuest::new(); 7706 7707 let mut ovmf_path = dirs::home_dir().unwrap(); 7708 ovmf_path.push("workloads"); 7709 
ovmf_path.push(OVMF_NAME); 7710 7711 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7712 let api_socket = temp_api_path(&tmp_dir); 7713 7714 let mut child = GuestCommand::new(windows_guest.guest()) 7715 .args(["--api-socket", &api_socket]) 7716 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7717 .args(["--memory", "size=4G"]) 7718 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7719 .args(["--serial", "tty"]) 7720 .args(["--console", "off"]) 7721 .default_disks() 7722 .default_net() 7723 .capture_output() 7724 .spawn() 7725 .unwrap(); 7726 7727 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7728 7729 let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100); 7730 7731 let r = std::panic::catch_unwind(|| { 7732 // Wait to make sure Windows boots up 7733 assert!(windows_guest.wait_for_boot()); 7734 7735 // Initially present disk device 7736 let disk_num = 1; 7737 assert_eq!(windows_guest.disk_count(), disk_num); 7738 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7739 7740 // Hotplug disk device 7741 let (cmd_success, cmd_output) = remote_command_w_output( 7742 &api_socket, 7743 "add-disk", 7744 Some(format!("path={disk},readonly=off").as_str()), 7745 ); 7746 assert!(cmd_success); 7747 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\"")); 7748 thread::sleep(std::time::Duration::new(5, 0)); 7749 // Online disk device 7750 windows_guest.disks_set_rw(); 7751 windows_guest.disks_online(); 7752 // Verify the device is on the system 7753 let disk_num = 2; 7754 assert_eq!(windows_guest.disk_count(), disk_num); 7755 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7756 7757 let data = "hello"; 7758 let fname = "d:\\world"; 7759 windows_guest.disk_file_put(fname, data); 7760 7761 // Unmount disk device 7762 let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2")); 7763 assert!(cmd_success); 7764 thread::sleep(std::time::Duration::new(5, 0)); 7765 // Verify the device has been removed 7766 let disk_num = 1; 7767 assert_eq!(windows_guest.disk_count(), disk_num); 7768 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7769 7770 // Remount and check the file exists with the expected contents 7771 let (cmd_success, _cmd_output) = remote_command_w_output( 7772 &api_socket, 7773 "add-disk", 7774 Some(format!("path={disk},readonly=off").as_str()), 7775 ); 7776 assert!(cmd_success); 7777 thread::sleep(std::time::Duration::new(5, 0)); 7778 let out = windows_guest.disk_file_read(fname); 7779 assert_eq!(data, out.trim()); 7780 7781 // Intentionally no unmount, it'll happen at shutdown. 
7782 7783 windows_guest.shutdown(); 7784 }); 7785 7786 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7787 let _ = child.kill(); 7788 let output = child.wait_with_output().unwrap(); 7789 7790 let _ = child_dnsmasq.kill(); 7791 let _ = child_dnsmasq.wait(); 7792 7793 handle_child_output(r, &output); 7794 } 7795 7796 #[test] 7797 #[cfg(not(feature = "mshv"))] 7798 #[cfg(not(target_arch = "aarch64"))] 7799 fn test_windows_guest_disk_hotplug_multi() { 7800 let windows_guest = WindowsGuest::new(); 7801 7802 let mut ovmf_path = dirs::home_dir().unwrap(); 7803 ovmf_path.push("workloads"); 7804 ovmf_path.push(OVMF_NAME); 7805 7806 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7807 let api_socket = temp_api_path(&tmp_dir); 7808 7809 let mut child = GuestCommand::new(windows_guest.guest()) 7810 .args(["--api-socket", &api_socket]) 7811 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7812 .args(["--memory", "size=2G"]) 7813 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7814 .args(["--serial", "tty"]) 7815 .args(["--console", "off"]) 7816 .default_disks() 7817 .default_net() 7818 .capture_output() 7819 .spawn() 7820 .unwrap(); 7821 7822 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7823 7824 // Predefined data to used at various test stages 7825 let disk_test_data: [[String; 4]; 2] = [ 7826 [ 7827 "_disk2".to_string(), 7828 windows_guest.disk_new(WindowsGuest::FS_FAT, 123), 7829 "d:\\world".to_string(), 7830 "hello".to_string(), 7831 ], 7832 [ 7833 "_disk3".to_string(), 7834 windows_guest.disk_new(WindowsGuest::FS_NTFS, 333), 7835 "e:\\hello".to_string(), 7836 "world".to_string(), 7837 ], 7838 ]; 7839 7840 let r = std::panic::catch_unwind(|| { 7841 // Wait to make sure Windows boots up 7842 assert!(windows_guest.wait_for_boot()); 7843 7844 // Initially present disk device 7845 let disk_num = 1; 7846 assert_eq!(windows_guest.disk_count(), disk_num); 7847 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7848 7849 for it in &disk_test_data { 7850 let disk_id = it[0].as_str(); 7851 let disk = it[1].as_str(); 7852 // Hotplug disk device 7853 let (cmd_success, cmd_output) = remote_command_w_output( 7854 &api_socket, 7855 "add-disk", 7856 Some(format!("path={disk},readonly=off").as_str()), 7857 ); 7858 assert!(cmd_success); 7859 assert!(String::from_utf8_lossy(&cmd_output) 7860 .contains(format!("\"id\":\"{disk_id}\"").as_str())); 7861 thread::sleep(std::time::Duration::new(5, 0)); 7862 // Online disk devices 7863 windows_guest.disks_set_rw(); 7864 windows_guest.disks_online(); 7865 } 7866 // Verify the devices are on the system 7867 let disk_num = (disk_test_data.len() + 1) as u8; 7868 assert_eq!(windows_guest.disk_count(), disk_num); 7869 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7870 7871 // Put test data 7872 for it in &disk_test_data { 7873 let fname = it[2].as_str(); 7874 let data = it[3].as_str(); 7875 windows_guest.disk_file_put(fname, data); 7876 } 7877 7878 // Unmount disk devices 7879 for it in &disk_test_data { 7880 let disk_id = it[0].as_str(); 7881 let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id)); 7882 assert!(cmd_success); 7883 thread::sleep(std::time::Duration::new(5, 0)); 7884 } 7885 7886 // Verify the devices have been removed 7887 let disk_num = 1; 7888 assert_eq!(windows_guest.disk_count(), disk_num); 7889 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7890 7891 // Remount 7892 for it in &disk_test_data { 7893 let disk = it[1].as_str(); 7894 let (cmd_success, _cmd_output) = 
remote_command_w_output( 7895 &api_socket, 7896 "add-disk", 7897 Some(format!("path={disk},readonly=off").as_str()), 7898 ); 7899 assert!(cmd_success); 7900 thread::sleep(std::time::Duration::new(5, 0)); 7901 } 7902 7903 // Check the files exists with the expected contents 7904 for it in &disk_test_data { 7905 let fname = it[2].as_str(); 7906 let data = it[3].as_str(); 7907 let out = windows_guest.disk_file_read(fname); 7908 assert_eq!(data, out.trim()); 7909 } 7910 7911 // Intentionally no unmount, it'll happen at shutdown. 7912 7913 windows_guest.shutdown(); 7914 }); 7915 7916 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7917 let _ = child.kill(); 7918 let output = child.wait_with_output().unwrap(); 7919 7920 let _ = child_dnsmasq.kill(); 7921 let _ = child_dnsmasq.wait(); 7922 7923 handle_child_output(r, &output); 7924 } 7925 7926 #[test] 7927 #[cfg(not(feature = "mshv"))] 7928 #[cfg(not(target_arch = "aarch64"))] 7929 fn test_windows_guest_netdev_multi() { 7930 let windows_guest = WindowsGuest::new(); 7931 7932 let mut ovmf_path = dirs::home_dir().unwrap(); 7933 ovmf_path.push("workloads"); 7934 ovmf_path.push(OVMF_NAME); 7935 7936 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7937 let api_socket = temp_api_path(&tmp_dir); 7938 7939 let mut child = GuestCommand::new(windows_guest.guest()) 7940 .args(["--api-socket", &api_socket]) 7941 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7942 .args(["--memory", "size=4G"]) 7943 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7944 .args(["--serial", "tty"]) 7945 .args(["--console", "off"]) 7946 .default_disks() 7947 // The multi net dev config is borrowed from test_multiple_network_interfaces 7948 .args([ 7949 "--net", 7950 windows_guest.guest().default_net_string().as_str(), 7951 "--net", 7952 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 7953 "--net", 7954 "tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 7955 ]) 7956 .capture_output() 7957 .spawn() 7958 .unwrap(); 7959 7960 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7961 7962 let r = std::panic::catch_unwind(|| { 7963 // Wait to make sure Windows boots up 7964 assert!(windows_guest.wait_for_boot()); 7965 7966 let netdev_num = 3; 7967 assert_eq!(windows_guest.netdev_count(), netdev_num); 7968 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 7969 7970 let tap_count = exec_host_command_output("ip link | grep -c mytap42"); 7971 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 7972 7973 windows_guest.shutdown(); 7974 }); 7975 7976 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7977 let _ = child.kill(); 7978 let output = child.wait_with_output().unwrap(); 7979 7980 let _ = child_dnsmasq.kill(); 7981 let _ = child_dnsmasq.wait(); 7982 7983 handle_child_output(r, &output); 7984 } 7985 } 7986 7987 #[cfg(target_arch = "x86_64")] 7988 mod sgx { 7989 use crate::*; 7990 7991 #[test] 7992 fn test_sgx() { 7993 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 7994 let jammy = UbuntuDiskConfig::new(jammy_image); 7995 let guest = Guest::new(Box::new(jammy)); 7996 7997 let mut child = GuestCommand::new(&guest) 7998 .args(["--cpus", "boot=1"]) 7999 .args(["--memory", "size=512M"]) 8000 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8001 .default_disks() 8002 .default_net() 8003 .args(["--sgx-epc", "id=epc0,size=64M"]) 8004 .capture_output() 8005 .spawn() 8006 .unwrap(); 8007 8008 let r = std::panic::catch_unwind(|| { 8009 guest.wait_vm_boot(None).unwrap(); 8010 
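// Note: `cpuid -l 0x12 -s 2` reads the first SGX EPC section enumeration leaf;
// 0x0000000004000000 bytes matches the 64M requested via `--sgx-epc` above.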
8011 // Check if SGX is correctly detected in the guest. 8012 guest.check_sgx_support().unwrap(); 8013 8014 // Validate the SGX EPC section is 64MiB. 8015 assert_eq!( 8016 guest 8017 .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2") 8018 .unwrap() 8019 .trim(), 8020 "0x0000000004000000" 8021 ); 8022 }); 8023 8024 let _ = child.kill(); 8025 let output = child.wait_with_output().unwrap(); 8026 8027 handle_child_output(r, &output); 8028 } 8029 } 8030 8031 #[cfg(target_arch = "x86_64")] 8032 mod vfio { 8033 use crate::*; 8034 8035 #[test] 8036 // The VFIO integration test starts a cloud-hypervisor guest with 3 TAP 8037 // backed networking interfaces, bound through a simple bridge on the host. 8038 // So if the nested cloud-hypervisor succeeds in getting a directly 8039 // assigned interface from its cloud-hypervisor host, we should be able to 8040 // ssh into it, and verify that it's running with the right kernel command 8041 // line (We tag the command line from cloud-hypervisor for that purpose). 8042 // The third device is added to validate that hotplug works correctly since 8043 // it is being added to the L2 VM through the hotplug mechanism. 8044 // Also, we pass through a virtio-blk device to the L2 VM to test the 32-bit 8045 // vfio device support 8046 fn test_vfio() { 8047 setup_vfio_network_interfaces(); 8048 8049 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 8050 let guest = Guest::new_from_ip_range(Box::new(focal), "172.18", 0); 8051 8052 let mut workload_path = dirs::home_dir().unwrap(); 8053 workload_path.push("workloads"); 8054 8055 let kernel_path = direct_kernel_boot_path(); 8056 8057 let mut vfio_path = workload_path.clone(); 8058 vfio_path.push("vfio"); 8059 8060 let mut cloud_init_vfio_base_path = vfio_path.clone(); 8061 cloud_init_vfio_base_path.push("cloudinit.img"); 8062 8063 // We copy our cloudinit into the vfio mount point, for the nested 8064 // cloud-hypervisor guest to use.
8065 rate_limited_copy( 8066 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 8067 &cloud_init_vfio_base_path, 8068 ) 8069 .expect("copying of cloud-init disk failed"); 8070 8071 let mut vfio_disk_path = workload_path.clone(); 8072 vfio_disk_path.push("vfio.img"); 8073 8074 // Create the vfio disk image 8075 let output = Command::new("mkfs.ext4") 8076 .arg("-d") 8077 .arg(vfio_path.to_str().unwrap()) 8078 .arg(vfio_disk_path.to_str().unwrap()) 8079 .arg("2g") 8080 .output() 8081 .unwrap(); 8082 if !output.status.success() { 8083 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 8084 panic!("mkfs.ext4 command generated an error"); 8085 } 8086 8087 let mut blk_file_path = workload_path; 8088 blk_file_path.push("blk.img"); 8089 8090 let vfio_tap0 = "vfio-tap0"; 8091 let vfio_tap1 = "vfio-tap1"; 8092 let vfio_tap2 = "vfio-tap2"; 8093 let vfio_tap3 = "vfio-tap3"; 8094 8095 let mut child = GuestCommand::new(&guest) 8096 .args(["--cpus", "boot=4"]) 8097 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 8098 .args(["--kernel", kernel_path.to_str().unwrap()]) 8099 .args([ 8100 "--disk", 8101 format!( 8102 "path={}", 8103 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 8104 ) 8105 .as_str(), 8106 "--disk", 8107 format!( 8108 "path={}", 8109 guest.disk_config.disk(DiskType::CloudInit).unwrap() 8110 ) 8111 .as_str(), 8112 "--disk", 8113 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 8114 "--disk", 8115 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 8116 ]) 8117 .args([ 8118 "--cmdline", 8119 format!( 8120 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 8121 ) 8122 .as_str(), 8123 ]) 8124 .args([ 8125 "--net", 8126 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 8127 "--net", 8128 format!( 8129 "tap={},mac={},iommu=on", 8130 vfio_tap1, guest.network.l2_guest_mac1 8131 ) 8132 .as_str(), 8133 "--net", 8134 format!( 8135 "tap={},mac={},iommu=on", 8136 vfio_tap2, guest.network.l2_guest_mac2 8137 ) 8138 .as_str(), 8139 "--net", 8140 format!( 8141 "tap={},mac={},iommu=on", 8142 vfio_tap3, guest.network.l2_guest_mac3 8143 ) 8144 .as_str(), 8145 ]) 8146 .capture_output() 8147 .spawn() 8148 .unwrap(); 8149 8150 thread::sleep(std::time::Duration::new(30, 0)); 8151 8152 let r = std::panic::catch_unwind(|| { 8153 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 8154 thread::sleep(std::time::Duration::new(120, 0)); 8155 8156 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 8157 // added to its kernel command line. 8158 // Let's ssh into it and verify that it's there. If it is it means 8159 // we're in the right guest (The L2 one) because the QEMU L1 guest 8160 // does not have this command line tag. 8161 assert_eq!( 8162 guest 8163 .ssh_command_l2_1("grep -c VFIOTAG /proc/cmdline") 8164 .unwrap() 8165 .trim() 8166 .parse::<u32>() 8167 .unwrap_or_default(), 8168 1 8169 ); 8170 8171 // Let's also verify from the second virtio-net device passed to 8172 // the L2 VM. 8173 assert_eq!( 8174 guest 8175 .ssh_command_l2_2("grep -c VFIOTAG /proc/cmdline") 8176 .unwrap() 8177 .trim() 8178 .parse::<u32>() 8179 .unwrap_or_default(), 8180 1 8181 ); 8182 8183 // Check the amount of PCI devices appearing in L2 VM. 
8184 assert_eq!( 8185 guest 8186 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8187 .unwrap() 8188 .trim() 8189 .parse::<u32>() 8190 .unwrap_or_default(), 8191 8, 8192 ); 8193 8194 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 8195 assert_eq!( 8196 guest 8197 .ssh_command_l2_1("lsblk | grep vdc | grep -c 16M") 8198 .unwrap() 8199 .trim() 8200 .parse::<u32>() 8201 .unwrap_or_default(), 8202 1 8203 ); 8204 8205 // Hotplug an extra virtio-net device through L2 VM. 8206 guest 8207 .ssh_command_l1( 8208 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 8209 ) 8210 .unwrap(); 8211 guest 8212 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 8213 .unwrap(); 8214 let vfio_hotplug_output = guest 8215 .ssh_command_l1( 8216 "sudo /mnt/ch-remote \ 8217 --api-socket /tmp/ch_api.sock \ 8218 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 8219 ) 8220 .unwrap(); 8221 assert!(vfio_hotplug_output.contains("{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}")); 8222 8223 thread::sleep(std::time::Duration::new(10, 0)); 8224 8225 // Let's also verify from the third virtio-net device passed to 8226 // the L2 VM. This third device has been hotplugged through the L2 8227 // VM, so this is our way to validate hotplug works for VFIO PCI. 8228 assert_eq!( 8229 guest 8230 .ssh_command_l2_3("grep -c VFIOTAG /proc/cmdline") 8231 .unwrap() 8232 .trim() 8233 .parse::<u32>() 8234 .unwrap_or_default(), 8235 1 8236 ); 8237 8238 // Check the amount of PCI devices appearing in L2 VM. 8239 // There should be one more device than before, raising the count 8240 // up to 9 PCI devices. 8241 assert_eq!( 8242 guest 8243 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8244 .unwrap() 8245 .trim() 8246 .parse::<u32>() 8247 .unwrap_or_default(), 8248 9, 8249 ); 8250 8251 // Let's now verify that we can correctly remove the virtio-net 8252 // device through the "remove-device" command responsible for 8253 // unplugging VFIO devices. 8254 guest 8255 .ssh_command_l1( 8256 "sudo /mnt/ch-remote \ 8257 --api-socket /tmp/ch_api.sock \ 8258 remove-device vfio123", 8259 ) 8260 .unwrap(); 8261 thread::sleep(std::time::Duration::new(10, 0)); 8262 8263 // Check the amount of PCI devices appearing in L2 VM is back down 8264 // to 8 devices. 8265 assert_eq!( 8266 guest 8267 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8268 .unwrap() 8269 .trim() 8270 .parse::<u32>() 8271 .unwrap_or_default(), 8272 8, 8273 ); 8274 8275 // Perform memory hotplug in L2 and validate the memory is showing 8276 // up as expected. In order to check, we will use the virtio-net 8277 // device already passed through L2 as a VFIO device, this will 8278 // verify that VFIO devices are functional with memory hotplug. 
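// The resize below requests 1073741824 bytes (1 GiB) of RAM for the L2 guest,
// which is what the follow-up `> 960_000` total-memory check expects to see.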
8279 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 8280 guest 8281 .ssh_command_l2_1( 8282 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 8283 ) 8284 .unwrap(); 8285 guest 8286 .ssh_command_l1( 8287 "sudo /mnt/ch-remote \ 8288 --api-socket /tmp/ch_api.sock \ 8289 resize --memory=1073741824", 8290 ) 8291 .unwrap(); 8292 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 8293 }); 8294 8295 let _ = child.kill(); 8296 let output = child.wait_with_output().unwrap(); 8297 8298 cleanup_vfio_network_interfaces(); 8299 8300 handle_child_output(r, &output); 8301 } 8302 8303 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) { 8304 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8305 let guest = Guest::new(Box::new(jammy)); 8306 let api_socket = temp_api_path(&guest.tmp_dir); 8307 8308 let mut child = GuestCommand::new(&guest) 8309 .args(["--cpus", "boot=4"]) 8310 .args([ 8311 "--memory", 8312 format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(), 8313 ]) 8314 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8315 .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"]) 8316 .args(["--api-socket", &api_socket]) 8317 .default_disks() 8318 .default_net() 8319 .capture_output() 8320 .spawn() 8321 .unwrap(); 8322 8323 let r = std::panic::catch_unwind(|| { 8324 guest.wait_vm_boot(None).unwrap(); 8325 8326 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8327 8328 guest.enable_memory_hotplug(); 8329 8330 // Add RAM to the VM 8331 let desired_ram = 6 << 30; 8332 resize_command(&api_socket, None, Some(desired_ram), None, None); 8333 thread::sleep(std::time::Duration::new(30, 0)); 8334 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8335 8336 // Check the VFIO device works when RAM is increased to 6GiB 8337 guest.check_nvidia_gpu(); 8338 }); 8339 8340 let _ = child.kill(); 8341 let output = child.wait_with_output().unwrap(); 8342 8343 handle_child_output(r, &output); 8344 } 8345 8346 #[test] 8347 fn test_nvidia_card_memory_hotplug_acpi() { 8348 test_nvidia_card_memory_hotplug("acpi") 8349 } 8350 8351 #[test] 8352 fn test_nvidia_card_memory_hotplug_virtio_mem() { 8353 test_nvidia_card_memory_hotplug("virtio-mem") 8354 } 8355 8356 #[test] 8357 fn test_nvidia_card_pci_hotplug() { 8358 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8359 let guest = Guest::new(Box::new(jammy)); 8360 let api_socket = temp_api_path(&guest.tmp_dir); 8361 8362 let mut child = GuestCommand::new(&guest) 8363 .args(["--cpus", "boot=4"]) 8364 .args(["--memory", "size=4G"]) 8365 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8366 .args(["--api-socket", &api_socket]) 8367 .default_disks() 8368 .default_net() 8369 .capture_output() 8370 .spawn() 8371 .unwrap(); 8372 8373 let r = std::panic::catch_unwind(|| { 8374 guest.wait_vm_boot(None).unwrap(); 8375 8376 // Hotplug the card to the VM 8377 let (cmd_success, cmd_output) = remote_command_w_output( 8378 &api_socket, 8379 "add-device", 8380 Some("id=vfio0,path=/sys/bus/pci/devices/0000:31:00.0/"), 8381 ); 8382 assert!(cmd_success); 8383 assert!(String::from_utf8_lossy(&cmd_output) 8384 .contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}")); 8385 8386 thread::sleep(std::time::Duration::new(10, 0)); 8387 8388 // Check the VFIO device works after hotplug 8389 guest.check_nvidia_gpu(); 8390 }); 8391 8392 let _ = child.kill(); 8393 let output = 
child.wait_with_output().unwrap(); 8394 8395 handle_child_output(r, &output); 8396 } 8397 8398 #[test] 8399 fn test_nvidia_card_reboot() { 8400 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8401 let guest = Guest::new(Box::new(jammy)); 8402 let api_socket = temp_api_path(&guest.tmp_dir); 8403 8404 let mut child = GuestCommand::new(&guest) 8405 .args(["--cpus", "boot=4"]) 8406 .args(["--memory", "size=4G"]) 8407 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8408 .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"]) 8409 .args(["--api-socket", &api_socket]) 8410 .default_disks() 8411 .default_net() 8412 .capture_output() 8413 .spawn() 8414 .unwrap(); 8415 8416 let r = std::panic::catch_unwind(|| { 8417 guest.wait_vm_boot(None).unwrap(); 8418 8419 // Check the VFIO device works after boot 8420 guest.check_nvidia_gpu(); 8421 8422 guest.reboot_linux(0, None); 8423 8424 // Check the VFIO device works after reboot 8425 guest.check_nvidia_gpu(); 8426 }); 8427 8428 let _ = child.kill(); 8429 let output = child.wait_with_output().unwrap(); 8430 8431 handle_child_output(r, &output); 8432 } 8433 } 8434 8435 mod live_migration { 8436 use crate::*; 8437 8438 fn start_live_migration( 8439 migration_socket: &str, 8440 src_api_socket: &str, 8441 dest_api_socket: &str, 8442 local: bool, 8443 ) -> bool { 8444 // Start receiving the migration on the destination VM 8445 let mut receive_migration = Command::new(clh_command("ch-remote")) 8446 .args([ 8447 "--api-socket", 8448 dest_api_socket, 8449 "receive-migration", 8450 &format! {"unix:{migration_socket}"}, 8451 ]) 8452 .stderr(Stdio::piped()) 8453 .stdout(Stdio::piped()) 8454 .spawn() 8455 .unwrap(); 8456 // Give it '1s' to make sure the 'migration_socket' file is properly created 8457 thread::sleep(std::time::Duration::new(1, 0)); 8458 // Start sending the migration from the source VM 8459 8460 let mut args = [ 8461 "--api-socket".to_string(), 8462 src_api_socket.to_string(), 8463 "send-migration".to_string(), 8464 format!
{"unix:{migration_socket}"}, 8465 ] 8466 .to_vec(); 8467 8468 if local { 8469 args.insert(3, "--local".to_string()); 8470 } 8471 8472 let mut send_migration = Command::new(clh_command("ch-remote")) 8473 .args(&args) 8474 .stderr(Stdio::piped()) 8475 .stdout(Stdio::piped()) 8476 .spawn() 8477 .unwrap(); 8478 8479 // The 'send-migration' command should be executed successfully within the given timeout 8480 let send_success = if let Some(status) = send_migration 8481 .wait_timeout(std::time::Duration::from_secs(30)) 8482 .unwrap() 8483 { 8484 status.success() 8485 } else { 8486 false 8487 }; 8488 8489 if !send_success { 8490 let _ = send_migration.kill(); 8491 let output = send_migration.wait_with_output().unwrap(); 8492 eprintln!("\n\n==== Start 'send_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'send_migration' output ====\n\n", 8493 String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr)); 8494 } 8495 8496 // The 'receive-migration' command should be executed successfully within the given timeout 8497 let receive_success = if let Some(status) = receive_migration 8498 .wait_timeout(std::time::Duration::from_secs(30)) 8499 .unwrap() 8500 { 8501 status.success() 8502 } else { 8503 false 8504 }; 8505 8506 if !receive_success { 8507 let _ = receive_migration.kill(); 8508 let output = receive_migration.wait_with_output().unwrap(); 8509 eprintln!("\n\n==== Start 'receive_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'receive_migration' output ====\n\n", 8510 String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr)); 8511 } 8512 8513 send_success && receive_success 8514 } 8515 8516 fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! { 8517 let mut src_vm = src_vm; 8518 let mut dest_vm = dest_vm; 8519 8520 let _ = src_vm.kill(); 8521 let src_output = src_vm.wait_with_output().unwrap(); 8522 eprintln!( 8523 "\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====", 8524 String::from_utf8_lossy(&src_output.stdout) 8525 ); 8526 eprintln!( 8527 "\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====", 8528 String::from_utf8_lossy(&src_output.stderr) 8529 ); 8530 let _ = dest_vm.kill(); 8531 let dest_output = dest_vm.wait_with_output().unwrap(); 8532 eprintln!( 8533 "\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====", 8534 String::from_utf8_lossy(&dest_output.stdout) 8535 ); 8536 eprintln!( 8537 "\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====", 8538 String::from_utf8_lossy(&dest_output.stderr) 8539 ); 8540 8541 if let Some(ovs_vm) = ovs_vm { 8542 let mut ovs_vm = ovs_vm; 8543 let _ = ovs_vm.kill(); 8544 let ovs_output = ovs_vm.wait_with_output().unwrap(); 8545 eprintln!( 8546 "\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====", 8547 String::from_utf8_lossy(&ovs_output.stdout) 8548 ); 8549 eprintln!( 8550 "\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====", 8551 String::from_utf8_lossy(&ovs_output.stderr) 8552 ); 8553 8554 cleanup_ovs_dpdk(); 8555 } 8556 8557 panic!("Test failed: {message}") 8558 } 8559 8560 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 8561 // same host. It ensures the following behaviors: 8562 // 1. The source VM is up and functional (including various virtio-devices are working properly); 8563 // 2. 
The 'send-migration' and 'receive-migration' commands finished successfully; 8564 // 3. The source VM terminated gracefully after live migration; 8565 // 4. The destination VM is functional (including various virtio-devices are working properly) after 8566 // live migration; 8567 // Note: This test does not use vsock as we can't create two identical vsock on the same host. 8568 fn _test_live_migration(upgrade_test: bool, local: bool) { 8569 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 8570 let guest = Guest::new(Box::new(focal)); 8571 let kernel_path = direct_kernel_boot_path(); 8572 let console_text = String::from("On a branch floating down river a cricket, singing."); 8573 let net_id = "net123"; 8574 let net_params = format!( 8575 "id={},tap=,mac={},ip={},mask=255.255.255.0", 8576 net_id, guest.network.guest_mac, guest.network.host_ip 8577 ); 8578 8579 let memory_param: &[&str] = if local { 8580 &["--memory", "size=4G,shared=on"] 8581 } else { 8582 &["--memory", "size=4G"] 8583 }; 8584 8585 let boot_vcpus = 2; 8586 let max_vcpus = 4; 8587 8588 let pmem_temp_file = TempFile::new().unwrap(); 8589 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 8590 std::process::Command::new("mkfs.ext4") 8591 .arg(pmem_temp_file.as_path()) 8592 .output() 8593 .expect("Expect creating disk image to succeed"); 8594 let pmem_path = String::from("/dev/pmem0"); 8595 8596 // Start the source VM 8597 let src_vm_path = if !upgrade_test { 8598 clh_command("cloud-hypervisor") 8599 } else { 8600 cloud_hypervisor_release_path() 8601 }; 8602 let src_api_socket = temp_api_path(&guest.tmp_dir); 8603 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 8604 src_vm_cmd 8605 .args([ 8606 "--cpus", 8607 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 8608 ]) 8609 .args(memory_param) 8610 .args(["--kernel", kernel_path.to_str().unwrap()]) 8611 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 8612 .default_disks() 8613 .args(["--net", net_params.as_str()]) 8614 .args(["--api-socket", &src_api_socket]) 8615 .args([ 8616 "--pmem", 8617 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 8618 ]); 8619 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 8620 8621 // Start the destination VM 8622 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 8623 dest_api_socket.push_str(".dest"); 8624 let mut dest_child = GuestCommand::new(&guest) 8625 .args(["--api-socket", &dest_api_socket]) 8626 .capture_output() 8627 .spawn() 8628 .unwrap(); 8629 8630 let r = std::panic::catch_unwind(|| { 8631 guest.wait_vm_boot(None).unwrap(); 8632 8633 // Make sure the source VM is functional 8634 // Check the number of vCPUs 8635 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8636 8637 // Check the guest RAM 8638 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8639 8640 // Check the guest virtio-devices, e.g. block, rng, console, and net 8641 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8642 8643 // x86_64: Following what's done in the `test_snapshot_restore`, we need 8644 // to make sure that removing and adding back the virtio-net device does 8645 // not break the live-migration support for virtio-pci.
8646 #[cfg(target_arch = "x86_64")] 8647 { 8648 assert!(remote_command( 8649 &src_api_socket, 8650 "remove-device", 8651 Some(net_id), 8652 )); 8653 thread::sleep(std::time::Duration::new(10, 0)); 8654 8655 // Plug the virtio-net device again 8656 assert!(remote_command( 8657 &src_api_socket, 8658 "add-net", 8659 Some(net_params.as_str()), 8660 )); 8661 thread::sleep(std::time::Duration::new(10, 0)); 8662 } 8663 8664 // Start the live-migration 8665 let migration_socket = String::from( 8666 guest 8667 .tmp_dir 8668 .as_path() 8669 .join("live-migration.sock") 8670 .to_str() 8671 .unwrap(), 8672 ); 8673 8674 assert!( 8675 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 8676 "Unsuccessful command: 'send-migration' or 'receive-migration'." 8677 ); 8678 }); 8679 8680 // Check and report any errors that occurred during the live-migration 8681 if r.is_err() { 8682 print_and_panic( 8683 src_child, 8684 dest_child, 8685 None, 8686 "Error occurred during live-migration", 8687 ); 8688 } 8689 8690 // Check the source VM has been terminated successfully (give it '3s' to settle) 8691 thread::sleep(std::time::Duration::new(3, 0)); 8692 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 8693 print_and_panic( 8694 src_child, 8695 dest_child, 8696 None, 8697 "source VM was not terminated successfully.", 8698 ); 8699 }; 8700 8701 // Post live-migration check to make sure the destination VM is functional 8702 let r = std::panic::catch_unwind(|| { 8703 // Perform same checks to validate VM has been properly migrated 8704 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8705 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8706 8707 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8708 }); 8709 8710 // Clean-up the destination VM and make sure it terminated correctly 8711 let _ = dest_child.kill(); 8712 let dest_output = dest_child.wait_with_output().unwrap(); 8713 handle_child_output(r, &dest_output); 8714 8715 // Check the destination VM has the expected 'console_text' from its output 8716 let r = std::panic::catch_unwind(|| { 8717 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 8718 }); 8719 handle_child_output(r, &dest_output); 8720 } 8721 8722 fn _test_live_migration_balloon(upgrade_test: bool, local: bool) { 8723 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 8724 let guest = Guest::new(Box::new(focal)); 8725 let kernel_path = direct_kernel_boot_path(); 8726 let console_text = String::from("On a branch floating down river a cricket, singing."); 8727 let net_id = "net123"; 8728 let net_params = format!( 8729 "id={},tap=,mac={},ip={},mask=255.255.255.0", 8730 net_id, guest.network.guest_mac, guest.network.host_ip 8731 ); 8732 8733 let memory_param: &[&str] = if local { 8734 &[ 8735 "--memory", 8736 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on", 8737 "--balloon", 8738 "size=0", 8739 ] 8740 } else { 8741 &[ 8742 "--memory", 8743 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G", 8744 "--balloon", 8745 "size=0", 8746 ] 8747 }; 8748 8749 let boot_vcpus = 2; 8750 let max_vcpus = 4; 8751 8752 let pmem_temp_file = TempFile::new().unwrap(); 8753 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 8754 std::process::Command::new("mkfs.ext4") 8755 .arg(pmem_temp_file.as_path()) 8756 .output() 8757 .expect("Expect creating disk image to succeed"); 8758 let pmem_path = String::from("/dev/pmem0"); 8759 8760 // Start the source VM 8761 let src_vm_path
= if !upgrade_test { 8762 clh_command("cloud-hypervisor") 8763 } else { 8764 cloud_hypervisor_release_path() 8765 }; 8766 let src_api_socket = temp_api_path(&guest.tmp_dir); 8767 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 8768 src_vm_cmd 8769 .args([ 8770 "--cpus", 8771 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 8772 ]) 8773 .args(memory_param) 8774 .args(["--kernel", kernel_path.to_str().unwrap()]) 8775 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 8776 .default_disks() 8777 .args(["--net", net_params.as_str()]) 8778 .args(["--api-socket", &src_api_socket]) 8779 .args([ 8780 "--pmem", 8781 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 8782 ]); 8783 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 8784 8785 // Start the destination VM 8786 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 8787 dest_api_socket.push_str(".dest"); 8788 let mut dest_child = GuestCommand::new(&guest) 8789 .args(["--api-socket", &dest_api_socket]) 8790 .capture_output() 8791 .spawn() 8792 .unwrap(); 8793 8794 let r = std::panic::catch_unwind(|| { 8795 guest.wait_vm_boot(None).unwrap(); 8796 8797 // Make sure the source VM is functaionl 8798 // Check the number of vCPUs 8799 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8800 8801 // Check the guest RAM 8802 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8803 // Increase the guest RAM 8804 resize_command(&src_api_socket, None, Some(6 << 30), None, None); 8805 thread::sleep(std::time::Duration::new(5, 0)); 8806 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8807 // Use balloon to remove RAM from the VM 8808 resize_command(&src_api_socket, None, None, Some(1 << 30), None); 8809 thread::sleep(std::time::Duration::new(5, 0)); 8810 let total_memory = guest.get_total_memory().unwrap_or_default(); 8811 assert!(total_memory > 4_800_000); 8812 assert!(total_memory < 5_760_000); 8813 8814 // Check the guest virtio-devices, e.g. block, rng, console, and net 8815 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8816 8817 // x86_64: Following what's done in the `test_snapshot_restore`, we need 8818 // to make sure that removing and adding back the virtio-net device does 8819 // not break the live-migration support for virtio-pci. 8820 #[cfg(target_arch = "x86_64")] 8821 { 8822 assert!(remote_command( 8823 &src_api_socket, 8824 "remove-device", 8825 Some(net_id), 8826 )); 8827 thread::sleep(std::time::Duration::new(10, 0)); 8828 8829 // Plug the virtio-net device again 8830 assert!(remote_command( 8831 &src_api_socket, 8832 "add-net", 8833 Some(net_params.as_str()), 8834 )); 8835 thread::sleep(std::time::Duration::new(10, 0)); 8836 } 8837 8838 // Start the live-migration 8839 let migration_socket = String::from( 8840 guest 8841 .tmp_dir 8842 .as_path() 8843 .join("live-migration.sock") 8844 .to_str() 8845 .unwrap(), 8846 ); 8847 8848 assert!( 8849 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 8850 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
8851 ); 8852 }); 8853 8854 // Check and report any errors that occurred during the live-migration 8855 if r.is_err() { 8856 print_and_panic( 8857 src_child, 8858 dest_child, 8859 None, 8860 "Error occurred during live-migration", 8861 ); 8862 } 8863 8864 // Check the source VM has been terminated successfully (give it '3s' to settle) 8865 thread::sleep(std::time::Duration::new(3, 0)); 8866 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 8867 print_and_panic( 8868 src_child, 8869 dest_child, 8870 None, 8871 "source VM was not terminated successfully.", 8872 ); 8873 }; 8874 8875 // Post live-migration check to make sure the destination VM is functional 8876 let r = std::panic::catch_unwind(|| { 8877 // Perform same checks to validate VM has been properly migrated 8878 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8879 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8880 8881 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8882 8883 // Perform checks on guest RAM using balloon 8884 let total_memory = guest.get_total_memory().unwrap_or_default(); 8885 assert!(total_memory > 4_800_000); 8886 assert!(total_memory < 5_760_000); 8887 // Deflate balloon to restore entire RAM to the VM 8888 resize_command(&dest_api_socket, None, None, Some(0), None); 8889 thread::sleep(std::time::Duration::new(5, 0)); 8890 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8891 // Decrease guest RAM with virtio-mem 8892 resize_command(&dest_api_socket, None, Some(5 << 30), None, None); 8893 thread::sleep(std::time::Duration::new(5, 0)); 8894 let total_memory = guest.get_total_memory().unwrap_or_default(); 8895 assert!(total_memory > 4_800_000); 8896 assert!(total_memory < 5_760_000); 8897 }); 8898 8899 // Clean-up the destination VM and make sure it terminated correctly 8900 let _ = dest_child.kill(); 8901 let dest_output = dest_child.wait_with_output().unwrap(); 8902 handle_child_output(r, &dest_output); 8903 8904 // Check the destination VM has the expected 'console_text' from its output 8905 let r = std::panic::catch_unwind(|| { 8906 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 8907 }); 8908 handle_child_output(r, &dest_output); 8909 } 8910 8911 fn _test_live_migration_numa(upgrade_test: bool, local: bool) { 8912 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 8913 let guest = Guest::new(Box::new(focal)); 8914 let kernel_path = direct_kernel_boot_path(); 8915 let console_text = String::from("On a branch floating down river a cricket, singing."); 8916 let net_id = "net123"; 8917 let net_params = format!( 8918 "id={},tap=,mac={},ip={},mask=255.255.255.0", 8919 net_id, guest.network.guest_mac, guest.network.host_ip 8920 ); 8921 8922 let memory_param: &[&str] = if local { 8923 &[ 8924 "--memory", 8925 "size=0,hotplug_method=virtio-mem,shared=on", 8926 "--memory-zone", 8927 "id=mem0,size=1G,hotplug_size=4G,shared=on", 8928 "--memory-zone", 8929 "id=mem1,size=1G,hotplug_size=4G,shared=on", 8930 "--memory-zone", 8931 "id=mem2,size=2G,hotplug_size=4G,shared=on", 8932 "--numa", 8933 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 8934 "--numa", 8935 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 8936 "--numa", 8937 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 8938 ] 8939 } else { 8940 &[ 8941 "--memory", 8942 "size=0,hotplug_method=virtio-mem", 8943 "--memory-zone", 8944
"id=mem0,size=1G,hotplug_size=4G", 8945 "--memory-zone", 8946 "id=mem1,size=1G,hotplug_size=4G", 8947 "--memory-zone", 8948 "id=mem2,size=2G,hotplug_size=4G", 8949 "--numa", 8950 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 8951 "--numa", 8952 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 8953 "--numa", 8954 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 8955 ] 8956 }; 8957 8958 let boot_vcpus = 6; 8959 let max_vcpus = 12; 8960 8961 let pmem_temp_file = TempFile::new().unwrap(); 8962 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 8963 std::process::Command::new("mkfs.ext4") 8964 .arg(pmem_temp_file.as_path()) 8965 .output() 8966 .expect("Expect creating disk image to succeed"); 8967 let pmem_path = String::from("/dev/pmem0"); 8968 8969 // Start the source VM 8970 let src_vm_path = if !upgrade_test { 8971 clh_command("cloud-hypervisor") 8972 } else { 8973 cloud_hypervisor_release_path() 8974 }; 8975 let src_api_socket = temp_api_path(&guest.tmp_dir); 8976 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 8977 src_vm_cmd 8978 .args([ 8979 "--cpus", 8980 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 8981 ]) 8982 .args(memory_param) 8983 .args(["--kernel", kernel_path.to_str().unwrap()]) 8984 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 8985 .default_disks() 8986 .args(["--net", net_params.as_str()]) 8987 .args(["--api-socket", &src_api_socket]) 8988 .args([ 8989 "--pmem", 8990 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 8991 ]); 8992 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 8993 8994 // Start the destination VM 8995 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 8996 dest_api_socket.push_str(".dest"); 8997 let mut dest_child = GuestCommand::new(&guest) 8998 .args(["--api-socket", &dest_api_socket]) 8999 .capture_output() 9000 .spawn() 9001 .unwrap(); 9002 9003 let r = std::panic::catch_unwind(|| { 9004 guest.wait_vm_boot(None).unwrap(); 9005 9006 // Make sure the source VM is functaionl 9007 // Check the number of vCPUs 9008 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9009 9010 // Check the guest RAM 9011 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 9012 9013 // Check the guest virtio-devices, e.g. block, rng, console, and net 9014 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9015 9016 // Check the NUMA parameters are applied correctly and resize 9017 // each zone to test the case where we migrate a VM with the 9018 // virtio-mem regions being used. 9019 { 9020 guest.check_numa_common( 9021 Some(&[960_000, 960_000, 1_920_000]), 9022 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9023 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9024 ); 9025 9026 // AArch64 currently does not support hotplug, and therefore we only 9027 // test hotplug-related function on x86_64 here. 9028 #[cfg(target_arch = "x86_64")] 9029 { 9030 guest.enable_memory_hotplug(); 9031 9032 // Resize every memory zone and check each associated NUMA node 9033 // has been assigned the right amount of memory. 
9034 resize_zone_command(&src_api_socket, "mem0", "2G"); 9035 resize_zone_command(&src_api_socket, "mem1", "2G"); 9036 resize_zone_command(&src_api_socket, "mem2", "3G"); 9037 thread::sleep(std::time::Duration::new(5, 0)); 9038 9039 guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None); 9040 } 9041 } 9042 9043 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9044 // to make sure that removing and adding back the virtio-net device does 9045 // not break the live-migration support for virtio-pci. 9046 #[cfg(target_arch = "x86_64")] 9047 { 9048 assert!(remote_command( 9049 &src_api_socket, 9050 "remove-device", 9051 Some(net_id), 9052 )); 9053 thread::sleep(std::time::Duration::new(10, 0)); 9054 9055 // Plug the virtio-net device again 9056 assert!(remote_command( 9057 &src_api_socket, 9058 "add-net", 9059 Some(net_params.as_str()), 9060 )); 9061 thread::sleep(std::time::Duration::new(10, 0)); 9062 } 9063 9064 // Start the live-migration 9065 let migration_socket = String::from( 9066 guest 9067 .tmp_dir 9068 .as_path() 9069 .join("live-migration.sock") 9070 .to_str() 9071 .unwrap(), 9072 ); 9073 9074 assert!( 9075 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9076 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9077 ); 9078 }); 9079 9080 // Check and report any errors that occurred during the live-migration 9081 if r.is_err() { 9082 print_and_panic( 9083 src_child, 9084 dest_child, 9085 None, 9086 "Error occurred during live-migration", 9087 ); 9088 } 9089 9090 // Check the source VM has been terminated successfully (give it '3s' to settle) 9091 thread::sleep(std::time::Duration::new(3, 0)); 9092 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9093 print_and_panic( 9094 src_child, 9095 dest_child, 9096 None, 9097 "source VM was not terminated successfully.", 9098 ); 9099 }; 9100 9101 // Post live-migration check to make sure the destination VM is functional 9102 let r = std::panic::catch_unwind(|| { 9103 // Perform same checks to validate VM has been properly migrated 9104 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9105 #[cfg(target_arch = "x86_64")] 9106 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 9107 #[cfg(target_arch = "aarch64")] 9108 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9109 9110 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9111 9112 // Perform NUMA related checks 9113 { 9114 #[cfg(target_arch = "aarch64")] 9115 { 9116 guest.check_numa_common( 9117 Some(&[960_000, 960_000, 1_920_000]), 9118 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9119 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9120 ); 9121 } 9122 9123 // AArch64 currently does not support hotplug, and therefore we only 9124 // test hotplug-related function on x86_64 here. 9125 #[cfg(target_arch = "x86_64")] 9126 { 9127 guest.check_numa_common( 9128 Some(&[1_920_000, 1_920_000, 2_880_000]), 9129 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9130 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9131 ); 9132 9133 guest.enable_memory_hotplug(); 9134 9135 // Resize every memory zone and check each associated NUMA node 9136 // has been assigned the right amount of memory.
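// On the destination VM, each zone is grown to its full 4G hotplug_size and the
// vCPU count is raised to max_vcpus before re-checking the NUMA layout.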
9137 resize_zone_command(&dest_api_socket, "mem0", "4G"); 9138 resize_zone_command(&dest_api_socket, "mem1", "4G"); 9139 resize_zone_command(&dest_api_socket, "mem2", "4G"); 9140 // Resize to the maximum number of CPUs and check each NUMA 9141 // node has been assigned the right CPUs set. 9142 resize_command(&dest_api_socket, Some(max_vcpus), None, None, None); 9143 thread::sleep(std::time::Duration::new(5, 0)); 9144 9145 guest.check_numa_common( 9146 Some(&[3_840_000, 3_840_000, 3_840_000]), 9147 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 9148 None, 9149 ); 9150 } 9151 } 9152 }); 9153 9154 // Clean-up the destination VM and make sure it terminated correctly 9155 let _ = dest_child.kill(); 9156 let dest_output = dest_child.wait_with_output().unwrap(); 9157 handle_child_output(r, &dest_output); 9158 9159 // Check the destination VM has the expected 'console_text' from its output 9160 let r = std::panic::catch_unwind(|| { 9161 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9162 }); 9163 handle_child_output(r, &dest_output); 9164 } 9165 9166 fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) { 9167 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9168 let guest = Guest::new(Box::new(focal)); 9169 let kernel_path = direct_kernel_boot_path(); 9170 let console_text = String::from("On a branch floating down river a cricket, singing."); 9171 let net_id = "net123"; 9172 let net_params = format!( 9173 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9174 net_id, guest.network.guest_mac, guest.network.host_ip 9175 ); 9176 9177 let memory_param: &[&str] = if local { 9178 &["--memory", "size=4G,shared=on"] 9179 } else { 9180 &["--memory", "size=4G"] 9181 }; 9182 9183 let boot_vcpus = 2; 9184 let max_vcpus = 4; 9185 9186 let pmem_temp_file = TempFile::new().unwrap(); 9187 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9188 std::process::Command::new("mkfs.ext4") 9189 .arg(pmem_temp_file.as_path()) 9190 .output() 9191 .expect("Expect creating disk image to succeed"); 9192 let pmem_path = String::from("/dev/pmem0"); 9193 9194 // Start the source VM 9195 let src_vm_path = if !upgrade_test { 9196 clh_command("cloud-hypervisor") 9197 } else { 9198 cloud_hypervisor_release_path() 9199 }; 9200 let src_api_socket = temp_api_path(&guest.tmp_dir); 9201 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9202 src_vm_cmd 9203 .args([ 9204 "--cpus", 9205 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9206 ]) 9207 .args(memory_param) 9208 .args(["--kernel", kernel_path.to_str().unwrap()]) 9209 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9210 .default_disks() 9211 .args(["--net", net_params.as_str()]) 9212 .args(["--api-socket", &src_api_socket]) 9213 .args([ 9214 "--pmem", 9215 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9216 ]) 9217 .args(["--watchdog"]); 9218 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9219 9220 // Start the destination VM 9221 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9222 dest_api_socket.push_str(".dest"); 9223 let mut dest_child = GuestCommand::new(&guest) 9224 .args(["--api-socket", &dest_api_socket]) 9225 .capture_output() 9226 .spawn() 9227 .unwrap(); 9228 9229 let r = std::panic::catch_unwind(|| { 9230 guest.wait_vm_boot(None).unwrap(); 9231 9232 // Make sure the source VM is functional 9233 // Check the number of vCPUs 9234 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9235 //
Check the guest RAM 9236 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9237 // Check the guest virtio-devices, e.g. block, rng, console, and net 9238 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9239 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9240 // to make sure that removing and adding back the virtio-net device does 9241 // not break the live-migration support for virtio-pci. 9242 #[cfg(target_arch = "x86_64")] 9243 { 9244 assert!(remote_command( 9245 &src_api_socket, 9246 "remove-device", 9247 Some(net_id), 9248 )); 9249 thread::sleep(std::time::Duration::new(10, 0)); 9250 9251 // Plug the virtio-net device again 9252 assert!(remote_command( 9253 &src_api_socket, 9254 "add-net", 9255 Some(net_params.as_str()), 9256 )); 9257 thread::sleep(std::time::Duration::new(10, 0)); 9258 } 9259 9260 // Enable the watchdog and ensure it's functional 9261 let mut expected_reboot_count = 1; 9262 // Enable the watchdog with a 15s timeout 9263 enable_guest_watchdog(&guest, 15); 9264 // Reboot and check that systemd has activated the watchdog 9265 guest.ssh_command("sudo reboot").unwrap(); 9266 guest.wait_vm_boot(None).unwrap(); 9267 expected_reboot_count += 1; 9268 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9269 assert_eq!( 9270 guest 9271 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 9272 .unwrap() 9273 .trim() 9274 .parse::<u32>() 9275 .unwrap_or_default(), 9276 2 9277 ); 9278 // Allow some normal time to elapse to check we don't get spurious reboots 9279 thread::sleep(std::time::Duration::new(40, 0)); 9280 // Check no reboot 9281 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9282 9283 // Start the live-migration 9284 let migration_socket = String::from( 9285 guest 9286 .tmp_dir 9287 .as_path() 9288 .join("live-migration.sock") 9289 .to_str() 9290 .unwrap(), 9291 ); 9292 9293 assert!( 9294 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9295 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9296 ); 9297 }); 9298 9299 // Check and report any errors that occurred during the live-migration 9300 if r.is_err() { 9301 print_and_panic( 9302 src_child, 9303 dest_child, 9304 None, 9305 "Error occurred during live-migration", 9306 ); 9307 } 9308 9309 // Check the source VM has been terminated successfully (give it '3s' to settle) 9310 thread::sleep(std::time::Duration::new(3, 0)); 9311 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9312 print_and_panic( 9313 src_child, 9314 dest_child, 9315 None, 9316 "source VM was not terminated successfully.", 9317 ); 9318 }; 9319 9320 // Post live-migration check to make sure the destination VM is functional 9321 let r = std::panic::catch_unwind(|| { 9322 // Perform same checks to validate VM has been properly migrated 9323 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9324 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9325 9326 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9327 9328 // Perform checks on watchdog 9329 let mut expected_reboot_count = 2; 9330 9331 // Allow some normal time to elapse to check we don't get spurious reboots 9332 thread::sleep(std::time::Duration::new(40, 0)); 9333 // Check no reboot 9334 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9335 9336 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns.
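// sysrq 's' syncs the filesystems and 'c' then crashes the kernel, so the guest
// stays down until the watchdog (armed with a 15s timeout above) resets it.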
9337 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 9338 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 9339 guest.wait_vm_boot(Some(50)).unwrap(); 9340 // Check a reboot is triggerred by the watchdog 9341 expected_reboot_count += 1; 9342 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9343 9344 #[cfg(target_arch = "x86_64")] 9345 { 9346 // Now pause the VM and remain offline for 30s 9347 assert!(remote_command(&dest_api_socket, "pause", None)); 9348 thread::sleep(std::time::Duration::new(30, 0)); 9349 assert!(remote_command(&dest_api_socket, "resume", None)); 9350 9351 // Check no reboot 9352 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9353 } 9354 }); 9355 9356 // Clean-up the destination VM and make sure it terminated correctly 9357 let _ = dest_child.kill(); 9358 let dest_output = dest_child.wait_with_output().unwrap(); 9359 handle_child_output(r, &dest_output); 9360 9361 // Check the destination VM has the expected 'concole_text' from its output 9362 let r = std::panic::catch_unwind(|| { 9363 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9364 }); 9365 handle_child_output(r, &dest_output); 9366 } 9367 9368 fn _test_live_migration_ovs_dpdk(upgrade_test: bool, local: bool) { 9369 let ovs_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9370 let ovs_guest = Guest::new(Box::new(ovs_focal)); 9371 9372 let migration_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9373 let migration_guest = Guest::new(Box::new(migration_focal)); 9374 let src_api_socket = temp_api_path(&migration_guest.tmp_dir); 9375 9376 // Start two VMs that are connected through ovs-dpdk and one of the VMs is the source VM for live-migration 9377 let (mut ovs_child, mut src_child) = 9378 setup_ovs_dpdk_guests(&ovs_guest, &migration_guest, &src_api_socket, upgrade_test); 9379 9380 // Start the destination VM 9381 let mut dest_api_socket = temp_api_path(&migration_guest.tmp_dir); 9382 dest_api_socket.push_str(".dest"); 9383 let mut dest_child = GuestCommand::new(&migration_guest) 9384 .args(["--api-socket", &dest_api_socket]) 9385 .capture_output() 9386 .spawn() 9387 .unwrap(); 9388 9389 let r = std::panic::catch_unwind(|| { 9390 // Give it '1s' to make sure the 'dest_api_socket' file is properly created 9391 thread::sleep(std::time::Duration::new(1, 0)); 9392 9393 // Start the live-migration 9394 let migration_socket = String::from( 9395 migration_guest 9396 .tmp_dir 9397 .as_path() 9398 .join("live-migration.sock") 9399 .to_str() 9400 .unwrap(), 9401 ); 9402 9403 assert!( 9404 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9405 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
9406 ); 9407 }); 9408 9409 // Check and report any errors that occurred during the live-migration 9410 if r.is_err() { 9411 print_and_panic( 9412 src_child, 9413 dest_child, 9414 Some(ovs_child), 9415 "Error occurred during live-migration", 9416 ); 9417 } 9418 9419 // Check the source VM has been terminated successfully (give it '3s' to settle) 9420 thread::sleep(std::time::Duration::new(3, 0)); 9421 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9422 print_and_panic( 9423 src_child, 9424 dest_child, 9425 Some(ovs_child), 9426 "source VM was not terminated successfully.", 9427 ); 9428 }; 9429 9430 // Post live-migration check to make sure the destination VM is functional 9431 let r = std::panic::catch_unwind(|| { 9432 // Perform same checks to validate VM has been properly migrated 9433 // Spawn a new netcat listener in the OVS VM 9434 let guest_ip = ovs_guest.network.guest_ip.clone(); 9435 thread::spawn(move || { 9436 ssh_command_ip( 9437 "nc -l 12345", 9438 &guest_ip, 9439 DEFAULT_SSH_RETRIES, 9440 DEFAULT_SSH_TIMEOUT, 9441 ) 9442 .unwrap(); 9443 }); 9444 9445 // Wait for the server to be listening 9446 thread::sleep(std::time::Duration::new(5, 0)); 9447 9448 // And check the connection is still functional after live-migration 9449 migration_guest 9450 .ssh_command("nc -vz 172.100.0.1 12345") 9451 .unwrap(); 9452 }); 9453 9454 // Clean-up the destination VM and OVS VM, and make sure they terminated correctly 9455 let _ = dest_child.kill(); 9456 let _ = ovs_child.kill(); 9457 let dest_output = dest_child.wait_with_output().unwrap(); 9458 let ovs_output = ovs_child.wait_with_output().unwrap(); 9459 9460 cleanup_ovs_dpdk(); 9461 9462 handle_child_output(r, &dest_output); 9463 handle_child_output(Ok(()), &ovs_output); 9464 } 9465 9466 mod live_migration_parallel { 9467 use super::*; 9468 #[test] 9469 fn test_live_migration_basic() { 9470 _test_live_migration(false, false) 9471 } 9472 9473 #[test] 9474 fn test_live_migration_local() { 9475 _test_live_migration(false, true) 9476 } 9477 9478 #[test] 9479 #[cfg(not(feature = "mshv"))] 9480 fn test_live_migration_numa() { 9481 _test_live_migration_numa(false, false) 9482 } 9483 9484 #[test] 9485 #[cfg(not(feature = "mshv"))] 9486 fn test_live_migration_numa_local() { 9487 _test_live_migration_numa(false, true) 9488 } 9489 9490 #[test] 9491 fn test_live_migration_watchdog() { 9492 _test_live_migration_watchdog(false, false) 9493 } 9494 9495 #[test] 9496 fn test_live_migration_watchdog_local() { 9497 _test_live_migration_watchdog(false, true) 9498 } 9499 9500 #[test] 9501 fn test_live_migration_balloon() { 9502 _test_live_migration_balloon(false, false) 9503 } 9504 9505 #[test] 9506 fn test_live_migration_balloon_local() { 9507 _test_live_migration_balloon(false, true) 9508 } 9509 9510 #[test] 9511 fn test_live_upgrade_basic() { 9512 _test_live_migration(true, false) 9513 } 9514 9515 #[test] 9516 fn test_live_upgrade_local() { 9517 _test_live_migration(true, true) 9518 } 9519 9520 #[test] 9521 #[cfg(not(feature = "mshv"))] 9522 fn test_live_upgrade_numa() { 9523 _test_live_migration_numa(true, false) 9524 } 9525 9526 #[test] 9527 #[cfg(not(feature = "mshv"))] 9528 fn test_live_upgrade_numa_local() { 9529 _test_live_migration_numa(true, true) 9530 } 9531 9532 #[test] 9533 fn test_live_upgrade_watchdog() { 9534 _test_live_migration_watchdog(true, false) 9535 } 9536 9537 #[test] 9538 fn test_live_upgrade_watchdog_local() { 9539 _test_live_migration_watchdog(true, true) 9540 } 9541 9542 #[test] 9543 fn test_live_upgrade_balloon() { 9544
            _test_live_migration_balloon(true, false)
        }

        #[test]
        fn test_live_upgrade_balloon_local() {
            _test_live_migration_balloon(true, true)
        }
    }

    mod live_migration_sequential {
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        use super::*;

        // The ovs-dpdk tests must run sequentially because they rely on the
        // same ovs-dpdk setup
        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_ovs_dpdk() {
            _test_live_migration_ovs_dpdk(false, false);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_ovs_dpdk_local() {
            _test_live_migration_ovs_dpdk(false, true);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_ovs_dpdk() {
            _test_live_migration_ovs_dpdk(true, false);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_ovs_dpdk_local() {
            _test_live_migration_ovs_dpdk(true, true);
        }
    }
}

#[cfg(target_arch = "aarch64")]
mod aarch64_acpi {
    use crate::*;

    #[test]
    fn test_simple_launch_acpi() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());

        vec![Box::new(focal)].drain(..).for_each(|disk_config| {
            let guest = Guest::new(disk_config);

            let mut child = GuestCommand::new(&guest)
                .args(["--cpus", "boot=1"])
                .args(["--memory", "size=512M"])
                .args(["--kernel", edk2_path().to_str().unwrap()])
                .default_disks()
                .default_net()
                .args(["--serial", "tty", "--console", "off"])
                .capture_output()
                .spawn()
                .unwrap();

            let r = std::panic::catch_unwind(|| {
                guest.wait_vm_boot(Some(120)).unwrap();

                assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
                assert!(guest.get_total_memory().unwrap_or_default() > 400_000);
                assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000");
            });

            let _ = child.kill();
            let output = child.wait_with_output().unwrap();

            handle_child_output(r, &output);
        });
    }

    #[test]
    fn test_guest_numa_nodes_acpi() {
        _test_guest_numa_nodes(true);
    }

    #[test]
    fn test_cpu_topology_421_acpi() {
        test_cpu_topology(4, 2, 1, true);
    }

    #[test]
    fn test_cpu_topology_142_acpi() {
        test_cpu_topology(1, 4, 2, true);
    }

    #[test]
    fn test_cpu_topology_262_acpi() {
        test_cpu_topology(2, 6, 2, true);
    }

    #[test]
    fn test_power_button_acpi() {
        _test_power_button(true);
    }

    #[test]
    fn test_virtio_iommu() {
        _test_virtio_iommu(true)
    }
}

mod rate_limiter {
    use super::*;

    // Check if the 'measured' rate is within the expected 'difference'
    // (expressed as a fraction, e.g. 0.1 for 10%) of the given 'limit' rate.
    fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool {
        let upper_limit = limit * (1_f64 + difference);
        let lower_limit = limit * (1_f64 - difference);

        if measured > lower_limit && measured < upper_limit {
            return true;
        }

        eprintln!(
            "\n\n==== check_rate_limit failed! ====\n\nmeasured={measured}, lower_limit={lower_limit}, upper_limit={upper_limit}\n\n"
        );

        false
    }
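
    // Illustrative sanity check for 'check_rate_limit': with a 10% tolerance,
    // only measurements strictly within (0.9 * limit, 1.1 * limit) are
    // accepted. The two rejected calls intentionally print the diagnostic
    // message above.
    #[test]
    fn test_check_rate_limit_tolerance() {
        assert!(check_rate_limit(95.0, 100.0, 0.1));
        assert!(!check_rate_limit(80.0, 100.0, 0.1));
        assert!(!check_rate_limit(120.0, 100.0, 0.1));
    }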

    fn _test_rate_limiter_net(rx: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));

        let test_timeout = 10;
        let num_queues = 2;
        let queue_size = 256;
        let bw_size = 10485760_u64; // bytes
        let bw_refill_time = 100; // ms
        // The limiter grants 'bw_size' bytes every 'bw_refill_time' ms, so the
        // sustained limit is bw_size * 8 * 1000 / bw_refill_time bits per second.
        let limit_bps = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64;

        let net_params = format!(
            "tap=,mac={},ip={},mask=255.255.255.0,num_queues={},queue_size={},bw_size={},bw_refill_time={}",
            guest.network.guest_mac,
            guest.network.host_ip,
            num_queues,
            queue_size,
            bw_size,
            bw_refill_time,
        );

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={}", num_queues / 2)])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();
            let measured_bps =
                measure_virtio_net_throughput(test_timeout, num_queues / 2, &guest, rx, true)
                    .unwrap();
            assert!(check_rate_limit(measured_bps, limit_bps, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_net_rx() {
        _test_rate_limiter_net(true);
    }

    #[test]
    fn test_rate_limiter_net_tx() {
        _test_rate_limiter_net(false);
    }

    fn _test_rate_limiter_block(bandwidth: bool) {
        let test_timeout = 10;
        let num_queues = 1;
        let fio_ops = FioOps::RandRW;

        let bw_size = if bandwidth {
            10485760_u64 // bytes
        } else {
            100_u64 // I/O operations
        };
        let bw_refill_time = 100; // ms
        // Same refill-bucket math as the net test: 'bw_size' bytes (or I/O
        // operations) every 'bw_refill_time' ms gives the per-second limit.
        let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);
        let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();
        let blk_rate_limiter_test_img =
            String::from(test_img_dir.as_path().join("blk.img").to_str().unwrap());

        // Create the test block image
        assert!(exec_host_command_output(&format!(
            "dd if=/dev/zero of={blk_rate_limiter_test_img} bs=1M count=1024"
        ))
        .status
        .success());

        let test_blk_params = if bandwidth {
            format!(
                "path={blk_rate_limiter_test_img},bw_size={bw_size},bw_refill_time={bw_refill_time}"
            )
        } else {
            format!(
                "path={blk_rate_limiter_test_img},ops_size={bw_size},ops_refill_time={bw_refill_time}"
            )
        };
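
        // The guest enumerates the OS disk as /dev/vda, the cloud-init disk as
        // /dev/vdb and the rate-limited test image as /dev/vdc, which is the
        // device targeted by the fio command below.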
"--disk", 9788 test_blk_params.as_str(), 9789 ]) 9790 .default_net() 9791 .args(["--api-socket", &api_socket]) 9792 .capture_output() 9793 .spawn() 9794 .unwrap(); 9795 9796 let r = std::panic::catch_unwind(|| { 9797 guest.wait_vm_boot(None).unwrap(); 9798 9799 let fio_command = format!( 9800 "sudo fio --filename=/dev/vdc --name=test --output-format=json \ 9801 --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \ 9802 --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}" 9803 ); 9804 let output = guest.ssh_command(&fio_command).unwrap(); 9805 9806 // Parse fio output 9807 let measured_rate = if bandwidth { 9808 parse_fio_output(&output, &fio_ops, num_queues).unwrap() 9809 } else { 9810 parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap() 9811 }; 9812 assert!(check_rate_limit(measured_rate, limit_rate, 0.1)); 9813 }); 9814 9815 let _ = child.kill(); 9816 let output = child.wait_with_output().unwrap(); 9817 handle_child_output(r, &output); 9818 } 9819 9820 #[test] 9821 fn test_rate_limiter_block_bandwidth() { 9822 _test_rate_limiter_block(true) 9823 } 9824 9825 #[test] 9826 fn test_rate_limiter_block_iops() { 9827 _test_rate_limiter_block(false) 9828 } 9829 } 9830