// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::undocumented_unsafe_blocks)]
// When enabling the `mshv` feature, we skip quite some tests and
// hence have known dead-code. This annotation silences dead-code
// related warnings for our quality workflow to pass.
#![allow(dead_code)]

extern crate test_infra;

use net_util::MacAddr;
use std::collections::HashMap;
use std::fs;
use std::io;
use std::io::BufRead;
use std::io::Read;
use std::io::Seek;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::Receiver;
use std::sync::Mutex;
use std::thread;
use test_infra::*;
use vmm_sys_util::{tempdir::TempDir, tempfile::TempFile};
use wait_timeout::ChildExt;

// Constant taken from the VMM crate.
const MAX_NUM_PCI_SEGMENTS: u16 = 96;

#[cfg(target_arch = "x86_64")]
mod x86_64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
    pub const JAMMY_NVIDIA_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-nvidia.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20230119-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw";
    pub const OVMF_NAME: &str = "CLOUDHV.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
}

#[cfg(target_arch = "x86_64")]
use x86_64::*;

#[cfg(target_arch = "aarch64")]
mod aarch64 {
    pub const BIONIC_IMAGE_NAME: &str = "bionic-server-cloudimg-arm64.raw";
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
    pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw";
    pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
    pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true";
}

#[cfg(target_arch = "aarch64")]
use aarch64::*;

const DIRECT_KERNEL_BOOT_CMDLINE: &str =
    "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1";

const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server";

// This enum exists to make it more convenient to
// implement tests for both the D-Bus and REST APIs.
enum TargetApi {
    // API socket
    HttpApi(String),
    // well known service name, object path
    DBusApi(String, String),
}

impl TargetApi {
    fn new_http_api(tmp_dir: &TempDir) -> Self {
        Self::HttpApi(temp_api_path(tmp_dir))
    }

    fn new_dbus_api(tmp_dir: &TempDir) -> Self {
        // `tmp_dir` is in the form of "/tmp/chXXXXXX"
        // and we take the `chXXXXXX` part as a unique identifier for the guest
        let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap();

        Self::DBusApi(
            format!("org.cloudhypervisor.{id}"),
            format!("/org/cloudhypervisor/{id}"),
        )
    }

    fn guest_args(&self) -> Vec<&str> {
        match self {
            TargetApi::HttpApi(api_socket) => {
                vec!["--api-socket", api_socket.as_str()]
            }
            TargetApi::DBusApi(service_name, object_path) => {
                vec![
                    "--dbus-service-name",
                    service_name.as_str(),
                    "--dbus-object-path",
                    object_path.as_str(),
                ]
            }
        }
    }

    fn remote_args(&self) -> Vec<&str> {
        // `guest_args` and `remote_args` are consistent with each other
        self.guest_args()
    }

    fn remote_command(&self, command: &str, arg: Option<&str>) -> bool {
        let mut cmd = Command::new(clh_command("ch-remote"));
        cmd.args(self.remote_args());
        cmd.arg(command);

        if let Some(arg) = arg {
            cmd.arg(arg);
        }

        let output = cmd.output().unwrap();
        if output.status.success() {
            true
        } else {
            eprintln!("Error running ch-remote command: {:?}", &cmd);
            let stderr = String::from_utf8_lossy(&output.stderr);
            eprintln!("stderr: {stderr}");
            false
        }
    }
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
fn _test_api_create_boot(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config)));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be shut down and then
// booted again.
fn _test_api_shutdown(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shut down
        thread::sleep(std::time::Duration::new(20, 0));

        // Then shut it down
        assert!(target_api.remote_command("shutdown", None));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be deleted and then
// re-created and booted again.
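// (Illustrative only: with the HTTP API this corresponds roughly to running
// `ch-remote --api-socket <socket> create <config.json>`, then `boot`, `delete`,
// and finally `create`/`boot` again; the exact arguments are assembled by
// `TargetApi::remote_command()` above.)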
fn _test_api_delete(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );
    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shut down
        thread::sleep(std::time::Duration::new(20, 0));

        // Then delete it
        assert!(target_api.remote_command("delete", None));

        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
// Then we pause the VM and check that it's no longer reachable over SSH.
// Finally we resume the VM and check that it's reachable again.
fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config)));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // We now pause the VM
        assert!(target_api.remote_command("pause", None));

        // Check pausing again fails
        assert!(!target_api.remote_command("pause", None));

        thread::sleep(std::time::Duration::new(2, 0));

        // SSH into the VM should fail
        assert!(ssh_command_ip(
            "grep -c processor /proc/cpuinfo",
            &guest.network.guest_ip,
            2,
            5
        )
        .is_err());

        // Resume the VM
        assert!(target_api.remote_command("resume", None));

        // Check resuming again fails
        assert!(!target_api.remote_command("resume", None));

        thread::sleep(std::time::Duration::new(2, 0));

        // Now we should be able to SSH back in and get the right number of CPUs
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut virtiofsd_path = workload_path;
    virtiofsd_path.push("virtiofsd");
    let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap());

    let virtiofsd_socket_path =
        String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap());

    // Start the daemon
    let child = Command::new(virtiofsd_path.as_str())
        .args(["--shared-dir", shared_dir])
        .args(["--socket-path", virtiofsd_socket_path.as_str()])
        .args(["--cache", "never"])
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(10, 0));

    (child, virtiofsd_socket_path)
}

fn prepare_vubd(
    tmp_dir: &TempDir,
    blk_img: &str,
    num_queues: usize,
    rdonly: bool,
    direct: bool,
) -> (std::process::Child, String) {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut blk_file_path = workload_path;
    blk_file_path.push(blk_img);
    let blk_file_path = String::from(blk_file_path.to_str().unwrap());

    let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap());

    // Start the daemon
    let child = Command::new(clh_command("vhost_user_block"))
        .args([
            "--block-backend",
            format!(
                "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}"
            )
            .as_str(),
        ])
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(10, 0));

    (child, vubd_socket_path)
}

fn temp_vsock_path(tmp_dir: &TempDir) -> String {
    String::from(tmp_dir.as_path().join("vsock").to_str().unwrap())
}

fn temp_api_path(tmp_dir: &TempDir) -> String {
    String::from(
        tmp_dir
            .as_path()
            .join("cloud-hypervisor.sock")
            .to_str()
            .unwrap(),
    )
}

fn temp_event_monitor_path(tmp_dir: &TempDir) -> String {
    String::from(tmp_dir.as_path().join("event.json").to_str().unwrap())
}

// Creates the directory and returns the path.
fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String {
    let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap());
    std::fs::create_dir(&snapshot_dir).unwrap();
    snapshot_dir
}

fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String {
    let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap());
    vmcore_file
}

// Returns the path used for direct kernel boot.
// For x86_64, this is the vmlinux kernel path.
// For AArch64, this is the PE kernel path.
fn direct_kernel_boot_path() -> PathBuf {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut kernel_path = workload_path;
    #[cfg(target_arch = "x86_64")]
    kernel_path.push("vmlinux");
    #[cfg(target_arch = "aarch64")]
    kernel_path.push("Image");

    kernel_path
}

fn edk2_path() -> PathBuf {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");
    let mut edk2_path = workload_path;
    edk2_path.push(OVMF_NAME);

    edk2_path
}

fn cloud_hypervisor_release_path() -> String {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut ch_release_path = workload_path;
    #[cfg(target_arch = "x86_64")]
    ch_release_path.push("cloud-hypervisor-static");
    #[cfg(target_arch = "aarch64")]
    ch_release_path.push("cloud-hypervisor-static-aarch64");

    ch_release_path.into_os_string().into_string().unwrap()
}

fn prepare_vhost_user_net_daemon(
    tmp_dir: &TempDir,
    ip: &str,
    tap: Option<&str>,
    mtu: Option<u16>,
    num_queues: usize,
    client_mode: bool,
) -> (std::process::Command, String) {
    let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap());

    // Start the daemon
    let mut net_params = format!(
        "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}"
    );

    if let Some(tap) = tap {
        net_params.push_str(format!(",tap={tap}").as_str());
    }

    if let Some(mtu) = mtu {
        net_params.push_str(format!(",mtu={mtu}").as_str());
    }

    let mut command = Command::new(clh_command("vhost_user_net"));
    command.args(["--net-backend", net_params.as_str()]);

    (command, vunet_socket_path)
}

fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) {
    let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap());
    let swtpm_socket_path = String::from(
        tmp_dir
            .as_path()
            .join("swtpm")
            .join("swtpm.sock")
            .to_str()
            .unwrap(),
    );
    std::fs::create_dir(&swtpm_tpm_dir).unwrap();

    let mut swtpm_command = Command::new("swtpm");
    let swtpm_args = [
        "socket",
        "--tpmstate",
        &format!("dir={swtpm_tpm_dir}"),
        "--ctrl",
        &format!("type=unixio,path={swtpm_socket_path}"),
        "--flags",
        "startup-clear",
        "--tpm2",
    ];
    swtpm_command.args(swtpm_args);

    (swtpm_command, swtpm_socket_path)
}

fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args(["--api-socket", api_socket, command]);

    if let Some(arg) = arg {
        cmd.arg(arg);
    }
    let output = cmd.output().unwrap();
    if output.status.success() {
        true
    } else {
        eprintln!("Error running ch-remote command: {:?}", &cmd);
        let stderr = String::from_utf8_lossy(&output.stderr);
        eprintln!("stderr: {stderr}");
        false
    }
}

fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args(["--api-socket", api_socket, command]);

    if let Some(arg) = arg {
        cmd.arg(arg);
    }

    let output = cmd.output().expect("Failed to launch ch-remote");

    (output.status.success(), output.stdout)
}

fn resize_command(
    api_socket: &str,
    desired_vcpus: Option<u8>,
    desired_ram: Option<usize>,
    desired_balloon: Option<usize>,
    event_file: Option<&str>,
) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args(["--api-socket", api_socket, "resize"]);

    if let Some(desired_vcpus) = desired_vcpus {
        cmd.args(["--cpus", &format!("{desired_vcpus}")]);
    }

    if let Some(desired_ram) = desired_ram {
        cmd.args(["--memory", &format!("{desired_ram}")]);
    }

    if let Some(desired_balloon) = desired_balloon {
        cmd.args(["--balloon", &format!("{desired_balloon}")]);
    }

    let ret = cmd.status().expect("Failed to launch ch-remote").success();

    if let Some(event_path) = event_file {
        let latest_events = [
            &MetaEvent {
                event: "resizing".to_string(),
                device_id: None,
            },
            &MetaEvent {
                event: "resized".to_string(),
                device_id: None,
            },
        ];
        assert!(check_latest_events_exact(&latest_events, event_path));
    }

    ret
}

fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args([
        "--api-socket",
        api_socket,
        "resize-zone",
        "--id",
        id,
        "--size",
        desired_size,
    ]);

    cmd.status().expect("Failed to launch ch-remote").success()
}

// Set up the OVS-DPDK bridge and ports
fn setup_ovs_dpdk() {
    // Set up OVS-DPDK
    assert!(exec_host_command_status("service openvswitch-switch start").success());
    assert!(exec_host_command_status("ovs-vsctl init").success());
    assert!(
        exec_host_command_status("ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true")
            .success()
    );
    assert!(exec_host_command_status("service openvswitch-switch restart").success());

    // Create OVS-DPDK bridge and ports
    assert!(exec_host_command_status(
        "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev",
    )
    .success());
    assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success());
    assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success());
    assert!(exec_host_command_status("ip link set up dev ovsbr0").success());
    assert!(exec_host_command_status("service openvswitch-switch restart").success());
}

fn cleanup_ovs_dpdk() {
    assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success());
    exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2");
}

// Set up two guests and ensure they are connected through OVS-DPDK
fn setup_ovs_dpdk_guests(
    guest1: &Guest,
    guest2: &Guest,
    api_socket: &str,
    release_binary: bool,
) -> (Child, Child) {
    setup_ovs_dpdk();

    let clh_path = if !release_binary {
        clh_command("cloud-hypervisor")
    } else {
        cloud_hypervisor_release_path()
    };

    let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path)
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=0,shared=on"])
        .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest1.default_net_string().as_str(), "--net", "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"])
        .capture_output()
        .spawn()
        .unwrap();

    #[cfg(target_arch = "x86_64")]
    let guest_net_iface = "ens5";
    #[cfg(target_arch = "aarch64")]
    let guest_net_iface = "enp0s5";

    let r = std::panic::catch_unwind(|| {
        guest1.wait_vm_boot(None).unwrap();

        guest1
            .ssh_command(&format!(
                "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}"
            ))
            .unwrap();
        guest1
            .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
            .unwrap();

        let guest_ip = guest1.network.guest_ip.clone();
        thread::spawn(move || {
            ssh_command_ip(
                "nc -l 12345",
                &guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap();
        });
    });
    if r.is_err() {
        cleanup_ovs_dpdk();

        let _ = child1.kill();
        let output = child1.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark this block never returns
    }

    let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path)
        .args(["--api-socket", api_socket])
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=0,shared=on"])
        .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest2.default_net_string().as_str(), "--net", "vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest2.wait_vm_boot(None).unwrap();

        guest2
            .ssh_command(&format!(
                "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}"
            ))
            .unwrap();
        guest2
            .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
            .unwrap();

        // Check the connection works properly between the two VMs
        guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap();
    });
    if r.is_err() {
        cleanup_ovs_dpdk();

        let _ = child1.kill();
        let _ = child2.kill();
        let output = child2.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark this block never returns
    }

    (child1, child2)
}

enum FwType {
    Ovmf,
    RustHypervisorFirmware,
}

fn fw_path(_fw_type: FwType) -> String {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut fw_path = workload_path;
    #[cfg(target_arch = "aarch64")]
    fw_path.push("CLOUDHV_EFI.fd");
    #[cfg(target_arch = "x86_64")]
    {
        match _fw_type {
            FwType::Ovmf => fw_path.push(OVMF_NAME),
            FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"),
        }
    }

    fw_path.to_str().unwrap().to_string()
}

struct MetaEvent {
    event: String,
    device_id: Option<String>,
}

impl MetaEvent {
    pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool {
        let mut matched = false;
        if v["event"].as_str().unwrap() == self.event {
            if let Some(device_id) = &self.device_id {
                if v["properties"]["id"].as_str().unwrap() == device_id {
                    matched = true
                }
            } else {
                matched = true;
            }
        }
        matched
    }
}

// Parse the event_monitor file, which is formatted as a series of JSON
// events each followed by a double newline.
fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> {
    let content = fs::read(event_file).unwrap();
    let mut ret = Vec::new();
    for entry in String::from_utf8_lossy(&content)
        .trim()
        .split("\n\n")
        .collect::<Vec<&str>>()
    {
        ret.push(serde_json::from_str(entry).unwrap());
    }

    ret
}

// Return true if all events from the input 'expected_events' are matched sequentially
// with events from the 'event_file'
fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    let len = expected_events.len();
    let mut idx = 0;
    for e in &json_events {
        if idx == len {
            break;
        }
        if expected_events[idx].match_with_json_event(e) {
            idx += 1;
        }
    }

    idx == len
}

// Return true if all events from the input 'expected_events' are matched exactly
// with events from the 'event_file'
fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    assert!(expected_events.len() <= json_events.len());
    let json_events = &json_events[..expected_events.len()];

    for (idx, e) in json_events.iter().enumerate() {
        if !expected_events[idx].match_with_json_event(e) {
            return false;
        }
    }

    true
}

// Return true if events from the input 'latest_events' are matched exactly
// with the most recent events from the 'event_file'
fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    assert!(latest_events.len() <= json_events.len());
    let json_events = &json_events[(json_events.len() - latest_events.len())..];

    for (idx, e) in json_events.iter().enumerate() {
        if !latest_events[idx].match_with_json_event(e) {
            return false;
        }
    }

    true
}

fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let total_vcpus = threads_per_core * cores_per_package * packages;
    let direct_kernel_boot_path = direct_kernel_boot_path();
    let mut kernel_path = direct_kernel_boot_path.to_str().unwrap();
    let fw_path = fw_path(FwType::RustHypervisorFirmware);
    if use_fw {
        kernel_path = fw_path.as_str();
    }

    let mut child = GuestCommand::new(&guest)
        .args([
            "--cpus",
            &format!(
                "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}"
            ),
        ])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();
        assert_eq!(
            guest.get_cpu_count().unwrap_or_default(),
            u32::from(total_vcpus)
        );
        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            threads_per_core
        );

        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            cores_per_package
        );

        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            packages
        );
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[allow(unused_variables)]
fn _test_guest_numa_nodes(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);
    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=6,max=12"])
        .args(["--memory", "size=0,hotplug_method=virtio-mem"])
        .args([
            "--memory-zone",
            "id=mem0,size=1G,hotplug_size=3G",
            "--memory-zone",
            "id=mem1,size=2G,hotplug_size=3G",
            "--memory-zone",
            "id=mem2,size=3G,hotplug_size=3G",
        ])
        .args([
            "--numa",
            "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
            "--numa",
            "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
            "--numa",
            "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
        ])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--api-socket", &api_socket])
        .capture_output()
        .default_disks()
        .default_net()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        guest.check_numa_common(
            Some(&[960_000, 1_920_000, 2_880_000]),
            Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
            Some(&["10 15 20", "20 10 25", "25 30 10"]),
        );

        // AArch64 currently does not support hotplug, and therefore we only
        // test the hotplug-related functions on x86_64 here.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Resize every memory zone and check each associated NUMA node
            // has been assigned the right amount of memory.
            resize_zone_command(&api_socket, "mem0", "4G");
            resize_zone_command(&api_socket, "mem1", "4G");
            resize_zone_command(&api_socket, "mem2", "4G");
            // Resize to the maximum amount of CPUs and check each NUMA
            // node has been assigned the right CPUs set.
            resize_command(&api_socket, Some(12), None, None, None);
            thread::sleep(std::time::Duration::new(5, 0));

            guest.check_numa_common(
                Some(&[3_840_000, 3_840_000, 3_840_000]),
                Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]),
                None,
            );
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[allow(unused_variables)]
fn _test_power_button(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let mut cmd = GuestCommand::new(&guest);
    let api_socket = temp_api_path(&guest.tmp_dir);

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    cmd.args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .capture_output()
        .default_disks()
        .default_net()
        .args(["--api-socket", &api_socket]);

    let child = cmd.spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();
        assert!(remote_command(&api_socket, "power-button", None));
    });

    let output = child.wait_with_output().unwrap();
    assert!(output.status.success());
    handle_child_output(r, &output);
}

type PrepareNetDaemon = dyn Fn(
    &TempDir,
    &str,
    Option<&str>,
    Option<u16>,
    usize,
    bool,
) -> (std::process::Command, String);

fn test_vhost_user_net(
    tap: Option<&str>,
    num_queues: usize,
    prepare_daemon: &PrepareNetDaemon,
    generate_host_mac: bool,
    client_mode_daemon: bool,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();

    let host_mac = if generate_host_mac {
        Some(MacAddr::local_random())
    } else {
        None
    };

    let mtu = Some(3000);

    let (mut daemon_command, vunet_socket_path) = prepare_daemon(
        &guest.tmp_dir,
        &guest.network.host_ip,
        tap,
        mtu,
        num_queues,
        client_mode_daemon,
    );
    let net_params = format!(
        "vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000",
        guest.network.guest_mac,
        vunet_socket_path,
        num_queues,
        if let Some(host_mac) = host_mac {
            format!(",host_mac={host_mac}")
        } else {
            "".to_owned()
        },
        if client_mode_daemon {
            "server"
        } else {
            "client"
        },
    );

    let mut ch_command = GuestCommand::new(&guest);
    ch_command
        .args(["--cpus", format!("boot={}", num_queues / 2).as_str()])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", net_params.as_str()])
        .args(["--api-socket", &api_socket])
        .capture_output();

    let mut daemon_child: std::process::Child;
    let mut child: std::process::Child;

    if client_mode_daemon {
        child = ch_command.spawn().unwrap();
        // Make sure the VMM is waiting for the backend to connect
        thread::sleep(std::time::Duration::new(10, 0));
        daemon_child = daemon_command.spawn().unwrap();
    } else {
        daemon_child = daemon_command.spawn().unwrap();
        // Make sure the backend is waiting for the VMM to connect
        thread::sleep(std::time::Duration::new(10, 0));
        child = ch_command.spawn().unwrap();
    }

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if let Some(tap_name) = tap {
            let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}"));
            assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
        }

        if let Some(host_mac) = host_mac {
            let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}"));
            assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
        }

        #[cfg(target_arch = "aarch64")]
        let iface = "enp0s4";
        #[cfg(target_arch = "x86_64")]
        let iface = "ens4";

        assert_eq!(
            guest
                .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
                .unwrap()
                .trim(),
            "3000"
        );

        // 1 network interface + default localhost ==> 2 interfaces
        // It's important to note that this test is fully exercising the
        // vhost-user-net implementation and the associated backend since
        // it does not define any --net network interface. That means all
        // the ssh communication in that test happens through the network
        // interface backed by vhost-user-net.
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // The following PCI devices will appear on the guest with PCI-MSI
        // interrupt vectors assigned:
        // 1 virtio-console with 3 vectors: config, Rx, Tx
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-rng with 2 vectors: config, Request
        // Since virtio-net has 2 queue pairs, its vectors are as follows:
        // 1 virtio-net with 5 vectors: config, Rx (2), Tx (2)
        // Based on the above, the total number of vectors should be 14.
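        // A worked example of that arithmetic (illustrative, assuming
        // num_queues == 4, i.e. the 2 queue pairs described above):
        //   3 (console) + 2 (blk) + 2 (blk) + 2 (rng) + 5 (net) = 14
        // which is exactly what the `10 + num_queues` expression asserted
        // below evaluates to.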
        #[cfg(target_arch = "x86_64")]
        let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
        #[cfg(target_arch = "aarch64")]
        let grep_cmd = "grep -c ITS-MSI /proc/interrupts";
        assert_eq!(
            guest
                .ssh_command(grep_cmd)
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            10 + (num_queues as u32)
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            // Here by simply checking the size (through ssh), we validate
            // the connection is still working, which means vhost-user-net
            // keeps working after the resize.
            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    thread::sleep(std::time::Duration::new(5, 0));
    let _ = daemon_child.kill();
    let _ = daemon_child.wait();

    handle_child_output(r, &output);
}

type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String);

fn test_vhost_user_blk(
    num_queues: usize,
    readonly: bool,
    direct: bool,
    prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();

    let (blk_params, daemon_child) = {
        let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
        // Start the daemon
        let (daemon_child, vubd_socket_path) =
            prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct);

        (
            format!(
                "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128"
            ),
            Some(daemon_child),
        )
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", format!("boot={num_queues}").as_str()])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
            )
            .as_str(),
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
            "--disk",
            blk_params.as_str(),
        ])
        .default_net()
        .args(["--api-socket", &api_socket])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check both if /dev/vdc exists and if the block size is 16M.
        assert_eq!(
            guest
                .ssh_command("lsblk | grep vdc | grep -c 16M")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            1
        );

        // Check if this block is RO or RW.
        assert_eq!(
            guest
                .ssh_command("lsblk | grep vdc | awk '{print $5}'")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            readonly as u32
        );

        // Check if the number of queues in /sys/block/vdc/mq matches the
        // expected num_queues.
        assert_eq!(
            guest
                .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            num_queues as u32
        );

        // Mount the device
        let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" };
        guest.ssh_command("mkdir mount_image").unwrap();
        guest
            .ssh_command(
                format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(),
            )
            .unwrap();

        // Check the content of the block device. The file "foo" should
        // contain "bar".
        assert_eq!(
            guest.ssh_command("cat mount_image/foo").unwrap().trim(),
            "bar"
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

            // Check again the content of the block device after the resize
            // has been performed.
            assert_eq!(
                guest.ssh_command("cat mount_image/foo").unwrap().trim(),
                "bar"
            );
        }

        // Unmount the device
        guest.ssh_command("sudo umount /dev/vdc").unwrap();
        guest.ssh_command("rm -r mount_image").unwrap();
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    if let Some(mut daemon_child) = daemon_child {
        thread::sleep(std::time::Duration::new(5, 0));
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn test_boot_from_vhost_user_blk(
    num_queues: usize,
    readonly: bool,
    direct: bool,
    prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    let kernel_path = direct_kernel_boot_path();

    let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap();

    let (blk_boot_params, daemon_child) = {
        let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
        // Start the daemon
        let (daemon_child, vubd_socket_path) = prepare_daemon(
            &guest.tmp_dir,
            disk_path.as_str(),
            num_queues,
            readonly,
            direct,
        );

        (
            format!(
                "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128"
            ),
            Some(daemon_child),
        )
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", format!("boot={num_queues}").as_str()])
        .args(["--memory", "size=512M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            blk_boot_params.as_str(),
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
        ])
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Just check the VM booted correctly.
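        // (Note: `get_total_memory()` is assumed to report the guest's total
        // memory in KiB, so the 480_000 threshold below simply means "most of
        // the 512 MiB configured above", leaving headroom for kernel and
        // firmware reservations.)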
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });
    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    if let Some(mut daemon_child) = daemon_child {
        thread::sleep(std::time::Duration::new(5, 0));
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn _test_virtio_fs(
    prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String),
    hotplug: bool,
    pci_segment: Option<u16>,
) {
    #[cfg(target_arch = "aarch64")]
    let focal_image = if hotplug {
        FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string()
    } else {
        FOCAL_IMAGE_NAME.to_string()
    };
    #[cfg(target_arch = "x86_64")]
    let focal_image = FOCAL_IMAGE_NAME.to_string();
    let focal = UbuntuDiskConfig::new(focal_image);
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut shared_dir = workload_path;
    shared_dir.push("shared_dir");

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if hotplug {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let (mut daemon_child, virtiofsd_socket_path) =
        prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());

    let mut guest_command = GuestCommand::new(&guest);
    guest_command
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .args(["--api-socket", &api_socket]);
    if pci_segment.is_some() {
        guest_command.args([
            "--platform",
            &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
        ]);
    }

    let fs_params = format!(
        "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
        virtiofsd_socket_path,
        if let Some(pci_segment) = pci_segment {
            format!(",pci_segment={pci_segment}")
        } else {
            "".to_owned()
        }
    );

    if !hotplug {
        guest_command.args(["--fs", fs_params.as_str()]);
    }

    let mut child = guest_command.capture_output().spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if hotplug {
            // Add fs to the VM
            let (cmd_success, cmd_output) =
                remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
            assert!(cmd_success);

            if let Some(pci_segment) = pci_segment {
                assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                    "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
                )));
            } else {
                assert!(String::from_utf8_lossy(&cmd_output)
                    .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
            }

            thread::sleep(std::time::Duration::new(10, 0));
        }

        // Mount shared directory through virtio_fs filesystem
        guest
            .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
            .unwrap();

        // Check file1 exists and its content is "foo"
        assert_eq!(
            guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
            "foo"
        );

        // Check file2 does not exist
        guest
            .ssh_command("[ ! -f 'mount_dir/file2' ] || true")
            .unwrap();

        // Check file3 exists and its content is "bar"
        assert_eq!(
            guest.ssh_command("cat mount_dir/file3").unwrap().trim(),
            "bar"
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(30, 0));
            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

            // After the resize, check again that file1 exists and its
            // content is "foo".
            assert_eq!(
                guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
                "foo"
            );
        }

        if hotplug {
            // Remove from VM
            guest.ssh_command("sudo umount mount_dir").unwrap();
            assert!(remote_command(&api_socket, "remove-device", Some("myfs0")));
        }
    });

    let (r, hotplug_daemon_child) = if r.is_ok() && hotplug {
        thread::sleep(std::time::Duration::new(10, 0));
        let (daemon_child, virtiofsd_socket_path) =
            prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());

        let r = std::panic::catch_unwind(|| {
            thread::sleep(std::time::Duration::new(10, 0));
            let fs_params = format!(
                "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
                virtiofsd_socket_path,
                if let Some(pci_segment) = pci_segment {
                    format!(",pci_segment={pci_segment}")
                } else {
                    "".to_owned()
                }
            );

            // Add back and check it works
            let (cmd_success, cmd_output) =
                remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
            assert!(cmd_success);
            if let Some(pci_segment) = pci_segment {
                assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                    "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
                )));
            } else {
                assert!(String::from_utf8_lossy(&cmd_output)
                    .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
            }

            thread::sleep(std::time::Duration::new(10, 0));
            // Mount shared directory through virtio_fs filesystem
            guest
                .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
                .unwrap();

            // Check file1 exists and its content is "foo"
            assert_eq!(
                guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
                "foo"
            );
        });

        (r, Some(daemon_child))
    } else {
        (r, None)
    };

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    let _ = daemon_child.kill();
    let _ = daemon_child.wait();

    if let Some(mut daemon_child) = hotplug_daemon_child {
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn test_virtio_pmem(discard_writes: bool, specify_size: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    let kernel_path = direct_kernel_boot_path();

    let pmem_temp_file = TempFile::new().unwrap();
    pmem_temp_file.as_file().set_len(128 << 20).unwrap();

    std::process::Command::new("mkfs.ext4")
        .arg(pmem_temp_file.as_path())
        .output()
        .expect("Expect creating disk image to succeed");

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .args([
            "--pmem",
            format!(
                "file={}{}{}",
                pmem_temp_file.as_path().to_str().unwrap(),
                if specify_size { ",size=128M" } else { "" },
                if discard_writes {
                    ",discard_writes=on"
                } else {
                    ""
                }
            )
            .as_str(),
        ])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check for the presence of /dev/pmem0
        assert_eq!(
            guest.ssh_command("ls /dev/pmem0").unwrap().trim(),
            "/dev/pmem0"
        );

        // Check changes persist after reboot
        assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), "");
        assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n");
        guest
            .ssh_command("echo test123 | sudo tee /mnt/test")
            .unwrap();
        assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), "");
        assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "");

        guest.reboot_linux(0, None);
        assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), "");
        assert_eq!(
            guest
                .ssh_command("sudo cat /mnt/test || true")
                .unwrap()
                .trim(),
            if discard_writes { "" } else { "test123" }
        );
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

fn get_fd_count(pid: u32) -> usize {
    fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count()
}

fn _test_virtio_vsock(hotplug: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if hotplug {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let socket = temp_vsock_path(&guest.tmp_dir);
    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut cmd = GuestCommand::new(&guest);
    cmd.args(["--api-socket", &api_socket]);
    cmd.args(["--cpus", "boot=1"]);
    cmd.args(["--memory", "size=512M"]);
    cmd.args(["--kernel", kernel_path.to_str().unwrap()]);
    cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]);
    cmd.default_disks();
    cmd.default_net();

    if !hotplug {
        cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]);
    }

    let mut child = cmd.capture_output().spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if hotplug {
            let (cmd_success, cmd_output) = remote_command_w_output(
                &api_socket,
                "add-vsock",
                Some(format!("cid=3,socket={socket},id=test0").as_str()),
            );
            assert!(cmd_success);
            assert!(String::from_utf8_lossy(&cmd_output)
                .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}"));
            thread::sleep(std::time::Duration::new(10, 0));
            // Check adding a second one fails
            assert!(!remote_command(
                &api_socket,
                "add-vsock",
                Some("cid=1234,socket=/tmp/fail")
            ));
        }

        // Validate vsock works as expected.
        guest.check_vsock(socket.as_str());
        guest.reboot_linux(0, None);
        // Validate vsock still works after a reboot.
        guest.check_vsock(socket.as_str());

        if hotplug {
            assert!(remote_command(&api_socket, "remove-device", Some("test0")));
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

fn get_ksm_pages_shared() -> u32 {
    fs::read_to_string("/sys/kernel/mm/ksm/pages_shared")
        .unwrap()
        .trim()
        .parse::<u32>()
        .unwrap()
}

fn test_memory_mergeable(mergeable: bool) {
    let memory_param = if mergeable {
        "mergeable=on"
    } else {
        "mergeable=off"
    };

    // We are assuming the rest of the system in our CI is not using mergeable memory
    let ksm_ps_init = get_ksm_pages_shared();
    assert!(ksm_ps_init == 0);

    let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest1 = Guest::new(Box::new(focal1));
    let mut child1 = GuestCommand::new(&guest1)
        .args(["--cpus", "boot=1"])
        .args(["--memory", format!("size=512M,{memory_param}").as_str()])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest1.default_net_string().as_str()])
        .args(["--serial", "tty", "--console", "off"])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest1.wait_vm_boot(None).unwrap();
    });
    if r.is_err() {
        let _ = child1.kill();
        let output = child1.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark this block never returns
    }

    let ksm_ps_guest1 = get_ksm_pages_shared();

    let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest2 = Guest::new(Box::new(focal2));
    let mut child2 = GuestCommand::new(&guest2)
        .args(["--cpus", "boot=1"])
        .args(["--memory", format!("size=512M,{memory_param}").as_str()])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest2.default_net_string().as_str()])
        .args(["--serial", "tty", "--console", "off"])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest2.wait_vm_boot(None).unwrap();
        let ksm_ps_guest2 = get_ksm_pages_shared();

        if mergeable {
            println!(
                "ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'"
            );
            // We are expecting the number of shared pages to increase as the number of VMs increases
            assert!(ksm_ps_guest1 < ksm_ps_guest2);
        } else {
            assert!(ksm_ps_guest1 == 0);
            assert!(ksm_ps_guest2 == 0);
        }
    });

    let _ = child1.kill();
    let _ = child2.kill();

    let output = child1.wait_with_output().unwrap();
    child2.wait().unwrap();

    handle_child_output(r, &output);
}

fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> {
    let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap();
    let reader = io::BufReader::new(smaps);

    let mut skip_map: bool = false;
    let mut region_name: String = "".to_string();
    let mut region_maps = HashMap::new();
    for line in reader.lines() {
        let l = line.unwrap();

        if l.contains('-') {
values: Vec<&str> = l.split_whitespace().collect(); 1928 region_name = values.last().unwrap().trim().to_string(); 1929 if region_name == "0" { 1930 region_name = "anonymous".to_string() 1931 } 1932 } 1933 1934 // Each section begins with something that looks like: 1935 // Size: 2184 kB 1936 if l.starts_with("Size:") { 1937 let values: Vec<&str> = l.split_whitespace().collect(); 1938 let map_size = values[1].parse::<u32>().unwrap(); 1939 // We skip the assigned guest RAM map, its RSS is only 1940 // dependent on the guest actual memory usage. 1941 // Everything else can be added to the VMM overhead. 1942 skip_map = map_size >= guest_memory_size; 1943 continue; 1944 } 1945 1946 // If this is a map we're taking into account, then we only 1947 // count the RSS. The sum of all counted RSS is the VMM overhead. 1948 if !skip_map && l.starts_with("Rss:") { 1949 let values: Vec<&str> = l.split_whitespace().collect(); 1950 let value = values[1].trim().parse::<u32>().unwrap(); 1951 *region_maps.entry(region_name.clone()).or_insert(0) += value; 1952 } 1953 } 1954 1955 region_maps 1956 } 1957 1958 fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 { 1959 let mut total = 0; 1960 1961 for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) { 1962 eprintln!("{region_name}: {value}"); 1963 total += value; 1964 } 1965 1966 total 1967 } 1968 1969 fn process_rss_kib(pid: u32) -> usize { 1970 let command = format!("ps -q {pid} -o rss="); 1971 let rss = exec_host_command_output(&command); 1972 String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap() 1973 } 1974 1975 // 10MB is our maximum accepted overhead. 1976 const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024; 1977 1978 #[derive(PartialEq, Eq, PartialOrd)] 1979 struct Counters { 1980 rx_bytes: u64, 1981 rx_frames: u64, 1982 tx_bytes: u64, 1983 tx_frames: u64, 1984 read_bytes: u64, 1985 write_bytes: u64, 1986 read_ops: u64, 1987 write_ops: u64, 1988 } 1989 1990 fn get_counters(api_socket: &str) -> Counters { 1991 // Get counters 1992 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None); 1993 assert!(cmd_success); 1994 1995 let counters: HashMap<&str, HashMap<&str, u64>> = 1996 serde_json::from_slice(&cmd_output).unwrap_or_default(); 1997 1998 let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap(); 1999 let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap(); 2000 let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap(); 2001 let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap(); 2002 2003 let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap(); 2004 let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap(); 2005 let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap(); 2006 let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap(); 2007 2008 Counters { 2009 rx_bytes, 2010 rx_frames, 2011 tx_bytes, 2012 tx_frames, 2013 read_bytes, 2014 write_bytes, 2015 read_ops, 2016 write_ops, 2017 } 2018 } 2019 2020 fn pty_read(mut pty: std::fs::File) -> Receiver<String> { 2021 let (tx, rx) = mpsc::channel::<String>(); 2022 thread::spawn(move || loop { 2023 thread::sleep(std::time::Duration::new(1, 0)); 2024 let mut buf = [0; 512]; 2025 match pty.read(&mut buf) { 2026 Ok(_) => { 2027 let output = std::str::from_utf8(&buf).unwrap().to_string(); 2028 match tx.send(output) { 2029 Ok(_) => (), 2030 Err(_) => break, 2031 } 2032 } 2033 Err(_) => break, 2034 } 2035 }); 2036 rx 2037 
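// Note: the reader thread above terminates on its own, either when the pty read fails or when the receiving end of the channel is dropped.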
} 2038 2039 fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf { 2040 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2041 assert!(cmd_success); 2042 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2043 assert_eq!("Pty", info["config"][pty_type]["mode"]); 2044 PathBuf::from( 2045 info["config"][pty_type]["file"] 2046 .as_str() 2047 .expect("Missing pty path"), 2048 ) 2049 } 2050 2051 // VFIO test network setup. 2052 // We reserve a different IP class for it: 172.18.0.0/24. 2053 #[cfg(target_arch = "x86_64")] 2054 fn setup_vfio_network_interfaces() { 2055 // 'vfio-br0' 2056 assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success()); 2057 assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success()); 2058 assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success()); 2059 // 'vfio-tap0' 2060 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success()); 2061 assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success()); 2062 assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success()); 2063 // 'vfio-tap1' 2064 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success()); 2065 assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success()); 2066 assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success()); 2067 // 'vfio-tap2' 2068 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success()); 2069 assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success()); 2070 assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success()); 2071 // 'vfio-tap3' 2072 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success()); 2073 assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success()); 2074 assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success()); 2075 } 2076 2077 // Tear VFIO test network down 2078 #[cfg(target_arch = "x86_64")] 2079 fn cleanup_vfio_network_interfaces() { 2080 assert!(exec_host_command_status("sudo ip link del vfio-br0").success()); 2081 assert!(exec_host_command_status("sudo ip link del vfio-tap0").success()); 2082 assert!(exec_host_command_status("sudo ip link del vfio-tap1").success()); 2083 assert!(exec_host_command_status("sudo ip link del vfio-tap2").success()); 2084 assert!(exec_host_command_status("sudo ip link del vfio-tap3").success()); 2085 } 2086 2087 fn balloon_size(api_socket: &str) -> u64 { 2088 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2089 assert!(cmd_success); 2090 2091 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2092 let total_mem = &info["config"]["memory"]["size"] 2093 .to_string() 2094 .parse::<u64>() 2095 .unwrap(); 2096 let actual_mem = &info["memory_actual_size"] 2097 .to_string() 2098 .parse::<u64>() 2099 .unwrap(); 2100 total_mem - actual_mem 2101 } 2102 2103 fn vm_state(api_socket: &str) -> String { 2104 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2105 assert!(cmd_success); 2106 2107 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2108 let state = &info["state"].as_str().unwrap(); 2109 2110 state.to_string() 2111 } 2112 2113 // This test validates that it can find the 
virtio-iommu device first. 2114 // It also verifies that both disks and the network card are attached to 2115 // the virtual IOMMU by looking at the /sys/kernel/iommu_groups directory. 2116 // The last interesting part of this test is that it exercises the network 2117 // interface attached to the virtual IOMMU since this is the one used to 2118 // send all commands through SSH. 2119 fn _test_virtio_iommu(acpi: bool) { 2120 // Virtio-iommu support is available in recent kernels (v5.14 and later), but the kernel in 2121 // the Focal image is older. 2122 // So if ACPI is enabled on AArch64, we use a modified Focal image in which 2123 // the kernel binary has been updated. 2124 #[cfg(target_arch = "aarch64")] 2125 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 2126 #[cfg(target_arch = "x86_64")] 2127 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2128 let focal = UbuntuDiskConfig::new(focal_image); 2129 let guest = Guest::new(Box::new(focal)); 2130 2131 #[cfg(target_arch = "x86_64")] 2132 let kernel_path = direct_kernel_boot_path(); 2133 #[cfg(target_arch = "aarch64")] 2134 let kernel_path = if acpi { 2135 edk2_path() 2136 } else { 2137 direct_kernel_boot_path() 2138 }; 2139 2140 let mut child = GuestCommand::new(&guest) 2141 .args(["--cpus", "boot=1"]) 2142 .args(["--memory", "size=512M"]) 2143 .args(["--kernel", kernel_path.to_str().unwrap()]) 2144 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2145 .args([ 2146 "--disk", 2147 format!( 2148 "path={},iommu=on", 2149 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2150 ) 2151 .as_str(), 2152 "--disk", 2153 format!( 2154 "path={},iommu=on", 2155 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2156 ) 2157 .as_str(), 2158 ]) 2159 .args(["--net", guest.default_net_string_w_iommu().as_str()]) 2160 .capture_output() 2161 .spawn() 2162 .unwrap(); 2163 2164 let r = std::panic::catch_unwind(|| { 2165 guest.wait_vm_boot(None).unwrap(); 2166 2167 // Verify the virtio-iommu device is present. 2168 assert!(guest 2169 .does_device_vendor_pair_match("0x1057", "0x1af4") 2170 .unwrap_or_default()); 2171 2172 // On AArch64, if the guest system boots from FDT, the IOMMU behavior is a bit 2173 // different from the ACPI case. 2174 // All devices on the PCI bus will be attached to the virtual IOMMU, except the 2175 // virtio-iommu device itself. So these devices will all be added to IOMMU groups, 2176 // and appear under '/sys/kernel/iommu_groups/'. 2177 // As a result, in the FDT case, IOMMU group '0' contains "0000:00:01.0", 2178 // which is the console, and the first disk "0000:00:02.0" is in group '1'. 2179 // With ACPI, the console device is not attached to the IOMMU, so IOMMU group '0' 2180 // contains "0000:00:02.0", which is the first disk. 2181 // 2182 // Verify the iommu group of the first disk. 2183 let iommu_group = !acpi as i32; 2184 assert_eq!( 2185 guest 2186 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2187 .unwrap() 2188 .trim(), 2189 "0000:00:02.0" 2190 ); 2191 2192 // Verify the iommu group of the second disk. 2193 let iommu_group = if acpi { 1 } else { 2 }; 2194 assert_eq!( 2195 guest 2196 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2197 .unwrap() 2198 .trim(), 2199 "0000:00:03.0" 2200 ); 2201 2202 // Verify the iommu group of the network card.
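// Same FDT/ACPI group offset logic as for the disks above.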
2203 let iommu_group = if acpi { 2 } else { 3 }; 2204 assert_eq!( 2205 guest 2206 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2207 .unwrap() 2208 .trim(), 2209 "0000:00:04.0" 2210 ); 2211 }); 2212 2213 let _ = child.kill(); 2214 let output = child.wait_with_output().unwrap(); 2215 2216 handle_child_output(r, &output); 2217 } 2218 2219 fn get_reboot_count(guest: &Guest) -> u32 { 2220 guest 2221 .ssh_command("sudo last | grep -c reboot") 2222 .unwrap() 2223 .trim() 2224 .parse::<u32>() 2225 .unwrap_or_default() 2226 } 2227 2228 fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) { 2229 // Check for PCI device 2230 assert!(guest 2231 .does_device_vendor_pair_match("0x1063", "0x1af4") 2232 .unwrap_or_default()); 2233 2234 // Enable systemd watchdog 2235 guest 2236 .ssh_command(&format!( 2237 "echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf" 2238 )) 2239 .unwrap(); 2240 } 2241 2242 fn make_guest_panic(guest: &Guest) { 2243 // Check for pvpanic device 2244 assert!(guest 2245 .does_device_vendor_pair_match("0x0011", "0x1b36") 2246 .unwrap_or_default()); 2247 2248 // Trigger guest a panic 2249 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 2250 } 2251 2252 mod common_parallel { 2253 use std::{fs::OpenOptions, io::SeekFrom}; 2254 2255 use crate::*; 2256 2257 #[test] 2258 #[cfg(target_arch = "x86_64")] 2259 fn test_focal_hypervisor_fw() { 2260 test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME) 2261 } 2262 2263 #[test] 2264 #[cfg(target_arch = "x86_64")] 2265 fn test_focal_ovmf() { 2266 test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME) 2267 } 2268 2269 #[cfg(target_arch = "x86_64")] 2270 fn test_simple_launch(fw_path: String, disk_path: &str) { 2271 let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string())); 2272 let guest = Guest::new(disk_config); 2273 let event_path = temp_event_monitor_path(&guest.tmp_dir); 2274 2275 let mut child = GuestCommand::new(&guest) 2276 .args(["--cpus", "boot=1"]) 2277 .args(["--memory", "size=512M"]) 2278 .args(["--kernel", fw_path.as_str()]) 2279 .default_disks() 2280 .default_net() 2281 .args(["--serial", "tty", "--console", "off"]) 2282 .args(["--event-monitor", format!("path={event_path}").as_str()]) 2283 .capture_output() 2284 .spawn() 2285 .unwrap(); 2286 2287 let r = std::panic::catch_unwind(|| { 2288 guest.wait_vm_boot(Some(120)).unwrap(); 2289 2290 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2291 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2292 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 2293 2294 let expected_sequential_events = [ 2295 &MetaEvent { 2296 event: "starting".to_string(), 2297 device_id: None, 2298 }, 2299 &MetaEvent { 2300 event: "booting".to_string(), 2301 device_id: None, 2302 }, 2303 &MetaEvent { 2304 event: "booted".to_string(), 2305 device_id: None, 2306 }, 2307 &MetaEvent { 2308 event: "activated".to_string(), 2309 device_id: Some("_disk0".to_string()), 2310 }, 2311 &MetaEvent { 2312 event: "reset".to_string(), 2313 device_id: Some("_disk0".to_string()), 2314 }, 2315 ]; 2316 assert!(check_sequential_events( 2317 &expected_sequential_events, 2318 &event_path 2319 )); 2320 2321 // It's been observed on the Bionic image that udev and snapd 2322 // services can cause some delay in the VM's shutdown. Disabling 2323 // them improves the reliability of this test. 
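// Errors are deliberately ignored here (note the `let _ =`), as these services may already be stopped or absent on some images.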
2324 let _ = guest.ssh_command("sudo systemctl disable udev"); 2325 let _ = guest.ssh_command("sudo systemctl stop udev"); 2326 let _ = guest.ssh_command("sudo systemctl disable snapd"); 2327 let _ = guest.ssh_command("sudo systemctl stop snapd"); 2328 2329 guest.ssh_command("sudo poweroff").unwrap(); 2330 thread::sleep(std::time::Duration::new(20, 0)); 2331 let latest_events = [ 2332 &MetaEvent { 2333 event: "shutdown".to_string(), 2334 device_id: None, 2335 }, 2336 &MetaEvent { 2337 event: "deleted".to_string(), 2338 device_id: None, 2339 }, 2340 &MetaEvent { 2341 event: "shutdown".to_string(), 2342 device_id: None, 2343 }, 2344 ]; 2345 assert!(check_latest_events_exact(&latest_events, &event_path)); 2346 }); 2347 2348 let _ = child.kill(); 2349 let output = child.wait_with_output().unwrap(); 2350 2351 handle_child_output(r, &output); 2352 } 2353 2354 #[test] 2355 fn test_multi_cpu() { 2356 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 2357 let jammy = UbuntuDiskConfig::new(jammy_image); 2358 let guest = Guest::new(Box::new(jammy)); 2359 2360 let mut cmd = GuestCommand::new(&guest); 2361 cmd.args(["--cpus", "boot=2,max=4"]) 2362 .args(["--memory", "size=512M"]) 2363 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2364 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2365 .capture_output() 2366 .default_disks() 2367 .default_net(); 2368 2369 let mut child = cmd.spawn().unwrap(); 2370 2371 let r = std::panic::catch_unwind(|| { 2372 guest.wait_vm_boot(Some(120)).unwrap(); 2373 2374 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 2375 2376 assert_eq!( 2377 guest 2378 .ssh_command( 2379 r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""# 2380 ) 2381 .unwrap() 2382 .trim(), 2383 "smp: Brought up 1 node, 2 CPUs" 2384 ); 2385 }); 2386 2387 let _ = child.kill(); 2388 let output = child.wait_with_output().unwrap(); 2389 2390 handle_child_output(r, &output); 2391 } 2392 2393 #[test] 2394 fn test_cpu_topology_421() { 2395 test_cpu_topology(4, 2, 1, false); 2396 } 2397 2398 #[test] 2399 fn test_cpu_topology_142() { 2400 test_cpu_topology(1, 4, 2, false); 2401 } 2402 2403 #[test] 2404 fn test_cpu_topology_262() { 2405 test_cpu_topology(2, 6, 2, false); 2406 } 2407 2408 #[test] 2409 #[cfg(target_arch = "x86_64")] 2410 #[cfg(not(feature = "mshv"))] 2411 fn test_cpu_physical_bits() { 2412 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2413 let guest = Guest::new(Box::new(focal)); 2414 let max_phys_bits: u8 = 36; 2415 let mut child = GuestCommand::new(&guest) 2416 .args(["--cpus", &format!("max_phys_bits={max_phys_bits}")]) 2417 .args(["--memory", "size=512M"]) 2418 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2419 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2420 .default_disks() 2421 .default_net() 2422 .capture_output() 2423 .spawn() 2424 .unwrap(); 2425 2426 let r = std::panic::catch_unwind(|| { 2427 guest.wait_vm_boot(None).unwrap(); 2428 2429 assert!( 2430 guest 2431 .ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"") 2432 .unwrap() 2433 .trim() 2434 .parse::<u8>() 2435 .unwrap_or(max_phys_bits + 1) <= max_phys_bits, 2436 ); 2437 }); 2438 2439 let _ = child.kill(); 2440 let output = child.wait_with_output().unwrap(); 2441 2442 handle_child_output(r, &output); 2443 } 2444 2445 #[test] 2446 fn test_cpu_affinity() { 2447 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2448 let guest = Guest::new(Box::new(focal)); 2449 2450 // We need the host to 
have at least 4 CPUs if we want to be able 2451 // to run this test. 2452 let host_cpus_count = exec_host_command_output("nproc"); 2453 assert!( 2454 String::from_utf8_lossy(&host_cpus_count.stdout) 2455 .trim() 2456 .parse::<u16>() 2457 .unwrap_or(0) 2458 >= 4 2459 ); 2460 2461 let mut child = GuestCommand::new(&guest) 2462 .args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"]) 2463 .args(["--memory", "size=512M"]) 2464 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2465 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2466 .default_disks() 2467 .default_net() 2468 .capture_output() 2469 .spawn() 2470 .unwrap(); 2471 2472 let r = std::panic::catch_unwind(|| { 2473 guest.wait_vm_boot(None).unwrap(); 2474 let pid = child.id(); 2475 let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2476 assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2"); 2477 let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2478 assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3"); 2479 }); 2480 2481 let _ = child.kill(); 2482 let output = child.wait_with_output().unwrap(); 2483 2484 handle_child_output(r, &output); 2485 } 2486 2487 #[test] 2488 #[cfg(not(feature = "mshv"))] 2489 fn test_large_vm() { 2490 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2491 let guest = Guest::new(Box::new(focal)); 2492 let mut cmd = GuestCommand::new(&guest); 2493 cmd.args(["--cpus", "boot=48"]) 2494 .args(["--memory", "size=5120M"]) 2495 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2496 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2497 .args(["--serial", "tty"]) 2498 .args(["--console", "off"]) 2499 .capture_output() 2500 .default_disks() 2501 .default_net(); 2502 2503 let mut child = cmd.spawn().unwrap(); 2504 2505 guest.wait_vm_boot(None).unwrap(); 2506 2507 let r = std::panic::catch_unwind(|| { 2508 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48); 2509 assert_eq!( 2510 guest 2511 .ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"") 2512 .unwrap() 2513 .trim(), 2514 "0-47" 2515 ); 2516 2517 assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000); 2518 }); 2519 2520 let _ = child.kill(); 2521 let output = child.wait_with_output().unwrap(); 2522 2523 handle_child_output(r, &output); 2524 } 2525 2526 #[test] 2527 #[cfg(not(feature = "mshv"))] 2528 fn test_huge_memory() { 2529 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2530 let guest = Guest::new(Box::new(focal)); 2531 let mut cmd = GuestCommand::new(&guest); 2532 cmd.args(["--cpus", "boot=1"]) 2533 .args(["--memory", "size=128G"]) 2534 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2535 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2536 .capture_output() 2537 .default_disks() 2538 .default_net(); 2539 2540 let mut child = cmd.spawn().unwrap(); 2541 2542 guest.wait_vm_boot(Some(120)).unwrap(); 2543 2544 let r = std::panic::catch_unwind(|| { 2545 assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000); 2546 }); 2547 2548 let _ = child.kill(); 2549 let output = child.wait_with_output().unwrap(); 2550 2551 handle_child_output(r, &output); 2552 } 2553 2554 #[test] 2555 fn test_power_button() { 2556 _test_power_button(false); 2557 } 2558 2559 #[test] 2560 #[cfg(not(feature = "mshv"))] 
2561 fn test_user_defined_memory_regions() { 2562 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2563 let guest = Guest::new(Box::new(focal)); 2564 let api_socket = temp_api_path(&guest.tmp_dir); 2565 2566 let kernel_path = direct_kernel_boot_path(); 2567 2568 let mut child = GuestCommand::new(&guest) 2569 .args(["--cpus", "boot=1"]) 2570 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 2571 .args([ 2572 "--memory-zone", 2573 "id=mem0,size=1G,hotplug_size=2G", 2574 "--memory-zone", 2575 "id=mem1,size=1G,shared=on", 2576 "--memory-zone", 2577 "id=mem2,size=1G,host_numa_node=0,hotplug_size=2G", 2578 ]) 2579 .args(["--kernel", kernel_path.to_str().unwrap()]) 2580 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2581 .args(["--api-socket", &api_socket]) 2582 .capture_output() 2583 .default_disks() 2584 .default_net() 2585 .spawn() 2586 .unwrap(); 2587 2588 let r = std::panic::catch_unwind(|| { 2589 guest.wait_vm_boot(None).unwrap(); 2590 2591 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 2592 2593 guest.enable_memory_hotplug(); 2594 2595 resize_zone_command(&api_socket, "mem0", "3G"); 2596 thread::sleep(std::time::Duration::new(5, 0)); 2597 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2598 resize_zone_command(&api_socket, "mem2", "3G"); 2599 thread::sleep(std::time::Duration::new(5, 0)); 2600 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 2601 resize_zone_command(&api_socket, "mem0", "2G"); 2602 thread::sleep(std::time::Duration::new(5, 0)); 2603 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 2604 resize_zone_command(&api_socket, "mem2", "2G"); 2605 thread::sleep(std::time::Duration::new(5, 0)); 2606 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2607 2608 guest.reboot_linux(0, None); 2609 2610 // Check the amount of RAM after reboot 2611 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2612 assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000); 2613 2614 // Check if we can still resize down to the initial 'boot'size 2615 resize_zone_command(&api_socket, "mem0", "1G"); 2616 thread::sleep(std::time::Duration::new(5, 0)); 2617 assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000); 2618 resize_zone_command(&api_socket, "mem2", "1G"); 2619 thread::sleep(std::time::Duration::new(5, 0)); 2620 assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000); 2621 }); 2622 2623 let _ = child.kill(); 2624 let output = child.wait_with_output().unwrap(); 2625 2626 handle_child_output(r, &output); 2627 } 2628 2629 #[test] 2630 #[cfg(not(feature = "mshv"))] 2631 fn test_guest_numa_nodes() { 2632 _test_guest_numa_nodes(false); 2633 } 2634 2635 #[test] 2636 #[cfg(target_arch = "x86_64")] 2637 fn test_iommu_segments() { 2638 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2639 let focal = UbuntuDiskConfig::new(focal_image); 2640 let guest = Guest::new(Box::new(focal)); 2641 2642 // Prepare another disk file for the virtio-disk device 2643 let test_disk_path = String::from( 2644 guest 2645 .tmp_dir 2646 .as_path() 2647 .join("test-disk.raw") 2648 .to_str() 2649 .unwrap(), 2650 ); 2651 assert!( 2652 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2653 ); 2654 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2655 2656 let api_socket = temp_api_path(&guest.tmp_dir); 2657 let mut cmd = GuestCommand::new(&guest); 2658 2659 cmd.args(["--cpus", "boot=1"]) 2660 
.args(["--api-socket", &api_socket]) 2661 .args(["--memory", "size=512M"]) 2662 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2663 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2664 .args([ 2665 "--platform", 2666 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"), 2667 ]) 2668 .default_disks() 2669 .capture_output() 2670 .default_net(); 2671 2672 let mut child = cmd.spawn().unwrap(); 2673 2674 guest.wait_vm_boot(None).unwrap(); 2675 2676 let r = std::panic::catch_unwind(|| { 2677 let (cmd_success, cmd_output) = remote_command_w_output( 2678 &api_socket, 2679 "add-disk", 2680 Some( 2681 format!( 2682 "path={},id=test0,pci_segment=1,iommu=on", 2683 test_disk_path.as_str() 2684 ) 2685 .as_str(), 2686 ), 2687 ); 2688 assert!(cmd_success); 2689 assert!(String::from_utf8_lossy(&cmd_output) 2690 .contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}")); 2691 2692 // Check IOMMU setup 2693 assert!(guest 2694 .does_device_vendor_pair_match("0x1057", "0x1af4") 2695 .unwrap_or_default()); 2696 assert_eq!( 2697 guest 2698 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 2699 .unwrap() 2700 .trim(), 2701 "0001:00:01.0" 2702 ); 2703 }); 2704 2705 let _ = child.kill(); 2706 let output = child.wait_with_output().unwrap(); 2707 2708 handle_child_output(r, &output); 2709 } 2710 2711 #[test] 2712 fn test_pci_msi() { 2713 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2714 let guest = Guest::new(Box::new(focal)); 2715 let mut cmd = GuestCommand::new(&guest); 2716 cmd.args(["--cpus", "boot=1"]) 2717 .args(["--memory", "size=512M"]) 2718 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2719 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2720 .capture_output() 2721 .default_disks() 2722 .default_net(); 2723 2724 let mut child = cmd.spawn().unwrap(); 2725 2726 guest.wait_vm_boot(None).unwrap(); 2727 2728 #[cfg(target_arch = "x86_64")] 2729 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 2730 #[cfg(target_arch = "aarch64")] 2731 let grep_cmd = "grep -c ITS-MSI /proc/interrupts"; 2732 2733 let r = std::panic::catch_unwind(|| { 2734 assert_eq!( 2735 guest 2736 .ssh_command(grep_cmd) 2737 .unwrap() 2738 .trim() 2739 .parse::<u32>() 2740 .unwrap_or_default(), 2741 12 2742 ); 2743 }); 2744 2745 let _ = child.kill(); 2746 let output = child.wait_with_output().unwrap(); 2747 2748 handle_child_output(r, &output); 2749 } 2750 2751 #[test] 2752 fn test_virtio_net_ctrl_queue() { 2753 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2754 let guest = Guest::new(Box::new(focal)); 2755 let mut cmd = GuestCommand::new(&guest); 2756 cmd.args(["--cpus", "boot=1"]) 2757 .args(["--memory", "size=512M"]) 2758 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2759 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2760 .args(["--net", guest.default_net_string_w_mtu(3000).as_str()]) 2761 .capture_output() 2762 .default_disks(); 2763 2764 let mut child = cmd.spawn().unwrap(); 2765 2766 guest.wait_vm_boot(None).unwrap(); 2767 2768 #[cfg(target_arch = "aarch64")] 2769 let iface = "enp0s4"; 2770 #[cfg(target_arch = "x86_64")] 2771 let iface = "ens4"; 2772 2773 let r = std::panic::catch_unwind(|| { 2774 assert_eq!( 2775 guest 2776 .ssh_command( 2777 format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str() 2778 ) 2779 .unwrap() 2780 .trim(), 2781 "success" 2782 ); 2783 assert_eq!( 2784 guest 2785 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str()) 2786 .unwrap() 2787 .trim(), 2788 "3000" 2789 ); 
2790 }); 2791 2792 let _ = child.kill(); 2793 let output = child.wait_with_output().unwrap(); 2794 2795 handle_child_output(r, &output); 2796 } 2797 2798 #[test] 2799 #[cfg(not(feature = "mshv"))] 2800 fn test_pci_multiple_segments() { 2801 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2802 let guest = Guest::new(Box::new(focal)); 2803 2804 // Prepare another disk file for the virtio-disk device 2805 let test_disk_path = String::from( 2806 guest 2807 .tmp_dir 2808 .as_path() 2809 .join("test-disk.raw") 2810 .to_str() 2811 .unwrap(), 2812 ); 2813 assert!( 2814 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2815 ); 2816 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2817 2818 let mut cmd = GuestCommand::new(&guest); 2819 cmd.args(["--cpus", "boot=1"]) 2820 .args(["--memory", "size=512M"]) 2821 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2822 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2823 .args([ 2824 "--platform", 2825 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 2826 ]) 2827 .args([ 2828 "--disk", 2829 format!( 2830 "path={}", 2831 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2832 ) 2833 .as_str(), 2834 "--disk", 2835 format!( 2836 "path={}", 2837 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2838 ) 2839 .as_str(), 2840 "--disk", 2841 format!("path={test_disk_path},pci_segment=15").as_str(), 2842 ]) 2843 .capture_output() 2844 .default_net(); 2845 2846 let mut child = cmd.spawn().unwrap(); 2847 2848 guest.wait_vm_boot(None).unwrap(); 2849 2850 let grep_cmd = "lspci | grep \"Host bridge\" | wc -l"; 2851 2852 let r = std::panic::catch_unwind(|| { 2853 // There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest. 2854 assert_eq!( 2855 guest 2856 .ssh_command(grep_cmd) 2857 .unwrap() 2858 .trim() 2859 .parse::<u16>() 2860 .unwrap_or_default(), 2861 MAX_NUM_PCI_SEGMENTS 2862 ); 2863 2864 // Check both if /dev/vdc exists and if the block size is 4M. 2865 assert_eq!( 2866 guest 2867 .ssh_command("lsblk | grep vdc | grep -c 4M") 2868 .unwrap() 2869 .trim() 2870 .parse::<u32>() 2871 .unwrap_or_default(), 2872 1 2873 ); 2874 2875 // Mount the device. 2876 guest.ssh_command("mkdir mount_image").unwrap(); 2877 guest 2878 .ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/") 2879 .unwrap(); 2880 // Grant all users with write permission. 2881 guest.ssh_command("sudo chmod a+w mount_image/").unwrap(); 2882 2883 // Write something to the device. 2884 guest 2885 .ssh_command("sudo echo \"bar\" >> mount_image/foo") 2886 .unwrap(); 2887 2888 // Check the content of the block device. The file "foo" should 2889 // contain "bar". 
2890 assert_eq!( 2891 guest 2892 .ssh_command("sudo cat mount_image/foo") 2893 .unwrap() 2894 .trim(), 2895 "bar" 2896 ); 2897 }); 2898 2899 let _ = child.kill(); 2900 let output = child.wait_with_output().unwrap(); 2901 2902 handle_child_output(r, &output); 2903 } 2904 2905 #[test] 2906 fn test_direct_kernel_boot() { 2907 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2908 let guest = Guest::new(Box::new(focal)); 2909 2910 let kernel_path = direct_kernel_boot_path(); 2911 2912 let mut child = GuestCommand::new(&guest) 2913 .args(["--cpus", "boot=1"]) 2914 .args(["--memory", "size=512M"]) 2915 .args(["--kernel", kernel_path.to_str().unwrap()]) 2916 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2917 .default_disks() 2918 .default_net() 2919 .capture_output() 2920 .spawn() 2921 .unwrap(); 2922 2923 let r = std::panic::catch_unwind(|| { 2924 guest.wait_vm_boot(None).unwrap(); 2925 2926 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2927 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2928 2929 let grep_cmd = if cfg!(target_arch = "x86_64") { 2930 "grep -c PCI-MSI /proc/interrupts" 2931 } else { 2932 "grep -c ITS-MSI /proc/interrupts" 2933 }; 2934 assert_eq!( 2935 guest 2936 .ssh_command(grep_cmd) 2937 .unwrap() 2938 .trim() 2939 .parse::<u32>() 2940 .unwrap_or_default(), 2941 12 2942 ); 2943 }); 2944 2945 let _ = child.kill(); 2946 let output = child.wait_with_output().unwrap(); 2947 2948 handle_child_output(r, &output); 2949 } 2950 2951 fn _test_virtio_block(image_name: &str, disable_io_uring: bool) { 2952 let focal = UbuntuDiskConfig::new(image_name.to_string()); 2953 let guest = Guest::new(Box::new(focal)); 2954 2955 let mut workload_path = dirs::home_dir().unwrap(); 2956 workload_path.push("workloads"); 2957 2958 let mut blk_file_path = workload_path; 2959 blk_file_path.push("blk.img"); 2960 2961 let kernel_path = direct_kernel_boot_path(); 2962 2963 let mut cloud_child = GuestCommand::new(&guest) 2964 .args(["--cpus", "boot=4"]) 2965 .args(["--memory", "size=512M,shared=on"]) 2966 .args(["--kernel", kernel_path.to_str().unwrap()]) 2967 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2968 .args([ 2969 "--disk", 2970 format!( 2971 "path={}", 2972 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2973 ) 2974 .as_str(), 2975 "--disk", 2976 format!( 2977 "path={}", 2978 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2979 ) 2980 .as_str(), 2981 "--disk", 2982 format!( 2983 "path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={}", 2984 blk_file_path.to_str().unwrap(), 2985 disable_io_uring 2986 ) 2987 .as_str(), 2988 ]) 2989 .default_net() 2990 .capture_output() 2991 .spawn() 2992 .unwrap(); 2993 2994 let r = std::panic::catch_unwind(|| { 2995 guest.wait_vm_boot(None).unwrap(); 2996 2997 // Check both if /dev/vdc exists and if the block size is 16M. 2998 assert_eq!( 2999 guest 3000 .ssh_command("lsblk | grep vdc | grep -c 16M") 3001 .unwrap() 3002 .trim() 3003 .parse::<u32>() 3004 .unwrap_or_default(), 3005 1 3006 ); 3007 3008 // Check both if /dev/vdc exists and if this block is RO. 3009 assert_eq!( 3010 guest 3011 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 3012 .unwrap() 3013 .trim() 3014 .parse::<u32>() 3015 .unwrap_or_default(), 3016 1 3017 ); 3018 3019 // Check if the number of queues is 4. 
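// Each virtqueue shows up as one directory under /sys/block/vdc/mq, so counting the directories gives the queue count.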
3020 assert_eq!( 3021 guest 3022 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 3023 .unwrap() 3024 .trim() 3025 .parse::<u32>() 3026 .unwrap_or_default(), 3027 4 3028 ); 3029 }); 3030 3031 let _ = cloud_child.kill(); 3032 let output = cloud_child.wait_with_output().unwrap(); 3033 3034 handle_child_output(r, &output); 3035 } 3036 3037 #[test] 3038 fn test_virtio_block() { 3039 _test_virtio_block(FOCAL_IMAGE_NAME, false) 3040 } 3041 3042 #[test] 3043 fn test_virtio_block_disable_io_uring() { 3044 _test_virtio_block(FOCAL_IMAGE_NAME, true) 3045 } 3046 3047 #[test] 3048 fn test_virtio_block_qcow2() { 3049 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false) 3050 } 3051 3052 #[test] 3053 fn test_virtio_block_qcow2_backing_file() { 3054 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE, false) 3055 } 3056 3057 #[test] 3058 fn test_virtio_block_vhd() { 3059 let mut workload_path = dirs::home_dir().unwrap(); 3060 workload_path.push("workloads"); 3061 3062 let mut raw_file_path = workload_path.clone(); 3063 let mut vhd_file_path = workload_path; 3064 raw_file_path.push(FOCAL_IMAGE_NAME); 3065 vhd_file_path.push(FOCAL_IMAGE_NAME_VHD); 3066 3067 // Generate VHD file from RAW file 3068 std::process::Command::new("qemu-img") 3069 .arg("convert") 3070 .arg("-p") 3071 .args(["-f", "raw"]) 3072 .args(["-O", "vpc"]) 3073 .args(["-o", "subformat=fixed"]) 3074 .arg(raw_file_path.to_str().unwrap()) 3075 .arg(vhd_file_path.to_str().unwrap()) 3076 .output() 3077 .expect("Expect generating VHD image from RAW image"); 3078 3079 _test_virtio_block(FOCAL_IMAGE_NAME_VHD, false) 3080 } 3081 3082 #[test] 3083 fn test_virtio_block_vhdx() { 3084 let mut workload_path = dirs::home_dir().unwrap(); 3085 workload_path.push("workloads"); 3086 3087 let mut raw_file_path = workload_path.clone(); 3088 let mut vhdx_file_path = workload_path; 3089 raw_file_path.push(FOCAL_IMAGE_NAME); 3090 vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX); 3091 3092 // Generate dynamic VHDX file from RAW file 3093 std::process::Command::new("qemu-img") 3094 .arg("convert") 3095 .arg("-p") 3096 .args(["-f", "raw"]) 3097 .args(["-O", "vhdx"]) 3098 .arg(raw_file_path.to_str().unwrap()) 3099 .arg(vhdx_file_path.to_str().unwrap()) 3100 .output() 3101 .expect("Expect generating dynamic VHDx image from RAW image"); 3102 3103 _test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false) 3104 } 3105 3106 #[test] 3107 fn test_virtio_block_dynamic_vhdx_expand() { 3108 const VIRTUAL_DISK_SIZE: u64 = 100 << 20; 3109 const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20; 3110 const FULL_VHDX_FILE_SIZE: u64 = 112 << 20; 3111 const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx"; 3112 3113 let mut workload_path = dirs::home_dir().unwrap(); 3114 workload_path.push("workloads"); 3115 3116 let mut vhdx_file_path = workload_path; 3117 vhdx_file_path.push(DYNAMIC_VHDX_NAME); 3118 let vhdx_path = vhdx_file_path.to_str().unwrap(); 3119 3120 // Generate a 100 MiB dynamic VHDX file 3121 std::process::Command::new("qemu-img") 3122 .arg("create") 3123 .args(["-f", "vhdx"]) 3124 .arg(vhdx_path) 3125 .arg(VIRTUAL_DISK_SIZE.to_string()) 3126 .output() 3127 .expect("Expect generating dynamic VHDx image from RAW image"); 3128 3129 // Check if the size matches with empty VHDx file size 3130 assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE); 3131 3132 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3133 let guest = Guest::new(Box::new(focal)); 3134 let kernel_path = direct_kernel_boot_path(); 3135 3136 let mut cloud_child = GuestCommand::new(&guest) 3137 .args(["--cpus", 
"boot=1"]) 3138 .args(["--memory", "size=512M"]) 3139 .args(["--kernel", kernel_path.to_str().unwrap()]) 3140 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3141 .args([ 3142 "--disk", 3143 format!( 3144 "path={}", 3145 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3146 ) 3147 .as_str(), 3148 "--disk", 3149 format!( 3150 "path={}", 3151 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3152 ) 3153 .as_str(), 3154 "--disk", 3155 format!("path={vhdx_path}").as_str(), 3156 ]) 3157 .default_net() 3158 .capture_output() 3159 .spawn() 3160 .unwrap(); 3161 3162 let r = std::panic::catch_unwind(|| { 3163 guest.wait_vm_boot(None).unwrap(); 3164 3165 // Check both if /dev/vdc exists and if the block size is 100 MiB. 3166 assert_eq!( 3167 guest 3168 .ssh_command("lsblk | grep vdc | grep -c 100M") 3169 .unwrap() 3170 .trim() 3171 .parse::<u32>() 3172 .unwrap_or_default(), 3173 1 3174 ); 3175 3176 // Write 100 MB of data to the VHDx disk 3177 guest 3178 .ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100") 3179 .unwrap(); 3180 }); 3181 3182 // Check if the size matches with expected expanded VHDx file size 3183 assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE); 3184 3185 let _ = cloud_child.kill(); 3186 let output = cloud_child.wait_with_output().unwrap(); 3187 3188 handle_child_output(r, &output); 3189 } 3190 3191 fn vhdx_image_size(disk_name: &str) -> u64 { 3192 std::fs::File::open(disk_name) 3193 .unwrap() 3194 .seek(SeekFrom::End(0)) 3195 .unwrap() 3196 } 3197 3198 #[test] 3199 fn test_virtio_block_direct_and_firmware() { 3200 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3201 let guest = Guest::new(Box::new(focal)); 3202 3203 // The OS disk must be copied to a location that is not backed by 3204 // tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails 3205 // with EINVAL because tmpfs doesn't support this flag. 
3206 let mut workloads_path = dirs::home_dir().unwrap(); 3207 workloads_path.push("workloads"); 3208 let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap(); 3209 let mut os_path = os_dir.as_path().to_path_buf(); 3210 os_path.push("osdisk.img"); 3211 rate_limited_copy( 3212 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3213 os_path.as_path(), 3214 ) 3215 .expect("copying of OS disk failed"); 3216 3217 let mut child = GuestCommand::new(&guest) 3218 .args(["--cpus", "boot=1"]) 3219 .args(["--memory", "size=512M"]) 3220 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 3221 .args([ 3222 "--disk", 3223 format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(), 3224 "--disk", 3225 format!( 3226 "path={}", 3227 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3228 ) 3229 .as_str(), 3230 ]) 3231 .default_net() 3232 .capture_output() 3233 .spawn() 3234 .unwrap(); 3235 3236 let r = std::panic::catch_unwind(|| { 3237 guest.wait_vm_boot(Some(120)).unwrap(); 3238 }); 3239 3240 let _ = child.kill(); 3241 let output = child.wait_with_output().unwrap(); 3242 3243 handle_child_output(r, &output); 3244 } 3245 3246 #[test] 3247 fn test_vhost_user_net_default() { 3248 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false) 3249 } 3250 3251 #[test] 3252 fn test_vhost_user_net_named_tap() { 3253 test_vhost_user_net( 3254 Some("mytap0"), 3255 2, 3256 &prepare_vhost_user_net_daemon, 3257 false, 3258 false, 3259 ) 3260 } 3261 3262 #[test] 3263 fn test_vhost_user_net_existing_tap() { 3264 test_vhost_user_net( 3265 Some("vunet-tap0"), 3266 2, 3267 &prepare_vhost_user_net_daemon, 3268 false, 3269 false, 3270 ) 3271 } 3272 3273 #[test] 3274 fn test_vhost_user_net_multiple_queues() { 3275 test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false) 3276 } 3277 3278 #[test] 3279 fn test_vhost_user_net_tap_multiple_queues() { 3280 test_vhost_user_net( 3281 Some("vunet-tap1"), 3282 4, 3283 &prepare_vhost_user_net_daemon, 3284 false, 3285 false, 3286 ) 3287 } 3288 3289 #[test] 3290 fn test_vhost_user_net_host_mac() { 3291 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false) 3292 } 3293 3294 #[test] 3295 fn test_vhost_user_net_client_mode() { 3296 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true) 3297 } 3298 3299 #[test] 3300 fn test_vhost_user_blk_default() { 3301 test_vhost_user_blk(2, false, false, Some(&prepare_vubd)) 3302 } 3303 3304 #[test] 3305 fn test_vhost_user_blk_readonly() { 3306 test_vhost_user_blk(1, true, false, Some(&prepare_vubd)) 3307 } 3308 3309 #[test] 3310 fn test_vhost_user_blk_direct() { 3311 test_vhost_user_blk(1, false, true, Some(&prepare_vubd)) 3312 } 3313 3314 #[test] 3315 fn test_boot_from_vhost_user_blk_default() { 3316 test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd)) 3317 } 3318 3319 #[test] 3320 #[cfg(target_arch = "x86_64")] 3321 fn test_split_irqchip() { 3322 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3323 let guest = Guest::new(Box::new(focal)); 3324 3325 let mut child = GuestCommand::new(&guest) 3326 .args(["--cpus", "boot=1"]) 3327 .args(["--memory", "size=512M"]) 3328 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3329 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3330 .default_disks() 3331 .default_net() 3332 .capture_output() 3333 .spawn() 3334 .unwrap(); 3335 3336 let r = std::panic::catch_unwind(|| { 3337 guest.wait_vm_boot(None).unwrap(); 3338 3339 assert_eq!( 3340 guest 3341 
.ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true") 3342 .unwrap() 3343 .trim() 3344 .parse::<u32>() 3345 .unwrap_or(1), 3346 0 3347 ); 3348 assert_eq!( 3349 guest 3350 .ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true") 3351 .unwrap() 3352 .trim() 3353 .parse::<u32>() 3354 .unwrap_or(1), 3355 0 3356 ); 3357 }); 3358 3359 let _ = child.kill(); 3360 let output = child.wait_with_output().unwrap(); 3361 3362 handle_child_output(r, &output); 3363 } 3364 3365 #[test] 3366 #[cfg(target_arch = "x86_64")] 3367 fn test_dmi_serial_number() { 3368 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3369 let guest = Guest::new(Box::new(focal)); 3370 3371 let mut child = GuestCommand::new(&guest) 3372 .args(["--cpus", "boot=1"]) 3373 .args(["--memory", "size=512M"]) 3374 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3375 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3376 .args(["--platform", "serial_number=a=b;c=d"]) 3377 .default_disks() 3378 .default_net() 3379 .capture_output() 3380 .spawn() 3381 .unwrap(); 3382 3383 let r = std::panic::catch_unwind(|| { 3384 guest.wait_vm_boot(None).unwrap(); 3385 3386 assert_eq!( 3387 guest 3388 .ssh_command("sudo cat /sys/class/dmi/id/product_serial") 3389 .unwrap() 3390 .trim(), 3391 "a=b;c=d" 3392 ); 3393 }); 3394 3395 let _ = child.kill(); 3396 let output = child.wait_with_output().unwrap(); 3397 3398 handle_child_output(r, &output); 3399 } 3400 3401 #[test] 3402 #[cfg(target_arch = "x86_64")] 3403 fn test_dmi_uuid() { 3404 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3405 let guest = Guest::new(Box::new(focal)); 3406 3407 let mut child = GuestCommand::new(&guest) 3408 .args(["--cpus", "boot=1"]) 3409 .args(["--memory", "size=512M"]) 3410 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3411 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3412 .args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"]) 3413 .default_disks() 3414 .default_net() 3415 .capture_output() 3416 .spawn() 3417 .unwrap(); 3418 3419 let r = std::panic::catch_unwind(|| { 3420 guest.wait_vm_boot(None).unwrap(); 3421 3422 assert_eq!( 3423 guest 3424 .ssh_command("sudo cat /sys/class/dmi/id/product_uuid") 3425 .unwrap() 3426 .trim(), 3427 "1e8aa28a-435d-4027-87f4-40dceff1fa0a" 3428 ); 3429 }); 3430 3431 let _ = child.kill(); 3432 let output = child.wait_with_output().unwrap(); 3433 3434 handle_child_output(r, &output); 3435 } 3436 3437 #[test] 3438 #[cfg(target_arch = "x86_64")] 3439 fn test_dmi_oem_strings() { 3440 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3441 let guest = Guest::new(Box::new(focal)); 3442 3443 let s1 = "io.systemd.credential:xx=yy"; 3444 let s2 = "This is a test string"; 3445 3446 let oem_strings = format!("oem_strings=[{s1},{s2}]"); 3447 3448 let mut child = GuestCommand::new(&guest) 3449 .args(["--cpus", "boot=1"]) 3450 .args(["--memory", "size=512M"]) 3451 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3452 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3453 .args(["--platform", &oem_strings]) 3454 .default_disks() 3455 .default_net() 3456 .capture_output() 3457 .spawn() 3458 .unwrap(); 3459 3460 let r = std::panic::catch_unwind(|| { 3461 guest.wait_vm_boot(None).unwrap(); 3462 3463 assert_eq!( 3464 guest 3465 .ssh_command("sudo dmidecode --oem-string count") 3466 .unwrap() 3467 .trim(), 3468 "2" 3469 ); 3470 3471 assert_eq!( 3472 guest 3473 .ssh_command("sudo dmidecode --oem-string 1") 3474 .unwrap() 3475 .trim(), 3476 s1 
3477 ); 3478 3479 assert_eq!( 3480 guest 3481 .ssh_command("sudo dmidecode --oem-string 2") 3482 .unwrap() 3483 .trim(), 3484 s2 3485 ); 3486 }); 3487 3488 let _ = child.kill(); 3489 let output = child.wait_with_output().unwrap(); 3490 3491 handle_child_output(r, &output); 3492 } 3493 3494 #[test] 3495 fn test_virtio_fs() { 3496 _test_virtio_fs(&prepare_virtiofsd, false, None) 3497 } 3498 3499 #[test] 3500 fn test_virtio_fs_hotplug() { 3501 _test_virtio_fs(&prepare_virtiofsd, true, None) 3502 } 3503 3504 #[test] 3505 #[cfg(not(feature = "mshv"))] 3506 fn test_virtio_fs_multi_segment_hotplug() { 3507 _test_virtio_fs(&prepare_virtiofsd, true, Some(15)) 3508 } 3509 3510 #[test] 3511 #[cfg(not(feature = "mshv"))] 3512 fn test_virtio_fs_multi_segment() { 3513 _test_virtio_fs(&prepare_virtiofsd, false, Some(15)) 3514 } 3515 3516 #[test] 3517 fn test_virtio_pmem_persist_writes() { 3518 test_virtio_pmem(false, false) 3519 } 3520 3521 #[test] 3522 fn test_virtio_pmem_discard_writes() { 3523 test_virtio_pmem(true, false) 3524 } 3525 3526 #[test] 3527 fn test_virtio_pmem_with_size() { 3528 test_virtio_pmem(true, true) 3529 } 3530 3531 #[test] 3532 fn test_boot_from_virtio_pmem() { 3533 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3534 let guest = Guest::new(Box::new(focal)); 3535 3536 let kernel_path = direct_kernel_boot_path(); 3537 3538 let mut child = GuestCommand::new(&guest) 3539 .args(["--cpus", "boot=1"]) 3540 .args(["--memory", "size=512M"]) 3541 .args(["--kernel", kernel_path.to_str().unwrap()]) 3542 .args([ 3543 "--disk", 3544 format!( 3545 "path={}", 3546 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3547 ) 3548 .as_str(), 3549 ]) 3550 .default_net() 3551 .args([ 3552 "--pmem", 3553 format!( 3554 "file={},size={}", 3555 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3556 fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) 3557 .unwrap() 3558 .len() 3559 ) 3560 .as_str(), 3561 ]) 3562 .args([ 3563 "--cmdline", 3564 DIRECT_KERNEL_BOOT_CMDLINE 3565 .replace("vda1", "pmem0p1") 3566 .as_str(), 3567 ]) 3568 .capture_output() 3569 .spawn() 3570 .unwrap(); 3571 3572 let r = std::panic::catch_unwind(|| { 3573 guest.wait_vm_boot(None).unwrap(); 3574 3575 // Simple checks to validate the VM booted properly 3576 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3577 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3578 }); 3579 3580 let _ = child.kill(); 3581 let output = child.wait_with_output().unwrap(); 3582 3583 handle_child_output(r, &output); 3584 } 3585 3586 #[test] 3587 fn test_multiple_network_interfaces() { 3588 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3589 let guest = Guest::new(Box::new(focal)); 3590 3591 let kernel_path = direct_kernel_boot_path(); 3592 3593 let mut child = GuestCommand::new(&guest) 3594 .args(["--cpus", "boot=1"]) 3595 .args(["--memory", "size=512M"]) 3596 .args(["--kernel", kernel_path.to_str().unwrap()]) 3597 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3598 .default_disks() 3599 .args([ 3600 "--net", 3601 guest.default_net_string().as_str(), 3602 "--net", 3603 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 3604 "--net", 3605 "tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 3606 ]) 3607 .capture_output() 3608 .spawn() 3609 .unwrap(); 3610 3611 let r = std::panic::catch_unwind(|| { 3612 guest.wait_vm_boot(None).unwrap(); 3613 3614 let tap_count = exec_host_command_output("ip link | grep -c mytap1"); 3615 
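// The explicitly named tap ('mytap1') requested above should now have been created on the host by the VMM.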
assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 3616 3617 // 3 network interfaces + default localhost ==> 4 interfaces 3618 assert_eq!( 3619 guest 3620 .ssh_command("ip -o link | wc -l") 3621 .unwrap() 3622 .trim() 3623 .parse::<u32>() 3624 .unwrap_or_default(), 3625 4 3626 ); 3627 }); 3628 3629 let _ = child.kill(); 3630 let output = child.wait_with_output().unwrap(); 3631 3632 handle_child_output(r, &output); 3633 } 3634 3635 #[test] 3636 #[cfg(target_arch = "aarch64")] 3637 fn test_pmu_on() { 3638 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3639 let guest = Guest::new(Box::new(focal)); 3640 let mut child = GuestCommand::new(&guest) 3641 .args(["--cpus", "boot=1"]) 3642 .args(["--memory", "size=512M"]) 3643 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3644 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3645 .default_disks() 3646 .default_net() 3647 .capture_output() 3648 .spawn() 3649 .unwrap(); 3650 3651 let r = std::panic::catch_unwind(|| { 3652 guest.wait_vm_boot(None).unwrap(); 3653 3654 // Test that PMU exists. 3655 assert_eq!( 3656 guest 3657 .ssh_command(GREP_PMU_IRQ_CMD) 3658 .unwrap() 3659 .trim() 3660 .parse::<u32>() 3661 .unwrap_or_default(), 3662 1 3663 ); 3664 }); 3665 3666 let _ = child.kill(); 3667 let output = child.wait_with_output().unwrap(); 3668 3669 handle_child_output(r, &output); 3670 } 3671 3672 #[test] 3673 fn test_serial_off() { 3674 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3675 let guest = Guest::new(Box::new(focal)); 3676 let mut child = GuestCommand::new(&guest) 3677 .args(["--cpus", "boot=1"]) 3678 .args(["--memory", "size=512M"]) 3679 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3680 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3681 .default_disks() 3682 .default_net() 3683 .args(["--serial", "off"]) 3684 .capture_output() 3685 .spawn() 3686 .unwrap(); 3687 3688 let r = std::panic::catch_unwind(|| { 3689 guest.wait_vm_boot(None).unwrap(); 3690 3691 // Test that there is no ttyS0 3692 assert_eq!( 3693 guest 3694 .ssh_command(GREP_SERIAL_IRQ_CMD) 3695 .unwrap() 3696 .trim() 3697 .parse::<u32>() 3698 .unwrap_or(1), 3699 0 3700 ); 3701 }); 3702 3703 let _ = child.kill(); 3704 let output = child.wait_with_output().unwrap(); 3705 3706 handle_child_output(r, &output); 3707 } 3708 3709 #[test] 3710 fn test_serial_null() { 3711 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3712 let guest = Guest::new(Box::new(focal)); 3713 let mut cmd = GuestCommand::new(&guest); 3714 #[cfg(target_arch = "x86_64")] 3715 let console_str: &str = "console=ttyS0"; 3716 #[cfg(target_arch = "aarch64")] 3717 let console_str: &str = "console=ttyAMA0"; 3718 3719 cmd.args(["--cpus", "boot=1"]) 3720 .args(["--memory", "size=512M"]) 3721 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3722 .args([ 3723 "--cmdline", 3724 DIRECT_KERNEL_BOOT_CMDLINE 3725 .replace("console=hvc0 ", console_str) 3726 .as_str(), 3727 ]) 3728 .default_disks() 3729 .default_net() 3730 .args(["--serial", "null"]) 3731 .args(["--console", "off"]) 3732 .capture_output(); 3733 3734 let mut child = cmd.spawn().unwrap(); 3735 3736 let r = std::panic::catch_unwind(|| { 3737 guest.wait_vm_boot(None).unwrap(); 3738 3739 // Test that there is a ttyS0 3740 assert_eq!( 3741 guest 3742 .ssh_command(GREP_SERIAL_IRQ_CMD) 3743 .unwrap() 3744 .trim() 3745 .parse::<u32>() 3746 .unwrap_or_default(), 3747 1 3748 ); 3749 }); 3750 3751 let _ = child.kill(); 3752 let output = 
child.wait_with_output().unwrap(); 3753 handle_child_output(r, &output); 3754 3755 let r = std::panic::catch_unwind(|| { 3756 assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 3757 }); 3758 3759 handle_child_output(r, &output); 3760 } 3761 3762 #[test] 3763 fn test_serial_tty() { 3764 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3765 let guest = Guest::new(Box::new(focal)); 3766 3767 let kernel_path = direct_kernel_boot_path(); 3768 3769 #[cfg(target_arch = "x86_64")] 3770 let console_str: &str = "console=ttyS0"; 3771 #[cfg(target_arch = "aarch64")] 3772 let console_str: &str = "console=ttyAMA0"; 3773 3774 let mut child = GuestCommand::new(&guest) 3775 .args(["--cpus", "boot=1"]) 3776 .args(["--memory", "size=512M"]) 3777 .args(["--kernel", kernel_path.to_str().unwrap()]) 3778 .args([ 3779 "--cmdline", 3780 DIRECT_KERNEL_BOOT_CMDLINE 3781 .replace("console=hvc0 ", console_str) 3782 .as_str(), 3783 ]) 3784 .default_disks() 3785 .default_net() 3786 .args(["--serial", "tty"]) 3787 .args(["--console", "off"]) 3788 .capture_output() 3789 .spawn() 3790 .unwrap(); 3791 3792 let r = std::panic::catch_unwind(|| { 3793 guest.wait_vm_boot(None).unwrap(); 3794 3795 // Test that there is a ttyS0 3796 assert_eq!( 3797 guest 3798 .ssh_command(GREP_SERIAL_IRQ_CMD) 3799 .unwrap() 3800 .trim() 3801 .parse::<u32>() 3802 .unwrap_or_default(), 3803 1 3804 ); 3805 }); 3806 3807 // This sleep is needed to wait for the login prompt 3808 thread::sleep(std::time::Duration::new(2, 0)); 3809 3810 let _ = child.kill(); 3811 let output = child.wait_with_output().unwrap(); 3812 handle_child_output(r, &output); 3813 3814 let r = std::panic::catch_unwind(|| { 3815 assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 3816 }); 3817 3818 handle_child_output(r, &output); 3819 } 3820 3821 #[test] 3822 fn test_serial_file() { 3823 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3824 let guest = Guest::new(Box::new(focal)); 3825 3826 let serial_path = guest.tmp_dir.as_path().join("/tmp/serial-output"); 3827 #[cfg(target_arch = "x86_64")] 3828 let console_str: &str = "console=ttyS0"; 3829 #[cfg(target_arch = "aarch64")] 3830 let console_str: &str = "console=ttyAMA0"; 3831 3832 let mut child = GuestCommand::new(&guest) 3833 .args(["--cpus", "boot=1"]) 3834 .args(["--memory", "size=512M"]) 3835 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3836 .args([ 3837 "--cmdline", 3838 DIRECT_KERNEL_BOOT_CMDLINE 3839 .replace("console=hvc0 ", console_str) 3840 .as_str(), 3841 ]) 3842 .default_disks() 3843 .default_net() 3844 .args([ 3845 "--serial", 3846 format!("file={}", serial_path.to_str().unwrap()).as_str(), 3847 ]) 3848 .capture_output() 3849 .spawn() 3850 .unwrap(); 3851 3852 let r = std::panic::catch_unwind(|| { 3853 guest.wait_vm_boot(None).unwrap(); 3854 3855 // Test that there is a ttyS0 3856 assert_eq!( 3857 guest 3858 .ssh_command(GREP_SERIAL_IRQ_CMD) 3859 .unwrap() 3860 .trim() 3861 .parse::<u32>() 3862 .unwrap_or_default(), 3863 1 3864 ); 3865 3866 guest.ssh_command("sudo shutdown -h now").unwrap(); 3867 }); 3868 3869 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 3870 let _ = child.kill(); 3871 let output = child.wait_with_output().unwrap(); 3872 handle_child_output(r, &output); 3873 3874 let r = std::panic::catch_unwind(|| { 3875 // Check that the cloud-hypervisor binary actually terminated 3876 assert!(output.status.success()); 3877 3878 // Do this check after shutdown of the VM as an easy way to 
ensure 3879 // all writes are flushed to disk 3880 let mut f = std::fs::File::open(serial_path).unwrap(); 3881 let mut buf = String::new(); 3882 f.read_to_string(&mut buf).unwrap(); 3883 assert!(buf.contains(CONSOLE_TEST_STRING)); 3884 }); 3885 3886 handle_child_output(r, &output); 3887 } 3888 3889 #[test] 3890 fn test_pty_interaction() { 3891 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3892 let guest = Guest::new(Box::new(focal)); 3893 let api_socket = temp_api_path(&guest.tmp_dir); 3894 let serial_option = if cfg!(target_arch = "x86_64") { 3895 " console=ttyS0" 3896 } else { 3897 " console=ttyAMA0" 3898 }; 3899 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 3900 3901 let mut child = GuestCommand::new(&guest) 3902 .args(["--cpus", "boot=1"]) 3903 .args(["--memory", "size=512M"]) 3904 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3905 .args(["--cmdline", &cmdline]) 3906 .default_disks() 3907 .default_net() 3908 .args(["--serial", "null"]) 3909 .args(["--console", "pty"]) 3910 .args(["--api-socket", &api_socket]) 3911 .spawn() 3912 .unwrap(); 3913 3914 let r = std::panic::catch_unwind(|| { 3915 guest.wait_vm_boot(None).unwrap(); 3916 // Get pty fd for console 3917 let console_path = get_pty_path(&api_socket, "console"); 3918 // TODO: Get serial pty test working 3919 let mut cf = std::fs::OpenOptions::new() 3920 .write(true) 3921 .read(true) 3922 .open(console_path) 3923 .unwrap(); 3924 3925 // Some dumb sleeps but we don't want to write 3926 // before the console is up and we don't want 3927 // to try and write the next line before the 3928 // login process is ready. 3929 thread::sleep(std::time::Duration::new(5, 0)); 3930 assert_eq!(cf.write(b"cloud\n").unwrap(), 6); 3931 thread::sleep(std::time::Duration::new(2, 0)); 3932 assert_eq!(cf.write(b"cloud123\n").unwrap(), 9); 3933 thread::sleep(std::time::Duration::new(2, 0)); 3934 assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22); 3935 thread::sleep(std::time::Duration::new(2, 0)); 3936 3937 // read pty and ensure they have a login shell 3938 // some fairly hacky workarounds to avoid looping 3939 // forever in case the channel is blocked getting output 3940 let ptyc = pty_read(cf); 3941 let mut empty = 0; 3942 let mut prev = String::new(); 3943 loop { 3944 thread::sleep(std::time::Duration::new(2, 0)); 3945 match ptyc.try_recv() { 3946 Ok(line) => { 3947 empty = 0; 3948 prev = prev + &line; 3949 if prev.contains("test_pty_console") { 3950 break; 3951 } 3952 } 3953 Err(mpsc::TryRecvError::Empty) => { 3954 empty += 1; 3955 assert!(empty <= 5, "No login on pty"); 3956 } 3957 _ => panic!("No login on pty"), 3958 } 3959 } 3960 3961 guest.ssh_command("sudo shutdown -h now").unwrap(); 3962 }); 3963 3964 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 3965 let _ = child.kill(); 3966 let output = child.wait_with_output().unwrap(); 3967 handle_child_output(r, &output); 3968 3969 let r = std::panic::catch_unwind(|| { 3970 // Check that the cloud-hypervisor binary actually terminated 3971 assert!(output.status.success()) 3972 }); 3973 handle_child_output(r, &output); 3974 } 3975 3976 #[test] 3977 fn test_virtio_console() { 3978 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3979 let guest = Guest::new(Box::new(focal)); 3980 3981 let kernel_path = direct_kernel_boot_path(); 3982 3983 let mut child = GuestCommand::new(&guest) 3984 .args(["--cpus", "boot=1"]) 3985 .args(["--memory", "size=512M"]) 3986 .args(["--kernel", kernel_path.to_str().unwrap()]) 
3987 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3988 .default_disks() 3989 .default_net() 3990 .args(["--console", "tty"]) 3991 .args(["--serial", "null"]) 3992 .capture_output() 3993 .spawn() 3994 .unwrap(); 3995 3996 let text = String::from("On a branch floating down river a cricket, singing."); 3997 let cmd = format!("echo {text} | sudo tee /dev/hvc0"); 3998 3999 let r = std::panic::catch_unwind(|| { 4000 guest.wait_vm_boot(None).unwrap(); 4001 4002 assert!(guest 4003 .does_device_vendor_pair_match("0x1043", "0x1af4") 4004 .unwrap_or_default()); 4005 4006 guest.ssh_command(&cmd).unwrap(); 4007 }); 4008 4009 let _ = child.kill(); 4010 let output = child.wait_with_output().unwrap(); 4011 handle_child_output(r, &output); 4012 4013 let r = std::panic::catch_unwind(|| { 4014 assert!(String::from_utf8_lossy(&output.stdout).contains(&text)); 4015 }); 4016 4017 handle_child_output(r, &output); 4018 } 4019 4020 #[test] 4021 fn test_console_file() { 4022 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4023 let guest = Guest::new(Box::new(focal)); 4024 4025 let console_path = guest.tmp_dir.as_path().join("/tmp/console-output"); 4026 let mut child = GuestCommand::new(&guest) 4027 .args(["--cpus", "boot=1"]) 4028 .args(["--memory", "size=512M"]) 4029 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4030 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4031 .default_disks() 4032 .default_net() 4033 .args([ 4034 "--console", 4035 format!("file={}", console_path.to_str().unwrap()).as_str(), 4036 ]) 4037 .capture_output() 4038 .spawn() 4039 .unwrap(); 4040 4041 guest.wait_vm_boot(None).unwrap(); 4042 4043 guest.ssh_command("sudo shutdown -h now").unwrap(); 4044 4045 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4046 let _ = child.kill(); 4047 let output = child.wait_with_output().unwrap(); 4048 4049 let r = std::panic::catch_unwind(|| { 4050 // Check that the cloud-hypervisor binary actually terminated 4051 assert!(output.status.success()); 4052 4053 // Do this check after shutdown of the VM as an easy way to ensure 4054 // all writes are flushed to disk 4055 let mut f = std::fs::File::open(console_path).unwrap(); 4056 let mut buf = String::new(); 4057 f.read_to_string(&mut buf).unwrap(); 4058 4059 if !buf.contains(CONSOLE_TEST_STRING) { 4060 eprintln!( 4061 "\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ====" 4062 ); 4063 } 4064 assert!(buf.contains(CONSOLE_TEST_STRING)); 4065 }); 4066 4067 handle_child_output(r, &output); 4068 } 4069 4070 #[test] 4071 #[cfg(target_arch = "x86_64")] 4072 #[cfg(not(feature = "mshv"))] 4073 #[ignore = "See #4324"] 4074 // The VFIO integration test starts cloud-hypervisor guest with 3 TAP 4075 // backed networking interfaces, bound through a simple bridge on the host. 4076 // So if the nested cloud-hypervisor succeeds in getting a directly 4077 // assigned interface from its cloud-hypervisor host, we should be able to 4078 // ssh into it, and verify that it's running with the right kernel command 4079 // line (We tag the command line from cloud-hypervisor for that purpose). 4080 // The third device is added to validate that hotplug works correctly since 4081 // it is being added to the L2 VM through hotplugging mechanism. 
4082 // Also, we pass-through a virtio-blk device to the L2 VM to test the 32-bit 4083 // vfio device support 4084 fn test_vfio() { 4085 setup_vfio_network_interfaces(); 4086 4087 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4088 let guest = Guest::new_from_ip_range(Box::new(focal), "172.18", 0); 4089 4090 let mut workload_path = dirs::home_dir().unwrap(); 4091 workload_path.push("workloads"); 4092 4093 let kernel_path = direct_kernel_boot_path(); 4094 4095 let mut vfio_path = workload_path.clone(); 4096 vfio_path.push("vfio"); 4097 4098 let mut cloud_init_vfio_base_path = vfio_path.clone(); 4099 cloud_init_vfio_base_path.push("cloudinit.img"); 4100 4101 // We copy our cloudinit into the vfio mount point, for the nested 4102 // cloud-hypervisor guest to use. 4103 rate_limited_copy( 4104 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 4105 &cloud_init_vfio_base_path, 4106 ) 4107 .expect("copying of cloud-init disk failed"); 4108 4109 let mut vfio_disk_path = workload_path.clone(); 4110 vfio_disk_path.push("vfio.img"); 4111 4112 // Create the vfio disk image 4113 let output = Command::new("mkfs.ext4") 4114 .arg("-d") 4115 .arg(vfio_path.to_str().unwrap()) 4116 .arg(vfio_disk_path.to_str().unwrap()) 4117 .arg("2g") 4118 .output() 4119 .unwrap(); 4120 if !output.status.success() { 4121 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 4122 panic!("mkfs.ext4 command generated an error"); 4123 } 4124 4125 let mut blk_file_path = workload_path; 4126 blk_file_path.push("blk.img"); 4127 4128 let vfio_tap0 = "vfio-tap0"; 4129 let vfio_tap1 = "vfio-tap1"; 4130 let vfio_tap2 = "vfio-tap2"; 4131 let vfio_tap3 = "vfio-tap3"; 4132 4133 let mut child = GuestCommand::new(&guest) 4134 .args(["--cpus", "boot=4"]) 4135 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 4136 .args(["--kernel", kernel_path.to_str().unwrap()]) 4137 .args([ 4138 "--disk", 4139 format!( 4140 "path={}", 4141 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 4142 ) 4143 .as_str(), 4144 "--disk", 4145 format!( 4146 "path={}", 4147 guest.disk_config.disk(DiskType::CloudInit).unwrap() 4148 ) 4149 .as_str(), 4150 "--disk", 4151 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 4152 "--disk", 4153 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 4154 ]) 4155 .args([ 4156 "--cmdline", 4157 format!( 4158 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 4159 ) 4160 .as_str(), 4161 ]) 4162 .args([ 4163 "--net", 4164 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 4165 "--net", 4166 format!( 4167 "tap={},mac={},iommu=on", 4168 vfio_tap1, guest.network.l2_guest_mac1 4169 ) 4170 .as_str(), 4171 "--net", 4172 format!( 4173 "tap={},mac={},iommu=on", 4174 vfio_tap2, guest.network.l2_guest_mac2 4175 ) 4176 .as_str(), 4177 "--net", 4178 format!( 4179 "tap={},mac={},iommu=on", 4180 vfio_tap3, guest.network.l2_guest_mac3 4181 ) 4182 .as_str(), 4183 ]) 4184 .capture_output() 4185 .spawn() 4186 .unwrap(); 4187 4188 thread::sleep(std::time::Duration::new(30, 0)); 4189 4190 let r = std::panic::catch_unwind(|| { 4191 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 4192 thread::sleep(std::time::Duration::new(120, 0)); 4193 4194 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 4195 // added to its kernel command line. 4196 // Let's ssh into it and verify that it's there. 
If it is it means 4197 // we're in the right guest (The L2 one) because the QEMU L1 guest 4198 // does not have this command line tag. 4199 assert_eq!( 4200 guest 4201 .ssh_command_l2_1("grep -c VFIOTAG /proc/cmdline") 4202 .unwrap() 4203 .trim() 4204 .parse::<u32>() 4205 .unwrap_or_default(), 4206 1 4207 ); 4208 4209 // Let's also verify from the second virtio-net device passed to 4210 // the L2 VM. 4211 assert_eq!( 4212 guest 4213 .ssh_command_l2_2("grep -c VFIOTAG /proc/cmdline") 4214 .unwrap() 4215 .trim() 4216 .parse::<u32>() 4217 .unwrap_or_default(), 4218 1 4219 ); 4220 4221 // Check the amount of PCI devices appearing in L2 VM. 4222 assert_eq!( 4223 guest 4224 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4225 .unwrap() 4226 .trim() 4227 .parse::<u32>() 4228 .unwrap_or_default(), 4229 8, 4230 ); 4231 4232 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 4233 assert_eq!( 4234 guest 4235 .ssh_command_l2_1("lsblk | grep vdc | grep -c 16M") 4236 .unwrap() 4237 .trim() 4238 .parse::<u32>() 4239 .unwrap_or_default(), 4240 1 4241 ); 4242 4243 // Hotplug an extra virtio-net device through L2 VM. 4244 guest 4245 .ssh_command_l1( 4246 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 4247 ) 4248 .unwrap(); 4249 guest 4250 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 4251 .unwrap(); 4252 let vfio_hotplug_output = guest 4253 .ssh_command_l1( 4254 "sudo /mnt/ch-remote \ 4255 --api-socket /tmp/ch_api.sock \ 4256 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 4257 ) 4258 .unwrap(); 4259 assert!(vfio_hotplug_output.contains("{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}")); 4260 4261 thread::sleep(std::time::Duration::new(10, 0)); 4262 4263 // Let's also verify from the third virtio-net device passed to 4264 // the L2 VM. This third device has been hotplugged through the L2 4265 // VM, so this is our way to validate hotplug works for VFIO PCI. 4266 assert_eq!( 4267 guest 4268 .ssh_command_l2_3("grep -c VFIOTAG /proc/cmdline") 4269 .unwrap() 4270 .trim() 4271 .parse::<u32>() 4272 .unwrap_or_default(), 4273 1 4274 ); 4275 4276 // Check the amount of PCI devices appearing in L2 VM. 4277 // There should be one more device than before, raising the count 4278 // up to 9 PCI devices. 4279 assert_eq!( 4280 guest 4281 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4282 .unwrap() 4283 .trim() 4284 .parse::<u32>() 4285 .unwrap_or_default(), 4286 9, 4287 ); 4288 4289 // Let's now verify that we can correctly remove the virtio-net 4290 // device through the "remove-device" command responsible for 4291 // unplugging VFIO devices. 4292 guest 4293 .ssh_command_l1( 4294 "sudo /mnt/ch-remote \ 4295 --api-socket /tmp/ch_api.sock \ 4296 remove-device vfio123", 4297 ) 4298 .unwrap(); 4299 thread::sleep(std::time::Duration::new(10, 0)); 4300 4301 // Check the amount of PCI devices appearing in L2 VM is back down 4302 // to 8 devices. 4303 assert_eq!( 4304 guest 4305 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4306 .unwrap() 4307 .trim() 4308 .parse::<u32>() 4309 .unwrap_or_default(), 4310 8, 4311 ); 4312 4313 // Perform memory hotplug in L2 and validate the memory is showing 4314 // up as expected. In order to check, we will use the virtio-net 4315 // device already passed through L2 as a VFIO device, this will 4316 // verify that VFIO devices are functional with memory hotplug. 
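        // (Reader note: the `resize --memory 1073741824` request below is sent
        // to the nested cloud-hypervisor's API socket (/tmp/ch_api.sock inside
        // the L1 guest), growing the L2 VM to 1 GiB. The write to
        // `auto_online_blocks` makes the L2 kernel online hotplugged memory
        // automatically, and `get_total_memory_l2()` appears to report KiB,
        // hence the 960_000 (~1 GiB) threshold.)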
4317 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 4318 guest 4319 .ssh_command_l2_1( 4320 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 4321 ) 4322 .unwrap(); 4323 guest 4324 .ssh_command_l1( 4325 "sudo /mnt/ch-remote \ 4326 --api-socket /tmp/ch_api.sock \ 4327 resize --memory 1073741824", 4328 ) 4329 .unwrap(); 4330 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 4331 }); 4332 4333 let _ = child.kill(); 4334 let output = child.wait_with_output().unwrap(); 4335 4336 cleanup_vfio_network_interfaces(); 4337 4338 handle_child_output(r, &output); 4339 } 4340 4341 #[test] 4342 fn test_direct_kernel_boot_noacpi() { 4343 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4344 let guest = Guest::new(Box::new(focal)); 4345 4346 let kernel_path = direct_kernel_boot_path(); 4347 4348 let mut child = GuestCommand::new(&guest) 4349 .args(["--cpus", "boot=1"]) 4350 .args(["--memory", "size=512M"]) 4351 .args(["--kernel", kernel_path.to_str().unwrap()]) 4352 .args([ 4353 "--cmdline", 4354 format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(), 4355 ]) 4356 .default_disks() 4357 .default_net() 4358 .capture_output() 4359 .spawn() 4360 .unwrap(); 4361 4362 let r = std::panic::catch_unwind(|| { 4363 guest.wait_vm_boot(None).unwrap(); 4364 4365 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 4366 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4367 }); 4368 4369 let _ = child.kill(); 4370 let output = child.wait_with_output().unwrap(); 4371 4372 handle_child_output(r, &output); 4373 } 4374 4375 #[test] 4376 fn test_virtio_vsock() { 4377 _test_virtio_vsock(false) 4378 } 4379 4380 #[test] 4381 fn test_virtio_vsock_hotplug() { 4382 _test_virtio_vsock(true); 4383 } 4384 4385 #[test] 4386 fn test_api_http_shutdown() { 4387 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4388 let guest = Guest::new(Box::new(focal)); 4389 4390 _test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest) 4391 } 4392 4393 #[test] 4394 fn test_api_http_delete() { 4395 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4396 let guest = Guest::new(Box::new(focal)); 4397 4398 _test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest); 4399 } 4400 4401 #[test] 4402 fn test_api_http_pause_resume() { 4403 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4404 let guest = Guest::new(Box::new(focal)); 4405 4406 _test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest) 4407 } 4408 4409 #[test] 4410 fn test_api_http_create_boot() { 4411 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4412 let guest = Guest::new(Box::new(focal)); 4413 4414 _test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest) 4415 } 4416 4417 #[test] 4418 fn test_virtio_iommu() { 4419 _test_virtio_iommu(cfg!(target_arch = "x86_64")) 4420 } 4421 4422 #[test] 4423 // We cannot force the software running in the guest to reprogram the BAR 4424 // with some different addresses, but we have a reliable way of testing it 4425 // with a standard Linux kernel. 4426 // By removing a device from the PCI tree, and then rescanning the tree, 4427 // Linux consistently chooses to reorganize the PCI device BARs to other 4428 // locations in the guest address space. 4429 // This test creates a dedicated PCI network device to be checked as being 4430 // properly probed first, then removing it, and adding it again by doing a 4431 // rescan. 
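// In shell terms, the remove/rescan cycle driven over SSH below boils down to
// (illustrative only, run inside the guest):
//   echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove
//   echo 1 | sudo tee /sys/bus/pci/rescan
// with the BAR address read from the first field of
// /sys/bus/pci/devices/0000:00:05.0/resource before and after the cycle.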
fn test_pci_bar_reprogramming() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = edk2_path();

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args([
            "--net",
            guest.default_net_string().as_str(),
            "--net",
            "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
        ])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // 2 network interfaces + default localhost ==> 3 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            3
        );

        let init_bar_addr = guest
            .ssh_command(
                "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
            )
            .unwrap();

        // Remove the PCI device
        guest
            .ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove")
            .unwrap();

        // Only 1 network interface left + default localhost ==> 2 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // Rescan the PCI bus to bring the device back
        guest
            .ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan")
            .unwrap();

        // Back to 2 network interfaces + default localhost ==> 3 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            3
        );

        let new_bar_addr = guest
            .ssh_command(
                "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
            )
            .unwrap();

        // Let's compare the BAR addresses for our virtio-net device.
        // They should be different as we expect the BAR reprogramming
        // to have happened.
4518 assert_ne!(init_bar_addr, new_bar_addr); 4519 }); 4520 4521 let _ = child.kill(); 4522 let output = child.wait_with_output().unwrap(); 4523 4524 handle_child_output(r, &output); 4525 } 4526 4527 #[test] 4528 fn test_memory_mergeable_off() { 4529 test_memory_mergeable(false) 4530 } 4531 4532 #[test] 4533 #[cfg(target_arch = "x86_64")] 4534 fn test_cpu_hotplug() { 4535 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4536 let guest = Guest::new(Box::new(focal)); 4537 let api_socket = temp_api_path(&guest.tmp_dir); 4538 4539 let kernel_path = direct_kernel_boot_path(); 4540 4541 let mut child = GuestCommand::new(&guest) 4542 .args(["--cpus", "boot=2,max=4"]) 4543 .args(["--memory", "size=512M"]) 4544 .args(["--kernel", kernel_path.to_str().unwrap()]) 4545 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4546 .default_disks() 4547 .default_net() 4548 .args(["--api-socket", &api_socket]) 4549 .capture_output() 4550 .spawn() 4551 .unwrap(); 4552 4553 let r = std::panic::catch_unwind(|| { 4554 guest.wait_vm_boot(None).unwrap(); 4555 4556 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4557 4558 // Resize the VM 4559 let desired_vcpus = 4; 4560 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4561 4562 guest 4563 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4564 .unwrap(); 4565 guest 4566 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4567 .unwrap(); 4568 thread::sleep(std::time::Duration::new(10, 0)); 4569 assert_eq!( 4570 guest.get_cpu_count().unwrap_or_default(), 4571 u32::from(desired_vcpus) 4572 ); 4573 4574 guest.reboot_linux(0, None); 4575 4576 assert_eq!( 4577 guest.get_cpu_count().unwrap_or_default(), 4578 u32::from(desired_vcpus) 4579 ); 4580 4581 // Resize the VM 4582 let desired_vcpus = 2; 4583 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4584 4585 thread::sleep(std::time::Duration::new(10, 0)); 4586 assert_eq!( 4587 guest.get_cpu_count().unwrap_or_default(), 4588 u32::from(desired_vcpus) 4589 ); 4590 4591 // Resize the VM back up to 4 4592 let desired_vcpus = 4; 4593 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4594 4595 guest 4596 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4597 .unwrap(); 4598 guest 4599 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4600 .unwrap(); 4601 thread::sleep(std::time::Duration::new(10, 0)); 4602 assert_eq!( 4603 guest.get_cpu_count().unwrap_or_default(), 4604 u32::from(desired_vcpus) 4605 ); 4606 }); 4607 4608 let _ = child.kill(); 4609 let output = child.wait_with_output().unwrap(); 4610 4611 handle_child_output(r, &output); 4612 } 4613 4614 #[test] 4615 fn test_memory_hotplug() { 4616 #[cfg(target_arch = "aarch64")] 4617 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 4618 #[cfg(target_arch = "x86_64")] 4619 let focal_image = FOCAL_IMAGE_NAME.to_string(); 4620 let focal = UbuntuDiskConfig::new(focal_image); 4621 let guest = Guest::new(Box::new(focal)); 4622 let api_socket = temp_api_path(&guest.tmp_dir); 4623 4624 #[cfg(target_arch = "aarch64")] 4625 let kernel_path = edk2_path(); 4626 #[cfg(target_arch = "x86_64")] 4627 let kernel_path = direct_kernel_boot_path(); 4628 4629 let mut child = GuestCommand::new(&guest) 4630 .args(["--cpus", "boot=2,max=4"]) 4631 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4632 .args(["--kernel", kernel_path.to_str().unwrap()]) 4633 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4634 .default_disks() 4635 .default_net() 
        .args(["--balloon", "size=0"])
        .args(["--api-socket", &api_socket])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        guest.enable_memory_hotplug();

        // Add RAM to the VM
        let desired_ram = 1024 << 20;
        resize_command(&api_socket, None, Some(desired_ram), None, None);

        thread::sleep(std::time::Duration::new(10, 0));
        assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

        // Use balloon to remove RAM from the VM
        let desired_balloon = 512 << 20;
        resize_command(&api_socket, None, None, Some(desired_balloon), None);

        thread::sleep(std::time::Duration::new(10, 0));
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
        assert!(guest.get_total_memory().unwrap_or_default() < 960_000);

        guest.reboot_linux(0, None);

        assert!(guest.get_total_memory().unwrap_or_default() < 960_000);

        // Use the balloon to give the RAM back to the VM
        let desired_balloon = 0;
        resize_command(&api_socket, None, None, Some(desired_balloon), None);

        thread::sleep(std::time::Duration::new(10, 0));

        assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

        guest.enable_memory_hotplug();

        // Add RAM to the VM
        let desired_ram = 2048 << 20;
        resize_command(&api_socket, None, Some(desired_ram), None, None);

        thread::sleep(std::time::Duration::new(10, 0));
        assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000);

        // Remove RAM from the VM (only applies after reboot)
        let desired_ram = 1024 << 20;
        resize_command(&api_socket, None, Some(desired_ram), None, None);

        guest.reboot_linux(1, None);

        assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
        assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
#[cfg(not(feature = "mshv"))]
fn test_virtio_mem() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=2,max=4"])
        .args([
            "--memory",
            "size=512M,hotplug_method=virtio-mem,hotplug_size=8192M",
        ])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .args(["--api-socket", &api_socket])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        guest.enable_memory_hotplug();

        // Add RAM to the VM
        let desired_ram = 1024 << 20;
        resize_command(&api_socket, None, Some(desired_ram), None, None);

        thread::sleep(std::time::Duration::new(10, 0));
        assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

        // Add more RAM to the VM
        let desired_ram = 2048 << 20;
        resize_command(&api_socket, None, Some(desired_ram), None, None);
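        // (Unit note, for readers: `resize_command` takes the desired RAM in
        // bytes, so `2048 << 20` is 2 GiB, while `get_total_memory()` appears
        // to report KiB from the guest's /proc/meminfo, which is why the
        // assertions use thresholds such as 1_920_000 for ~2 GiB.)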
thread::sleep(std::time::Duration::new(10, 0)); 4744 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4745 4746 // Remove RAM from the VM 4747 let desired_ram = 1024 << 20; 4748 resize_command(&api_socket, None, Some(desired_ram), None, None); 4749 4750 thread::sleep(std::time::Duration::new(10, 0)); 4751 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4752 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4753 4754 guest.reboot_linux(0, None); 4755 4756 // Check the amount of memory after reboot is 1GiB 4757 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4758 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4759 4760 // Check we can still resize to 512MiB 4761 let desired_ram = 512 << 20; 4762 resize_command(&api_socket, None, Some(desired_ram), None, None); 4763 thread::sleep(std::time::Duration::new(10, 0)); 4764 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4765 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4766 }); 4767 4768 let _ = child.kill(); 4769 let output = child.wait_with_output().unwrap(); 4770 4771 handle_child_output(r, &output); 4772 } 4773 4774 #[test] 4775 #[cfg(target_arch = "x86_64")] 4776 #[cfg(not(feature = "mshv"))] 4777 // Test both vCPU and memory resizing together 4778 fn test_resize() { 4779 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4780 let guest = Guest::new(Box::new(focal)); 4781 let api_socket = temp_api_path(&guest.tmp_dir); 4782 4783 let kernel_path = direct_kernel_boot_path(); 4784 4785 let mut child = GuestCommand::new(&guest) 4786 .args(["--cpus", "boot=2,max=4"]) 4787 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4788 .args(["--kernel", kernel_path.to_str().unwrap()]) 4789 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4790 .default_disks() 4791 .default_net() 4792 .args(["--api-socket", &api_socket]) 4793 .capture_output() 4794 .spawn() 4795 .unwrap(); 4796 4797 let r = std::panic::catch_unwind(|| { 4798 guest.wait_vm_boot(None).unwrap(); 4799 4800 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4801 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4802 4803 guest.enable_memory_hotplug(); 4804 4805 // Resize the VM 4806 let desired_vcpus = 4; 4807 let desired_ram = 1024 << 20; 4808 resize_command( 4809 &api_socket, 4810 Some(desired_vcpus), 4811 Some(desired_ram), 4812 None, 4813 None, 4814 ); 4815 4816 guest 4817 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4818 .unwrap(); 4819 guest 4820 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4821 .unwrap(); 4822 thread::sleep(std::time::Duration::new(10, 0)); 4823 assert_eq!( 4824 guest.get_cpu_count().unwrap_or_default(), 4825 u32::from(desired_vcpus) 4826 ); 4827 4828 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4829 }); 4830 4831 let _ = child.kill(); 4832 let output = child.wait_with_output().unwrap(); 4833 4834 handle_child_output(r, &output); 4835 } 4836 4837 #[test] 4838 fn test_memory_overhead() { 4839 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4840 let guest = Guest::new(Box::new(focal)); 4841 4842 let kernel_path = direct_kernel_boot_path(); 4843 4844 let guest_memory_size_kb = 512 * 1024; 4845 4846 let mut child = GuestCommand::new(&guest) 4847 .args(["--cpus", "boot=1"]) 4848 .args(["--memory", format!("size={guest_memory_size_kb}K").as_str()]) 4849 .args(["--kernel", kernel_path.to_str().unwrap()]) 4850 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE]) 4851 .default_disks() 4852 .capture_output() 4853 .spawn() 4854 .unwrap(); 4855 4856 thread::sleep(std::time::Duration::new(20, 0)); 4857 4858 let r = std::panic::catch_unwind(|| { 4859 let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb); 4860 eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}"); 4861 assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB); 4862 }); 4863 4864 let _ = child.kill(); 4865 let output = child.wait_with_output().unwrap(); 4866 4867 handle_child_output(r, &output); 4868 } 4869 4870 #[test] 4871 fn test_disk_hotplug() { 4872 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4873 let guest = Guest::new(Box::new(focal)); 4874 4875 #[cfg(target_arch = "x86_64")] 4876 let kernel_path = direct_kernel_boot_path(); 4877 #[cfg(target_arch = "aarch64")] 4878 let kernel_path = edk2_path(); 4879 4880 let api_socket = temp_api_path(&guest.tmp_dir); 4881 4882 let mut child = GuestCommand::new(&guest) 4883 .args(["--api-socket", &api_socket]) 4884 .args(["--cpus", "boot=1"]) 4885 .args(["--memory", "size=512M"]) 4886 .args(["--kernel", kernel_path.to_str().unwrap()]) 4887 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4888 .default_disks() 4889 .default_net() 4890 .capture_output() 4891 .spawn() 4892 .unwrap(); 4893 4894 let r = std::panic::catch_unwind(|| { 4895 guest.wait_vm_boot(None).unwrap(); 4896 4897 // Check /dev/vdc is not there 4898 assert_eq!( 4899 guest 4900 .ssh_command("lsblk | grep -c vdc.*16M || true") 4901 .unwrap() 4902 .trim() 4903 .parse::<u32>() 4904 .unwrap_or(1), 4905 0 4906 ); 4907 4908 // Now let's add the extra disk. 4909 let mut blk_file_path = dirs::home_dir().unwrap(); 4910 blk_file_path.push("workloads"); 4911 blk_file_path.push("blk.img"); 4912 let (cmd_success, cmd_output) = remote_command_w_output( 4913 &api_socket, 4914 "add-disk", 4915 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 4916 ); 4917 assert!(cmd_success); 4918 assert!(String::from_utf8_lossy(&cmd_output) 4919 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 4920 4921 thread::sleep(std::time::Duration::new(10, 0)); 4922 4923 // Check that /dev/vdc exists and the block size is 16M. 4924 assert_eq!( 4925 guest 4926 .ssh_command("lsblk | grep vdc | grep -c 16M") 4927 .unwrap() 4928 .trim() 4929 .parse::<u32>() 4930 .unwrap_or_default(), 4931 1 4932 ); 4933 // And check the block device can be read. 4934 guest 4935 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 4936 .unwrap(); 4937 4938 // Let's remove it the extra disk. 4939 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 4940 thread::sleep(std::time::Duration::new(5, 0)); 4941 // And check /dev/vdc is not there 4942 assert_eq!( 4943 guest 4944 .ssh_command("lsblk | grep -c vdc.*16M || true") 4945 .unwrap() 4946 .trim() 4947 .parse::<u32>() 4948 .unwrap_or(1), 4949 0 4950 ); 4951 4952 // And add it back to validate unplug did work correctly. 4953 let (cmd_success, cmd_output) = remote_command_w_output( 4954 &api_socket, 4955 "add-disk", 4956 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 4957 ); 4958 assert!(cmd_success); 4959 assert!(String::from_utf8_lossy(&cmd_output) 4960 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 4961 4962 thread::sleep(std::time::Duration::new(10, 0)); 4963 4964 // Check that /dev/vdc exists and the block size is 16M. 
4965 assert_eq!( 4966 guest 4967 .ssh_command("lsblk | grep vdc | grep -c 16M") 4968 .unwrap() 4969 .trim() 4970 .parse::<u32>() 4971 .unwrap_or_default(), 4972 1 4973 ); 4974 // And check the block device can be read. 4975 guest 4976 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 4977 .unwrap(); 4978 4979 // Reboot the VM. 4980 guest.reboot_linux(0, None); 4981 4982 // Check still there after reboot 4983 assert_eq!( 4984 guest 4985 .ssh_command("lsblk | grep vdc | grep -c 16M") 4986 .unwrap() 4987 .trim() 4988 .parse::<u32>() 4989 .unwrap_or_default(), 4990 1 4991 ); 4992 4993 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 4994 4995 thread::sleep(std::time::Duration::new(20, 0)); 4996 4997 // Check device has gone away 4998 assert_eq!( 4999 guest 5000 .ssh_command("lsblk | grep -c vdc.*16M || true") 5001 .unwrap() 5002 .trim() 5003 .parse::<u32>() 5004 .unwrap_or(1), 5005 0 5006 ); 5007 5008 guest.reboot_linux(1, None); 5009 5010 // Check device still absent 5011 assert_eq!( 5012 guest 5013 .ssh_command("lsblk | grep -c vdc.*16M || true") 5014 .unwrap() 5015 .trim() 5016 .parse::<u32>() 5017 .unwrap_or(1), 5018 0 5019 ); 5020 }); 5021 5022 let _ = child.kill(); 5023 let output = child.wait_with_output().unwrap(); 5024 5025 handle_child_output(r, &output); 5026 } 5027 5028 fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String { 5029 const LOOP_CONFIGURE: u64 = 0x4c0a; 5030 const LOOP_CTL_GET_FREE: u64 = 0x4c82; 5031 const LOOP_CTL_PATH: &str = "/dev/loop-control"; 5032 const LOOP_DEVICE_PREFIX: &str = "/dev/loop"; 5033 5034 #[repr(C)] 5035 struct LoopInfo64 { 5036 lo_device: u64, 5037 lo_inode: u64, 5038 lo_rdevice: u64, 5039 lo_offset: u64, 5040 lo_sizelimit: u64, 5041 lo_number: u32, 5042 lo_encrypt_type: u32, 5043 lo_encrypt_key_size: u32, 5044 lo_flags: u32, 5045 lo_file_name: [u8; 64], 5046 lo_crypt_name: [u8; 64], 5047 lo_encrypt_key: [u8; 32], 5048 lo_init: [u64; 2], 5049 } 5050 5051 impl Default for LoopInfo64 { 5052 fn default() -> Self { 5053 LoopInfo64 { 5054 lo_device: 0, 5055 lo_inode: 0, 5056 lo_rdevice: 0, 5057 lo_offset: 0, 5058 lo_sizelimit: 0, 5059 lo_number: 0, 5060 lo_encrypt_type: 0, 5061 lo_encrypt_key_size: 0, 5062 lo_flags: 0, 5063 lo_file_name: [0; 64], 5064 lo_crypt_name: [0; 64], 5065 lo_encrypt_key: [0; 32], 5066 lo_init: [0; 2], 5067 } 5068 } 5069 } 5070 5071 #[derive(Default)] 5072 #[repr(C)] 5073 struct LoopConfig { 5074 fd: u32, 5075 block_size: u32, 5076 info: LoopInfo64, 5077 _reserved: [u64; 8], 5078 } 5079 5080 // Open loop-control device 5081 let loop_ctl_file = OpenOptions::new() 5082 .read(true) 5083 .write(true) 5084 .open(LOOP_CTL_PATH) 5085 .unwrap(); 5086 5087 // Request a free loop device 5088 let loop_device_number = 5089 unsafe { libc::ioctl(loop_ctl_file.as_raw_fd(), LOOP_CTL_GET_FREE as _) }; 5090 5091 if loop_device_number < 0 { 5092 panic!("Couldn't find a free loop device"); 5093 } 5094 5095 // Create loop device path 5096 let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}"); 5097 5098 // Open loop device 5099 let loop_device_file = OpenOptions::new() 5100 .read(true) 5101 .write(true) 5102 .open(&loop_device_path) 5103 .unwrap(); 5104 5105 // Open backing file 5106 let backing_file = OpenOptions::new() 5107 .read(true) 5108 .write(true) 5109 .open(backing_file_path) 5110 .unwrap(); 5111 5112 let loop_config = LoopConfig { 5113 fd: backing_file.as_raw_fd() as u32, 5114 block_size, 5115 ..Default::default() 5116 }; 5117 5118 
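    // LOOP_CONFIGURE (0x4c0a) attaches the backing file and applies the block
    // size and LoopInfo64 in a single ioctl. Even right after LOOP_CTL_GET_FREE
    // has handed out a device number, the configuration can transiently fail
    // (for instance if udev or a concurrent test briefly grabs the device),
    // which is presumably why it is retried below before giving up.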
for i in 0..num_retries { 5119 let ret = unsafe { 5120 libc::ioctl( 5121 loop_device_file.as_raw_fd(), 5122 LOOP_CONFIGURE as _, 5123 &loop_config, 5124 ) 5125 }; 5126 if ret != 0 { 5127 if i < num_retries - 1 { 5128 println!( 5129 "Iteration {}: Failed to configure the loop device {}: {}", 5130 i, 5131 loop_device_path, 5132 std::io::Error::last_os_error() 5133 ); 5134 } else { 5135 panic!( 5136 "Failed {} times trying to configure the loop device {}: {}", 5137 num_retries, 5138 loop_device_path, 5139 std::io::Error::last_os_error() 5140 ); 5141 } 5142 } else { 5143 break; 5144 } 5145 5146 // Wait for a bit before retrying 5147 thread::sleep(std::time::Duration::new(5, 0)); 5148 } 5149 5150 loop_device_path 5151 } 5152 5153 #[test] 5154 fn test_virtio_block_topology() { 5155 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5156 let guest = Guest::new(Box::new(focal)); 5157 5158 let kernel_path = direct_kernel_boot_path(); 5159 let test_disk_path = guest.tmp_dir.as_path().join("test.img"); 5160 5161 let output = exec_host_command_output( 5162 format!( 5163 "qemu-img create -f raw {} 16M", 5164 test_disk_path.to_str().unwrap() 5165 ) 5166 .as_str(), 5167 ); 5168 if !output.status.success() { 5169 let stdout = String::from_utf8_lossy(&output.stdout); 5170 let stderr = String::from_utf8_lossy(&output.stderr); 5171 panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}"); 5172 } 5173 5174 let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5); 5175 5176 let mut child = GuestCommand::new(&guest) 5177 .args(["--cpus", "boot=1"]) 5178 .args(["--memory", "size=512M"]) 5179 .args(["--kernel", kernel_path.to_str().unwrap()]) 5180 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5181 .args([ 5182 "--disk", 5183 format!( 5184 "path={}", 5185 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5186 ) 5187 .as_str(), 5188 "--disk", 5189 format!( 5190 "path={}", 5191 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5192 ) 5193 .as_str(), 5194 "--disk", 5195 format!("path={}", &loop_dev).as_str(), 5196 ]) 5197 .default_net() 5198 .capture_output() 5199 .spawn() 5200 .unwrap(); 5201 5202 let r = std::panic::catch_unwind(|| { 5203 guest.wait_vm_boot(None).unwrap(); 5204 5205 // MIN-IO column 5206 assert_eq!( 5207 guest 5208 .ssh_command("lsblk -t| grep vdc | awk '{print $3}'") 5209 .unwrap() 5210 .trim() 5211 .parse::<u32>() 5212 .unwrap_or_default(), 5213 4096 5214 ); 5215 // PHY-SEC column 5216 assert_eq!( 5217 guest 5218 .ssh_command("lsblk -t| grep vdc | awk '{print $5}'") 5219 .unwrap() 5220 .trim() 5221 .parse::<u32>() 5222 .unwrap_or_default(), 5223 4096 5224 ); 5225 // LOG-SEC column 5226 assert_eq!( 5227 guest 5228 .ssh_command("lsblk -t| grep vdc | awk '{print $6}'") 5229 .unwrap() 5230 .trim() 5231 .parse::<u32>() 5232 .unwrap_or_default(), 5233 4096 5234 ); 5235 }); 5236 5237 let _ = child.kill(); 5238 let output = child.wait_with_output().unwrap(); 5239 5240 handle_child_output(r, &output); 5241 5242 Command::new("losetup") 5243 .args(["-d", &loop_dev]) 5244 .output() 5245 .expect("loop device not found"); 5246 } 5247 5248 #[test] 5249 fn test_virtio_balloon_deflate_on_oom() { 5250 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5251 let guest = Guest::new(Box::new(focal)); 5252 5253 let kernel_path = direct_kernel_boot_path(); 5254 5255 let api_socket = temp_api_path(&guest.tmp_dir); 5256 5257 //Let's start a 4G guest with balloon occupied 2G memory 5258 let mut child = GuestCommand::new(&guest) 5259 
.args(["--api-socket", &api_socket]) 5260 .args(["--cpus", "boot=1"]) 5261 .args(["--memory", "size=4G"]) 5262 .args(["--kernel", kernel_path.to_str().unwrap()]) 5263 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5264 .args(["--balloon", "size=2G,deflate_on_oom=on"]) 5265 .default_disks() 5266 .default_net() 5267 .capture_output() 5268 .spawn() 5269 .unwrap(); 5270 5271 let r = std::panic::catch_unwind(|| { 5272 guest.wait_vm_boot(None).unwrap(); 5273 5274 // Wait for balloon memory's initialization and check its size. 5275 // The virtio-balloon driver might take a few seconds to report the 5276 // balloon effective size back to the VMM. 5277 thread::sleep(std::time::Duration::new(20, 0)); 5278 5279 let orig_balloon = balloon_size(&api_socket); 5280 println!("The original balloon memory size is {orig_balloon} bytes"); 5281 assert!(orig_balloon == 2147483648); 5282 5283 // Two steps to verify if the 'deflate_on_oom' parameter works. 5284 // 1st: run a command to trigger an OOM in the guest. 5285 guest 5286 .ssh_command("echo f | sudo tee /proc/sysrq-trigger") 5287 .unwrap(); 5288 5289 // Give some time for the OOM to happen in the guest and be reported 5290 // back to the host. 5291 thread::sleep(std::time::Duration::new(20, 0)); 5292 5293 // 2nd: check balloon_mem's value to verify balloon has been automatically deflated 5294 let deflated_balloon = balloon_size(&api_socket); 5295 println!("After deflating, balloon memory size is {deflated_balloon} bytes"); 5296 // Verify the balloon size deflated 5297 assert!(deflated_balloon < 2147483648); 5298 }); 5299 5300 let _ = child.kill(); 5301 let output = child.wait_with_output().unwrap(); 5302 5303 handle_child_output(r, &output); 5304 } 5305 5306 #[test] 5307 #[cfg(not(feature = "mshv"))] 5308 fn test_virtio_balloon_free_page_reporting() { 5309 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5310 let guest = Guest::new(Box::new(focal)); 5311 5312 //Let's start a 4G guest with balloon occupied 2G memory 5313 let mut child = GuestCommand::new(&guest) 5314 .args(["--cpus", "boot=1"]) 5315 .args(["--memory", "size=4G"]) 5316 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 5317 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5318 .args(["--balloon", "size=0,free_page_reporting=on"]) 5319 .default_disks() 5320 .default_net() 5321 .capture_output() 5322 .spawn() 5323 .unwrap(); 5324 5325 let pid = child.id(); 5326 let r = std::panic::catch_unwind(|| { 5327 guest.wait_vm_boot(None).unwrap(); 5328 5329 // Check the initial RSS is less than 1GiB 5330 let rss = process_rss_kib(pid); 5331 println!("RSS {rss} < 1048576"); 5332 assert!(rss < 1048576); 5333 5334 // Spawn a command inside the guest to consume 2GiB of RAM for 60 5335 // seconds 5336 let guest_ip = guest.network.guest_ip.clone(); 5337 thread::spawn(move || { 5338 ssh_command_ip( 5339 "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60", 5340 &guest_ip, 5341 DEFAULT_SSH_RETRIES, 5342 DEFAULT_SSH_TIMEOUT, 5343 ) 5344 .unwrap(); 5345 }); 5346 5347 // Wait for 50 seconds to make sure the stress command is consuming 5348 // the expected amount of memory. 5349 thread::sleep(std::time::Duration::new(50, 0)); 5350 let rss = process_rss_kib(pid); 5351 println!("RSS {rss} >= 2097152"); 5352 assert!(rss >= 2097152); 5353 5354 // Wait for an extra minute to make sure the stress command has 5355 // completed and that the guest reported the free pages to the VMM 5356 // through the virtio-balloon device. We expect the RSS to be under 5357 // 2GiB. 
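        // (With free_page_reporting=on, the guest's virtio-balloon driver
        // hands pages freed by the exiting `stress` process back to the VMM,
        // which can then discard them (e.g. via madvise), so the host-side
        // RSS of the cloud-hypervisor process should drop back under 2 GiB
        // even though the guest is still configured with 4 GiB.)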
5358 thread::sleep(std::time::Duration::new(60, 0)); 5359 let rss = process_rss_kib(pid); 5360 println!("RSS {rss} < 2097152"); 5361 assert!(rss < 2097152); 5362 }); 5363 5364 let _ = child.kill(); 5365 let output = child.wait_with_output().unwrap(); 5366 5367 handle_child_output(r, &output); 5368 } 5369 5370 #[test] 5371 fn test_pmem_hotplug() { 5372 _test_pmem_hotplug(None) 5373 } 5374 5375 #[test] 5376 fn test_pmem_multi_segment_hotplug() { 5377 _test_pmem_hotplug(Some(15)) 5378 } 5379 5380 fn _test_pmem_hotplug(pci_segment: Option<u16>) { 5381 #[cfg(target_arch = "aarch64")] 5382 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 5383 #[cfg(target_arch = "x86_64")] 5384 let focal_image = FOCAL_IMAGE_NAME.to_string(); 5385 let focal = UbuntuDiskConfig::new(focal_image); 5386 let guest = Guest::new(Box::new(focal)); 5387 5388 #[cfg(target_arch = "x86_64")] 5389 let kernel_path = direct_kernel_boot_path(); 5390 #[cfg(target_arch = "aarch64")] 5391 let kernel_path = edk2_path(); 5392 5393 let api_socket = temp_api_path(&guest.tmp_dir); 5394 5395 let mut cmd = GuestCommand::new(&guest); 5396 5397 cmd.args(["--api-socket", &api_socket]) 5398 .args(["--cpus", "boot=1"]) 5399 .args(["--memory", "size=512M"]) 5400 .args(["--kernel", kernel_path.to_str().unwrap()]) 5401 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5402 .default_disks() 5403 .default_net() 5404 .capture_output(); 5405 5406 if pci_segment.is_some() { 5407 cmd.args([ 5408 "--platform", 5409 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5410 ]); 5411 } 5412 5413 let mut child = cmd.spawn().unwrap(); 5414 5415 let r = std::panic::catch_unwind(|| { 5416 guest.wait_vm_boot(None).unwrap(); 5417 5418 // Check /dev/pmem0 is not there 5419 assert_eq!( 5420 guest 5421 .ssh_command("lsblk | grep -c pmem0 || true") 5422 .unwrap() 5423 .trim() 5424 .parse::<u32>() 5425 .unwrap_or(1), 5426 0 5427 ); 5428 5429 let pmem_temp_file = TempFile::new().unwrap(); 5430 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 5431 let (cmd_success, cmd_output) = remote_command_w_output( 5432 &api_socket, 5433 "add-pmem", 5434 Some(&format!( 5435 "file={},id=test0{}", 5436 pmem_temp_file.as_path().to_str().unwrap(), 5437 if let Some(pci_segment) = pci_segment { 5438 format!(",pci_segment={pci_segment}") 5439 } else { 5440 "".to_owned() 5441 } 5442 )), 5443 ); 5444 assert!(cmd_success); 5445 if let Some(pci_segment) = pci_segment { 5446 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5447 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5448 ))); 5449 } else { 5450 assert!(String::from_utf8_lossy(&cmd_output) 5451 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5452 } 5453 5454 // Check that /dev/pmem0 exists and the block size is 128M 5455 assert_eq!( 5456 guest 5457 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5458 .unwrap() 5459 .trim() 5460 .parse::<u32>() 5461 .unwrap_or_default(), 5462 1 5463 ); 5464 5465 guest.reboot_linux(0, None); 5466 5467 // Check still there after reboot 5468 assert_eq!( 5469 guest 5470 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5471 .unwrap() 5472 .trim() 5473 .parse::<u32>() 5474 .unwrap_or_default(), 5475 1 5476 ); 5477 5478 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5479 5480 thread::sleep(std::time::Duration::new(20, 0)); 5481 5482 // Check device has gone away 5483 assert_eq!( 5484 guest 5485 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5486 .unwrap() 5487 .trim() 5488 .parse::<u32>() 5489 .unwrap_or(1), 5490 0 5491 ); 
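        // (The integer passed to `reboot_linux` appears to be the number of
        // reboots the guest has already gone through, presumably letting
        // test_infra confirm that exactly one more boot happened before the
        // follow-up checks run.)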
5492 5493 guest.reboot_linux(1, None); 5494 5495 // Check still absent after reboot 5496 assert_eq!( 5497 guest 5498 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5499 .unwrap() 5500 .trim() 5501 .parse::<u32>() 5502 .unwrap_or(1), 5503 0 5504 ); 5505 }); 5506 5507 let _ = child.kill(); 5508 let output = child.wait_with_output().unwrap(); 5509 5510 handle_child_output(r, &output); 5511 } 5512 5513 #[test] 5514 fn test_net_hotplug() { 5515 _test_net_hotplug(None) 5516 } 5517 5518 #[test] 5519 fn test_net_multi_segment_hotplug() { 5520 _test_net_hotplug(Some(15)) 5521 } 5522 5523 fn _test_net_hotplug(pci_segment: Option<u16>) { 5524 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5525 let guest = Guest::new(Box::new(focal)); 5526 5527 #[cfg(target_arch = "x86_64")] 5528 let kernel_path = direct_kernel_boot_path(); 5529 #[cfg(target_arch = "aarch64")] 5530 let kernel_path = edk2_path(); 5531 5532 let api_socket = temp_api_path(&guest.tmp_dir); 5533 5534 // Boot without network 5535 let mut cmd = GuestCommand::new(&guest); 5536 5537 cmd.args(["--api-socket", &api_socket]) 5538 .args(["--cpus", "boot=1"]) 5539 .args(["--memory", "size=512M"]) 5540 .args(["--kernel", kernel_path.to_str().unwrap()]) 5541 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5542 .default_disks() 5543 .capture_output(); 5544 5545 if pci_segment.is_some() { 5546 cmd.args([ 5547 "--platform", 5548 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5549 ]); 5550 } 5551 5552 let mut child = cmd.spawn().unwrap(); 5553 5554 thread::sleep(std::time::Duration::new(20, 0)); 5555 5556 let r = std::panic::catch_unwind(|| { 5557 // Add network 5558 let (cmd_success, cmd_output) = remote_command_w_output( 5559 &api_socket, 5560 "add-net", 5561 Some( 5562 format!( 5563 "{}{},id=test0", 5564 guest.default_net_string(), 5565 if let Some(pci_segment) = pci_segment { 5566 format!(",pci_segment={pci_segment}") 5567 } else { 5568 "".to_owned() 5569 } 5570 ) 5571 .as_str(), 5572 ), 5573 ); 5574 assert!(cmd_success); 5575 5576 if let Some(pci_segment) = pci_segment { 5577 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5578 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5579 ))); 5580 } else { 5581 assert!(String::from_utf8_lossy(&cmd_output) 5582 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}")); 5583 } 5584 5585 thread::sleep(std::time::Duration::new(5, 0)); 5586 5587 // 1 network interfaces + default localhost ==> 2 interfaces 5588 assert_eq!( 5589 guest 5590 .ssh_command("ip -o link | wc -l") 5591 .unwrap() 5592 .trim() 5593 .parse::<u32>() 5594 .unwrap_or_default(), 5595 2 5596 ); 5597 5598 // Remove network 5599 assert!(remote_command(&api_socket, "remove-device", Some("test0"),)); 5600 thread::sleep(std::time::Duration::new(5, 0)); 5601 5602 let (cmd_success, cmd_output) = remote_command_w_output( 5603 &api_socket, 5604 "add-net", 5605 Some( 5606 format!( 5607 "{}{},id=test1", 5608 guest.default_net_string(), 5609 if let Some(pci_segment) = pci_segment { 5610 format!(",pci_segment={pci_segment}") 5611 } else { 5612 "".to_owned() 5613 } 5614 ) 5615 .as_str(), 5616 ), 5617 ); 5618 assert!(cmd_success); 5619 5620 if let Some(pci_segment) = pci_segment { 5621 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5622 "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5623 ))); 5624 } else { 5625 assert!(String::from_utf8_lossy(&cmd_output) 5626 .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}")); 5627 } 5628 5629 
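        // (The re-added interface is expected to land on the same BDF again:
        // the earlier remove-device freed that slot, so the next hotplug on
        // the same segment reuses it, which is what the bdf assertions above
        // verify.)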
        thread::sleep(std::time::Duration::new(5, 0));

        // 1 network interface + default localhost ==> 2 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        guest.reboot_linux(0, None);

        // Check still there after reboot
        // 1 network interface + default localhost ==> 2 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
fn test_initramfs() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    #[cfg(target_arch = "x86_64")]
    let mut kernels = vec![direct_kernel_boot_path()];
    #[cfg(target_arch = "aarch64")]
    let kernels = vec![direct_kernel_boot_path()];

    #[cfg(target_arch = "x86_64")]
    {
        let mut pvh_kernel_path = workload_path.clone();
        pvh_kernel_path.push("vmlinux");
        kernels.push(pvh_kernel_path);
    }

    let mut initramfs_path = workload_path;
    initramfs_path.push("alpine_initramfs.img");

    let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg");
    let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}");

    kernels.iter().for_each(|k_path| {
        let mut child = GuestCommand::new(&guest)
            .args(["--kernel", k_path.to_str().unwrap()])
            .args(["--initramfs", initramfs_path.to_str().unwrap()])
            .args(["--cmdline", &cmdline])
            .capture_output()
            .spawn()
            .unwrap();

        thread::sleep(std::time::Duration::new(20, 0));

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        let r = std::panic::catch_unwind(|| {
            let s = String::from_utf8_lossy(&output.stdout);

            assert_ne!(s.lines().position(|line| line == test_string), None);
        });

        handle_child_output(r, &output);
    });
}

// One thing to note about the tests below: the virtio-net device is heavily
// exercised by every ssh command, so there is no need for a dedicated check
// that the device came back correctly after a snapshot/restore or migration.
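// For orientation, the snapshot/restore flow exercised below maps onto roughly
// this command-line sequence (illustrative only; socket and directory paths
// are made up):
//   ch-remote --api-socket /tmp/ch.sock.1 pause
//   ch-remote --api-socket /tmp/ch.sock.1 snapshot file:///tmp/snapshot
//   cloud-hypervisor --api-socket /tmp/ch.sock.2 \
//       --restore source_url=file:///tmp/snapshot
//   ch-remote --api-socket /tmp/ch.sock.2 resume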
5715 #[test] 5716 #[cfg(not(feature = "mshv"))] 5717 fn test_snapshot_restore_hotplug_virtiomem() { 5718 _test_snapshot_restore(true); 5719 } 5720 5721 #[test] 5722 fn test_snapshot_restore_basic() { 5723 _test_snapshot_restore(false); 5724 } 5725 5726 fn _test_snapshot_restore(use_hotplug: bool) { 5727 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5728 let guest = Guest::new(Box::new(focal)); 5729 let kernel_path = direct_kernel_boot_path(); 5730 5731 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 5732 5733 let net_id = "net123"; 5734 let net_params = format!( 5735 "id={},tap=,mac={},ip={},mask=255.255.255.0", 5736 net_id, guest.network.guest_mac, guest.network.host_ip 5737 ); 5738 let mut mem_params = "size=4G"; 5739 5740 if use_hotplug { 5741 mem_params = "size=4G,hotplug_method=virtio-mem,hotplug_size=32G" 5742 } 5743 5744 let cloudinit_params = format!( 5745 "path={},iommu=on", 5746 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5747 ); 5748 5749 let socket = temp_vsock_path(&guest.tmp_dir); 5750 let event_path = temp_event_monitor_path(&guest.tmp_dir); 5751 5752 let mut child = GuestCommand::new(&guest) 5753 .args(["--api-socket", &api_socket_source]) 5754 .args(["--event-monitor", format!("path={event_path}").as_str()]) 5755 .args(["--cpus", "boot=4"]) 5756 .args(["--memory", mem_params]) 5757 .args(["--balloon", "size=0"]) 5758 .args(["--kernel", kernel_path.to_str().unwrap()]) 5759 .args([ 5760 "--disk", 5761 format!( 5762 "path={}", 5763 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5764 ) 5765 .as_str(), 5766 "--disk", 5767 cloudinit_params.as_str(), 5768 ]) 5769 .args(["--net", net_params.as_str()]) 5770 .args(["--vsock", format!("cid=3,socket={socket}").as_str()]) 5771 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5772 .capture_output() 5773 .spawn() 5774 .unwrap(); 5775 5776 let console_text = String::from("On a branch floating down river a cricket, singing."); 5777 // Create the snapshot directory 5778 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 5779 5780 let r = std::panic::catch_unwind(|| { 5781 guest.wait_vm_boot(None).unwrap(); 5782 5783 // Check the number of vCPUs 5784 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 5785 // Check the guest RAM 5786 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 5787 if use_hotplug { 5788 // Increase guest RAM with virtio-mem 5789 resize_command( 5790 &api_socket_source, 5791 None, 5792 Some(6 << 30), 5793 None, 5794 Some(&event_path), 5795 ); 5796 thread::sleep(std::time::Duration::new(5, 0)); 5797 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 5798 // Use balloon to remove RAM from the VM 5799 resize_command( 5800 &api_socket_source, 5801 None, 5802 None, 5803 Some(1 << 30), 5804 Some(&event_path), 5805 ); 5806 thread::sleep(std::time::Duration::new(5, 0)); 5807 let total_memory = guest.get_total_memory().unwrap_or_default(); 5808 assert!(total_memory > 4_800_000); 5809 assert!(total_memory < 5_760_000); 5810 } 5811 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 5812 guest.check_devices_common(Some(&socket), Some(&console_text), None); 5813 5814 // x86_64: We check that removing and adding back the virtio-net device 5815 // does not break the snapshot/restore support for virtio-pci. 5816 // This is an important thing to test as the hotplug will 5817 // trigger a PCI BAR reprogramming, which is a good way of 5818 // checking if the stored resources are correctly restored. 
5819 // Unplug the virtio-net device 5820 // AArch64: Device hotplug is currently not supported, skipping here. 5821 #[cfg(target_arch = "x86_64")] 5822 { 5823 assert!(remote_command( 5824 &api_socket_source, 5825 "remove-device", 5826 Some(net_id), 5827 )); 5828 thread::sleep(std::time::Duration::new(10, 0)); 5829 let latest_events = [&MetaEvent { 5830 event: "device-removed".to_string(), 5831 device_id: Some(net_id.to_string()), 5832 }]; 5833 assert!(check_latest_events_exact(&latest_events, &event_path)); 5834 5835 // Plug the virtio-net device again 5836 assert!(remote_command( 5837 &api_socket_source, 5838 "add-net", 5839 Some(net_params.as_str()), 5840 )); 5841 thread::sleep(std::time::Duration::new(10, 0)); 5842 } 5843 5844 // Pause the VM 5845 assert!(remote_command(&api_socket_source, "pause", None)); 5846 let latest_events = [ 5847 &MetaEvent { 5848 event: "pausing".to_string(), 5849 device_id: None, 5850 }, 5851 &MetaEvent { 5852 event: "paused".to_string(), 5853 device_id: None, 5854 }, 5855 ]; 5856 assert!(check_latest_events_exact(&latest_events, &event_path)); 5857 5858 // Take a snapshot from the VM 5859 assert!(remote_command( 5860 &api_socket_source, 5861 "snapshot", 5862 Some(format!("file://{snapshot_dir}").as_str()), 5863 )); 5864 5865 // Wait to make sure the snapshot is completed 5866 thread::sleep(std::time::Duration::new(10, 0)); 5867 5868 let latest_events = [ 5869 &MetaEvent { 5870 event: "snapshotting".to_string(), 5871 device_id: None, 5872 }, 5873 &MetaEvent { 5874 event: "snapshotted".to_string(), 5875 device_id: None, 5876 }, 5877 ]; 5878 assert!(check_latest_events_exact(&latest_events, &event_path)); 5879 }); 5880 5881 // Shutdown the source VM and check console output 5882 let _ = child.kill(); 5883 let output = child.wait_with_output().unwrap(); 5884 handle_child_output(r, &output); 5885 5886 let r = std::panic::catch_unwind(|| { 5887 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 5888 }); 5889 5890 handle_child_output(r, &output); 5891 5892 // Remove the vsock socket file. 
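        // (presumably so that the restored VM can re-create and bind the
        // vsock backend socket at the same path without hitting a stale file)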
5893 Command::new("rm") 5894 .arg("-f") 5895 .arg(socket.as_str()) 5896 .output() 5897 .unwrap(); 5898 5899 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 5900 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 5901 5902 // Restore the VM from the snapshot 5903 let mut child = GuestCommand::new(&guest) 5904 .args(["--api-socket", &api_socket_restored]) 5905 .args([ 5906 "--event-monitor", 5907 format!("path={event_path_restored}").as_str(), 5908 ]) 5909 .args([ 5910 "--restore", 5911 format!("source_url=file://{snapshot_dir}").as_str(), 5912 ]) 5913 .capture_output() 5914 .spawn() 5915 .unwrap(); 5916 5917 // Wait for the VM to be restored 5918 thread::sleep(std::time::Duration::new(10, 0)); 5919 let expected_events = [ 5920 &MetaEvent { 5921 event: "starting".to_string(), 5922 device_id: None, 5923 }, 5924 &MetaEvent { 5925 event: "activated".to_string(), 5926 device_id: Some("__console".to_string()), 5927 }, 5928 &MetaEvent { 5929 event: "activated".to_string(), 5930 device_id: Some("__rng".to_string()), 5931 }, 5932 &MetaEvent { 5933 event: "restoring".to_string(), 5934 device_id: None, 5935 }, 5936 ]; 5937 assert!(check_sequential_events( 5938 &expected_events, 5939 &event_path_restored 5940 )); 5941 let latest_events = [&MetaEvent { 5942 event: "restored".to_string(), 5943 device_id: None, 5944 }]; 5945 assert!(check_latest_events_exact( 5946 &latest_events, 5947 &event_path_restored 5948 )); 5949 5950 let r = std::panic::catch_unwind(|| { 5951 // Resume the VM 5952 assert!(remote_command(&api_socket_restored, "resume", None)); 5953 let latest_events = [ 5954 &MetaEvent { 5955 event: "resuming".to_string(), 5956 device_id: None, 5957 }, 5958 &MetaEvent { 5959 event: "resumed".to_string(), 5960 device_id: None, 5961 }, 5962 ]; 5963 assert!(check_latest_events_exact( 5964 &latest_events, 5965 &event_path_restored 5966 )); 5967 5968 // Perform same checks to validate VM has been properly restored 5969 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 5970 let total_memory = guest.get_total_memory().unwrap_or_default(); 5971 if !use_hotplug { 5972 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 5973 } else { 5974 assert!(total_memory > 4_800_000); 5975 assert!(total_memory < 5_760_000); 5976 // Deflate balloon to restore entire RAM to the VM 5977 resize_command(&api_socket_restored, None, None, Some(0), None); 5978 thread::sleep(std::time::Duration::new(5, 0)); 5979 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 5980 // Decrease guest RAM with virtio-mem 5981 resize_command(&api_socket_restored, None, Some(5 << 30), None, None); 5982 thread::sleep(std::time::Duration::new(5, 0)); 5983 let total_memory = guest.get_total_memory().unwrap_or_default(); 5984 assert!(total_memory > 4_800_000); 5985 assert!(total_memory < 5_760_000); 5986 } 5987 5988 guest.check_devices_common(Some(&socket), Some(&console_text), None); 5989 }); 5990 // Shutdown the target VM and check console output 5991 let _ = child.kill(); 5992 let output = child.wait_with_output().unwrap(); 5993 handle_child_output(r, &output); 5994 5995 let r = std::panic::catch_unwind(|| { 5996 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 5997 }); 5998 5999 handle_child_output(r, &output); 6000 } 6001 6002 #[test] 6003 fn test_counters() { 6004 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6005 let guest = Guest::new(Box::new(focal)); 6006 let api_socket = temp_api_path(&guest.tmp_dir); 
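        // The exact counter values depend on how much traffic the SSH session
        // itself generates, so the test only asserts a strict increase between
        // two readings; the 8 MiB dd write below guarantees the block counters
        // move as well.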
        let mut cmd = GuestCommand::new(&guest);
        cmd.args(["--cpus", "boot=1"])
            .args(["--memory", "size=512M"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", guest.default_net_string().as_str()])
            .args(["--api-socket", &api_socket])
            .capture_output();

        let mut child = cmd.spawn().unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            let orig_counters = get_counters(&api_socket);
            guest
                .ssh_command("dd if=/dev/zero of=test count=8 bs=1M")
                .unwrap();

            let new_counters = get_counters(&api_socket);

            // Check that all the counters have increased
            assert!(new_counters > orig_counters);
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }

    #[test]
    #[cfg(feature = "guest_debug")]
    fn test_coredump() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);

        let mut cmd = GuestCommand::new(&guest);
        cmd.args(["--cpus", "boot=4"])
            .args(["--memory", "size=4G"])
            .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
            .default_disks()
            .args(["--net", guest.default_net_string().as_str()])
            .args(["--api-socket", &api_socket])
            .capture_output();

        let mut child = cmd.spawn().unwrap();
        let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir);

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            assert!(remote_command(&api_socket, "pause", None));

            assert!(remote_command(
                &api_socket,
                "coredump",
                Some(format!("file://{vmcore_file}").as_str()),
            ));

            // The number of CORE notes should equal the number of vCPUs
            let readelf_core_num_cmd =
                format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l");
            let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd);
            assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4");

            // The number of QEMU notes should also equal the number of vCPUs
            let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l");
            let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd);
            assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4");
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }

    #[test]
    #[cfg(feature = "guest_debug")]
    fn test_coredump_no_pause() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);

        let mut cmd = GuestCommand::new(&guest);
        cmd.args(["--cpus", "boot=4"])
            .args(["--memory", "size=4G"])
            .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
            .default_disks()
            .args(["--net", guest.default_net_string().as_str()])
            .args(["--api-socket", &api_socket])
            .capture_output();

        let mut child = cmd.spawn().unwrap();
        let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir);

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();
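            // Unlike test_coredump above, the VM is not paused first: the
            // coredump request is issued against a running VM and the state
            // check below confirms it is still "Running" afterwards.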
assert!(remote_command( 6111 &api_socket, 6112 "coredump", 6113 Some(format!("file://{vmcore_file}").as_str()), 6114 )); 6115 6116 assert_eq!(vm_state(&api_socket), "Running"); 6117 }); 6118 6119 let _ = child.kill(); 6120 let output = child.wait_with_output().unwrap(); 6121 6122 handle_child_output(r, &output); 6123 } 6124 6125 #[test] 6126 fn test_watchdog() { 6127 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6128 let guest = Guest::new(Box::new(focal)); 6129 let api_socket = temp_api_path(&guest.tmp_dir); 6130 6131 let kernel_path = direct_kernel_boot_path(); 6132 6133 let mut cmd = GuestCommand::new(&guest); 6134 cmd.args(["--cpus", "boot=1"]) 6135 .args(["--memory", "size=512M"]) 6136 .args(["--kernel", kernel_path.to_str().unwrap()]) 6137 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6138 .default_disks() 6139 .args(["--net", guest.default_net_string().as_str()]) 6140 .args(["--watchdog"]) 6141 .args(["--api-socket", &api_socket]) 6142 .capture_output(); 6143 6144 let mut child = cmd.spawn().unwrap(); 6145 6146 let r = std::panic::catch_unwind(|| { 6147 guest.wait_vm_boot(None).unwrap(); 6148 6149 let mut expected_reboot_count = 1; 6150 6151 // Enable the watchdog with a 15s timeout 6152 enable_guest_watchdog(&guest, 15); 6153 6154 // Reboot and check that systemd has activated the watchdog 6155 guest.ssh_command("sudo reboot").unwrap(); 6156 guest.wait_vm_boot(None).unwrap(); 6157 expected_reboot_count += 1; 6158 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6159 assert_eq!( 6160 guest 6161 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 6162 .unwrap() 6163 .trim() 6164 .parse::<u32>() 6165 .unwrap_or_default(), 6166 2 6167 ); 6168 6169 // Allow some normal time to elapse to check we don't get spurious reboots 6170 thread::sleep(std::time::Duration::new(40, 0)); 6171 // Check no reboot 6172 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6173 6174 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 
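            // ('echo s' asks the kernel to sync filesystems and 'echo c' then
            // crashes it via sysrq. Once the crashed guest stops pinging the
            // watchdog device, the VMM is expected to reset the VM, which is
            // what the reboot count check below verifies.)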
            guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap();
            // Allow some time for the watchdog to trigger (max 30s) and reboot to happen
            guest.wait_vm_boot(Some(50)).unwrap();
            // Check a reboot is triggered by the watchdog
            expected_reboot_count += 1;
            assert_eq!(get_reboot_count(&guest), expected_reboot_count);

            #[cfg(target_arch = "x86_64")]
            {
                // Now pause the VM and remain offline for 30s
                assert!(remote_command(&api_socket, "pause", None));
                thread::sleep(std::time::Duration::new(30, 0));
                assert!(remote_command(&api_socket, "resume", None));

                // Check no reboot
                assert_eq!(get_reboot_count(&guest), expected_reboot_count);
            }
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }

    #[test]
    fn test_pvpanic() {
        let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(jammy));
        let api_socket = temp_api_path(&guest.tmp_dir);
        let event_path = temp_event_monitor_path(&guest.tmp_dir);

        let kernel_path = direct_kernel_boot_path();

        let mut cmd = GuestCommand::new(&guest);
        cmd.args(["--cpus", "boot=1"])
            .args(["--memory", "size=512M"])
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", guest.default_net_string().as_str()])
            .args(["--pvpanic"])
            .args(["--api-socket", &api_socket])
            .args(["--event-monitor", format!("path={event_path}").as_str()])
            .capture_output();

        let mut child = cmd.spawn().unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Trigger a guest panic
            make_guest_panic(&guest);

            // Wait a while for the panic event to be reported
            thread::sleep(std::time::Duration::new(10, 0));

            let expected_sequential_events = [&MetaEvent {
                event: "panic".to_string(),
                device_id: None,
            }];
            assert!(check_latest_events_exact(
                &expected_sequential_events,
                &event_path
            ));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }

    #[test]
    fn test_tap_from_fd() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let kernel_path = direct_kernel_boot_path();

        // Create a TAP interface with multi-queue enabled
        let num_queue_pairs: usize = 2;

        use std::str::FromStr;
        let taps = net_util::open_tap(
            Some("chtap0"),
            Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()),
            None,
            &mut None,
            None,
            num_queue_pairs,
            Some(libc::O_RDWR | libc::O_NONBLOCK),
        )
        .unwrap();

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={num_queue_pairs}")])
            .args(["--memory", "size=512M"])
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args([
                "--net",
                &format!(
                    "fd=[{},{}],mac={},num_queues={}",
                    taps[0].as_raw_fd(),
                    taps[1].as_raw_fd(),
                    guest.network.guest_mac,
                    num_queue_pairs * 2
                ),
            ])
            .capture_output()
            .spawn()
            .unwrap();
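        // The guest was handed the two TAP queue file descriptors directly
        // (fd=[..]) instead of a TAP name. With multi-queue, each queue pair
        // contributes one RX and one TX virtqueue, which is why num_queues is
        // set to num_queue_pairs * 2 in the --net argument above.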
6289 let r = std::panic::catch_unwind(|| { 6290 guest.wait_vm_boot(None).unwrap(); 6291 6292 assert_eq!( 6293 guest 6294 .ssh_command("ip -o link | wc -l") 6295 .unwrap() 6296 .trim() 6297 .parse::<u32>() 6298 .unwrap_or_default(), 6299 2 6300 ); 6301 6302 guest.reboot_linux(0, None); 6303 6304 assert_eq!( 6305 guest 6306 .ssh_command("ip -o link | wc -l") 6307 .unwrap() 6308 .trim() 6309 .parse::<u32>() 6310 .unwrap_or_default(), 6311 2 6312 ); 6313 }); 6314 6315 let _ = child.kill(); 6316 let output = child.wait_with_output().unwrap(); 6317 6318 handle_child_output(r, &output); 6319 } 6320 6321 // By design, a guest VM won't be able to connect to the host 6322 // machine when using a macvtap network interface (while it can 6323 // communicate externally). As a workaround, this integration 6324 // test creates two macvtap interfaces in 'bridge' mode on the 6325 // same physical net interface, one for the guest and one for 6326 // the host. With additional setup on the IP address and the 6327 // routing table, it enables the communications between the 6328 // guest VM and the host machine. 6329 // Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail 6330 fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) { 6331 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6332 let guest = Guest::new(Box::new(focal)); 6333 let api_socket = temp_api_path(&guest.tmp_dir); 6334 6335 #[cfg(target_arch = "x86_64")] 6336 let kernel_path = direct_kernel_boot_path(); 6337 #[cfg(target_arch = "aarch64")] 6338 let kernel_path = edk2_path(); 6339 6340 let phy_net = "eth0"; 6341 6342 // Create a macvtap interface for the guest VM to use 6343 assert!(exec_host_command_status(&format!( 6344 "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mod bridge" 6345 )) 6346 .success()); 6347 assert!(exec_host_command_status(&format!( 6348 "sudo ip link set {} address {} up", 6349 guest_macvtap_name, guest.network.guest_mac 6350 )) 6351 .success()); 6352 assert!( 6353 exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success() 6354 ); 6355 6356 let tap_index = 6357 fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap(); 6358 let tap_device = format!("/dev/tap{}", tap_index.trim()); 6359 6360 assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success()); 6361 6362 let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap(); 6363 let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6364 assert!(tap_fd1 > 0); 6365 let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6366 assert!(tap_fd2 > 0); 6367 6368 // Create a macvtap on the same physical net interface for 6369 // the host machine to use 6370 assert!(exec_host_command_status(&format!( 6371 "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mod bridge" 6372 )) 6373 .success()); 6374 // Use default mask "255.255.255.0" 6375 assert!(exec_host_command_status(&format!( 6376 "sudo ip address add {}/24 dev {}", 6377 guest.network.host_ip, host_macvtap_name 6378 )) 6379 .success()); 6380 assert!( 6381 exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up")) 6382 .success() 6383 ); 6384 6385 let mut guest_command = GuestCommand::new(&guest); 6386 guest_command 6387 .args(["--cpus", "boot=2"]) 6388 .args(["--memory", "size=512M"]) 6389 .args(["--kernel", kernel_path.to_str().unwrap()]) 6390 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE]) 6391 .default_disks() 6392 .args(["--api-socket", &api_socket]); 6393 6394 let net_params = format!( 6395 "fd=[{},{}],mac={},num_queues=4", 6396 tap_fd1, tap_fd2, guest.network.guest_mac 6397 ); 6398 6399 if !hotplug { 6400 guest_command.args(["--net", &net_params]); 6401 } 6402 6403 let mut child = guest_command.capture_output().spawn().unwrap(); 6404 6405 if hotplug { 6406 // Give some time to the VMM process to listen to the API 6407 // socket. This is the only requirement to avoid the following 6408 // call to ch-remote from failing. 6409 thread::sleep(std::time::Duration::new(10, 0)); 6410 // Hotplug the virtio-net device 6411 let (cmd_success, cmd_output) = 6412 remote_command_w_output(&api_socket, "add-net", Some(&net_params)); 6413 assert!(cmd_success); 6414 #[cfg(target_arch = "x86_64")] 6415 assert!(String::from_utf8_lossy(&cmd_output) 6416 .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}")); 6417 #[cfg(target_arch = "aarch64")] 6418 assert!(String::from_utf8_lossy(&cmd_output) 6419 .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}")); 6420 } 6421 6422 // The functional connectivity provided by the virtio-net device 6423 // gets tested through wait_vm_boot() as it expects to receive a 6424 // HTTP request, and through the SSH command as well. 6425 let r = std::panic::catch_unwind(|| { 6426 guest.wait_vm_boot(None).unwrap(); 6427 6428 assert_eq!( 6429 guest 6430 .ssh_command("ip -o link | wc -l") 6431 .unwrap() 6432 .trim() 6433 .parse::<u32>() 6434 .unwrap_or_default(), 6435 2 6436 ); 6437 6438 guest.reboot_linux(0, None); 6439 6440 assert_eq!( 6441 guest 6442 .ssh_command("ip -o link | wc -l") 6443 .unwrap() 6444 .trim() 6445 .parse::<u32>() 6446 .unwrap_or_default(), 6447 2 6448 ); 6449 }); 6450 6451 let _ = child.kill(); 6452 6453 exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}")); 6454 exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}")); 6455 6456 let output = child.wait_with_output().unwrap(); 6457 6458 handle_child_output(r, &output); 6459 } 6460 6461 #[test] 6462 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6463 fn test_macvtap() { 6464 _test_macvtap(false, "guestmacvtap0", "hostmacvtap0") 6465 } 6466 6467 #[test] 6468 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6469 fn test_macvtap_hotplug() { 6470 _test_macvtap(true, "guestmacvtap1", "hostmacvtap1") 6471 } 6472 6473 #[test] 6474 #[cfg(not(feature = "mshv"))] 6475 fn test_ovs_dpdk() { 6476 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6477 let guest1 = Guest::new(Box::new(focal1)); 6478 6479 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6480 let guest2 = Guest::new(Box::new(focal2)); 6481 let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir)); 6482 6483 let (mut child1, mut child2) = 6484 setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false); 6485 6486 // Create the snapshot directory 6487 let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir); 6488 6489 let r = std::panic::catch_unwind(|| { 6490 // Remove one of the two ports from the OVS bridge 6491 assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success()); 6492 6493 // Spawn a new netcat listener in the first VM 6494 let guest_ip = guest1.network.guest_ip.clone(); 6495 thread::spawn(move || { 6496 ssh_command_ip( 6497 "nc -l 12345", 6498 &guest_ip, 6499 DEFAULT_SSH_RETRIES, 6500 DEFAULT_SSH_TIMEOUT, 6501 ) 6502 .unwrap(); 6503 }); 6504 6505 // Wait for the 
server to be listening 6506 thread::sleep(std::time::Duration::new(5, 0)); 6507 6508 // Check the connection fails this time 6509 assert!(guest2.ssh_command("nc -vz 172.100.0.1 12345").is_err()); 6510 6511 // Add the OVS port back 6512 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 6513 6514 // And finally check the connection is functional again 6515 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6516 6517 // Pause the VM 6518 assert!(remote_command(&api_socket_source, "pause", None)); 6519 6520 // Take a snapshot from the VM 6521 assert!(remote_command( 6522 &api_socket_source, 6523 "snapshot", 6524 Some(format!("file://{snapshot_dir}").as_str()), 6525 )); 6526 6527 // Wait to make sure the snapshot is completed 6528 thread::sleep(std::time::Duration::new(10, 0)); 6529 }); 6530 6531 // Shutdown the source VM 6532 let _ = child2.kill(); 6533 let output = child2.wait_with_output().unwrap(); 6534 handle_child_output(r, &output); 6535 6536 // Remove the vhost-user socket file. 6537 Command::new("rm") 6538 .arg("-f") 6539 .arg("/tmp/dpdkvhostclient2") 6540 .output() 6541 .unwrap(); 6542 6543 let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir)); 6544 // Restore the VM from the snapshot 6545 let mut child2 = GuestCommand::new(&guest2) 6546 .args(["--api-socket", &api_socket_restored]) 6547 .args([ 6548 "--restore", 6549 format!("source_url=file://{snapshot_dir}").as_str(), 6550 ]) 6551 .capture_output() 6552 .spawn() 6553 .unwrap(); 6554 6555 // Wait for the VM to be restored 6556 thread::sleep(std::time::Duration::new(10, 0)); 6557 6558 let r = std::panic::catch_unwind(|| { 6559 // Resume the VM 6560 assert!(remote_command(&api_socket_restored, "resume", None)); 6561 6562 // Spawn a new netcat listener in the first VM 6563 let guest_ip = guest1.network.guest_ip.clone(); 6564 thread::spawn(move || { 6565 ssh_command_ip( 6566 "nc -l 12345", 6567 &guest_ip, 6568 DEFAULT_SSH_RETRIES, 6569 DEFAULT_SSH_TIMEOUT, 6570 ) 6571 .unwrap(); 6572 }); 6573 6574 // Wait for the server to be listening 6575 thread::sleep(std::time::Duration::new(5, 0)); 6576 6577 // And check the connection is still functional after restore 6578 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6579 }); 6580 6581 let _ = child1.kill(); 6582 let _ = child2.kill(); 6583 6584 let output = child1.wait_with_output().unwrap(); 6585 child2.wait().unwrap(); 6586 6587 cleanup_ovs_dpdk(); 6588 6589 handle_child_output(r, &output); 6590 } 6591 6592 fn setup_spdk_nvme(nvme_dir: &std::path::Path) { 6593 cleanup_spdk_nvme(); 6594 6595 assert!(exec_host_command_status(&format!( 6596 "mkdir -p {}", 6597 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6598 )) 6599 .success()); 6600 assert!(exec_host_command_status(&format!( 6601 "truncate {} -s 128M", 6602 nvme_dir.join("test-disk.raw").to_str().unwrap() 6603 )) 6604 .success()); 6605 assert!(exec_host_command_status(&format!( 6606 "mkfs.ext4 {}", 6607 nvme_dir.join("test-disk.raw").to_str().unwrap() 6608 )) 6609 .success()); 6610 6611 // Start the SPDK nvmf_tgt daemon to present NVMe device as a VFIO user device 6612 Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt") 6613 .args(["-i", "0", "-m", "0x1"]) 6614 .spawn() 6615 .unwrap(); 6616 thread::sleep(std::time::Duration::new(2, 0)); 6617 6618 assert!(exec_host_command_status( 6619 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER" 6620 ) 6621 
.success()); 6622 assert!(exec_host_command_status(&format!( 6623 "/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512", 6624 nvme_dir.join("test-disk.raw").to_str().unwrap() 6625 )) 6626 .success()); 6627 assert!(exec_host_command_status( 6628 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test" 6629 ) 6630 .success()); 6631 assert!(exec_host_command_status( 6632 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode test" 6633 ) 6634 .success()); 6635 assert!(exec_host_command_status(&format!( 6636 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0", 6637 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6638 )) 6639 .success()); 6640 } 6641 6642 fn cleanup_spdk_nvme() { 6643 exec_host_command_status("pkill -f nvmf_tgt"); 6644 } 6645 6646 #[test] 6647 fn test_vfio_user() { 6648 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 6649 let jammy = UbuntuDiskConfig::new(jammy_image); 6650 let guest = Guest::new(Box::new(jammy)); 6651 6652 let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user"); 6653 setup_spdk_nvme(spdk_nvme_dir.as_path()); 6654 6655 let api_socket = temp_api_path(&guest.tmp_dir); 6656 let mut child = GuestCommand::new(&guest) 6657 .args(["--api-socket", &api_socket]) 6658 .args(["--cpus", "boot=1"]) 6659 .args(["--memory", "size=512M,shared=on,hugepages=on"]) 6660 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6661 .args(["--serial", "tty", "--console", "off"]) 6662 .default_disks() 6663 .default_net() 6664 .capture_output() 6665 .spawn() 6666 .unwrap(); 6667 6668 let r = std::panic::catch_unwind(|| { 6669 guest.wait_vm_boot(None).unwrap(); 6670 6671 // Hotplug the SPDK-NVMe device to the VM 6672 let (cmd_success, cmd_output) = remote_command_w_output( 6673 &api_socket, 6674 "add-user-device", 6675 Some(&format!( 6676 "socket={},id=vfio_user0", 6677 spdk_nvme_dir 6678 .as_path() 6679 .join("nvme-vfio-user/cntrl") 6680 .to_str() 6681 .unwrap(), 6682 )), 6683 ); 6684 assert!(cmd_success); 6685 assert!(String::from_utf8_lossy(&cmd_output) 6686 .contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}")); 6687 6688 thread::sleep(std::time::Duration::new(10, 0)); 6689 6690 // Check both if /dev/nvme exists and if the block size is 128M. 6691 assert_eq!( 6692 guest 6693 .ssh_command("lsblk | grep nvme0n1 | grep -c 128M") 6694 .unwrap() 6695 .trim() 6696 .parse::<u32>() 6697 .unwrap_or_default(), 6698 1 6699 ); 6700 6701 // Check changes persist after reboot 6702 assert_eq!( 6703 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6704 "" 6705 ); 6706 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 6707 guest 6708 .ssh_command("echo test123 | sudo tee /mnt/test") 6709 .unwrap(); 6710 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 6711 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 6712 6713 guest.reboot_linux(0, None); 6714 assert_eq!( 6715 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6716 "" 6717 ); 6718 assert_eq!( 6719 guest.ssh_command("sudo cat /mnt/test").unwrap().trim(), 6720 "test123" 6721 ); 6722 }); 6723 6724 cleanup_spdk_nvme(); 6725 6726 let _ = child.kill(); 6727 let output = child.wait_with_output().unwrap(); 6728 6729 handle_child_output(r, &output); 6730 } 6731 6732 #[test] 6733 #[cfg(target_arch = "x86_64")] 6734 fn test_vdpa_block() { 6735 // Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded. 
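        // If the module is not loaded, skip the test silently: the simulated
        // /dev/vhost-vdpa-* devices used below only exist when vdpa_sim_blk
        // is present on the host.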
        if !exec_host_command_status("lsmod | grep vdpa_sim_blk").success() {
            return;
        }

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);

        let kernel_path = direct_kernel_boot_path();

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", "boot=2"])
            .args(["--memory", "size=512M,hugepages=on"])
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .default_net()
            .args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"])
            .args(["--platform", "num_pci_segments=2,iommu_segments=1"])
            .args(["--api-socket", &api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Check both that /dev/vdc exists and that the disk size is 128M.
            assert_eq!(
                guest
                    .ssh_command("lsblk | grep vdc | grep -c 128M")
                    .unwrap()
                    .trim()
                    .parse::<u32>()
                    .unwrap_or_default(),
                1
            );

            // Check the content of the block device after we write to it.
            // The vdpa-sim-blk device should let us read back what we previously wrote.
            guest
                .ssh_command("sudo bash -c 'echo foobar > /dev/vdc'")
                .unwrap();
            assert_eq!(
                guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(),
                "foobar"
            );

            // Hotplug an extra vDPA block device behind the vIOMMU
            // Add a new vDPA device to the VM
            let (cmd_success, cmd_output) = remote_command_w_output(
                &api_socket,
                "add-vdpa",
                Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"),
            );
            assert!(cmd_success);
            assert!(String::from_utf8_lossy(&cmd_output)
                .contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}"));

            thread::sleep(std::time::Duration::new(10, 0));

            // Check IOMMU setup
            assert!(guest
                .does_device_vendor_pair_match("0x1057", "0x1af4")
                .unwrap_or_default());
            assert_eq!(
                guest
                    .ssh_command("ls /sys/kernel/iommu_groups/0/devices")
                    .unwrap()
                    .trim(),
                "0001:00:01.0"
            );

            // Check both that /dev/vdd exists and that the disk size is 128M.
            assert_eq!(
                guest
                    .ssh_command("lsblk | grep vdd | grep -c 128M")
                    .unwrap()
                    .trim()
                    .parse::<u32>()
                    .unwrap_or_default(),
                1
            );

            // Write some content to the block device we've just plugged.
            guest
                .ssh_command("sudo bash -c 'echo foobar > /dev/vdd'")
                .unwrap();

            // Check we can read the content back.
6826 assert_eq!( 6827 guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(), 6828 "foobar" 6829 ); 6830 6831 // Unplug the device 6832 let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0")); 6833 assert!(cmd_success); 6834 thread::sleep(std::time::Duration::new(10, 0)); 6835 6836 // Check /dev/vdd doesn't exist anymore 6837 assert_eq!( 6838 guest 6839 .ssh_command("lsblk | grep -c vdd || true") 6840 .unwrap() 6841 .trim() 6842 .parse::<u32>() 6843 .unwrap_or(1), 6844 0 6845 ); 6846 }); 6847 6848 let _ = child.kill(); 6849 let output = child.wait_with_output().unwrap(); 6850 6851 handle_child_output(r, &output); 6852 } 6853 6854 #[test] 6855 #[cfg(target_arch = "x86_64")] 6856 fn test_vdpa_net() { 6857 // Before trying to run the test, verify the vdpa_sim_net module is correctly loaded. 6858 if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() { 6859 return; 6860 } 6861 6862 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6863 let guest = Guest::new(Box::new(focal)); 6864 6865 let kernel_path = direct_kernel_boot_path(); 6866 6867 let mut child = GuestCommand::new(&guest) 6868 .args(["--cpus", "boot=2"]) 6869 .args(["--memory", "size=512M,hugepages=on"]) 6870 .args(["--kernel", kernel_path.to_str().unwrap()]) 6871 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6872 .default_disks() 6873 .default_net() 6874 .args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"]) 6875 .capture_output() 6876 .spawn() 6877 .unwrap(); 6878 6879 let r = std::panic::catch_unwind(|| { 6880 guest.wait_vm_boot(None).unwrap(); 6881 6882 // Check we can find network interface related to vDPA device 6883 assert_eq!( 6884 guest 6885 .ssh_command("ip -o link | grep -c ens6") 6886 .unwrap() 6887 .trim() 6888 .parse::<u32>() 6889 .unwrap_or(0), 6890 1 6891 ); 6892 6893 guest 6894 .ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6") 6895 .unwrap(); 6896 guest.ssh_command("sudo ip link set up dev ens6").unwrap(); 6897 6898 // Check there is no packet yet on both TX/RX of the network interface 6899 assert_eq!( 6900 guest 6901 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'") 6902 .unwrap() 6903 .trim() 6904 .parse::<u32>() 6905 .unwrap_or(0), 6906 2 6907 ); 6908 6909 // Send 6 packets with ping command 6910 guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap(); 6911 6912 // Check we can find 6 packets on both TX/RX of the network interface 6913 assert_eq!( 6914 guest 6915 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'") 6916 .unwrap() 6917 .trim() 6918 .parse::<u32>() 6919 .unwrap_or(0), 6920 2 6921 ); 6922 6923 // No need to check for hotplug as we already tested it through 6924 // test_vdpa_block() 6925 }); 6926 6927 let _ = child.kill(); 6928 let output = child.wait_with_output().unwrap(); 6929 6930 handle_child_output(r, &output); 6931 } 6932 6933 #[test] 6934 #[cfg(target_arch = "x86_64")] 6935 fn test_tpm() { 6936 let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 6937 let guest = Guest::new(Box::new(focal)); 6938 6939 let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir); 6940 6941 let mut guest_cmd = GuestCommand::new(&guest); 6942 guest_cmd 6943 .args(["--cpus", "boot=1"]) 6944 .args(["--memory", "size=512M"]) 6945 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6946 .args(["--tpm", &format!("socket={swtpm_socket_path}")]) 6947 .capture_output() 6948 .default_disks() 6949 .default_net(); 6950 6951 // Start swtpm daemon 6952 let mut 
swtpm_child = swtpm_command.spawn().unwrap(); 6953 thread::sleep(std::time::Duration::new(10, 0)); 6954 let mut child = guest_cmd.spawn().unwrap(); 6955 let r = std::panic::catch_unwind(|| { 6956 guest.wait_vm_boot(None).unwrap(); 6957 assert_eq!( 6958 guest.ssh_command("ls /dev/tpm0").unwrap().trim(), 6959 "/dev/tpm0" 6960 ); 6961 guest.ssh_command("sudo tpm2_selftest -f").unwrap(); 6962 guest 6963 .ssh_command("echo 'hello' > /tmp/checksum_test; ") 6964 .unwrap(); 6965 guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap(); 6966 }); 6967 6968 let _ = swtpm_child.kill(); 6969 let _d_out = swtpm_child.wait_with_output().unwrap(); 6970 6971 let _ = child.kill(); 6972 let output = child.wait_with_output().unwrap(); 6973 6974 handle_child_output(r, &output); 6975 } 6976 } 6977 6978 mod dbus_api { 6979 use crate::*; 6980 6981 // Start cloud-hypervisor with no VM parameters, running both the HTTP 6982 // and DBus APIs. Alternate calls to the external APIs (HTTP and DBus) 6983 // to create a VM, boot it, and verify that it can be shut down and then 6984 // booted again. 6985 #[test] 6986 fn test_api_dbus_and_http_interleaved() { 6987 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6988 let guest = Guest::new(Box::new(focal)); 6989 let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir); 6990 let http_api = TargetApi::new_http_api(&guest.tmp_dir); 6991 6992 let mut child = GuestCommand::new(&guest) 6993 .args(dbus_api.guest_args()) 6994 .args(http_api.guest_args()) 6995 .capture_output() 6996 .spawn() 6997 .unwrap(); 6998 6999 thread::sleep(std::time::Duration::new(1, 0)); 7000 7001 // Verify API servers are running 7002 assert!(dbus_api.remote_command("ping", None)); 7003 assert!(http_api.remote_command("ping", None)); 7004 7005 // Create the VM first 7006 let cpu_count: u8 = 4; 7007 let request_body = guest.api_create_body( 7008 cpu_count, 7009 direct_kernel_boot_path().to_str().unwrap(), 7010 DIRECT_KERNEL_BOOT_CMDLINE, 7011 ); 7012 7013 let temp_config_path = guest.tmp_dir.as_path().join("config"); 7014 std::fs::write(&temp_config_path, request_body).unwrap(); 7015 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 7016 7017 let r = std::panic::catch_unwind(|| { 7018 // Create the VM 7019 assert!(dbus_api.remote_command("create", Some(create_config),)); 7020 7021 // Then boot it 7022 assert!(http_api.remote_command("boot", None)); 7023 guest.wait_vm_boot(None).unwrap(); 7024 7025 // Check that the VM booted as expected 7026 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7027 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7028 7029 // Sync and shutdown without powering off to prevent filesystem 7030 // corruption. 
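            // The guest is halted rather than powered off; the VM itself is
            // then shut down and booted again through the two API flavours
            // below.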
7031 guest.ssh_command("sync").unwrap(); 7032 guest.ssh_command("sudo shutdown -H now").unwrap(); 7033 7034 // Wait for the guest to be fully shutdown 7035 thread::sleep(std::time::Duration::new(20, 0)); 7036 7037 // Then shutdown the VM 7038 assert!(dbus_api.remote_command("shutdown", None)); 7039 7040 // Then boot it again 7041 assert!(http_api.remote_command("boot", None)); 7042 guest.wait_vm_boot(None).unwrap(); 7043 7044 // Check that the VM booted as expected 7045 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7046 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7047 }); 7048 7049 let _ = child.kill(); 7050 let output = child.wait_with_output().unwrap(); 7051 7052 handle_child_output(r, &output); 7053 } 7054 7055 #[test] 7056 fn test_api_dbus_create_boot() { 7057 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7058 let guest = Guest::new(Box::new(focal)); 7059 7060 _test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7061 } 7062 7063 #[test] 7064 fn test_api_dbus_shutdown() { 7065 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7066 let guest = Guest::new(Box::new(focal)); 7067 7068 _test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7069 } 7070 7071 #[test] 7072 fn test_api_dbus_delete() { 7073 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7074 let guest = Guest::new(Box::new(focal)); 7075 7076 _test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest); 7077 } 7078 7079 #[test] 7080 fn test_api_dbus_pause_resume() { 7081 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7082 let guest = Guest::new(Box::new(focal)); 7083 7084 _test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7085 } 7086 } 7087 7088 mod common_sequential { 7089 #[cfg(not(feature = "mshv"))] 7090 use crate::*; 7091 7092 #[test] 7093 #[cfg(not(feature = "mshv"))] 7094 fn test_memory_mergeable_on() { 7095 test_memory_mergeable(true) 7096 } 7097 } 7098 7099 mod windows { 7100 use crate::*; 7101 use once_cell::sync::Lazy; 7102 7103 static NEXT_DISK_ID: Lazy<Mutex<u8>> = Lazy::new(|| Mutex::new(1)); 7104 7105 struct WindowsGuest { 7106 guest: Guest, 7107 auth: PasswordAuth, 7108 } 7109 7110 trait FsType { 7111 const FS_FAT: u8; 7112 const FS_NTFS: u8; 7113 } 7114 impl FsType for WindowsGuest { 7115 const FS_FAT: u8 = 0; 7116 const FS_NTFS: u8 = 1; 7117 } 7118 7119 impl WindowsGuest { 7120 fn new() -> Self { 7121 let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string()); 7122 let guest = Guest::new(Box::new(disk)); 7123 let auth = PasswordAuth { 7124 username: String::from("administrator"), 7125 password: String::from("Admin123"), 7126 }; 7127 7128 WindowsGuest { guest, auth } 7129 } 7130 7131 fn guest(&self) -> &Guest { 7132 &self.guest 7133 } 7134 7135 fn ssh_cmd(&self, cmd: &str) -> String { 7136 ssh_command_ip_with_auth( 7137 cmd, 7138 &self.auth, 7139 &self.guest.network.guest_ip, 7140 DEFAULT_SSH_RETRIES, 7141 DEFAULT_SSH_TIMEOUT, 7142 ) 7143 .unwrap() 7144 } 7145 7146 fn cpu_count(&self) -> u8 { 7147 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"") 7148 .trim() 7149 .parse::<u8>() 7150 .unwrap_or(0) 7151 } 7152 7153 fn ram_size(&self) -> usize { 7154 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"") 7155 .trim() 7156 .parse::<usize>() 7157 .unwrap_or(0) 7158 } 7159 7160 fn netdev_count(&self) -> u8 { 7161 self.ssh_cmd("powershell -Command 
\"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"") 7162 .trim() 7163 .parse::<u8>() 7164 .unwrap_or(0) 7165 } 7166 7167 fn disk_count(&self) -> u8 { 7168 self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"") 7169 .trim() 7170 .parse::<u8>() 7171 .unwrap_or(0) 7172 } 7173 7174 fn reboot(&self) { 7175 let _ = self.ssh_cmd("shutdown /r /t 0"); 7176 } 7177 7178 fn shutdown(&self) { 7179 let _ = self.ssh_cmd("shutdown /s /t 0"); 7180 } 7181 7182 fn run_dnsmasq(&self) -> std::process::Child { 7183 let listen_address = format!("--listen-address={}", self.guest.network.host_ip); 7184 let dhcp_host = format!( 7185 "--dhcp-host={},{}", 7186 self.guest.network.guest_mac, self.guest.network.guest_ip 7187 ); 7188 let dhcp_range = format!( 7189 "--dhcp-range=eth,{},{}", 7190 self.guest.network.guest_ip, self.guest.network.guest_ip 7191 ); 7192 7193 Command::new("dnsmasq") 7194 .arg("--no-daemon") 7195 .arg("--log-queries") 7196 .arg(listen_address.as_str()) 7197 .arg("--except-interface=lo") 7198 .arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet. 7199 .arg("--conf-file=/dev/null") 7200 .arg(dhcp_host.as_str()) 7201 .arg(dhcp_range.as_str()) 7202 .spawn() 7203 .unwrap() 7204 } 7205 7206 // TODO Cleanup image file explicitly after test, if there's some space issues. 7207 fn disk_new(&self, fs: u8, sz: usize) -> String { 7208 let mut guard = NEXT_DISK_ID.lock().unwrap(); 7209 let id = *guard; 7210 *guard = id + 1; 7211 7212 let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw")); 7213 let _ = fs::remove_file(&img); 7214 7215 // Create an image file 7216 let out = Command::new("qemu-img") 7217 .args([ 7218 "create", 7219 "-f", 7220 "raw", 7221 img.to_str().unwrap(), 7222 format!("{sz}m").as_str(), 7223 ]) 7224 .output() 7225 .expect("qemu-img command failed") 7226 .stdout; 7227 println!("{out:?}"); 7228 7229 // Associate image to a loop device 7230 let out = Command::new("losetup") 7231 .args(["--show", "-f", img.to_str().unwrap()]) 7232 .output() 7233 .expect("failed to create loop device") 7234 .stdout; 7235 let _tmp = String::from_utf8_lossy(&out); 7236 let loop_dev = _tmp.trim(); 7237 println!("{out:?}"); 7238 7239 // Create a partition table 7240 // echo 'type=7' | sudo sfdisk "${LOOP}" 7241 let mut child = Command::new("sfdisk") 7242 .args([loop_dev]) 7243 .stdin(Stdio::piped()) 7244 .spawn() 7245 .unwrap(); 7246 let stdin = child.stdin.as_mut().expect("failed to open stdin"); 7247 stdin 7248 .write_all("type=7".as_bytes()) 7249 .expect("failed to write stdin"); 7250 let out = child.wait_with_output().expect("sfdisk failed").stdout; 7251 println!("{out:?}"); 7252 7253 // Disengage the loop device 7254 let out = Command::new("losetup") 7255 .args(["-d", loop_dev]) 7256 .output() 7257 .expect("loop device not found") 7258 .stdout; 7259 println!("{out:?}"); 7260 7261 // Re-associate loop device pointing to the partition only 7262 let out = Command::new("losetup") 7263 .args([ 7264 "--show", 7265 "--offset", 7266 (512 * 2048).to_string().as_str(), 7267 "-f", 7268 img.to_str().unwrap(), 7269 ]) 7270 .output() 7271 .expect("failed to create loop device") 7272 .stdout; 7273 let _tmp = String::from_utf8_lossy(&out); 7274 let loop_dev = _tmp.trim(); 7275 println!("{out:?}"); 7276 7277 // Create filesystem. 
7278 let fs_cmd = match fs { 7279 WindowsGuest::FS_FAT => "mkfs.msdos", 7280 WindowsGuest::FS_NTFS => "mkfs.ntfs", 7281 _ => panic!("Unknown filesystem type '{fs}'"), 7282 }; 7283 let out = Command::new(fs_cmd) 7284 .args([&loop_dev]) 7285 .output() 7286 .unwrap_or_else(|_| panic!("{fs_cmd} failed")) 7287 .stdout; 7288 println!("{out:?}"); 7289 7290 // Disengage the loop device 7291 let out = Command::new("losetup") 7292 .args(["-d", loop_dev]) 7293 .output() 7294 .unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found")) 7295 .stdout; 7296 println!("{out:?}"); 7297 7298 img.to_str().unwrap().to_string() 7299 } 7300 7301 fn disks_set_rw(&self) { 7302 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\""); 7303 } 7304 7305 fn disks_online(&self) { 7306 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\""); 7307 } 7308 7309 fn disk_file_put(&self, fname: &str, data: &str) { 7310 let _ = self.ssh_cmd(&format!( 7311 "powershell -Command \"'{data}' | Set-Content -Path {fname}\"" 7312 )); 7313 } 7314 7315 fn disk_file_read(&self, fname: &str) -> String { 7316 self.ssh_cmd(&format!( 7317 "powershell -Command \"Get-Content -Path {fname}\"" 7318 )) 7319 } 7320 7321 fn wait_for_boot(&self) -> bool { 7322 let cmd = "dir /b c:\\ | find \"Windows\""; 7323 let tmo_max = 180; 7324 // The timeout increase by n*1+n*2+n*3+..., therefore the initial 7325 // interval must be small. 7326 let tmo_int = 2; 7327 let out = ssh_command_ip_with_auth( 7328 cmd, 7329 &self.auth, 7330 &self.guest.network.guest_ip, 7331 { 7332 let mut ret = 1; 7333 let mut tmo_acc = 0; 7334 loop { 7335 tmo_acc += tmo_int * ret; 7336 if tmo_acc >= tmo_max { 7337 break; 7338 } 7339 ret += 1; 7340 } 7341 ret 7342 }, 7343 tmo_int, 7344 ) 7345 .unwrap(); 7346 7347 if "Windows" == out.trim() { 7348 return true; 7349 } 7350 7351 false 7352 } 7353 } 7354 7355 fn vcpu_threads_count(pid: u32) -> u8 { 7356 // ps -T -p 12345 | grep vcpu | wc -l 7357 let out = Command::new("ps") 7358 .args(["-T", "-p", format!("{pid}").as_str()]) 7359 .output() 7360 .expect("ps command failed") 7361 .stdout; 7362 return String::from_utf8_lossy(&out).matches("vcpu").count() as u8; 7363 } 7364 7365 fn netdev_ctrl_threads_count(pid: u32) -> u8 { 7366 // ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l 7367 let out = Command::new("ps") 7368 .args(["-T", "-p", format!("{pid}").as_str()]) 7369 .output() 7370 .expect("ps command failed") 7371 .stdout; 7372 let mut n = 0; 7373 String::from_utf8_lossy(&out) 7374 .split_whitespace() 7375 .for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl 7376 n 7377 } 7378 7379 fn disk_ctrl_threads_count(pid: u32) -> u8 { 7380 // ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l 7381 let out = Command::new("ps") 7382 .args(["-T", "-p", format!("{pid}").as_str()]) 7383 .output() 7384 .expect("ps command failed") 7385 .stdout; 7386 let mut n = 0; 7387 String::from_utf8_lossy(&out) 7388 .split_whitespace() 7389 .for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // _disk0_q0, don't care about multiple queues as they're related to the same hdd 7390 n 7391 } 7392 7393 #[test] 7394 fn test_windows_guest() { 7395 let windows_guest = WindowsGuest::new(); 7396 7397 let mut child = GuestCommand::new(windows_guest.guest()) 7398 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7399 .args(["--memory", "size=4G"]) 7400 .args(["--kernel", edk2_path().to_str().unwrap()]) 
7401 .args(["--serial", "tty"]) 7402 .args(["--console", "off"]) 7403 .default_disks() 7404 .default_net() 7405 .capture_output() 7406 .spawn() 7407 .unwrap(); 7408 7409 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7410 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7411 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7412 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7413 7414 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7415 7416 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7417 7418 let r = std::panic::catch_unwind(|| { 7419 // Wait to make sure Windows boots up 7420 assert!(windows_guest.wait_for_boot()); 7421 7422 windows_guest.shutdown(); 7423 }); 7424 7425 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7426 let _ = child.kill(); 7427 let output = child.wait_with_output().unwrap(); 7428 7429 let _ = child_dnsmasq.kill(); 7430 let _ = child_dnsmasq.wait(); 7431 7432 handle_child_output(r, &output); 7433 } 7434 7435 #[test] 7436 fn test_windows_guest_multiple_queues() { 7437 let windows_guest = WindowsGuest::new(); 7438 7439 let mut ovmf_path = dirs::home_dir().unwrap(); 7440 ovmf_path.push("workloads"); 7441 ovmf_path.push(OVMF_NAME); 7442 7443 let mut child = GuestCommand::new(windows_guest.guest()) 7444 .args(["--cpus", "boot=4,kvm_hyperv=on"]) 7445 .args(["--memory", "size=4G"]) 7446 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7447 .args(["--serial", "tty"]) 7448 .args(["--console", "off"]) 7449 .args([ 7450 "--disk", 7451 format!( 7452 "path={},num_queues=4", 7453 windows_guest 7454 .guest() 7455 .disk_config 7456 .disk(DiskType::OperatingSystem) 7457 .unwrap() 7458 ) 7459 .as_str(), 7460 ]) 7461 .args([ 7462 "--net", 7463 format!( 7464 "tap=,mac={},ip={},mask=255.255.255.0,num_queues=8", 7465 windows_guest.guest().network.guest_mac, 7466 windows_guest.guest().network.host_ip 7467 ) 7468 .as_str(), 7469 ]) 7470 .capture_output() 7471 .spawn() 7472 .unwrap(); 7473 7474 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7475 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7476 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7477 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7478 7479 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7480 7481 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7482 7483 let r = std::panic::catch_unwind(|| { 7484 // Wait to make sure Windows boots up 7485 assert!(windows_guest.wait_for_boot()); 7486 7487 windows_guest.shutdown(); 7488 }); 7489 7490 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7491 let _ = child.kill(); 7492 let output = child.wait_with_output().unwrap(); 7493 7494 let _ = child_dnsmasq.kill(); 7495 let _ = child_dnsmasq.wait(); 7496 7497 handle_child_output(r, &output); 7498 } 7499 7500 #[test] 7501 #[cfg(not(feature = "mshv"))] 7502 #[ignore = "See #4327"] 7503 fn test_windows_guest_snapshot_restore() { 7504 let windows_guest = WindowsGuest::new(); 7505 7506 let mut ovmf_path = dirs::home_dir().unwrap(); 7507 ovmf_path.push("workloads"); 7508 ovmf_path.push(OVMF_NAME); 7509 7510 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7511 let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir)); 7512 7513 let mut child = GuestCommand::new(windows_guest.guest()) 7514 .args(["--api-socket", &api_socket_source]) 7515 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7516 .args(["--memory", "size=4G"]) 7517 .args(["--kernel", 
ovmf_path.to_str().unwrap()]) 7518 .args(["--serial", "tty"]) 7519 .args(["--console", "off"]) 7520 .default_disks() 7521 .default_net() 7522 .capture_output() 7523 .spawn() 7524 .unwrap(); 7525 7526 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7527 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7528 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7529 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7530 7531 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7532 7533 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7534 7535 // Wait to make sure Windows boots up 7536 assert!(windows_guest.wait_for_boot()); 7537 7538 let snapshot_dir = temp_snapshot_dir_path(&tmp_dir); 7539 7540 // Pause the VM 7541 assert!(remote_command(&api_socket_source, "pause", None)); 7542 7543 // Take a snapshot from the VM 7544 assert!(remote_command( 7545 &api_socket_source, 7546 "snapshot", 7547 Some(format!("file://{snapshot_dir}").as_str()), 7548 )); 7549 7550 // Wait to make sure the snapshot is completed 7551 thread::sleep(std::time::Duration::new(30, 0)); 7552 7553 let _ = child.kill(); 7554 child.wait().unwrap(); 7555 7556 let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir)); 7557 7558 // Restore the VM from the snapshot 7559 let mut child = GuestCommand::new(windows_guest.guest()) 7560 .args(["--api-socket", &api_socket_restored]) 7561 .args([ 7562 "--restore", 7563 format!("source_url=file://{snapshot_dir}").as_str(), 7564 ]) 7565 .capture_output() 7566 .spawn() 7567 .unwrap(); 7568 7569 // Wait for the VM to be restored 7570 thread::sleep(std::time::Duration::new(20, 0)); 7571 7572 let r = std::panic::catch_unwind(|| { 7573 // Resume the VM 7574 assert!(remote_command(&api_socket_restored, "resume", None)); 7575 7576 windows_guest.shutdown(); 7577 }); 7578 7579 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7580 let _ = child.kill(); 7581 let output = child.wait_with_output().unwrap(); 7582 7583 let _ = child_dnsmasq.kill(); 7584 let _ = child_dnsmasq.wait(); 7585 7586 handle_child_output(r, &output); 7587 } 7588 7589 #[test] 7590 #[cfg(not(feature = "mshv"))] 7591 #[cfg(not(target_arch = "aarch64"))] 7592 fn test_windows_guest_cpu_hotplug() { 7593 let windows_guest = WindowsGuest::new(); 7594 7595 let mut ovmf_path = dirs::home_dir().unwrap(); 7596 ovmf_path.push("workloads"); 7597 ovmf_path.push(OVMF_NAME); 7598 7599 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7600 let api_socket = temp_api_path(&tmp_dir); 7601 7602 let mut child = GuestCommand::new(windows_guest.guest()) 7603 .args(["--api-socket", &api_socket]) 7604 .args(["--cpus", "boot=2,max=8,kvm_hyperv=on"]) 7605 .args(["--memory", "size=4G"]) 7606 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7607 .args(["--serial", "tty"]) 7608 .args(["--console", "off"]) 7609 .default_disks() 7610 .default_net() 7611 .capture_output() 7612 .spawn() 7613 .unwrap(); 7614 7615 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7616 7617 let r = std::panic::catch_unwind(|| { 7618 // Wait to make sure Windows boots up 7619 assert!(windows_guest.wait_for_boot()); 7620 7621 let vcpu_num = 2; 7622 // Check the initial number of CPUs the guest sees 7623 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7624 // Check the initial number of vcpu threads in the CH process 7625 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7626 7627 let vcpu_num = 6; 7628 // Hotplug some CPUs 7629 resize_command(&api_socket, Some(vcpu_num), None, None, 
None); 7630 // Wait to make sure CPUs are added 7631 thread::sleep(std::time::Duration::new(10, 0)); 7632 // Check the guest sees the correct number 7633 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7634 // Check the CH process has the correct number of vcpu threads 7635 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7636 7637 let vcpu_num = 4; 7638 // Remove some CPUs. Note that Windows doesn't support hot-remove. 7639 resize_command(&api_socket, Some(vcpu_num), None, None, None); 7640 // Wait to make sure CPUs are removed 7641 thread::sleep(std::time::Duration::new(10, 0)); 7642 // Reboot to let Windows catch up 7643 windows_guest.reboot(); 7644 // Wait to make sure Windows completely rebooted 7645 thread::sleep(std::time::Duration::new(60, 0)); 7646 // Check the guest sees the correct number 7647 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7648 // Check the CH process has the correct number of vcpu threads 7649 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7650 7651 windows_guest.shutdown(); 7652 }); 7653 7654 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7655 let _ = child.kill(); 7656 let output = child.wait_with_output().unwrap(); 7657 7658 let _ = child_dnsmasq.kill(); 7659 let _ = child_dnsmasq.wait(); 7660 7661 handle_child_output(r, &output); 7662 } 7663 7664 #[test] 7665 #[cfg(not(feature = "mshv"))] 7666 #[cfg(not(target_arch = "aarch64"))] 7667 fn test_windows_guest_ram_hotplug() { 7668 let windows_guest = WindowsGuest::new(); 7669 7670 let mut ovmf_path = dirs::home_dir().unwrap(); 7671 ovmf_path.push("workloads"); 7672 ovmf_path.push(OVMF_NAME); 7673 7674 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7675 let api_socket = temp_api_path(&tmp_dir); 7676 7677 let mut child = GuestCommand::new(windows_guest.guest()) 7678 .args(["--api-socket", &api_socket]) 7679 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7680 .args(["--memory", "size=2G,hotplug_size=5G"]) 7681 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7682 .args(["--serial", "tty"]) 7683 .args(["--console", "off"]) 7684 .default_disks() 7685 .default_net() 7686 .capture_output() 7687 .spawn() 7688 .unwrap(); 7689 7690 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7691 7692 let r = std::panic::catch_unwind(|| { 7693 // Wait to make sure Windows boots up 7694 assert!(windows_guest.wait_for_boot()); 7695 7696 let ram_size = 2 * 1024 * 1024 * 1024; 7697 // Check the initial number of RAM the guest sees 7698 let current_ram_size = windows_guest.ram_size(); 7699 // This size seems to be reserved by the system and thus the 7700 // reported amount differs by this constant value. 7701 let reserved_ram_size = ram_size - current_ram_size; 7702 // Verify that there's not more than 4mb constant diff wasted 7703 // by the reserved ram. 7704 assert!(reserved_ram_size < 4 * 1024 * 1024); 7705 7706 let ram_size = 4 * 1024 * 1024 * 1024; 7707 // Hotplug some RAM 7708 resize_command(&api_socket, None, Some(ram_size), None, None); 7709 // Wait to make sure RAM has been added 7710 thread::sleep(std::time::Duration::new(10, 0)); 7711 // Check the guest sees the correct number 7712 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 7713 7714 let ram_size = 3 * 1024 * 1024 * 1024; 7715 // Unplug some RAM. Note that hot-remove most likely won't work. 
            resize_command(&api_socket, None, Some(ram_size), None, None);
            // Wait to make sure the RAM resize request has been processed
            thread::sleep(std::time::Duration::new(10, 0));
            // Reboot to let Windows catch up
            windows_guest.reboot();
            // Wait to make sure the guest completely rebooted
            thread::sleep(std::time::Duration::new(60, 0));
            // Check the guest sees the correct amount
            assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size);

            windows_guest.shutdown();
        });

        let _ = child.wait_timeout(std::time::Duration::from_secs(60));
        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        let _ = child_dnsmasq.kill();
        let _ = child_dnsmasq.wait();

        handle_child_output(r, &output);
    }

    #[test]
    #[cfg(not(feature = "mshv"))]
    fn test_windows_guest_netdev_hotplug() {
        let windows_guest = WindowsGuest::new();

        let mut ovmf_path = dirs::home_dir().unwrap();
        ovmf_path.push("workloads");
        ovmf_path.push(OVMF_NAME);

        let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
        let api_socket = temp_api_path(&tmp_dir);

        let mut child = GuestCommand::new(windows_guest.guest())
            .args(["--api-socket", &api_socket])
            .args(["--cpus", "boot=2,kvm_hyperv=on"])
            .args(["--memory", "size=4G"])
            .args(["--kernel", ovmf_path.to_str().unwrap()])
            .args(["--serial", "tty"])
            .args(["--console", "off"])
            .default_disks()
            .default_net()
            .capture_output()
            .spawn()
            .unwrap();

        let mut child_dnsmasq = windows_guest.run_dnsmasq();

        let r = std::panic::catch_unwind(|| {
            // Wait to make sure Windows boots up
            assert!(windows_guest.wait_for_boot());

            // Initially present network device
            let netdev_num = 1;
            assert_eq!(windows_guest.netdev_count(), netdev_num);
            assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);

            // Hotplug network device
            let (cmd_success, cmd_output) = remote_command_w_output(
                &api_socket,
                "add-net",
                Some(windows_guest.guest().default_net_string().as_str()),
            );
            assert!(cmd_success);
            assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\""));
            thread::sleep(std::time::Duration::new(5, 0));
            // Verify the device is on the system
            let netdev_num = 2;
            assert_eq!(windows_guest.netdev_count(), netdev_num);
            assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);

            // Remove network device
            let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2"));
            assert!(cmd_success);
            thread::sleep(std::time::Duration::new(5, 0));
            // Verify the device has been removed
            let netdev_num = 1;
            assert_eq!(windows_guest.netdev_count(), netdev_num);
            assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);

            windows_guest.shutdown();
        });

        let _ = child.wait_timeout(std::time::Duration::from_secs(60));
        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        let _ = child_dnsmasq.kill();
        let _ = child_dnsmasq.wait();

        handle_child_output(r, &output);
    }

    #[test]
    #[cfg(not(feature = "mshv"))]
    #[cfg(not(target_arch = "aarch64"))]
    fn test_windows_guest_disk_hotplug() {
        let windows_guest = WindowsGuest::new();

        let mut ovmf_path = dirs::home_dir().unwrap();
        ovmf_path.push("workloads");
ovmf_path.push(OVMF_NAME); 7820 7821 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7822 let api_socket = temp_api_path(&tmp_dir); 7823 7824 let mut child = GuestCommand::new(windows_guest.guest()) 7825 .args(["--api-socket", &api_socket]) 7826 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7827 .args(["--memory", "size=4G"]) 7828 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7829 .args(["--serial", "tty"]) 7830 .args(["--console", "off"]) 7831 .default_disks() 7832 .default_net() 7833 .capture_output() 7834 .spawn() 7835 .unwrap(); 7836 7837 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7838 7839 let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100); 7840 7841 let r = std::panic::catch_unwind(|| { 7842 // Wait to make sure Windows boots up 7843 assert!(windows_guest.wait_for_boot()); 7844 7845 // Initially present disk device 7846 let disk_num = 1; 7847 assert_eq!(windows_guest.disk_count(), disk_num); 7848 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7849 7850 // Hotplug disk device 7851 let (cmd_success, cmd_output) = remote_command_w_output( 7852 &api_socket, 7853 "add-disk", 7854 Some(format!("path={disk},readonly=off").as_str()), 7855 ); 7856 assert!(cmd_success); 7857 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\"")); 7858 thread::sleep(std::time::Duration::new(5, 0)); 7859 // Online disk device 7860 windows_guest.disks_set_rw(); 7861 windows_guest.disks_online(); 7862 // Verify the device is on the system 7863 let disk_num = 2; 7864 assert_eq!(windows_guest.disk_count(), disk_num); 7865 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7866 7867 let data = "hello"; 7868 let fname = "d:\\world"; 7869 windows_guest.disk_file_put(fname, data); 7870 7871 // Unmount disk device 7872 let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2")); 7873 assert!(cmd_success); 7874 thread::sleep(std::time::Duration::new(5, 0)); 7875 // Verify the device has been removed 7876 let disk_num = 1; 7877 assert_eq!(windows_guest.disk_count(), disk_num); 7878 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7879 7880 // Remount and check the file exists with the expected contents 7881 let (cmd_success, _cmd_output) = remote_command_w_output( 7882 &api_socket, 7883 "add-disk", 7884 Some(format!("path={disk},readonly=off").as_str()), 7885 ); 7886 assert!(cmd_success); 7887 thread::sleep(std::time::Duration::new(5, 0)); 7888 let out = windows_guest.disk_file_read(fname); 7889 assert_eq!(data, out.trim()); 7890 7891 // Intentionally no unmount, it'll happen at shutdown. 
7892 7893 windows_guest.shutdown(); 7894 }); 7895 7896 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7897 let _ = child.kill(); 7898 let output = child.wait_with_output().unwrap(); 7899 7900 let _ = child_dnsmasq.kill(); 7901 let _ = child_dnsmasq.wait(); 7902 7903 handle_child_output(r, &output); 7904 } 7905 7906 #[test] 7907 #[cfg(not(feature = "mshv"))] 7908 #[cfg(not(target_arch = "aarch64"))] 7909 fn test_windows_guest_disk_hotplug_multi() { 7910 let windows_guest = WindowsGuest::new(); 7911 7912 let mut ovmf_path = dirs::home_dir().unwrap(); 7913 ovmf_path.push("workloads"); 7914 ovmf_path.push(OVMF_NAME); 7915 7916 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7917 let api_socket = temp_api_path(&tmp_dir); 7918 7919 let mut child = GuestCommand::new(windows_guest.guest()) 7920 .args(["--api-socket", &api_socket]) 7921 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7922 .args(["--memory", "size=2G"]) 7923 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7924 .args(["--serial", "tty"]) 7925 .args(["--console", "off"]) 7926 .default_disks() 7927 .default_net() 7928 .capture_output() 7929 .spawn() 7930 .unwrap(); 7931 7932 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7933 7934 // Predefined data to used at various test stages 7935 let disk_test_data: [[String; 4]; 2] = [ 7936 [ 7937 "_disk2".to_string(), 7938 windows_guest.disk_new(WindowsGuest::FS_FAT, 123), 7939 "d:\\world".to_string(), 7940 "hello".to_string(), 7941 ], 7942 [ 7943 "_disk3".to_string(), 7944 windows_guest.disk_new(WindowsGuest::FS_NTFS, 333), 7945 "e:\\hello".to_string(), 7946 "world".to_string(), 7947 ], 7948 ]; 7949 7950 let r = std::panic::catch_unwind(|| { 7951 // Wait to make sure Windows boots up 7952 assert!(windows_guest.wait_for_boot()); 7953 7954 // Initially present disk device 7955 let disk_num = 1; 7956 assert_eq!(windows_guest.disk_count(), disk_num); 7957 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7958 7959 for it in &disk_test_data { 7960 let disk_id = it[0].as_str(); 7961 let disk = it[1].as_str(); 7962 // Hotplug disk device 7963 let (cmd_success, cmd_output) = remote_command_w_output( 7964 &api_socket, 7965 "add-disk", 7966 Some(format!("path={disk},readonly=off").as_str()), 7967 ); 7968 assert!(cmd_success); 7969 assert!(String::from_utf8_lossy(&cmd_output) 7970 .contains(format!("\"id\":\"{disk_id}\"").as_str())); 7971 thread::sleep(std::time::Duration::new(5, 0)); 7972 // Online disk devices 7973 windows_guest.disks_set_rw(); 7974 windows_guest.disks_online(); 7975 } 7976 // Verify the devices are on the system 7977 let disk_num = (disk_test_data.len() + 1) as u8; 7978 assert_eq!(windows_guest.disk_count(), disk_num); 7979 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7980 7981 // Put test data 7982 for it in &disk_test_data { 7983 let fname = it[2].as_str(); 7984 let data = it[3].as_str(); 7985 windows_guest.disk_file_put(fname, data); 7986 } 7987 7988 // Unmount disk devices 7989 for it in &disk_test_data { 7990 let disk_id = it[0].as_str(); 7991 let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id)); 7992 assert!(cmd_success); 7993 thread::sleep(std::time::Duration::new(5, 0)); 7994 } 7995 7996 // Verify the devices have been removed 7997 let disk_num = 1; 7998 assert_eq!(windows_guest.disk_count(), disk_num); 7999 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8000 8001 // Remount 8002 for it in &disk_test_data { 8003 let disk = it[1].as_str(); 8004 let (cmd_success, _cmd_output) = 
remote_command_w_output( 8005 &api_socket, 8006 "add-disk", 8007 Some(format!("path={disk},readonly=off").as_str()), 8008 ); 8009 assert!(cmd_success); 8010 thread::sleep(std::time::Duration::new(5, 0)); 8011 } 8012 8013 // Check the files exists with the expected contents 8014 for it in &disk_test_data { 8015 let fname = it[2].as_str(); 8016 let data = it[3].as_str(); 8017 let out = windows_guest.disk_file_read(fname); 8018 assert_eq!(data, out.trim()); 8019 } 8020 8021 // Intentionally no unmount, it'll happen at shutdown. 8022 8023 windows_guest.shutdown(); 8024 }); 8025 8026 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8027 let _ = child.kill(); 8028 let output = child.wait_with_output().unwrap(); 8029 8030 let _ = child_dnsmasq.kill(); 8031 let _ = child_dnsmasq.wait(); 8032 8033 handle_child_output(r, &output); 8034 } 8035 8036 #[test] 8037 #[cfg(not(feature = "mshv"))] 8038 #[cfg(not(target_arch = "aarch64"))] 8039 fn test_windows_guest_netdev_multi() { 8040 let windows_guest = WindowsGuest::new(); 8041 8042 let mut ovmf_path = dirs::home_dir().unwrap(); 8043 ovmf_path.push("workloads"); 8044 ovmf_path.push(OVMF_NAME); 8045 8046 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8047 let api_socket = temp_api_path(&tmp_dir); 8048 8049 let mut child = GuestCommand::new(windows_guest.guest()) 8050 .args(["--api-socket", &api_socket]) 8051 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8052 .args(["--memory", "size=4G"]) 8053 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8054 .args(["--serial", "tty"]) 8055 .args(["--console", "off"]) 8056 .default_disks() 8057 // The multi net dev config is borrowed from test_multiple_network_interfaces 8058 .args([ 8059 "--net", 8060 windows_guest.guest().default_net_string().as_str(), 8061 "--net", 8062 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 8063 "--net", 8064 "tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 8065 ]) 8066 .capture_output() 8067 .spawn() 8068 .unwrap(); 8069 8070 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8071 8072 let r = std::panic::catch_unwind(|| { 8073 // Wait to make sure Windows boots up 8074 assert!(windows_guest.wait_for_boot()); 8075 8076 let netdev_num = 3; 8077 assert_eq!(windows_guest.netdev_count(), netdev_num); 8078 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8079 8080 let tap_count = exec_host_command_output("ip link | grep -c mytap42"); 8081 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 8082 8083 windows_guest.shutdown(); 8084 }); 8085 8086 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8087 let _ = child.kill(); 8088 let output = child.wait_with_output().unwrap(); 8089 8090 let _ = child_dnsmasq.kill(); 8091 let _ = child_dnsmasq.wait(); 8092 8093 handle_child_output(r, &output); 8094 } 8095 } 8096 8097 #[cfg(target_arch = "x86_64")] 8098 mod sgx { 8099 use crate::*; 8100 8101 #[test] 8102 fn test_sgx() { 8103 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 8104 let jammy = UbuntuDiskConfig::new(jammy_image); 8105 let guest = Guest::new(Box::new(jammy)); 8106 8107 let mut child = GuestCommand::new(&guest) 8108 .args(["--cpus", "boot=1"]) 8109 .args(["--memory", "size=512M"]) 8110 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8111 .default_disks() 8112 .default_net() 8113 .args(["--sgx-epc", "id=epc0,size=64M"]) 8114 .capture_output() 8115 .spawn() 8116 .unwrap(); 8117 8118 let r = std::panic::catch_unwind(|| { 8119 guest.wait_vm_boot(None).unwrap(); 8120 
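            // Descriptive note (not in the original): `cpuid -l 0x12 -s 2` below reads
            // CPUID leaf 0x12 (Intel SGX capability enumeration), subleaf 2, which
            // describes the first EPC section; 0x4000000 bytes matches the 64MiB
            // requested via `--sgx-epc`.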
            // Check if SGX is correctly detected in the guest.
            guest.check_sgx_support().unwrap();

            // Validate the SGX EPC section is 64MiB.
            assert_eq!(
                guest
                    .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2")
                    .unwrap()
                    .trim(),
                "0x0000000004000000"
            );
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }
}

#[cfg(target_arch = "x86_64")]
mod vfio {
    use crate::*;

    #[test]
    // The VFIO integration test starts a cloud-hypervisor guest with 3 TAP
    // backed networking interfaces, bound through a simple bridge on the host.
    // So if the nested cloud-hypervisor succeeds in getting a directly
    // assigned interface from its cloud-hypervisor host, we should be able to
    // ssh into it, and verify that it's running with the right kernel command
    // line (we tag the command line from cloud-hypervisor for that purpose).
    // The third device is added to validate that hotplug works correctly since
    // it is added to the L2 VM through the hotplug mechanism.
    // Also, we pass a virtio-blk device through to the L2 VM to test the
    // 32-bit VFIO device support.
    fn test_vfio() {
        setup_vfio_network_interfaces();

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new_from_ip_range(Box::new(focal), "172.18", 0);

        let mut workload_path = dirs::home_dir().unwrap();
        workload_path.push("workloads");

        let kernel_path = direct_kernel_boot_path();

        let mut vfio_path = workload_path.clone();
        vfio_path.push("vfio");

        let mut cloud_init_vfio_base_path = vfio_path.clone();
        cloud_init_vfio_base_path.push("cloudinit.img");

        // We copy our cloudinit into the vfio mount point, for the nested
        // cloud-hypervisor guest to use.
8175 rate_limited_copy( 8176 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 8177 &cloud_init_vfio_base_path, 8178 ) 8179 .expect("copying of cloud-init disk failed"); 8180 8181 let mut vfio_disk_path = workload_path.clone(); 8182 vfio_disk_path.push("vfio.img"); 8183 8184 // Create the vfio disk image 8185 let output = Command::new("mkfs.ext4") 8186 .arg("-d") 8187 .arg(vfio_path.to_str().unwrap()) 8188 .arg(vfio_disk_path.to_str().unwrap()) 8189 .arg("2g") 8190 .output() 8191 .unwrap(); 8192 if !output.status.success() { 8193 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 8194 panic!("mkfs.ext4 command generated an error"); 8195 } 8196 8197 let mut blk_file_path = workload_path; 8198 blk_file_path.push("blk.img"); 8199 8200 let vfio_tap0 = "vfio-tap0"; 8201 let vfio_tap1 = "vfio-tap1"; 8202 let vfio_tap2 = "vfio-tap2"; 8203 let vfio_tap3 = "vfio-tap3"; 8204 8205 let mut child = GuestCommand::new(&guest) 8206 .args(["--cpus", "boot=4"]) 8207 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 8208 .args(["--kernel", kernel_path.to_str().unwrap()]) 8209 .args([ 8210 "--disk", 8211 format!( 8212 "path={}", 8213 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 8214 ) 8215 .as_str(), 8216 "--disk", 8217 format!( 8218 "path={}", 8219 guest.disk_config.disk(DiskType::CloudInit).unwrap() 8220 ) 8221 .as_str(), 8222 "--disk", 8223 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 8224 "--disk", 8225 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 8226 ]) 8227 .args([ 8228 "--cmdline", 8229 format!( 8230 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 8231 ) 8232 .as_str(), 8233 ]) 8234 .args([ 8235 "--net", 8236 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 8237 "--net", 8238 format!( 8239 "tap={},mac={},iommu=on", 8240 vfio_tap1, guest.network.l2_guest_mac1 8241 ) 8242 .as_str(), 8243 "--net", 8244 format!( 8245 "tap={},mac={},iommu=on", 8246 vfio_tap2, guest.network.l2_guest_mac2 8247 ) 8248 .as_str(), 8249 "--net", 8250 format!( 8251 "tap={},mac={},iommu=on", 8252 vfio_tap3, guest.network.l2_guest_mac3 8253 ) 8254 .as_str(), 8255 ]) 8256 .capture_output() 8257 .spawn() 8258 .unwrap(); 8259 8260 thread::sleep(std::time::Duration::new(30, 0)); 8261 8262 let r = std::panic::catch_unwind(|| { 8263 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 8264 thread::sleep(std::time::Duration::new(120, 0)); 8265 8266 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 8267 // added to its kernel command line. 8268 // Let's ssh into it and verify that it's there. If it is it means 8269 // we're in the right guest (The L2 one) because the QEMU L1 guest 8270 // does not have this command line tag. 8271 assert_eq!( 8272 guest 8273 .ssh_command_l2_1("grep -c VFIOTAG /proc/cmdline") 8274 .unwrap() 8275 .trim() 8276 .parse::<u32>() 8277 .unwrap_or_default(), 8278 1 8279 ); 8280 8281 // Let's also verify from the second virtio-net device passed to 8282 // the L2 VM. 8283 assert_eq!( 8284 guest 8285 .ssh_command_l2_2("grep -c VFIOTAG /proc/cmdline") 8286 .unwrap() 8287 .trim() 8288 .parse::<u32>() 8289 .unwrap_or_default(), 8290 1 8291 ); 8292 8293 // Check the amount of PCI devices appearing in L2 VM. 
8294 assert_eq!( 8295 guest 8296 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8297 .unwrap() 8298 .trim() 8299 .parse::<u32>() 8300 .unwrap_or_default(), 8301 8, 8302 ); 8303 8304 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 8305 assert_eq!( 8306 guest 8307 .ssh_command_l2_1("lsblk | grep vdc | grep -c 16M") 8308 .unwrap() 8309 .trim() 8310 .parse::<u32>() 8311 .unwrap_or_default(), 8312 1 8313 ); 8314 8315 // Hotplug an extra virtio-net device through L2 VM. 8316 guest 8317 .ssh_command_l1( 8318 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 8319 ) 8320 .unwrap(); 8321 guest 8322 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 8323 .unwrap(); 8324 let vfio_hotplug_output = guest 8325 .ssh_command_l1( 8326 "sudo /mnt/ch-remote \ 8327 --api-socket /tmp/ch_api.sock \ 8328 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 8329 ) 8330 .unwrap(); 8331 assert!(vfio_hotplug_output.contains("{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}")); 8332 8333 thread::sleep(std::time::Duration::new(10, 0)); 8334 8335 // Let's also verify from the third virtio-net device passed to 8336 // the L2 VM. This third device has been hotplugged through the L2 8337 // VM, so this is our way to validate hotplug works for VFIO PCI. 8338 assert_eq!( 8339 guest 8340 .ssh_command_l2_3("grep -c VFIOTAG /proc/cmdline") 8341 .unwrap() 8342 .trim() 8343 .parse::<u32>() 8344 .unwrap_or_default(), 8345 1 8346 ); 8347 8348 // Check the amount of PCI devices appearing in L2 VM. 8349 // There should be one more device than before, raising the count 8350 // up to 9 PCI devices. 8351 assert_eq!( 8352 guest 8353 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8354 .unwrap() 8355 .trim() 8356 .parse::<u32>() 8357 .unwrap_or_default(), 8358 9, 8359 ); 8360 8361 // Let's now verify that we can correctly remove the virtio-net 8362 // device through the "remove-device" command responsible for 8363 // unplugging VFIO devices. 8364 guest 8365 .ssh_command_l1( 8366 "sudo /mnt/ch-remote \ 8367 --api-socket /tmp/ch_api.sock \ 8368 remove-device vfio123", 8369 ) 8370 .unwrap(); 8371 thread::sleep(std::time::Duration::new(10, 0)); 8372 8373 // Check the amount of PCI devices appearing in L2 VM is back down 8374 // to 8 devices. 8375 assert_eq!( 8376 guest 8377 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8378 .unwrap() 8379 .trim() 8380 .parse::<u32>() 8381 .unwrap_or_default(), 8382 8, 8383 ); 8384 8385 // Perform memory hotplug in L2 and validate the memory is showing 8386 // up as expected. In order to check, we will use the virtio-net 8387 // device already passed through L2 as a VFIO device, this will 8388 // verify that VFIO devices are functional with memory hotplug. 
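            // Descriptive note (not in the original): hotplugged RAM also has to be
            // mapped into the VFIO container so the passed-through device can DMA to
            // it, so a working passed-through NIC after the resize below is a good
            // indicator that the new memory was properly registered.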
8389 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 8390 guest 8391 .ssh_command_l2_1( 8392 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 8393 ) 8394 .unwrap(); 8395 guest 8396 .ssh_command_l1( 8397 "sudo /mnt/ch-remote \ 8398 --api-socket /tmp/ch_api.sock \ 8399 resize --memory 1073741824", 8400 ) 8401 .unwrap(); 8402 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 8403 }); 8404 8405 let _ = child.kill(); 8406 let output = child.wait_with_output().unwrap(); 8407 8408 cleanup_vfio_network_interfaces(); 8409 8410 handle_child_output(r, &output); 8411 } 8412 8413 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) { 8414 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8415 let guest = Guest::new(Box::new(jammy)); 8416 let api_socket = temp_api_path(&guest.tmp_dir); 8417 8418 let mut child = GuestCommand::new(&guest) 8419 .args(["--cpus", "boot=4"]) 8420 .args([ 8421 "--memory", 8422 format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(), 8423 ]) 8424 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8425 .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"]) 8426 .args(["--api-socket", &api_socket]) 8427 .default_disks() 8428 .default_net() 8429 .capture_output() 8430 .spawn() 8431 .unwrap(); 8432 8433 let r = std::panic::catch_unwind(|| { 8434 guest.wait_vm_boot(None).unwrap(); 8435 8436 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8437 8438 guest.enable_memory_hotplug(); 8439 8440 // Add RAM to the VM 8441 let desired_ram = 6 << 30; 8442 resize_command(&api_socket, None, Some(desired_ram), None, None); 8443 thread::sleep(std::time::Duration::new(30, 0)); 8444 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8445 8446 // Check the VFIO device works when RAM is increased to 6GiB 8447 guest.check_nvidia_gpu(); 8448 }); 8449 8450 let _ = child.kill(); 8451 let output = child.wait_with_output().unwrap(); 8452 8453 handle_child_output(r, &output); 8454 } 8455 8456 #[test] 8457 fn test_nvidia_card_memory_hotplug_acpi() { 8458 test_nvidia_card_memory_hotplug("acpi") 8459 } 8460 8461 #[test] 8462 fn test_nvidia_card_memory_hotplug_virtio_mem() { 8463 test_nvidia_card_memory_hotplug("virtio-mem") 8464 } 8465 8466 #[test] 8467 fn test_nvidia_card_pci_hotplug() { 8468 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8469 let guest = Guest::new(Box::new(jammy)); 8470 let api_socket = temp_api_path(&guest.tmp_dir); 8471 8472 let mut child = GuestCommand::new(&guest) 8473 .args(["--cpus", "boot=4"]) 8474 .args(["--memory", "size=4G"]) 8475 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8476 .args(["--api-socket", &api_socket]) 8477 .default_disks() 8478 .default_net() 8479 .capture_output() 8480 .spawn() 8481 .unwrap(); 8482 8483 let r = std::panic::catch_unwind(|| { 8484 guest.wait_vm_boot(None).unwrap(); 8485 8486 // Hotplug the card to the VM 8487 let (cmd_success, cmd_output) = remote_command_w_output( 8488 &api_socket, 8489 "add-device", 8490 Some("id=vfio0,path=/sys/bus/pci/devices/0000:31:00.0/"), 8491 ); 8492 assert!(cmd_success); 8493 assert!(String::from_utf8_lossy(&cmd_output) 8494 .contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}")); 8495 8496 thread::sleep(std::time::Duration::new(10, 0)); 8497 8498 // Check the VFIO device works after hotplug 8499 guest.check_nvidia_gpu(); 8500 }); 8501 8502 let _ = child.kill(); 8503 let output = 
child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }

    #[test]
    fn test_nvidia_card_reboot() {
        let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(jammy));
        let api_socket = temp_api_path(&guest.tmp_dir);

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", "boot=4"])
            .args(["--memory", "size=4G"])
            .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
            .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"])
            .args(["--api-socket", &api_socket])
            .default_disks()
            .default_net()
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Check the VFIO device works after boot
            guest.check_nvidia_gpu();

            guest.reboot_linux(0, None);

            // Check the VFIO device works after reboot
            guest.check_nvidia_gpu();
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }
}

mod live_migration {
    use crate::*;

    fn start_live_migration(
        migration_socket: &str,
        src_api_socket: &str,
        dest_api_socket: &str,
        local: bool,
    ) -> bool {
        // Start receiving the migration on the destination VM
        let mut receive_migration = Command::new(clh_command("ch-remote"))
            .args([
                "--api-socket",
                dest_api_socket,
                "receive-migration",
                &format! {"unix:{migration_socket}"},
            ])
            .stderr(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()
            .unwrap();
        // Give it '1s' to make sure the 'migration_socket' file is properly created
        thread::sleep(std::time::Duration::new(1, 0));

        // Start sending the migration from the source VM
        let mut args = [
            "--api-socket".to_string(),
            src_api_socket.to_string(),
            "send-migration".to_string(),
            format! {"unix:{migration_socket}"},
        ]
        .to_vec();

        if local {
            args.insert(3, "--local".to_string());
        }

        let mut send_migration = Command::new(clh_command("ch-remote"))
            .args(&args)
            .stderr(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()
            .unwrap();

        // The 'send-migration' command should be executed successfully within the given timeout
        let send_success = if let Some(status) = send_migration
            .wait_timeout(std::time::Duration::from_secs(30))
            .unwrap()
        {
            status.success()
        } else {
            false
        };

        if !send_success {
            let _ = send_migration.kill();
            let output = send_migration.wait_with_output().unwrap();
            eprintln!("\n\n==== Start 'send_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'send_migration' output ====\n\n",
                String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr));
        }

        // The 'receive-migration' command should be executed successfully within the given timeout
        let receive_success = if let Some(status) = receive_migration
            .wait_timeout(std::time::Duration::from_secs(30))
            .unwrap()
        {
            status.success()
        } else {
            false
        };

        if !receive_success {
            let _ = receive_migration.kill();
            let output = receive_migration.wait_with_output().unwrap();
            eprintln!("\n\n==== Start 'receive_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'receive_migration' output ====\n\n",
                String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr));
        }

        send_success && receive_success
    }

    fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! {
        let mut src_vm = src_vm;
        let mut dest_vm = dest_vm;

        let _ = src_vm.kill();
        let src_output = src_vm.wait_with_output().unwrap();
        eprintln!(
            "\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====",
            String::from_utf8_lossy(&src_output.stdout)
        );
        eprintln!(
            "\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====",
            String::from_utf8_lossy(&src_output.stderr)
        );
        let _ = dest_vm.kill();
        let dest_output = dest_vm.wait_with_output().unwrap();
        eprintln!(
            "\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====",
            String::from_utf8_lossy(&dest_output.stdout)
        );
        eprintln!(
            "\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====",
            String::from_utf8_lossy(&dest_output.stderr)
        );

        if let Some(ovs_vm) = ovs_vm {
            let mut ovs_vm = ovs_vm;
            let _ = ovs_vm.kill();
            let ovs_output = ovs_vm.wait_with_output().unwrap();
            eprintln!(
                "\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====",
                String::from_utf8_lossy(&ovs_output.stdout)
            );
            eprintln!(
                "\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====",
                String::from_utf8_lossy(&ovs_output.stderr)
            );

            cleanup_ovs_dpdk();
        }

        panic!("Test failed: {message}")
    }

    // This test exercises the local live-migration between two Cloud Hypervisor VMs on the
    // same host. It ensures the following behaviors:
    // 1. The source VM is up and functional (including various virtio-devices are working properly);
    // 2. The 'send-migration' and 'receive-migration' commands finish successfully;
    // 3. The source VM terminated gracefully after live migration;
    // 4. The destination VM is functional (including various virtio-devices are working properly) after
    //    live migration;
    // Note: This test does not use vsock as we can't create two identical vsock devices on the same host.
    fn _test_live_migration(upgrade_test: bool, local: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let kernel_path = direct_kernel_boot_path();
        let console_text = String::from("On a branch floating down river a cricket, singing.");
        let net_id = "net123";
        let net_params = format!(
            "id={},tap=,mac={},ip={},mask=255.255.255.0",
            net_id, guest.network.guest_mac, guest.network.host_ip
        );

        let memory_param: &[&str] = if local {
            &["--memory", "size=4G,shared=on"]
        } else {
            &["--memory", "size=4G"]
        };

        let boot_vcpus = 2;
        let max_vcpus = 4;

        let pmem_temp_file = TempFile::new().unwrap();
        pmem_temp_file.as_file().set_len(128 << 20).unwrap();
        std::process::Command::new("mkfs.ext4")
            .arg(pmem_temp_file.as_path())
            .output()
            .expect("Expect creating disk image to succeed");
        let pmem_path = String::from("/dev/pmem0");

        // Start the source VM
        let src_vm_path = if !upgrade_test {
            clh_command("cloud-hypervisor")
        } else {
            cloud_hypervisor_release_path()
        };
        let src_api_socket = temp_api_path(&guest.tmp_dir);
        let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
        src_vm_cmd
            .args([
                "--cpus",
                format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
            ])
            .args(memory_param)
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .args(["--api-socket", &src_api_socket])
            .args([
                "--pmem",
                format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
            ]);
        let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();

        // Start the destination VM
        let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
        dest_api_socket.push_str(".dest");
        let mut dest_child = GuestCommand::new(&guest)
            .args(["--api-socket", &dest_api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Make sure the source VM is functional
            // Check the number of vCPUs
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);

            // Check the guest RAM
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

            // Check the guest virtio-devices, e.g. block, rng, console, and net
            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));

            // x86_64: Following what's done in the `test_snapshot_restore`, we need
            // to make sure that removing and adding back the virtio-net device does
            // not break the live-migration support for virtio-pci.
            #[cfg(target_arch = "x86_64")]
            {
                assert!(remote_command(
                    &src_api_socket,
                    "remove-device",
                    Some(net_id),
                ));
                thread::sleep(std::time::Duration::new(10, 0));

                // Plug the virtio-net device again
                assert!(remote_command(
                    &src_api_socket,
                    "add-net",
                    Some(net_params.as_str()),
                ));
                thread::sleep(std::time::Duration::new(10, 0));
            }

            // Start the live-migration
            let migration_socket = String::from(
                guest
                    .tmp_dir
                    .as_path()
                    .join("live-migration.sock")
                    .to_str()
                    .unwrap(),
            );

            assert!(
                start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
                "Unsuccessful command: 'send-migration' or 'receive-migration'."
            );
        });

        // Check and report any errors that occurred during the live-migration
        if r.is_err() {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "Error occurred during live-migration",
            );
        }

        // Check the source VM has been terminated successfully (give it '3s' to settle)
        thread::sleep(std::time::Duration::new(3, 0));
        if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "source VM was not terminated successfully.",
            );
        };

        // Post live-migration check to make sure the destination VM is functional
        let r = std::panic::catch_unwind(|| {
            // Perform the same checks to validate the VM has been properly migrated
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
        });

        // Clean-up the destination VM and make sure it terminated correctly
        let _ = dest_child.kill();
        let dest_output = dest_child.wait_with_output().unwrap();
        handle_child_output(r, &dest_output);

        // Check the destination VM has the expected 'console_text' in its output
        let r = std::panic::catch_unwind(|| {
            assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
        });
        handle_child_output(r, &dest_output);
    }

    fn _test_live_migration_balloon(upgrade_test: bool, local: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let kernel_path = direct_kernel_boot_path();
        let console_text = String::from("On a branch floating down river a cricket, singing.");
        let net_id = "net123";
        let net_params = format!(
            "id={},tap=,mac={},ip={},mask=255.255.255.0",
            net_id, guest.network.guest_mac, guest.network.host_ip
        );

        let memory_param: &[&str] = if local {
            &[
                "--memory",
                "size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on",
                "--balloon",
                "size=0",
            ]
        } else {
            &[
                "--memory",
                "size=4G,hotplug_method=virtio-mem,hotplug_size=8G",
                "--balloon",
                "size=0",
            ]
        };

        let boot_vcpus = 2;
        let max_vcpus = 4;

        let pmem_temp_file = TempFile::new().unwrap();
        pmem_temp_file.as_file().set_len(128 << 20).unwrap();
        std::process::Command::new("mkfs.ext4")
            .arg(pmem_temp_file.as_path())
            .output()
            .expect("Expect creating disk image to succeed");
        let pmem_path = String::from("/dev/pmem0");

        // Start the source VM
        let src_vm_path
= if !upgrade_test { 8872 clh_command("cloud-hypervisor") 8873 } else { 8874 cloud_hypervisor_release_path() 8875 }; 8876 let src_api_socket = temp_api_path(&guest.tmp_dir); 8877 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 8878 src_vm_cmd 8879 .args([ 8880 "--cpus", 8881 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 8882 ]) 8883 .args(memory_param) 8884 .args(["--kernel", kernel_path.to_str().unwrap()]) 8885 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 8886 .default_disks() 8887 .args(["--net", net_params.as_str()]) 8888 .args(["--api-socket", &src_api_socket]) 8889 .args([ 8890 "--pmem", 8891 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 8892 ]); 8893 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 8894 8895 // Start the destination VM 8896 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 8897 dest_api_socket.push_str(".dest"); 8898 let mut dest_child = GuestCommand::new(&guest) 8899 .args(["--api-socket", &dest_api_socket]) 8900 .capture_output() 8901 .spawn() 8902 .unwrap(); 8903 8904 let r = std::panic::catch_unwind(|| { 8905 guest.wait_vm_boot(None).unwrap(); 8906 8907 // Make sure the source VM is functaionl 8908 // Check the number of vCPUs 8909 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8910 8911 // Check the guest RAM 8912 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8913 // Increase the guest RAM 8914 resize_command(&src_api_socket, None, Some(6 << 30), None, None); 8915 thread::sleep(std::time::Duration::new(5, 0)); 8916 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8917 // Use balloon to remove RAM from the VM 8918 resize_command(&src_api_socket, None, None, Some(1 << 30), None); 8919 thread::sleep(std::time::Duration::new(5, 0)); 8920 let total_memory = guest.get_total_memory().unwrap_or_default(); 8921 assert!(total_memory > 4_800_000); 8922 assert!(total_memory < 5_760_000); 8923 8924 // Check the guest virtio-devices, e.g. block, rng, console, and net 8925 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8926 8927 // x86_64: Following what's done in the `test_snapshot_restore`, we need 8928 // to make sure that removing and adding back the virtio-net device does 8929 // not break the live-migration support for virtio-pci. 8930 #[cfg(target_arch = "x86_64")] 8931 { 8932 assert!(remote_command( 8933 &src_api_socket, 8934 "remove-device", 8935 Some(net_id), 8936 )); 8937 thread::sleep(std::time::Duration::new(10, 0)); 8938 8939 // Plug the virtio-net device again 8940 assert!(remote_command( 8941 &src_api_socket, 8942 "add-net", 8943 Some(net_params.as_str()), 8944 )); 8945 thread::sleep(std::time::Duration::new(10, 0)); 8946 } 8947 8948 // Start the live-migration 8949 let migration_socket = String::from( 8950 guest 8951 .tmp_dir 8952 .as_path() 8953 .join("live-migration.sock") 8954 .to_str() 8955 .unwrap(), 8956 ); 8957 8958 assert!( 8959 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 8960 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
8961 ); 8962 }); 8963 8964 // Check and report any errors occured during the live-migration 8965 if r.is_err() { 8966 print_and_panic( 8967 src_child, 8968 dest_child, 8969 None, 8970 "Error occured during live-migration", 8971 ); 8972 } 8973 8974 // Check the source vm has been terminated successful (give it '3s' to settle) 8975 thread::sleep(std::time::Duration::new(3, 0)); 8976 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 8977 print_and_panic( 8978 src_child, 8979 dest_child, 8980 None, 8981 "source VM was not terminated successfully.", 8982 ); 8983 }; 8984 8985 // Post live-migration check to make sure the destination VM is funcational 8986 let r = std::panic::catch_unwind(|| { 8987 // Perform same checks to validate VM has been properly migrated 8988 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8989 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8990 8991 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8992 8993 // Perform checks on guest RAM using balloon 8994 let total_memory = guest.get_total_memory().unwrap_or_default(); 8995 assert!(total_memory > 4_800_000); 8996 assert!(total_memory < 5_760_000); 8997 // Deflate balloon to restore entire RAM to the VM 8998 resize_command(&dest_api_socket, None, None, Some(0), None); 8999 thread::sleep(std::time::Duration::new(5, 0)); 9000 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9001 // Decrease guest RAM with virtio-mem 9002 resize_command(&dest_api_socket, None, Some(5 << 30), None, None); 9003 thread::sleep(std::time::Duration::new(5, 0)); 9004 let total_memory = guest.get_total_memory().unwrap_or_default(); 9005 assert!(total_memory > 4_800_000); 9006 assert!(total_memory < 5_760_000); 9007 }); 9008 9009 // Clean-up the destination VM and make sure it terminated correctly 9010 let _ = dest_child.kill(); 9011 let dest_output = dest_child.wait_with_output().unwrap(); 9012 handle_child_output(r, &dest_output); 9013 9014 // Check the destination VM has the expected 'concole_text' from its output 9015 let r = std::panic::catch_unwind(|| { 9016 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9017 }); 9018 handle_child_output(r, &dest_output); 9019 } 9020 9021 fn _test_live_migration_numa(upgrade_test: bool, local: bool) { 9022 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9023 let guest = Guest::new(Box::new(focal)); 9024 let kernel_path = direct_kernel_boot_path(); 9025 let console_text = String::from("On a branch floating down river a cricket, singing."); 9026 let net_id = "net123"; 9027 let net_params = format!( 9028 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9029 net_id, guest.network.guest_mac, guest.network.host_ip 9030 ); 9031 9032 let memory_param: &[&str] = if local { 9033 &[ 9034 "--memory", 9035 "size=0,hotplug_method=virtio-mem,shared=on", 9036 "--memory-zone", 9037 "id=mem0,size=1G,hotplug_size=4G,shared=on", 9038 "--memory-zone", 9039 "id=mem1,size=1G,hotplug_size=4G,shared=on", 9040 "--memory-zone", 9041 "id=mem2,size=2G,hotplug_size=4G,shared=on", 9042 "--numa", 9043 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9044 "--numa", 9045 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9046 "--numa", 9047 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9048 ] 9049 } else { 9050 &[ 9051 "--memory", 9052 "size=0,hotplug_method=virtio-mem", 9053 "--memory-zone", 9054 
"id=mem0,size=1G,hotplug_size=4G", 9055 "--memory-zone", 9056 "id=mem1,size=1G,hotplug_size=4G", 9057 "--memory-zone", 9058 "id=mem2,size=2G,hotplug_size=4G", 9059 "--numa", 9060 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9061 "--numa", 9062 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9063 "--numa", 9064 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9065 ] 9066 }; 9067 9068 let boot_vcpus = 6; 9069 let max_vcpus = 12; 9070 9071 let pmem_temp_file = TempFile::new().unwrap(); 9072 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9073 std::process::Command::new("mkfs.ext4") 9074 .arg(pmem_temp_file.as_path()) 9075 .output() 9076 .expect("Expect creating disk image to succeed"); 9077 let pmem_path = String::from("/dev/pmem0"); 9078 9079 // Start the source VM 9080 let src_vm_path = if !upgrade_test { 9081 clh_command("cloud-hypervisor") 9082 } else { 9083 cloud_hypervisor_release_path() 9084 }; 9085 let src_api_socket = temp_api_path(&guest.tmp_dir); 9086 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9087 src_vm_cmd 9088 .args([ 9089 "--cpus", 9090 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9091 ]) 9092 .args(memory_param) 9093 .args(["--kernel", kernel_path.to_str().unwrap()]) 9094 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9095 .default_disks() 9096 .args(["--net", net_params.as_str()]) 9097 .args(["--api-socket", &src_api_socket]) 9098 .args([ 9099 "--pmem", 9100 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9101 ]); 9102 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9103 9104 // Start the destination VM 9105 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9106 dest_api_socket.push_str(".dest"); 9107 let mut dest_child = GuestCommand::new(&guest) 9108 .args(["--api-socket", &dest_api_socket]) 9109 .capture_output() 9110 .spawn() 9111 .unwrap(); 9112 9113 let r = std::panic::catch_unwind(|| { 9114 guest.wait_vm_boot(None).unwrap(); 9115 9116 // Make sure the source VM is functaionl 9117 // Check the number of vCPUs 9118 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9119 9120 // Check the guest RAM 9121 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 9122 9123 // Check the guest virtio-devices, e.g. block, rng, console, and net 9124 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9125 9126 // Check the NUMA parameters are applied correctly and resize 9127 // each zone to test the case where we migrate a VM with the 9128 // virtio-mem regions being used. 9129 { 9130 guest.check_numa_common( 9131 Some(&[960_000, 960_000, 1_920_000]), 9132 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9133 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9134 ); 9135 9136 // AArch64 currently does not support hotplug, and therefore we only 9137 // test hotplug-related function on x86_64 here. 9138 #[cfg(target_arch = "x86_64")] 9139 { 9140 guest.enable_memory_hotplug(); 9141 9142 // Resize every memory zone and check each associated NUMA node 9143 // has been assigned the right amount of memory. 
9144 resize_zone_command(&src_api_socket, "mem0", "2G"); 9145 resize_zone_command(&src_api_socket, "mem1", "2G"); 9146 resize_zone_command(&src_api_socket, "mem2", "3G"); 9147 thread::sleep(std::time::Duration::new(5, 0)); 9148 9149 guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None); 9150 } 9151 } 9152 9153 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9154 // to make sure that removing and adding back the virtio-net device does 9155 // not break the live-migration support for virtio-pci. 9156 #[cfg(target_arch = "x86_64")] 9157 { 9158 assert!(remote_command( 9159 &src_api_socket, 9160 "remove-device", 9161 Some(net_id), 9162 )); 9163 thread::sleep(std::time::Duration::new(10, 0)); 9164 9165 // Plug the virtio-net device again 9166 assert!(remote_command( 9167 &src_api_socket, 9168 "add-net", 9169 Some(net_params.as_str()), 9170 )); 9171 thread::sleep(std::time::Duration::new(10, 0)); 9172 } 9173 9174 // Start the live-migration 9175 let migration_socket = String::from( 9176 guest 9177 .tmp_dir 9178 .as_path() 9179 .join("live-migration.sock") 9180 .to_str() 9181 .unwrap(), 9182 ); 9183 9184 assert!( 9185 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9186 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9187 ); 9188 }); 9189 9190 // Check and report any errors occured during the live-migration 9191 if r.is_err() { 9192 print_and_panic( 9193 src_child, 9194 dest_child, 9195 None, 9196 "Error occured during live-migration", 9197 ); 9198 } 9199 9200 // Check the source vm has been terminated successful (give it '3s' to settle) 9201 thread::sleep(std::time::Duration::new(3, 0)); 9202 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9203 print_and_panic( 9204 src_child, 9205 dest_child, 9206 None, 9207 "source VM was not terminated successfully.", 9208 ); 9209 }; 9210 9211 // Post live-migration check to make sure the destination VM is funcational 9212 let r = std::panic::catch_unwind(|| { 9213 // Perform same checks to validate VM has been properly migrated 9214 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9215 #[cfg(target_arch = "x86_64")] 9216 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 9217 #[cfg(target_arch = "aarch64")] 9218 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9219 9220 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9221 9222 // Perform NUMA related checks 9223 { 9224 #[cfg(target_arch = "aarch64")] 9225 { 9226 guest.check_numa_common( 9227 Some(&[960_000, 960_000, 1_920_000]), 9228 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9229 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9230 ); 9231 } 9232 9233 // AArch64 currently does not support hotplug, and therefore we only 9234 // test hotplug-related function on x86_64 here. 9235 #[cfg(target_arch = "x86_64")] 9236 { 9237 guest.check_numa_common( 9238 Some(&[1_920_000, 1_920_000, 2_880_000]), 9239 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9240 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9241 ); 9242 9243 guest.enable_memory_hotplug(); 9244 9245 // Resize every memory zone and check each associated NUMA node 9246 // has been assigned the right amount of memory. 
9247 resize_zone_command(&dest_api_socket, "mem0", "4G"); 9248 resize_zone_command(&dest_api_socket, "mem1", "4G"); 9249 resize_zone_command(&dest_api_socket, "mem2", "4G"); 9250 // Resize to the maximum amount of CPUs and check each NUMA 9251 // node has been assigned the right CPUs set. 9252 resize_command(&dest_api_socket, Some(max_vcpus), None, None, None); 9253 thread::sleep(std::time::Duration::new(5, 0)); 9254 9255 guest.check_numa_common( 9256 Some(&[3_840_000, 3_840_000, 3_840_000]), 9257 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 9258 None, 9259 ); 9260 } 9261 } 9262 }); 9263 9264 // Clean-up the destination VM and make sure it terminated correctly 9265 let _ = dest_child.kill(); 9266 let dest_output = dest_child.wait_with_output().unwrap(); 9267 handle_child_output(r, &dest_output); 9268 9269 // Check the destination VM has the expected 'concole_text' from its output 9270 let r = std::panic::catch_unwind(|| { 9271 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9272 }); 9273 handle_child_output(r, &dest_output); 9274 } 9275 9276 fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) { 9277 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9278 let guest = Guest::new(Box::new(focal)); 9279 let kernel_path = direct_kernel_boot_path(); 9280 let console_text = String::from("On a branch floating down river a cricket, singing."); 9281 let net_id = "net123"; 9282 let net_params = format!( 9283 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9284 net_id, guest.network.guest_mac, guest.network.host_ip 9285 ); 9286 9287 let memory_param: &[&str] = if local { 9288 &["--memory", "size=4G,shared=on"] 9289 } else { 9290 &["--memory", "size=4G"] 9291 }; 9292 9293 let boot_vcpus = 2; 9294 let max_vcpus = 4; 9295 9296 let pmem_temp_file = TempFile::new().unwrap(); 9297 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9298 std::process::Command::new("mkfs.ext4") 9299 .arg(pmem_temp_file.as_path()) 9300 .output() 9301 .expect("Expect creating disk image to succeed"); 9302 let pmem_path = String::from("/dev/pmem0"); 9303 9304 // Start the source VM 9305 let src_vm_path = if !upgrade_test { 9306 clh_command("cloud-hypervisor") 9307 } else { 9308 cloud_hypervisor_release_path() 9309 }; 9310 let src_api_socket = temp_api_path(&guest.tmp_dir); 9311 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9312 src_vm_cmd 9313 .args([ 9314 "--cpus", 9315 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9316 ]) 9317 .args(memory_param) 9318 .args(["--kernel", kernel_path.to_str().unwrap()]) 9319 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9320 .default_disks() 9321 .args(["--net", net_params.as_str()]) 9322 .args(["--api-socket", &src_api_socket]) 9323 .args([ 9324 "--pmem", 9325 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9326 ]) 9327 .args(["--watchdog"]); 9328 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9329 9330 // Start the destination VM 9331 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9332 dest_api_socket.push_str(".dest"); 9333 let mut dest_child = GuestCommand::new(&guest) 9334 .args(["--api-socket", &dest_api_socket]) 9335 .capture_output() 9336 .spawn() 9337 .unwrap(); 9338 9339 let r = std::panic::catch_unwind(|| { 9340 guest.wait_vm_boot(None).unwrap(); 9341 9342 // Make sure the source VM is functaionl 9343 // Check the number of vCPUs 9344 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9345 // 
Check the guest RAM 9346 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9347 // Check the guest virtio-devices, e.g. block, rng, console, and net 9348 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9349 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9350 // to make sure that removing and adding back the virtio-net device does 9351 // not break the live-migration support for virtio-pci. 9352 #[cfg(target_arch = "x86_64")] 9353 { 9354 assert!(remote_command( 9355 &src_api_socket, 9356 "remove-device", 9357 Some(net_id), 9358 )); 9359 thread::sleep(std::time::Duration::new(10, 0)); 9360 9361 // Plug the virtio-net device again 9362 assert!(remote_command( 9363 &src_api_socket, 9364 "add-net", 9365 Some(net_params.as_str()), 9366 )); 9367 thread::sleep(std::time::Duration::new(10, 0)); 9368 } 9369 9370 // Enable watchdog and ensure its functional 9371 let mut expected_reboot_count = 1; 9372 // Enable the watchdog with a 15s timeout 9373 enable_guest_watchdog(&guest, 15); 9374 // Reboot and check that systemd has activated the watchdog 9375 guest.ssh_command("sudo reboot").unwrap(); 9376 guest.wait_vm_boot(None).unwrap(); 9377 expected_reboot_count += 1; 9378 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9379 assert_eq!( 9380 guest 9381 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 9382 .unwrap() 9383 .trim() 9384 .parse::<u32>() 9385 .unwrap_or_default(), 9386 2 9387 ); 9388 // Allow some normal time to elapse to check we don't get spurious reboots 9389 thread::sleep(std::time::Duration::new(40, 0)); 9390 // Check no reboot 9391 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9392 9393 // Start the live-migration 9394 let migration_socket = String::from( 9395 guest 9396 .tmp_dir 9397 .as_path() 9398 .join("live-migration.sock") 9399 .to_str() 9400 .unwrap(), 9401 ); 9402 9403 assert!( 9404 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9405 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9406 ); 9407 }); 9408 9409 // Check and report any errors occured during the live-migration 9410 if r.is_err() { 9411 print_and_panic( 9412 src_child, 9413 dest_child, 9414 None, 9415 "Error occured during live-migration", 9416 ); 9417 } 9418 9419 // Check the source vm has been terminated successful (give it '3s' to settle) 9420 thread::sleep(std::time::Duration::new(3, 0)); 9421 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9422 print_and_panic( 9423 src_child, 9424 dest_child, 9425 None, 9426 "source VM was not terminated successfully.", 9427 ); 9428 }; 9429 9430 // Post live-migration check to make sure the destination VM is funcational 9431 let r = std::panic::catch_unwind(|| { 9432 // Perform same checks to validate VM has been properly migrated 9433 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9434 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9435 9436 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9437 9438 // Perform checks on watchdog 9439 let mut expected_reboot_count = 2; 9440 9441 // Allow some normal time to elapse to check we don't get spurious reboots 9442 thread::sleep(std::time::Duration::new(40, 0)); 9443 // Check no reboot 9444 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9445 9446 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 
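            // Descriptive note (not in the original): writing 's' to /proc/sysrq-trigger
            // performs an emergency sync and 'c' forces a kernel crash, so the guest
            // stops petting the watchdog and the watchdog is expected to reboot the VM.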
9447 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 9448 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 9449 guest.wait_vm_boot(Some(50)).unwrap(); 9450 // Check a reboot is triggerred by the watchdog 9451 expected_reboot_count += 1; 9452 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9453 9454 #[cfg(target_arch = "x86_64")] 9455 { 9456 // Now pause the VM and remain offline for 30s 9457 assert!(remote_command(&dest_api_socket, "pause", None)); 9458 thread::sleep(std::time::Duration::new(30, 0)); 9459 assert!(remote_command(&dest_api_socket, "resume", None)); 9460 9461 // Check no reboot 9462 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9463 } 9464 }); 9465 9466 // Clean-up the destination VM and make sure it terminated correctly 9467 let _ = dest_child.kill(); 9468 let dest_output = dest_child.wait_with_output().unwrap(); 9469 handle_child_output(r, &dest_output); 9470 9471 // Check the destination VM has the expected 'concole_text' from its output 9472 let r = std::panic::catch_unwind(|| { 9473 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9474 }); 9475 handle_child_output(r, &dest_output); 9476 } 9477 9478 fn _test_live_migration_ovs_dpdk(upgrade_test: bool, local: bool) { 9479 let ovs_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9480 let ovs_guest = Guest::new(Box::new(ovs_focal)); 9481 9482 let migration_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9483 let migration_guest = Guest::new(Box::new(migration_focal)); 9484 let src_api_socket = temp_api_path(&migration_guest.tmp_dir); 9485 9486 // Start two VMs that are connected through ovs-dpdk and one of the VMs is the source VM for live-migration 9487 let (mut ovs_child, mut src_child) = 9488 setup_ovs_dpdk_guests(&ovs_guest, &migration_guest, &src_api_socket, upgrade_test); 9489 9490 // Start the destination VM 9491 let mut dest_api_socket = temp_api_path(&migration_guest.tmp_dir); 9492 dest_api_socket.push_str(".dest"); 9493 let mut dest_child = GuestCommand::new(&migration_guest) 9494 .args(["--api-socket", &dest_api_socket]) 9495 .capture_output() 9496 .spawn() 9497 .unwrap(); 9498 9499 let r = std::panic::catch_unwind(|| { 9500 // Give it '1s' to make sure the 'dest_api_socket' file is properly created 9501 thread::sleep(std::time::Duration::new(1, 0)); 9502 9503 // Start the live-migration 9504 let migration_socket = String::from( 9505 migration_guest 9506 .tmp_dir 9507 .as_path() 9508 .join("live-migration.sock") 9509 .to_str() 9510 .unwrap(), 9511 ); 9512 9513 assert!( 9514 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9515 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
9516 ); 9517 }); 9518 9519 // Check and report any errors occured during the live-migration 9520 if r.is_err() { 9521 print_and_panic( 9522 src_child, 9523 dest_child, 9524 Some(ovs_child), 9525 "Error occured during live-migration", 9526 ); 9527 } 9528 9529 // Check the source vm has been terminated successful (give it '3s' to settle) 9530 thread::sleep(std::time::Duration::new(3, 0)); 9531 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9532 print_and_panic( 9533 src_child, 9534 dest_child, 9535 Some(ovs_child), 9536 "source VM was not terminated successfully.", 9537 ); 9538 }; 9539 9540 // Post live-migration check to make sure the destination VM is funcational 9541 let r = std::panic::catch_unwind(|| { 9542 // Perform same checks to validate VM has been properly migrated 9543 // Spawn a new netcat listener in the OVS VM 9544 let guest_ip = ovs_guest.network.guest_ip.clone(); 9545 thread::spawn(move || { 9546 ssh_command_ip( 9547 "nc -l 12345", 9548 &guest_ip, 9549 DEFAULT_SSH_RETRIES, 9550 DEFAULT_SSH_TIMEOUT, 9551 ) 9552 .unwrap(); 9553 }); 9554 9555 // Wait for the server to be listening 9556 thread::sleep(std::time::Duration::new(5, 0)); 9557 9558 // And check the connection is still functional after live-migration 9559 migration_guest 9560 .ssh_command("nc -vz 172.100.0.1 12345") 9561 .unwrap(); 9562 }); 9563 9564 // Clean-up the destination VM and OVS VM, and make sure they terminated correctly 9565 let _ = dest_child.kill(); 9566 let _ = ovs_child.kill(); 9567 let dest_output = dest_child.wait_with_output().unwrap(); 9568 let ovs_output = ovs_child.wait_with_output().unwrap(); 9569 9570 cleanup_ovs_dpdk(); 9571 9572 handle_child_output(r, &dest_output); 9573 handle_child_output(Ok(()), &ovs_output); 9574 } 9575 9576 mod live_migration_parallel { 9577 use super::*; 9578 #[test] 9579 fn test_live_migration_basic() { 9580 _test_live_migration(false, false) 9581 } 9582 9583 #[test] 9584 fn test_live_migration_local() { 9585 _test_live_migration(false, true) 9586 } 9587 9588 #[test] 9589 #[cfg(not(feature = "mshv"))] 9590 fn test_live_migration_numa() { 9591 _test_live_migration_numa(false, false) 9592 } 9593 9594 #[test] 9595 #[cfg(not(feature = "mshv"))] 9596 fn test_live_migration_numa_local() { 9597 _test_live_migration_numa(false, true) 9598 } 9599 9600 #[test] 9601 fn test_live_migration_watchdog() { 9602 _test_live_migration_watchdog(false, false) 9603 } 9604 9605 #[test] 9606 fn test_live_migration_watchdog_local() { 9607 _test_live_migration_watchdog(false, true) 9608 } 9609 9610 #[test] 9611 fn test_live_migration_balloon() { 9612 _test_live_migration_balloon(false, false) 9613 } 9614 9615 #[test] 9616 fn test_live_migration_balloon_local() { 9617 _test_live_migration_balloon(false, true) 9618 } 9619 9620 #[test] 9621 fn test_live_upgrade_basic() { 9622 _test_live_migration(true, false) 9623 } 9624 9625 #[test] 9626 fn test_live_upgrade_local() { 9627 _test_live_migration(true, true) 9628 } 9629 9630 #[test] 9631 #[cfg(not(feature = "mshv"))] 9632 fn test_live_upgrade_numa() { 9633 _test_live_migration_numa(true, false) 9634 } 9635 9636 #[test] 9637 #[cfg(not(feature = "mshv"))] 9638 fn test_live_upgrade_numa_local() { 9639 _test_live_migration_numa(true, true) 9640 } 9641 9642 #[test] 9643 fn test_live_upgrade_watchdog() { 9644 _test_live_migration_watchdog(true, false) 9645 } 9646 9647 #[test] 9648 fn test_live_upgrade_watchdog_local() { 9649 _test_live_migration_watchdog(true, true) 9650 } 9651 9652 #[test] 9653 fn test_live_upgrade_balloon() { 9654 
            _test_live_migration_balloon(true, false)
        }

        #[test]
        fn test_live_upgrade_balloon_local() {
            _test_live_migration_balloon(true, true)
        }
    }

    mod live_migration_sequential {
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        use super::*;

        // The ovs-dpdk tests must run sequentially because they rely on the same ovs-dpdk setup
        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_ovs_dpdk() {
            _test_live_migration_ovs_dpdk(false, false);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_ovs_dpdk_local() {
            _test_live_migration_ovs_dpdk(false, true);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_ovs_dpdk() {
            _test_live_migration_ovs_dpdk(true, false);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_ovs_dpdk_local() {
            _test_live_migration_ovs_dpdk(true, true);
        }
    }
}

#[cfg(target_arch = "aarch64")]
mod aarch64_acpi {
    use crate::*;

    #[test]
    fn test_simple_launch_acpi() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());

        vec![Box::new(focal)].drain(..).for_each(|disk_config| {
            let guest = Guest::new(disk_config);

            let mut child = GuestCommand::new(&guest)
                .args(["--cpus", "boot=1"])
                .args(["--memory", "size=512M"])
                .args(["--kernel", edk2_path().to_str().unwrap()])
                .default_disks()
                .default_net()
                .args(["--serial", "tty", "--console", "off"])
                .capture_output()
                .spawn()
                .unwrap();

            let r = std::panic::catch_unwind(|| {
                guest.wait_vm_boot(Some(120)).unwrap();

                assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
                assert!(guest.get_total_memory().unwrap_or_default() > 400_000);
                assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000");
            });

            let _ = child.kill();
            let output = child.wait_with_output().unwrap();

            handle_child_output(r, &output);
        });
    }

    #[test]
    fn test_guest_numa_nodes_acpi() {
        _test_guest_numa_nodes(true);
    }

    #[test]
    fn test_cpu_topology_421_acpi() {
        test_cpu_topology(4, 2, 1, true);
    }

    #[test]
    fn test_cpu_topology_142_acpi() {
        test_cpu_topology(1, 4, 2, true);
    }

    #[test]
    fn test_cpu_topology_262_acpi() {
        test_cpu_topology(2, 6, 2, true);
    }

    #[test]
    fn test_power_button_acpi() {
        _test_power_button(true);
    }

    #[test]
    fn test_virtio_iommu() {
        _test_virtio_iommu(true)
    }
}

mod rate_limiter {
    use super::*;

    // Check if the 'measured' rate is within the expected 'difference' (in percentage)
    // compared to the given 'limit' rate.
    fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool {
        let upper_limit = limit * (1_f64 + difference);
        let lower_limit = limit * (1_f64 - difference);

        if measured > lower_limit && measured < upper_limit {
            return true;
        }

        eprintln!(
            "\n\n==== check_rate_limit failed! ====\n\nmeasured={measured}, lower_limit={lower_limit}, upper_limit={upper_limit}\n\n"
        );

        false
    }
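    // Expected-rate math used by the tests below, assuming the token-bucket
    // semantics implied by the `bw_size`/`bw_refill_time` parameters: a bucket of
    // `bw_size` bytes refilled every `bw_refill_time` ms sustains roughly
    // bw_size * 1000 / bw_refill_time bytes per second, e.g. 10 MiB per 100 ms is
    // ~100 MiB/s (and ~838.9 Mbps once converted to bits for the net test).
    //
    // Minimal sanity check of `check_rate_limit` itself (an illustrative example,
    // not a guest test): with a 10% allowed difference, only values strictly
    // inside the (0.9 * limit, 1.1 * limit) interval pass.
    #[test]
    fn test_check_rate_limit_bounds() {
        assert!(check_rate_limit(105.0, 100.0, 0.1));
        assert!(!check_rate_limit(89.0, 100.0, 0.1));
        assert!(!check_rate_limit(111.0, 100.0, 0.1));
    }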

    fn _test_rate_limiter_net(rx: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));

        let test_timeout = 10;
        let num_queues = 2;
        let queue_size = 256;
        let bw_size = 10485760_u64; // bytes
        let bw_refill_time = 100; // ms
        let limit_bps = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64;

        let net_params = format!(
            "tap=,mac={},ip={},mask=255.255.255.0,num_queues={},queue_size={},bw_size={},bw_refill_time={}",
            guest.network.guest_mac,
            guest.network.host_ip,
            num_queues,
            queue_size,
            bw_size,
            bw_refill_time,
        );

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={}", num_queues / 2)])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();
            let measured_bps =
                measure_virtio_net_throughput(test_timeout, num_queues / 2, &guest, rx, true)
                    .unwrap();
            assert!(check_rate_limit(measured_bps, limit_bps, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_net_rx() {
        _test_rate_limiter_net(true);
    }

    #[test]
    fn test_rate_limiter_net_tx() {
        _test_rate_limiter_net(false);
    }

    fn _test_rate_limiter_block(bandwidth: bool) {
        let test_timeout = 10;
        let num_queues = 1;
        let fio_ops = FioOps::RandRW;

        let bw_size = if bandwidth {
            10485760_u64 // bytes
        } else {
            100_u64 // I/O
        };
        let bw_refill_time = 100; // ms
        let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);
        let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();
        let blk_rate_limiter_test_img =
            String::from(test_img_dir.as_path().join("blk.img").to_str().unwrap());

        // Create the test block image
        assert!(exec_host_command_output(&format!(
            "dd if=/dev/zero of={blk_rate_limiter_test_img} bs=1M count=1024"
        ))
        .status
        .success());

        let test_blk_params = if bandwidth {
            format!(
                "path={blk_rate_limiter_test_img},bw_size={bw_size},bw_refill_time={bw_refill_time}"
            )
        } else {
            format!(
                "path={blk_rate_limiter_test_img},ops_size={bw_size},ops_refill_time={bw_refill_time}"
            )
        };

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={num_queues}")])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .args([
                "--disk",
                format!(
                    "path={}",
                    guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
                )
                .as_str(),
                "--disk",
                format!(
                    "path={}",
                    guest.disk_config.disk(DiskType::CloudInit).unwrap()
                )
                .as_str(),
                "--disk",
                test_blk_params.as_str(),
            ])
            .default_net()
            .args(["--api-socket", &api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            let fio_command = format!(
                "sudo fio --filename=/dev/vdc --name=test --output-format=json \
                --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \
                --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}"
            );
            let output = guest.ssh_command(&fio_command).unwrap();

            // Parse fio output
            let measured_rate = if bandwidth {
                parse_fio_output(&output, &fio_ops, num_queues).unwrap()
            } else {
                parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap()
            };
            assert!(check_rate_limit(measured_rate, limit_rate, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_block_bandwidth() {
        _test_rate_limiter_block(true)
    }

    #[test]
    fn test_rate_limiter_block_iops() {
        _test_rate_limiter_block(false)
    }
}