// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::undocumented_unsafe_blocks)]
// When enabling the `mshv` feature, we skip quite a few tests and
// hence have known dead code. This annotation silences dead-code
// related warnings so that our quality workflow can pass.
#![allow(dead_code)]

extern crate test_infra;

use net_util::MacAddr;
use std::collections::HashMap;
use std::fs;
use std::io;
use std::io::BufRead;
use std::io::Read;
use std::io::Seek;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::Receiver;
use std::sync::Mutex;
use std::thread;
use test_infra::*;
use vmm_sys_util::{tempdir::TempDir, tempfile::TempFile};
use wait_timeout::ChildExt;

// Constant taken from the VMM crate.
const MAX_NUM_PCI_SEGMENTS: u16 = 96;

#[cfg(target_arch = "x86_64")]
mod x86_64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
    pub const JAMMY_NVIDIA_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-nvidia.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20230119-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw";
    pub const OVMF_NAME: &str = "CLOUDHV.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
}

#[cfg(target_arch = "x86_64")]
use x86_64::*;

#[cfg(target_arch = "aarch64")]
mod aarch64 {
    pub const BIONIC_IMAGE_NAME: &str = "bionic-server-cloudimg-arm64.raw";
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
    pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw";
    pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
    pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true";
}

#[cfg(target_arch = "aarch64")]
use aarch64::*;

const DIRECT_KERNEL_BOOT_CMDLINE: &str =
    "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1";

const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server";

// This enum exists to make it more convenient to
// implement tests for both the D-Bus and REST APIs.
enum TargetApi {
    // API socket
    HttpApi(String),
    // well known service name, object path
    DBusApi(String, String),
}

impl TargetApi {
    fn new_http_api(tmp_dir: &TempDir) -> Self {
        Self::HttpApi(temp_api_path(tmp_dir))
    }

    fn new_dbus_api(tmp_dir: &TempDir) -> Self {
        // `tmp_dir` is in the form of "/tmp/chXXXXXX"
        // and we take the `chXXXXXX` part as a unique identifier for the guest
        let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap();

        Self::DBusApi(
            format!("org.cloudhypervisor.{id}"),
            format!("/org/cloudhypervisor/{id}"),
        )
    }

    fn guest_args(&self) -> Vec<&str> {
        match self {
            TargetApi::HttpApi(api_socket) => {
                vec!["--api-socket", api_socket.as_str()]
            }
            TargetApi::DBusApi(service_name, object_path) => {
                vec![
                    "--dbus-service-name",
                    service_name.as_str(),
                    "--dbus-object-path",
                    object_path.as_str(),
                ]
            }
        }
    }

    fn remote_args(&self) -> Vec<&str> {
        // `guest_args` and `remote_args` are consistent with each other
        self.guest_args()
    }

    fn remote_command(&self, command: &str, arg: Option<&str>) -> bool {
        let mut cmd = Command::new(clh_command("ch-remote"));
        cmd.args(self.remote_args());
        cmd.arg(command);

        if let Some(arg) = arg {
            cmd.arg(arg);
        }

        let output = cmd.output().unwrap();
        if output.status.success() {
            true
        } else {
            eprintln!("Error running ch-remote command: {:?}", &cmd);
            let stderr = String::from_utf8_lossy(&output.stderr);
            eprintln!("stderr: {stderr}");
            false
        }
    }
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
fn _test_api_create_boot(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config)));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be shut down and then
// booted again.
fn _test_api_shutdown(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shutdown
        thread::sleep(std::time::Duration::new(20, 0));

        // Then shut it down
        assert!(target_api.remote_command("shutdown", None));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be deleted, then recreated
// and booted again.
fn _test_api_delete(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );
    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shutdown
        thread::sleep(std::time::Duration::new(20, 0));

        // Then delete it
        assert!(target_api.remote_command("delete", None));

        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
// Then we pause the VM, check that it's no longer available.
// Finally we resume the VM and check that it's available.
fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config)));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // We now pause the VM
        assert!(target_api.remote_command("pause", None));

        // Check pausing again fails
        assert!(!target_api.remote_command("pause", None));

        thread::sleep(std::time::Duration::new(2, 0));

        // SSH into the VM should fail
        assert!(ssh_command_ip(
            "grep -c processor /proc/cpuinfo",
            &guest.network.guest_ip,
            2,
            5
        )
        .is_err());

        // Resume the VM
        assert!(target_api.remote_command("resume", None));

        // Check resuming again fails
        assert!(!target_api.remote_command("resume", None));

        thread::sleep(std::time::Duration::new(2, 0));

        // Now we should be able to SSH back in and get the right number of CPUs
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut virtiofsd_path = workload_path;
    virtiofsd_path.push("virtiofsd");
    let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap());

    let virtiofsd_socket_path =
        String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap());

    // Start the daemon
    let child = Command::new(virtiofsd_path.as_str())
        .args(["--shared-dir", shared_dir])
        .args(["--socket-path", virtiofsd_socket_path.as_str()])
        .args(["--cache", "never"])
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(10, 0));

    (child, virtiofsd_socket_path)
}

fn prepare_vubd(
    tmp_dir: &TempDir,
    blk_img: &str,
    num_queues: usize,
    rdonly: bool,
    direct: bool,
) -> (std::process::Child, String) {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut blk_file_path = workload_path;
    blk_file_path.push(blk_img);
    let blk_file_path = String::from(blk_file_path.to_str().unwrap());

    let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap());

    // Start the daemon
    let child = Command::new(clh_command("vhost_user_block"))
        .args([
"--block-backend", 442 format!( 443 "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}" 444 ) 445 .as_str(), 446 ]) 447 .spawn() 448 .unwrap(); 449 450 thread::sleep(std::time::Duration::new(10, 0)); 451 452 (child, vubd_socket_path) 453 } 454 455 fn temp_vsock_path(tmp_dir: &TempDir) -> String { 456 String::from(tmp_dir.as_path().join("vsock").to_str().unwrap()) 457 } 458 459 fn temp_api_path(tmp_dir: &TempDir) -> String { 460 String::from( 461 tmp_dir 462 .as_path() 463 .join("cloud-hypervisor.sock") 464 .to_str() 465 .unwrap(), 466 ) 467 } 468 469 fn temp_event_monitor_path(tmp_dir: &TempDir) -> String { 470 String::from(tmp_dir.as_path().join("event.json").to_str().unwrap()) 471 } 472 473 // Creates the directory and returns the path. 474 fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String { 475 let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap()); 476 std::fs::create_dir(&snapshot_dir).unwrap(); 477 snapshot_dir 478 } 479 480 fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String { 481 let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap()); 482 vmcore_file 483 } 484 485 // Creates the path for direct kernel boot and return the path. 486 // For x86_64, this function returns the vmlinux kernel path. 487 // For AArch64, this function returns the PE kernel path. 488 fn direct_kernel_boot_path() -> PathBuf { 489 let mut workload_path = dirs::home_dir().unwrap(); 490 workload_path.push("workloads"); 491 492 let mut kernel_path = workload_path; 493 #[cfg(target_arch = "x86_64")] 494 kernel_path.push("vmlinux"); 495 #[cfg(target_arch = "aarch64")] 496 kernel_path.push("Image"); 497 498 kernel_path 499 } 500 501 fn edk2_path() -> PathBuf { 502 let mut workload_path = dirs::home_dir().unwrap(); 503 workload_path.push("workloads"); 504 let mut edk2_path = workload_path; 505 edk2_path.push(OVMF_NAME); 506 507 edk2_path 508 } 509 510 fn cloud_hypervisor_release_path() -> String { 511 let mut workload_path = dirs::home_dir().unwrap(); 512 workload_path.push("workloads"); 513 514 let mut ch_release_path = workload_path; 515 #[cfg(target_arch = "x86_64")] 516 ch_release_path.push("cloud-hypervisor-static"); 517 #[cfg(target_arch = "aarch64")] 518 ch_release_path.push("cloud-hypervisor-static-aarch64"); 519 520 ch_release_path.into_os_string().into_string().unwrap() 521 } 522 523 fn prepare_vhost_user_net_daemon( 524 tmp_dir: &TempDir, 525 ip: &str, 526 tap: Option<&str>, 527 mtu: Option<u16>, 528 num_queues: usize, 529 client_mode: bool, 530 ) -> (std::process::Command, String) { 531 let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap()); 532 533 // Start the daemon 534 let mut net_params = format!( 535 "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}" 536 ); 537 538 if let Some(tap) = tap { 539 net_params.push_str(format!(",tap={tap}").as_str()); 540 } 541 542 if let Some(mtu) = mtu { 543 net_params.push_str(format!(",mtu={mtu}").as_str()); 544 } 545 546 let mut command = Command::new(clh_command("vhost_user_net")); 547 command.args(["--net-backend", net_params.as_str()]); 548 549 (command, vunet_socket_path) 550 } 551 552 fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) { 553 let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap()); 554 let swtpm_socket_path = String::from( 555 tmp_dir 556 .as_path() 557 .join("swtpm") 558 
.join("swtpm.sock") 559 .to_str() 560 .unwrap(), 561 ); 562 std::fs::create_dir(&swtpm_tpm_dir).unwrap(); 563 564 let mut swtpm_command = Command::new("swtpm"); 565 let swtpm_args = [ 566 "socket", 567 "--tpmstate", 568 &format!("dir={swtpm_tpm_dir}"), 569 "--ctrl", 570 &format!("type=unixio,path={swtpm_socket_path}"), 571 "--flags", 572 "startup-clear", 573 "--tpm2", 574 ]; 575 swtpm_command.args(swtpm_args); 576 577 (swtpm_command, swtpm_socket_path) 578 } 579 580 fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool { 581 let mut cmd = Command::new(clh_command("ch-remote")); 582 cmd.args(["--api-socket", api_socket, command]); 583 584 if let Some(arg) = arg { 585 cmd.arg(arg); 586 } 587 let output = cmd.output().unwrap(); 588 if output.status.success() { 589 true 590 } else { 591 eprintln!("Error running ch-remote command: {:?}", &cmd); 592 let stderr = String::from_utf8_lossy(&output.stderr); 593 eprintln!("stderr: {stderr}"); 594 false 595 } 596 } 597 598 fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) { 599 let mut cmd = Command::new(clh_command("ch-remote")); 600 cmd.args(["--api-socket", api_socket, command]); 601 602 if let Some(arg) = arg { 603 cmd.arg(arg); 604 } 605 606 let output = cmd.output().expect("Failed to launch ch-remote"); 607 608 (output.status.success(), output.stdout) 609 } 610 611 fn resize_command( 612 api_socket: &str, 613 desired_vcpus: Option<u8>, 614 desired_ram: Option<usize>, 615 desired_balloon: Option<usize>, 616 event_file: Option<&str>, 617 ) -> bool { 618 let mut cmd = Command::new(clh_command("ch-remote")); 619 cmd.args(["--api-socket", api_socket, "resize"]); 620 621 if let Some(desired_vcpus) = desired_vcpus { 622 cmd.args(["--cpus", &format!("{desired_vcpus}")]); 623 } 624 625 if let Some(desired_ram) = desired_ram { 626 cmd.args(["--memory", &format!("{desired_ram}")]); 627 } 628 629 if let Some(desired_balloon) = desired_balloon { 630 cmd.args(["--balloon", &format!("{desired_balloon}")]); 631 } 632 633 let ret = cmd.status().expect("Failed to launch ch-remote").success(); 634 635 if let Some(event_path) = event_file { 636 let latest_events = [ 637 &MetaEvent { 638 event: "resizing".to_string(), 639 device_id: None, 640 }, 641 &MetaEvent { 642 event: "resized".to_string(), 643 device_id: None, 644 }, 645 ]; 646 assert!(check_latest_events_exact(&latest_events, event_path)); 647 } 648 649 ret 650 } 651 652 fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool { 653 let mut cmd = Command::new(clh_command("ch-remote")); 654 cmd.args([ 655 "--api-socket", 656 api_socket, 657 "resize-zone", 658 "--id", 659 id, 660 "--size", 661 desired_size, 662 ]); 663 664 cmd.status().expect("Failed to launch ch-remote").success() 665 } 666 667 // setup OVS-DPDK bridge and ports 668 fn setup_ovs_dpdk() { 669 // setup OVS-DPDK 670 assert!(exec_host_command_status("service openvswitch-switch start").success()); 671 assert!(exec_host_command_status("ovs-vsctl init").success()); 672 assert!( 673 exec_host_command_status("ovs-vsctl set Open_vSwitch . 
        exec_host_command_status("ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true")
            .success()
    );
    assert!(exec_host_command_status("service openvswitch-switch restart").success());

    // Create OVS-DPDK bridge and ports
    assert!(exec_host_command_status(
        "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev",
    )
    .success());
    assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success());
    assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success());
    assert!(exec_host_command_status("ip link set up dev ovsbr0").success());
    assert!(exec_host_command_status("service openvswitch-switch restart").success());
}

fn cleanup_ovs_dpdk() {
    assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success());
    exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2");
}

// Setup two guests and ensure they are connected through ovs-dpdk
fn setup_ovs_dpdk_guests(
    guest1: &Guest,
    guest2: &Guest,
    api_socket: &str,
    release_binary: bool,
) -> (Child, Child) {
    setup_ovs_dpdk();

    let clh_path = if !release_binary {
        clh_command("cloud-hypervisor")
    } else {
        cloud_hypervisor_release_path()
    };

    let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path)
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=0,shared=on"])
        .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest1.default_net_string().as_str(), "--net", "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"])
        .capture_output()
        .spawn()
        .unwrap();

    #[cfg(target_arch = "x86_64")]
    let guest_net_iface = "ens5";
    #[cfg(target_arch = "aarch64")]
    let guest_net_iface = "enp0s5";

    let r = std::panic::catch_unwind(|| {
        guest1.wait_vm_boot(None).unwrap();

        guest1
            .ssh_command(&format!(
                "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}"
            ))
            .unwrap();
        guest1
            .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
            .unwrap();

        let guest_ip = guest1.network.guest_ip.clone();
        thread::spawn(move || {
            ssh_command_ip(
                "nc -l 12345",
                &guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap();
        });
    });
    if r.is_err() {
        cleanup_ovs_dpdk();

        let _ = child1.kill();
        let output = child1.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark that this block never returns
    }

    let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path)
        .args(["--api-socket", api_socket])
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=0,shared=on"])
        .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
"vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"]) 765 .capture_output() 766 .spawn() 767 .unwrap(); 768 769 let r = std::panic::catch_unwind(|| { 770 guest2.wait_vm_boot(None).unwrap(); 771 772 guest2 773 .ssh_command(&format!( 774 "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}" 775 )) 776 .unwrap(); 777 guest2 778 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 779 .unwrap(); 780 781 // Check the connection works properly between the two VMs 782 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 783 }); 784 if r.is_err() { 785 cleanup_ovs_dpdk(); 786 787 let _ = child1.kill(); 788 let _ = child2.kill(); 789 let output = child2.wait_with_output().unwrap(); 790 handle_child_output(r, &output); 791 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 792 } 793 794 (child1, child2) 795 } 796 797 enum FwType { 798 Ovmf, 799 RustHypervisorFirmware, 800 } 801 802 fn fw_path(_fw_type: FwType) -> String { 803 let mut workload_path = dirs::home_dir().unwrap(); 804 workload_path.push("workloads"); 805 806 let mut fw_path = workload_path; 807 #[cfg(target_arch = "aarch64")] 808 fw_path.push("CLOUDHV_EFI.fd"); 809 #[cfg(target_arch = "x86_64")] 810 { 811 match _fw_type { 812 FwType::Ovmf => fw_path.push(OVMF_NAME), 813 FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"), 814 } 815 } 816 817 fw_path.to_str().unwrap().to_string() 818 } 819 820 struct MetaEvent { 821 event: String, 822 device_id: Option<String>, 823 } 824 825 impl MetaEvent { 826 pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool { 827 let mut matched = false; 828 if v["event"].as_str().unwrap() == self.event { 829 if let Some(device_id) = &self.device_id { 830 if v["properties"]["id"].as_str().unwrap() == device_id { 831 matched = true 832 } 833 } else { 834 matched = true; 835 } 836 } 837 matched 838 } 839 } 840 841 // Parse the event_monitor file based on the format that each event 842 // is followed by a double newline 843 fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> { 844 let content = fs::read(event_file).unwrap(); 845 let mut ret = Vec::new(); 846 for entry in String::from_utf8_lossy(&content) 847 .trim() 848 .split("\n\n") 849 .collect::<Vec<&str>>() 850 { 851 ret.push(serde_json::from_str(entry).unwrap()); 852 } 853 854 ret 855 } 856 857 // Return true if all events from the input 'expected_events' are matched sequentially 858 // with events from the 'event_file' 859 fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool { 860 let json_events = parse_event_file(event_file); 861 let len = expected_events.len(); 862 let mut idx = 0; 863 for e in &json_events { 864 if idx == len { 865 break; 866 } 867 if expected_events[idx].match_with_json_event(e) { 868 idx += 1; 869 } 870 } 871 872 idx == len 873 } 874 875 // Return true if all events from the input 'expected_events' are matched exactly 876 // with events from the 'event_file' 877 fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool { 878 let json_events = parse_event_file(event_file); 879 assert!(expected_events.len() <= json_events.len()); 880 let json_events = &json_events[..expected_events.len()]; 881 882 for (idx, e) in json_events.iter().enumerate() { 883 if !expected_events[idx].match_with_json_event(e) { 884 return false; 885 } 886 } 887 888 true 889 } 890 891 // Return true if events from the input 'latest_events' are matched exactly 892 
// with the most recent events from the 'event_file'
fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    assert!(latest_events.len() <= json_events.len());
    let json_events = &json_events[(json_events.len() - latest_events.len())..];

    for (idx, e) in json_events.iter().enumerate() {
        if !latest_events[idx].match_with_json_event(e) {
            return false;
        }
    }

    true
}

fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let total_vcpus = threads_per_core * cores_per_package * packages;
    let direct_kernel_boot_path = direct_kernel_boot_path();
    let mut kernel_path = direct_kernel_boot_path.to_str().unwrap();
    let fw_path = fw_path(FwType::RustHypervisorFirmware);
    if use_fw {
        kernel_path = fw_path.as_str();
    }

    let mut child = GuestCommand::new(&guest)
        .args([
            "--cpus",
            &format!(
                "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}"
            ),
        ])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();
        assert_eq!(
            guest.get_cpu_count().unwrap_or_default(),
            u32::from(total_vcpus)
        );
        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            threads_per_core
        );

        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            cores_per_package
        );

        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            packages
        );
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[allow(unused_variables)]
fn _test_guest_numa_nodes(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);
    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=6,max=12"])
        .args(["--memory", "size=0,hotplug_method=virtio-mem"])
        .args([
            "--memory-zone",
            "id=mem0,size=1G,hotplug_size=3G",
            "--memory-zone",
            "id=mem1,size=2G,hotplug_size=3G",
            "--memory-zone",
            "id=mem2,size=3G,hotplug_size=3G",
        ])
        .args([
            "--numa",
            "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
            "--numa",
            "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
            "--numa",
            "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
        ])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--api-socket", &api_socket])
        .capture_output()
        .default_disks()
        .default_net()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        guest.check_numa_common(
            Some(&[960_000, 1_920_000, 2_880_000]),
            Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
            Some(&["10 15 20", "20 10 25", "25 30 10"]),
        );

        // AArch64 currently does not support hotplug, and therefore we only
        // test hotplug-related functions on x86_64 here.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Resize every memory zone and check each associated NUMA node
            // has been assigned the right amount of memory.
            resize_zone_command(&api_socket, "mem0", "4G");
            resize_zone_command(&api_socket, "mem1", "4G");
            resize_zone_command(&api_socket, "mem2", "4G");
            // Resize to the maximum amount of CPUs and check each NUMA
            // node has been assigned the right CPUs set.
            resize_command(&api_socket, Some(12), None, None, None);
            thread::sleep(std::time::Duration::new(5, 0));

            guest.check_numa_common(
                Some(&[3_840_000, 3_840_000, 3_840_000]),
                Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]),
                None,
            );
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[allow(unused_variables)]
fn _test_power_button(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let mut cmd = GuestCommand::new(&guest);
    let api_socket = temp_api_path(&guest.tmp_dir);

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    cmd.args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .capture_output()
        .default_disks()
        .default_net()
        .args(["--api-socket", &api_socket]);

    let child = cmd.spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();
        assert!(remote_command(&api_socket, "power-button", None));
    });

    let output = child.wait_with_output().unwrap();
    assert!(output.status.success());
    handle_child_output(r, &output);
}

type PrepareNetDaemon = dyn Fn(
    &TempDir,
    &str,
    Option<&str>,
    Option<u16>,
    usize,
    bool,
) -> (std::process::Command, String);

fn test_vhost_user_net(
    tap: Option<&str>,
    num_queues: usize,
    prepare_daemon: &PrepareNetDaemon,
    generate_host_mac: bool,
    client_mode_daemon: bool,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();

    let host_mac = if generate_host_mac {
        Some(MacAddr::local_random())
    } else {
        None
    };

    let mtu = Some(3000);

    let (mut daemon_command, vunet_socket_path) = prepare_daemon(
        &guest.tmp_dir,
        &guest.network.host_ip,
        tap,
        mtu,
        num_queues,
        client_mode_daemon,
    );

    let net_params = format!(
        "vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000",
        guest.network.guest_mac,
        vunet_socket_path,
        num_queues,
        if let Some(host_mac) = host_mac {
            format!(",host_mac={host_mac}")
        } else {
            "".to_owned()
        },
        if client_mode_daemon {
            "server"
        } else {
            "client"
        },
    );

    let mut ch_command = GuestCommand::new(&guest);
    ch_command
        .args(["--cpus", format!("boot={}", num_queues / 2).as_str()])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", net_params.as_str()])
        .args(["--api-socket", &api_socket])
        .capture_output();

    let mut daemon_child: std::process::Child;
    let mut child: std::process::Child;

    if client_mode_daemon {
        child = ch_command.spawn().unwrap();
        // Make sure the VMM is waiting for the backend to connect
        thread::sleep(std::time::Duration::new(10, 0));
        daemon_child = daemon_command.spawn().unwrap();
    } else {
        daemon_child = daemon_command.spawn().unwrap();
        // Make sure the backend is waiting for the VMM to connect
        thread::sleep(std::time::Duration::new(10, 0));
        child = ch_command.spawn().unwrap();
    }

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if let Some(tap_name) = tap {
            let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}"));
            assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
        }

        if let Some(host_mac) = host_mac {
            let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}"));
            assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
        }

        #[cfg(target_arch = "aarch64")]
        let iface = "enp0s4";
        #[cfg(target_arch = "x86_64")]
        let iface = "ens4";

        assert_eq!(
            guest
                .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
                .unwrap()
                .trim(),
            "3000"
        );

        // 1 network interface + default localhost ==> 2 interfaces
        // It's important to note that this test is fully exercising the
        // vhost-user-net implementation and the associated backend since
        // it does not define any --net network interface. That means all
        // the ssh communication in that test happens through the network
        // interface backed by vhost-user-net.
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // The following PCI devices will appear on the guest with PCI-MSI
        // interrupt vectors assigned.
        // 1 virtio-console with 3 vectors: config, Rx, Tx
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-rng with 2 vectors: config, Request
        // Since virtio-net has 2 queue pairs, its vectors are as follows:
        // 1 virtio-net with 5 vectors: config, Rx (2), Tx (2)
        // Based on the above, the total number of vectors should be 14.
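        // That is, 10 fixed vectors plus one vector per virtio-net queue
        // (num_queues), which is what the assertion below verifies.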
        #[cfg(target_arch = "x86_64")]
        let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
        #[cfg(target_arch = "aarch64")]
        let grep_cmd = "grep -c ITS-MSI /proc/interrupts";
        assert_eq!(
            guest
                .ssh_command(grep_cmd)
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            10 + (num_queues as u32)
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            // Here by simply checking the size (through ssh), we validate
            // the connection is still working, which means vhost-user-net
            // keeps working after the resize.
            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    thread::sleep(std::time::Duration::new(5, 0));
    let _ = daemon_child.kill();
    let _ = daemon_child.wait();

    handle_child_output(r, &output);
}

type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String);

fn test_vhost_user_blk(
    num_queues: usize,
    readonly: bool,
    direct: bool,
    prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();

    let (blk_params, daemon_child) = {
        let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
        // Start the daemon
        let (daemon_child, vubd_socket_path) =
            prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct);

        (
            format!(
                "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128",
            ),
            Some(daemon_child),
        )
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", format!("boot={num_queues}").as_str()])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
            )
            .as_str(),
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
            "--disk",
            blk_params.as_str(),
        ])
        .default_net()
        .args(["--api-socket", &api_socket])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check both if /dev/vdc exists and if the block size is 16M.
        assert_eq!(
            guest
                .ssh_command("lsblk | grep vdc | grep -c 16M")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            1
        );

        // Check if this block is RO or RW.
        assert_eq!(
            guest
                .ssh_command("lsblk | grep vdc | awk '{print $5}'")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            readonly as u32
        );

        // Check if the number of queues in /sys/block/vdc/mq matches the
        // expected num_queues.
        assert_eq!(
            guest
                .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            num_queues as u32
        );

        // Mount the device
        let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" };
        guest.ssh_command("mkdir mount_image").unwrap();
        guest
            .ssh_command(
                format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(),
            )
            .unwrap();

        // Check the content of the block device. The file "foo" should
        // contain "bar".
        assert_eq!(
            guest.ssh_command("cat mount_image/foo").unwrap().trim(),
            "bar"
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

            // Check again the content of the block device after the resize
            // has been performed.
            assert_eq!(
                guest.ssh_command("cat mount_image/foo").unwrap().trim(),
                "bar"
            );
        }

        // Unmount the device
        guest.ssh_command("sudo umount /dev/vdc").unwrap();
        guest.ssh_command("rm -r mount_image").unwrap();
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    if let Some(mut daemon_child) = daemon_child {
        thread::sleep(std::time::Duration::new(5, 0));
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn test_boot_from_vhost_user_blk(
    num_queues: usize,
    readonly: bool,
    direct: bool,
    prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    let kernel_path = direct_kernel_boot_path();

    let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap();

    let (blk_boot_params, daemon_child) = {
        let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
        // Start the daemon
        let (daemon_child, vubd_socket_path) = prepare_daemon(
            &guest.tmp_dir,
            disk_path.as_str(),
            num_queues,
            readonly,
            direct,
        );

        (
            format!(
                "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128",
            ),
            Some(daemon_child),
        )
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", format!("boot={num_queues}").as_str()])
        .args(["--memory", "size=512M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            blk_boot_params.as_str(),
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
        ])
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Just check the VM booted correctly.
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });
    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    if let Some(mut daemon_child) = daemon_child {
        thread::sleep(std::time::Duration::new(5, 0));
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn _test_virtio_fs(
    prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String),
    hotplug: bool,
    pci_segment: Option<u16>,
) {
    #[cfg(target_arch = "aarch64")]
    let focal_image = if hotplug {
        FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string()
    } else {
        FOCAL_IMAGE_NAME.to_string()
    };
    #[cfg(target_arch = "x86_64")]
    let focal_image = FOCAL_IMAGE_NAME.to_string();
    let focal = UbuntuDiskConfig::new(focal_image);
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut shared_dir = workload_path;
    shared_dir.push("shared_dir");

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if hotplug {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let (mut daemon_child, virtiofsd_socket_path) =
        prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());

    let mut guest_command = GuestCommand::new(&guest);
    guest_command
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .args(["--api-socket", &api_socket]);
    if pci_segment.is_some() {
        guest_command.args([
            "--platform",
            &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
        ]);
    }

    let fs_params = format!(
        "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
        virtiofsd_socket_path,
        if let Some(pci_segment) = pci_segment {
            format!(",pci_segment={pci_segment}")
        } else {
            "".to_owned()
        }
    );

    if !hotplug {
        guest_command.args(["--fs", fs_params.as_str()]);
    }

    let mut child = guest_command.capture_output().spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if hotplug {
            // Add fs to the VM
            let (cmd_success, cmd_output) =
                remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
            assert!(cmd_success);

            if let Some(pci_segment) = pci_segment {
                assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                    "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
                )));
            } else {
                assert!(String::from_utf8_lossy(&cmd_output)
                    .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
            }

            thread::sleep(std::time::Duration::new(10, 0));
        }

        // Mount shared directory through virtio_fs filesystem
        guest
            .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
            .unwrap();

        // Check file1 exists and its content is "foo"
        assert_eq!(
            guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
            "foo"
        );
        // Check file2 does not exist
        guest
            .ssh_command("[ ! -f 'mount_dir/file2' ] || true")
            .unwrap();

        // Check file3 exists and its content is "bar"
        assert_eq!(
            guest.ssh_command("cat mount_dir/file3").unwrap().trim(),
            "bar"
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(30, 0));
            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

            // After the resize, check again that file1 exists and its
            // content is "foo".
            assert_eq!(
                guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
                "foo"
            );
        }

        if hotplug {
            // Remove from VM
            guest.ssh_command("sudo umount mount_dir").unwrap();
            assert!(remote_command(&api_socket, "remove-device", Some("myfs0")));
        }
    });

    let (r, hotplug_daemon_child) = if r.is_ok() && hotplug {
        thread::sleep(std::time::Duration::new(10, 0));
        let (daemon_child, virtiofsd_socket_path) =
            prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());

        let r = std::panic::catch_unwind(|| {
            thread::sleep(std::time::Duration::new(10, 0));
            let fs_params = format!(
                "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
                virtiofsd_socket_path,
                if let Some(pci_segment) = pci_segment {
                    format!(",pci_segment={pci_segment}")
                } else {
                    "".to_owned()
                }
            );

            // Add back and check it works
            let (cmd_success, cmd_output) =
                remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
            assert!(cmd_success);
            if let Some(pci_segment) = pci_segment {
                assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                    "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
                )));
            } else {
                assert!(String::from_utf8_lossy(&cmd_output)
                    .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
            }

            thread::sleep(std::time::Duration::new(10, 0));
            // Mount shared directory through virtio_fs filesystem
            guest
                .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
                .unwrap();

            // Check file1 exists and its content is "foo"
            assert_eq!(
                guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
                "foo"
            );
        });

        (r, Some(daemon_child))
    } else {
        (r, None)
    };

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    let _ = daemon_child.kill();
    let _ = daemon_child.wait();

    if let Some(mut daemon_child) = hotplug_daemon_child {
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn test_virtio_pmem(discard_writes: bool, specify_size: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    let kernel_path = direct_kernel_boot_path();

    let pmem_temp_file = TempFile::new().unwrap();
    pmem_temp_file.as_file().set_len(128 << 20).unwrap();

    std::process::Command::new("mkfs.ext4")
        .arg(pmem_temp_file.as_path())
        .output()
        .expect("Expect creating disk image to succeed");

    let mut child = GuestCommand::new(&guest)
"boot=1"]) 1701 .args(["--memory", "size=512M"]) 1702 .args(["--kernel", kernel_path.to_str().unwrap()]) 1703 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1704 .default_disks() 1705 .default_net() 1706 .args([ 1707 "--pmem", 1708 format!( 1709 "file={}{}{}", 1710 pmem_temp_file.as_path().to_str().unwrap(), 1711 if specify_size { ",size=128M" } else { "" }, 1712 if discard_writes { 1713 ",discard_writes=on" 1714 } else { 1715 "" 1716 } 1717 ) 1718 .as_str(), 1719 ]) 1720 .capture_output() 1721 .spawn() 1722 .unwrap(); 1723 1724 let r = std::panic::catch_unwind(|| { 1725 guest.wait_vm_boot(None).unwrap(); 1726 1727 // Check for the presence of /dev/pmem0 1728 assert_eq!( 1729 guest.ssh_command("ls /dev/pmem0").unwrap().trim(), 1730 "/dev/pmem0" 1731 ); 1732 1733 // Check changes persist after reboot 1734 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1735 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 1736 guest 1737 .ssh_command("echo test123 | sudo tee /mnt/test") 1738 .unwrap(); 1739 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 1740 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 1741 1742 guest.reboot_linux(0, None); 1743 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1744 assert_eq!( 1745 guest 1746 .ssh_command("sudo cat /mnt/test || true") 1747 .unwrap() 1748 .trim(), 1749 if discard_writes { "" } else { "test123" } 1750 ); 1751 }); 1752 1753 let _ = child.kill(); 1754 let output = child.wait_with_output().unwrap(); 1755 1756 handle_child_output(r, &output); 1757 } 1758 1759 fn get_fd_count(pid: u32) -> usize { 1760 fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count() 1761 } 1762 1763 fn _test_virtio_vsock(hotplug: bool) { 1764 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1765 let guest = Guest::new(Box::new(focal)); 1766 1767 #[cfg(target_arch = "x86_64")] 1768 let kernel_path = direct_kernel_boot_path(); 1769 #[cfg(target_arch = "aarch64")] 1770 let kernel_path = if hotplug { 1771 edk2_path() 1772 } else { 1773 direct_kernel_boot_path() 1774 }; 1775 1776 let socket = temp_vsock_path(&guest.tmp_dir); 1777 let api_socket = temp_api_path(&guest.tmp_dir); 1778 1779 let mut cmd = GuestCommand::new(&guest); 1780 cmd.args(["--api-socket", &api_socket]); 1781 cmd.args(["--cpus", "boot=1"]); 1782 cmd.args(["--memory", "size=512M"]); 1783 cmd.args(["--kernel", kernel_path.to_str().unwrap()]); 1784 cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]); 1785 cmd.default_disks(); 1786 cmd.default_net(); 1787 1788 if !hotplug { 1789 cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]); 1790 } 1791 1792 let mut child = cmd.capture_output().spawn().unwrap(); 1793 1794 let r = std::panic::catch_unwind(|| { 1795 guest.wait_vm_boot(None).unwrap(); 1796 1797 if hotplug { 1798 let (cmd_success, cmd_output) = remote_command_w_output( 1799 &api_socket, 1800 "add-vsock", 1801 Some(format!("cid=3,socket={socket},id=test0").as_str()), 1802 ); 1803 assert!(cmd_success); 1804 assert!(String::from_utf8_lossy(&cmd_output) 1805 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 1806 thread::sleep(std::time::Duration::new(10, 0)); 1807 // Check adding a second one fails 1808 assert!(!remote_command( 1809 &api_socket, 1810 "add-vsock", 1811 Some("cid=1234,socket=/tmp/fail") 1812 )); 1813 } 1814 1815 // Validate vsock works as expected. 1816 guest.check_vsock(socket.as_str()); 1817 guest.reboot_linux(0, None); 1818 // Validate vsock still works after a reboot. 
        guest.check_vsock(socket.as_str());

        if hotplug {
            assert!(remote_command(&api_socket, "remove-device", Some("test0")));
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

fn get_ksm_pages_shared() -> u32 {
    fs::read_to_string("/sys/kernel/mm/ksm/pages_shared")
        .unwrap()
        .trim()
        .parse::<u32>()
        .unwrap()
}

fn test_memory_mergeable(mergeable: bool) {
    let memory_param = if mergeable {
        "mergeable=on"
    } else {
        "mergeable=off"
    };

    // We are assuming the rest of the system in our CI is not using mergeable memory
    let ksm_ps_init = get_ksm_pages_shared();
    assert!(ksm_ps_init == 0);

    let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest1 = Guest::new(Box::new(focal1));
    let mut child1 = GuestCommand::new(&guest1)
        .args(["--cpus", "boot=1"])
        .args(["--memory", format!("size=512M,{memory_param}").as_str()])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest1.default_net_string().as_str()])
        .args(["--serial", "tty", "--console", "off"])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest1.wait_vm_boot(None).unwrap();
    });
    if r.is_err() {
        let _ = child1.kill();
        let output = child1.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark that this block never returns
    }

    let ksm_ps_guest1 = get_ksm_pages_shared();

    let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest2 = Guest::new(Box::new(focal2));
    let mut child2 = GuestCommand::new(&guest2)
        .args(["--cpus", "boot=1"])
        .args(["--memory", format!("size=512M,{memory_param}").as_str()])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest2.default_net_string().as_str()])
        .args(["--serial", "tty", "--console", "off"])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest2.wait_vm_boot(None).unwrap();
        let ksm_ps_guest2 = get_ksm_pages_shared();

        if mergeable {
            println!(
                "ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'"
            );
            // We are expecting the number of shared pages to increase as the number of VMs increases
            assert!(ksm_ps_guest1 < ksm_ps_guest2);
        } else {
            assert!(ksm_ps_guest1 == 0);
            assert!(ksm_ps_guest2 == 0);
        }
    });

    let _ = child1.kill();
    let _ = child2.kill();

    let output = child1.wait_with_output().unwrap();
    child2.wait().unwrap();

    handle_child_output(r, &output);
}

fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> {
    let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap();
    let reader = io::BufReader::new(smaps);

    let mut skip_map: bool = false;
    let mut region_name: String = "".to_string();
    let mut region_maps = HashMap::new();
    for line in reader.lines() {
        let l = line.unwrap();

        if l.contains('-') {
values: Vec<&str> = l.split_whitespace().collect();
            region_name = values.last().unwrap().trim().to_string();
            if region_name == "0" {
                region_name = "anonymous".to_string()
            }
        }

        // Each section begins with something that looks like:
        // Size: 2184 kB
        if l.starts_with("Size:") {
            let values: Vec<&str> = l.split_whitespace().collect();
            let map_size = values[1].parse::<u32>().unwrap();
            // We skip the assigned guest RAM map, as its RSS only depends on the
            // guest's actual memory usage.
            // Everything else can be added to the VMM overhead.
            skip_map = map_size >= guest_memory_size;
            continue;
        }

        // If this is a map we're taking into account, then we only
        // count the RSS. The sum of all counted RSS is the VMM overhead.
        if !skip_map && l.starts_with("Rss:") {
            let values: Vec<&str> = l.split_whitespace().collect();
            let value = values[1].trim().parse::<u32>().unwrap();
            *region_maps.entry(region_name.clone()).or_insert(0) += value;
        }
    }

    region_maps
}

fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 {
    let mut total = 0;

    for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) {
        eprintln!("{region_name}: {value}");
        total += value;
    }

    total
}

fn process_rss_kib(pid: u32) -> usize {
    let command = format!("ps -q {pid} -o rss=");
    let rss = exec_host_command_output(&command);
    String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap()
}

// 10MB is our maximum accepted overhead.
const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024;

#[derive(PartialEq, Eq, PartialOrd)]
struct Counters {
    rx_bytes: u64,
    rx_frames: u64,
    tx_bytes: u64,
    tx_frames: u64,
    read_bytes: u64,
    write_bytes: u64,
    read_ops: u64,
    write_ops: u64,
}

fn get_counters(api_socket: &str) -> Counters {
    // Get counters
    let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None);
    assert!(cmd_success);

    let counters: HashMap<&str, HashMap<&str, u64>> =
        serde_json::from_slice(&cmd_output).unwrap_or_default();

    let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap();
    let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap();
    let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap();
    let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap();

    let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap();
    let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap();
    let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap();
    let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap();

    Counters {
        rx_bytes,
        rx_frames,
        tx_bytes,
        tx_frames,
        read_bytes,
        write_bytes,
        read_ops,
        write_ops,
    }
}

fn pty_read(mut pty: std::fs::File) -> Receiver<String> {
    let (tx, rx) = mpsc::channel::<String>();
    thread::spawn(move || loop {
        thread::sleep(std::time::Duration::new(1, 0));
        let mut buf = [0; 512];
        match pty.read(&mut buf) {
            Ok(_) => {
                let output = std::str::from_utf8(&buf).unwrap().to_string();
                match tx.send(output) {
                    Ok(_) => (),
                    Err(_) => break,
                }
            }
            Err(_) => break,
        }
    });
    rx
} 2038 2039 fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf { 2040 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2041 assert!(cmd_success); 2042 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2043 assert_eq!("Pty", info["config"][pty_type]["mode"]); 2044 PathBuf::from( 2045 info["config"][pty_type]["file"] 2046 .as_str() 2047 .expect("Missing pty path"), 2048 ) 2049 } 2050 2051 // VFIO test network setup. 2052 // We reserve a different IP class for it: 172.18.0.0/24. 2053 #[cfg(target_arch = "x86_64")] 2054 fn setup_vfio_network_interfaces() { 2055 // 'vfio-br0' 2056 assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success()); 2057 assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success()); 2058 assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success()); 2059 // 'vfio-tap0' 2060 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success()); 2061 assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success()); 2062 assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success()); 2063 // 'vfio-tap1' 2064 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success()); 2065 assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success()); 2066 assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success()); 2067 // 'vfio-tap2' 2068 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success()); 2069 assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success()); 2070 assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success()); 2071 // 'vfio-tap3' 2072 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success()); 2073 assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success()); 2074 assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success()); 2075 } 2076 2077 // Tear VFIO test network down 2078 #[cfg(target_arch = "x86_64")] 2079 fn cleanup_vfio_network_interfaces() { 2080 assert!(exec_host_command_status("sudo ip link del vfio-br0").success()); 2081 assert!(exec_host_command_status("sudo ip link del vfio-tap0").success()); 2082 assert!(exec_host_command_status("sudo ip link del vfio-tap1").success()); 2083 assert!(exec_host_command_status("sudo ip link del vfio-tap2").success()); 2084 assert!(exec_host_command_status("sudo ip link del vfio-tap3").success()); 2085 } 2086 2087 fn balloon_size(api_socket: &str) -> u64 { 2088 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2089 assert!(cmd_success); 2090 2091 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2092 let total_mem = &info["config"]["memory"]["size"] 2093 .to_string() 2094 .parse::<u64>() 2095 .unwrap(); 2096 let actual_mem = &info["memory_actual_size"] 2097 .to_string() 2098 .parse::<u64>() 2099 .unwrap(); 2100 total_mem - actual_mem 2101 } 2102 2103 fn vm_state(api_socket: &str) -> String { 2104 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2105 assert!(cmd_success); 2106 2107 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2108 let state = &info["state"].as_str().unwrap(); 2109 2110 state.to_string() 2111 } 2112 2113 // This test validates that it can find the 
virtio-iommu device first.
// It also verifies that both disks and the network card are attached to
// the virtual IOMMU by looking at the /sys/kernel/iommu_groups directory.
// The last interesting part of this test is that it exercises the network
// interface attached to the virtual IOMMU since this is the one used to
// send all commands through SSH.
fn _test_virtio_iommu(acpi: bool) {
    // Virtio-iommu support is available in recent kernels (v5.14 and later),
    // but the kernel in the Focal image predates it.
    // So if ACPI is enabled on AArch64, we use a modified Focal image in which
    // the kernel binary has been updated.
    #[cfg(target_arch = "aarch64")]
    let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string();
    #[cfg(target_arch = "x86_64")]
    let focal_image = FOCAL_IMAGE_NAME.to_string();
    let focal = UbuntuDiskConfig::new(focal_image);
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            format!(
                "path={},iommu=on",
                guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
            )
            .as_str(),
            "--disk",
            format!(
                "path={},iommu=on",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
        ])
        .args(["--net", guest.default_net_string_w_iommu().as_str()])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Verify the virtio-iommu device is present.
        assert!(guest
            .does_device_vendor_pair_match("0x1057", "0x1af4")
            .unwrap_or_default());

        // On AArch64, if the guest system boots from FDT, the IOMMU behavior is
        // a bit different from the ACPI case.
        // All devices on the PCI bus will be attached to the virtual IOMMU, except the
        // virtio-iommu device itself. So these devices will all be added to IOMMU groups,
        // and appear under the folder '/sys/kernel/iommu_groups/'.
        // In the FDT case, IOMMU group '0' therefore contains "0000:00:01.0",
        // which is the console, and the first disk "0000:00:02.0" is in group '1'.
        // With ACPI, the console device is not attached to the IOMMU, so IOMMU
        // group '0' contains "0000:00:02.0", which is the first disk.
        //
        // Verify the iommu group of the first disk.
        let iommu_group = !acpi as i32;
        assert_eq!(
            guest
                .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str())
                .unwrap()
                .trim(),
            "0000:00:02.0"
        );

        // Verify the iommu group of the second disk.
        let iommu_group = if acpi { 1 } else { 2 };
        assert_eq!(
            guest
                .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str())
                .unwrap()
                .trim(),
            "0000:00:03.0"
        );

        // Verify the iommu group of the network card.
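        // (For reference, the group layout that these three checks encode,
        //  derived from the comment above, is:
        //    FDT:  group 0 = console 00:01.0, 1 = disk 00:02.0, 2 = disk 00:03.0, 3 = net 00:04.0
        //    ACPI: group 0 = disk 00:02.0, 1 = disk 00:03.0, 2 = net 00:04.0.)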
2203 let iommu_group = if acpi { 2 } else { 3 }; 2204 assert_eq!( 2205 guest 2206 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2207 .unwrap() 2208 .trim(), 2209 "0000:00:04.0" 2210 ); 2211 }); 2212 2213 let _ = child.kill(); 2214 let output = child.wait_with_output().unwrap(); 2215 2216 handle_child_output(r, &output); 2217 } 2218 2219 fn get_reboot_count(guest: &Guest) -> u32 { 2220 guest 2221 .ssh_command("sudo last | grep -c reboot") 2222 .unwrap() 2223 .trim() 2224 .parse::<u32>() 2225 .unwrap_or_default() 2226 } 2227 2228 fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) { 2229 // Check for PCI device 2230 assert!(guest 2231 .does_device_vendor_pair_match("0x1063", "0x1af4") 2232 .unwrap_or_default()); 2233 2234 // Enable systemd watchdog 2235 guest 2236 .ssh_command(&format!( 2237 "echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf" 2238 )) 2239 .unwrap(); 2240 } 2241 2242 fn make_guest_panic(guest: &Guest) { 2243 // Check for pvpanic device 2244 assert!(guest 2245 .does_device_vendor_pair_match("0x0011", "0x1b36") 2246 .unwrap_or_default()); 2247 2248 // Trigger guest a panic 2249 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 2250 } 2251 2252 mod common_parallel { 2253 use std::{fs::OpenOptions, io::SeekFrom}; 2254 2255 use crate::*; 2256 2257 #[test] 2258 #[cfg(target_arch = "x86_64")] 2259 fn test_focal_hypervisor_fw() { 2260 test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME) 2261 } 2262 2263 #[test] 2264 #[cfg(target_arch = "x86_64")] 2265 fn test_focal_ovmf() { 2266 test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME) 2267 } 2268 2269 #[cfg(target_arch = "x86_64")] 2270 fn test_simple_launch(fw_path: String, disk_path: &str) { 2271 let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string())); 2272 let guest = Guest::new(disk_config); 2273 let event_path = temp_event_monitor_path(&guest.tmp_dir); 2274 2275 let mut child = GuestCommand::new(&guest) 2276 .args(["--cpus", "boot=1"]) 2277 .args(["--memory", "size=512M"]) 2278 .args(["--kernel", fw_path.as_str()]) 2279 .default_disks() 2280 .default_net() 2281 .args(["--serial", "tty", "--console", "off"]) 2282 .args(["--event-monitor", format!("path={event_path}").as_str()]) 2283 .capture_output() 2284 .spawn() 2285 .unwrap(); 2286 2287 let r = std::panic::catch_unwind(|| { 2288 guest.wait_vm_boot(Some(120)).unwrap(); 2289 2290 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2291 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2292 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 2293 2294 let expected_sequential_events = [ 2295 &MetaEvent { 2296 event: "starting".to_string(), 2297 device_id: None, 2298 }, 2299 &MetaEvent { 2300 event: "booting".to_string(), 2301 device_id: None, 2302 }, 2303 &MetaEvent { 2304 event: "booted".to_string(), 2305 device_id: None, 2306 }, 2307 &MetaEvent { 2308 event: "activated".to_string(), 2309 device_id: Some("_disk0".to_string()), 2310 }, 2311 &MetaEvent { 2312 event: "reset".to_string(), 2313 device_id: Some("_disk0".to_string()), 2314 }, 2315 ]; 2316 assert!(check_sequential_events( 2317 &expected_sequential_events, 2318 &event_path 2319 )); 2320 2321 // It's been observed on the Bionic image that udev and snapd 2322 // services can cause some delay in the VM's shutdown. Disabling 2323 // them improves the reliability of this test. 
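            // Failures of the commands below are deliberately ignored
            // (`let _ = ...`): if either service is already stopped or missing,
            // the poweroff path is still exercised as intended.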
2324 let _ = guest.ssh_command("sudo systemctl disable udev"); 2325 let _ = guest.ssh_command("sudo systemctl stop udev"); 2326 let _ = guest.ssh_command("sudo systemctl disable snapd"); 2327 let _ = guest.ssh_command("sudo systemctl stop snapd"); 2328 2329 guest.ssh_command("sudo poweroff").unwrap(); 2330 thread::sleep(std::time::Duration::new(20, 0)); 2331 let latest_events = [ 2332 &MetaEvent { 2333 event: "shutdown".to_string(), 2334 device_id: None, 2335 }, 2336 &MetaEvent { 2337 event: "deleted".to_string(), 2338 device_id: None, 2339 }, 2340 &MetaEvent { 2341 event: "shutdown".to_string(), 2342 device_id: None, 2343 }, 2344 ]; 2345 assert!(check_latest_events_exact(&latest_events, &event_path)); 2346 }); 2347 2348 let _ = child.kill(); 2349 let output = child.wait_with_output().unwrap(); 2350 2351 handle_child_output(r, &output); 2352 } 2353 2354 #[test] 2355 fn test_multi_cpu() { 2356 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 2357 let jammy = UbuntuDiskConfig::new(jammy_image); 2358 let guest = Guest::new(Box::new(jammy)); 2359 2360 let mut cmd = GuestCommand::new(&guest); 2361 cmd.args(["--cpus", "boot=2,max=4"]) 2362 .args(["--memory", "size=512M"]) 2363 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2364 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2365 .capture_output() 2366 .default_disks() 2367 .default_net(); 2368 2369 let mut child = cmd.spawn().unwrap(); 2370 2371 let r = std::panic::catch_unwind(|| { 2372 guest.wait_vm_boot(Some(120)).unwrap(); 2373 2374 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 2375 2376 assert_eq!( 2377 guest 2378 .ssh_command( 2379 r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""# 2380 ) 2381 .unwrap() 2382 .trim(), 2383 "smp: Brought up 1 node, 2 CPUs" 2384 ); 2385 }); 2386 2387 let _ = child.kill(); 2388 let output = child.wait_with_output().unwrap(); 2389 2390 handle_child_output(r, &output); 2391 } 2392 2393 #[test] 2394 fn test_cpu_topology_421() { 2395 test_cpu_topology(4, 2, 1, false); 2396 } 2397 2398 #[test] 2399 fn test_cpu_topology_142() { 2400 test_cpu_topology(1, 4, 2, false); 2401 } 2402 2403 #[test] 2404 fn test_cpu_topology_262() { 2405 test_cpu_topology(2, 6, 2, false); 2406 } 2407 2408 #[test] 2409 #[cfg(target_arch = "x86_64")] 2410 #[cfg(not(feature = "mshv"))] 2411 fn test_cpu_physical_bits() { 2412 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2413 let guest = Guest::new(Box::new(focal)); 2414 let max_phys_bits: u8 = 36; 2415 let mut child = GuestCommand::new(&guest) 2416 .args(["--cpus", &format!("max_phys_bits={max_phys_bits}")]) 2417 .args(["--memory", "size=512M"]) 2418 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2419 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2420 .default_disks() 2421 .default_net() 2422 .capture_output() 2423 .spawn() 2424 .unwrap(); 2425 2426 let r = std::panic::catch_unwind(|| { 2427 guest.wait_vm_boot(None).unwrap(); 2428 2429 assert!( 2430 guest 2431 .ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"") 2432 .unwrap() 2433 .trim() 2434 .parse::<u8>() 2435 .unwrap_or(max_phys_bits + 1) <= max_phys_bits, 2436 ); 2437 }); 2438 2439 let _ = child.kill(); 2440 let output = child.wait_with_output().unwrap(); 2441 2442 handle_child_output(r, &output); 2443 } 2444 2445 #[test] 2446 fn test_cpu_affinity() { 2447 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2448 let guest = Guest::new(Box::new(focal)); 2449 2450 // We need the host to 
have at least 4 CPUs if we want to be able 2451 // to run this test. 2452 let host_cpus_count = exec_host_command_output("nproc"); 2453 assert!( 2454 String::from_utf8_lossy(&host_cpus_count.stdout) 2455 .trim() 2456 .parse::<u16>() 2457 .unwrap_or(0) 2458 >= 4 2459 ); 2460 2461 let mut child = GuestCommand::new(&guest) 2462 .args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"]) 2463 .args(["--memory", "size=512M"]) 2464 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2465 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2466 .default_disks() 2467 .default_net() 2468 .capture_output() 2469 .spawn() 2470 .unwrap(); 2471 2472 let r = std::panic::catch_unwind(|| { 2473 guest.wait_vm_boot(None).unwrap(); 2474 let pid = child.id(); 2475 let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2476 assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2"); 2477 let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2478 assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3"); 2479 }); 2480 2481 let _ = child.kill(); 2482 let output = child.wait_with_output().unwrap(); 2483 2484 handle_child_output(r, &output); 2485 } 2486 2487 #[test] 2488 #[cfg(not(feature = "mshv"))] 2489 fn test_large_vm() { 2490 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2491 let guest = Guest::new(Box::new(focal)); 2492 let mut cmd = GuestCommand::new(&guest); 2493 cmd.args(["--cpus", "boot=48"]) 2494 .args(["--memory", "size=5120M"]) 2495 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2496 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2497 .args(["--serial", "tty"]) 2498 .args(["--console", "off"]) 2499 .capture_output() 2500 .default_disks() 2501 .default_net(); 2502 2503 let mut child = cmd.spawn().unwrap(); 2504 2505 guest.wait_vm_boot(None).unwrap(); 2506 2507 let r = std::panic::catch_unwind(|| { 2508 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48); 2509 assert_eq!( 2510 guest 2511 .ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"") 2512 .unwrap() 2513 .trim(), 2514 "0-47" 2515 ); 2516 2517 assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000); 2518 }); 2519 2520 let _ = child.kill(); 2521 let output = child.wait_with_output().unwrap(); 2522 2523 handle_child_output(r, &output); 2524 } 2525 2526 #[test] 2527 #[cfg(not(feature = "mshv"))] 2528 fn test_huge_memory() { 2529 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2530 let guest = Guest::new(Box::new(focal)); 2531 let mut cmd = GuestCommand::new(&guest); 2532 cmd.args(["--cpus", "boot=1"]) 2533 .args(["--memory", "size=128G"]) 2534 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2535 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2536 .capture_output() 2537 .default_disks() 2538 .default_net(); 2539 2540 let mut child = cmd.spawn().unwrap(); 2541 2542 guest.wait_vm_boot(Some(120)).unwrap(); 2543 2544 let r = std::panic::catch_unwind(|| { 2545 assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000); 2546 }); 2547 2548 let _ = child.kill(); 2549 let output = child.wait_with_output().unwrap(); 2550 2551 handle_child_output(r, &output); 2552 } 2553 2554 #[test] 2555 fn test_power_button() { 2556 _test_power_button(false); 2557 } 2558 2559 #[test] 2560 #[cfg(not(feature = "mshv"))] 
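    // The three memory zones configured below provide 3 GiB of boot memory in
    // total (hence the `> 2_880_000` KiB check right after boot), while `mem0`
    // and `mem2` can each grow by their 2 GiB `hotplug_size` through
    // virtio-mem; `mem1` is only there to cover the `shared=on` code path.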
2561 fn test_user_defined_memory_regions() { 2562 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2563 let guest = Guest::new(Box::new(focal)); 2564 let api_socket = temp_api_path(&guest.tmp_dir); 2565 2566 let kernel_path = direct_kernel_boot_path(); 2567 2568 let mut child = GuestCommand::new(&guest) 2569 .args(["--cpus", "boot=1"]) 2570 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 2571 .args([ 2572 "--memory-zone", 2573 "id=mem0,size=1G,hotplug_size=2G", 2574 "--memory-zone", 2575 "id=mem1,size=1G,shared=on", 2576 "--memory-zone", 2577 "id=mem2,size=1G,host_numa_node=0,hotplug_size=2G", 2578 ]) 2579 .args(["--kernel", kernel_path.to_str().unwrap()]) 2580 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2581 .args(["--api-socket", &api_socket]) 2582 .capture_output() 2583 .default_disks() 2584 .default_net() 2585 .spawn() 2586 .unwrap(); 2587 2588 let r = std::panic::catch_unwind(|| { 2589 guest.wait_vm_boot(None).unwrap(); 2590 2591 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 2592 2593 guest.enable_memory_hotplug(); 2594 2595 resize_zone_command(&api_socket, "mem0", "3G"); 2596 thread::sleep(std::time::Duration::new(5, 0)); 2597 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2598 resize_zone_command(&api_socket, "mem2", "3G"); 2599 thread::sleep(std::time::Duration::new(5, 0)); 2600 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 2601 resize_zone_command(&api_socket, "mem0", "2G"); 2602 thread::sleep(std::time::Duration::new(5, 0)); 2603 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 2604 resize_zone_command(&api_socket, "mem2", "2G"); 2605 thread::sleep(std::time::Duration::new(5, 0)); 2606 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2607 2608 guest.reboot_linux(0, None); 2609 2610 // Check the amount of RAM after reboot 2611 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2612 assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000); 2613 2614 // Check if we can still resize down to the initial 'boot'size 2615 resize_zone_command(&api_socket, "mem0", "1G"); 2616 thread::sleep(std::time::Duration::new(5, 0)); 2617 assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000); 2618 resize_zone_command(&api_socket, "mem2", "1G"); 2619 thread::sleep(std::time::Duration::new(5, 0)); 2620 assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000); 2621 }); 2622 2623 let _ = child.kill(); 2624 let output = child.wait_with_output().unwrap(); 2625 2626 handle_child_output(r, &output); 2627 } 2628 2629 #[test] 2630 #[cfg(not(feature = "mshv"))] 2631 fn test_guest_numa_nodes() { 2632 _test_guest_numa_nodes(false); 2633 } 2634 2635 #[test] 2636 #[cfg(target_arch = "x86_64")] 2637 fn test_iommu_segments() { 2638 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2639 let focal = UbuntuDiskConfig::new(focal_image); 2640 let guest = Guest::new(Box::new(focal)); 2641 2642 // Prepare another disk file for the virtio-disk device 2643 let test_disk_path = String::from( 2644 guest 2645 .tmp_dir 2646 .as_path() 2647 .join("test-disk.raw") 2648 .to_str() 2649 .unwrap(), 2650 ); 2651 assert!( 2652 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2653 ); 2654 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2655 2656 let api_socket = temp_api_path(&guest.tmp_dir); 2657 let mut cmd = GuestCommand::new(&guest); 2658 2659 cmd.args(["--cpus", "boot=1"]) 2660 
.args(["--api-socket", &api_socket]) 2661 .args(["--memory", "size=512M"]) 2662 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2663 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2664 .args([ 2665 "--platform", 2666 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"), 2667 ]) 2668 .default_disks() 2669 .capture_output() 2670 .default_net(); 2671 2672 let mut child = cmd.spawn().unwrap(); 2673 2674 guest.wait_vm_boot(None).unwrap(); 2675 2676 let r = std::panic::catch_unwind(|| { 2677 let (cmd_success, cmd_output) = remote_command_w_output( 2678 &api_socket, 2679 "add-disk", 2680 Some( 2681 format!( 2682 "path={},id=test0,pci_segment=1,iommu=on", 2683 test_disk_path.as_str() 2684 ) 2685 .as_str(), 2686 ), 2687 ); 2688 assert!(cmd_success); 2689 assert!(String::from_utf8_lossy(&cmd_output) 2690 .contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}")); 2691 2692 // Check IOMMU setup 2693 assert!(guest 2694 .does_device_vendor_pair_match("0x1057", "0x1af4") 2695 .unwrap_or_default()); 2696 assert_eq!( 2697 guest 2698 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 2699 .unwrap() 2700 .trim(), 2701 "0001:00:01.0" 2702 ); 2703 }); 2704 2705 let _ = child.kill(); 2706 let output = child.wait_with_output().unwrap(); 2707 2708 handle_child_output(r, &output); 2709 } 2710 2711 #[test] 2712 fn test_pci_msi() { 2713 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2714 let guest = Guest::new(Box::new(focal)); 2715 let mut cmd = GuestCommand::new(&guest); 2716 cmd.args(["--cpus", "boot=1"]) 2717 .args(["--memory", "size=512M"]) 2718 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2719 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2720 .capture_output() 2721 .default_disks() 2722 .default_net(); 2723 2724 let mut child = cmd.spawn().unwrap(); 2725 2726 guest.wait_vm_boot(None).unwrap(); 2727 2728 #[cfg(target_arch = "x86_64")] 2729 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 2730 #[cfg(target_arch = "aarch64")] 2731 let grep_cmd = "grep -c ITS-MSI /proc/interrupts"; 2732 2733 let r = std::panic::catch_unwind(|| { 2734 assert_eq!( 2735 guest 2736 .ssh_command(grep_cmd) 2737 .unwrap() 2738 .trim() 2739 .parse::<u32>() 2740 .unwrap_or_default(), 2741 12 2742 ); 2743 }); 2744 2745 let _ = child.kill(); 2746 let output = child.wait_with_output().unwrap(); 2747 2748 handle_child_output(r, &output); 2749 } 2750 2751 #[test] 2752 fn test_virtio_net_ctrl_queue() { 2753 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2754 let guest = Guest::new(Box::new(focal)); 2755 let mut cmd = GuestCommand::new(&guest); 2756 cmd.args(["--cpus", "boot=1"]) 2757 .args(["--memory", "size=512M"]) 2758 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2759 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2760 .args(["--net", guest.default_net_string_w_mtu(3000).as_str()]) 2761 .capture_output() 2762 .default_disks(); 2763 2764 let mut child = cmd.spawn().unwrap(); 2765 2766 guest.wait_vm_boot(None).unwrap(); 2767 2768 #[cfg(target_arch = "aarch64")] 2769 let iface = "enp0s4"; 2770 #[cfg(target_arch = "x86_64")] 2771 let iface = "ens4"; 2772 2773 let r = std::panic::catch_unwind(|| { 2774 assert_eq!( 2775 guest 2776 .ssh_command( 2777 format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str() 2778 ) 2779 .unwrap() 2780 .trim(), 2781 "success" 2782 ); 2783 assert_eq!( 2784 guest 2785 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str()) 2786 .unwrap() 2787 .trim(), 2788 "3000" 2789 ); 
2790 }); 2791 2792 let _ = child.kill(); 2793 let output = child.wait_with_output().unwrap(); 2794 2795 handle_child_output(r, &output); 2796 } 2797 2798 #[test] 2799 #[cfg(not(feature = "mshv"))] 2800 fn test_pci_multiple_segments() { 2801 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2802 let guest = Guest::new(Box::new(focal)); 2803 2804 // Prepare another disk file for the virtio-disk device 2805 let test_disk_path = String::from( 2806 guest 2807 .tmp_dir 2808 .as_path() 2809 .join("test-disk.raw") 2810 .to_str() 2811 .unwrap(), 2812 ); 2813 assert!( 2814 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2815 ); 2816 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2817 2818 let mut cmd = GuestCommand::new(&guest); 2819 cmd.args(["--cpus", "boot=1"]) 2820 .args(["--memory", "size=512M"]) 2821 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2822 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2823 .args([ 2824 "--platform", 2825 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 2826 ]) 2827 .args([ 2828 "--disk", 2829 format!( 2830 "path={}", 2831 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2832 ) 2833 .as_str(), 2834 "--disk", 2835 format!( 2836 "path={}", 2837 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2838 ) 2839 .as_str(), 2840 "--disk", 2841 format!("path={test_disk_path},pci_segment=15").as_str(), 2842 ]) 2843 .capture_output() 2844 .default_net(); 2845 2846 let mut child = cmd.spawn().unwrap(); 2847 2848 guest.wait_vm_boot(None).unwrap(); 2849 2850 let grep_cmd = "lspci | grep \"Host bridge\" | wc -l"; 2851 2852 let r = std::panic::catch_unwind(|| { 2853 // There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest. 2854 assert_eq!( 2855 guest 2856 .ssh_command(grep_cmd) 2857 .unwrap() 2858 .trim() 2859 .parse::<u16>() 2860 .unwrap_or_default(), 2861 MAX_NUM_PCI_SEGMENTS 2862 ); 2863 2864 // Check both if /dev/vdc exists and if the block size is 4M. 2865 assert_eq!( 2866 guest 2867 .ssh_command("lsblk | grep vdc | grep -c 4M") 2868 .unwrap() 2869 .trim() 2870 .parse::<u32>() 2871 .unwrap_or_default(), 2872 1 2873 ); 2874 2875 // Mount the device. 2876 guest.ssh_command("mkdir mount_image").unwrap(); 2877 guest 2878 .ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/") 2879 .unwrap(); 2880 // Grant all users with write permission. 2881 guest.ssh_command("sudo chmod a+w mount_image/").unwrap(); 2882 2883 // Write something to the device. 2884 guest 2885 .ssh_command("sudo echo \"bar\" >> mount_image/foo") 2886 .unwrap(); 2887 2888 // Check the content of the block device. The file "foo" should 2889 // contain "bar". 
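            // (The disk backing /dev/vdc was placed on PCI segment 15, so this
            //  read/write round-trip also shows that I/O works through a host
            //  bridge other than segment 0.)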
2890 assert_eq!( 2891 guest 2892 .ssh_command("sudo cat mount_image/foo") 2893 .unwrap() 2894 .trim(), 2895 "bar" 2896 ); 2897 }); 2898 2899 let _ = child.kill(); 2900 let output = child.wait_with_output().unwrap(); 2901 2902 handle_child_output(r, &output); 2903 } 2904 2905 #[test] 2906 fn test_direct_kernel_boot() { 2907 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2908 let guest = Guest::new(Box::new(focal)); 2909 2910 let kernel_path = direct_kernel_boot_path(); 2911 2912 let mut child = GuestCommand::new(&guest) 2913 .args(["--cpus", "boot=1"]) 2914 .args(["--memory", "size=512M"]) 2915 .args(["--kernel", kernel_path.to_str().unwrap()]) 2916 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2917 .default_disks() 2918 .default_net() 2919 .capture_output() 2920 .spawn() 2921 .unwrap(); 2922 2923 let r = std::panic::catch_unwind(|| { 2924 guest.wait_vm_boot(None).unwrap(); 2925 2926 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2927 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2928 2929 let grep_cmd = if cfg!(target_arch = "x86_64") { 2930 "grep -c PCI-MSI /proc/interrupts" 2931 } else { 2932 "grep -c ITS-MSI /proc/interrupts" 2933 }; 2934 assert_eq!( 2935 guest 2936 .ssh_command(grep_cmd) 2937 .unwrap() 2938 .trim() 2939 .parse::<u32>() 2940 .unwrap_or_default(), 2941 12 2942 ); 2943 }); 2944 2945 let _ = child.kill(); 2946 let output = child.wait_with_output().unwrap(); 2947 2948 handle_child_output(r, &output); 2949 } 2950 2951 fn _test_virtio_block(image_name: &str, disable_io_uring: bool) { 2952 let focal = UbuntuDiskConfig::new(image_name.to_string()); 2953 let guest = Guest::new(Box::new(focal)); 2954 2955 let mut workload_path = dirs::home_dir().unwrap(); 2956 workload_path.push("workloads"); 2957 2958 let mut blk_file_path = workload_path; 2959 blk_file_path.push("blk.img"); 2960 2961 let kernel_path = direct_kernel_boot_path(); 2962 2963 let mut cloud_child = GuestCommand::new(&guest) 2964 .args(["--cpus", "boot=4"]) 2965 .args(["--memory", "size=512M,shared=on"]) 2966 .args(["--kernel", kernel_path.to_str().unwrap()]) 2967 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2968 .args([ 2969 "--disk", 2970 format!( 2971 "path={}", 2972 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2973 ) 2974 .as_str(), 2975 "--disk", 2976 format!( 2977 "path={}", 2978 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2979 ) 2980 .as_str(), 2981 "--disk", 2982 format!( 2983 "path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={}", 2984 blk_file_path.to_str().unwrap(), 2985 disable_io_uring 2986 ) 2987 .as_str(), 2988 ]) 2989 .default_net() 2990 .capture_output() 2991 .spawn() 2992 .unwrap(); 2993 2994 let r = std::panic::catch_unwind(|| { 2995 guest.wait_vm_boot(None).unwrap(); 2996 2997 // Check both if /dev/vdc exists and if the block size is 16M. 2998 assert_eq!( 2999 guest 3000 .ssh_command("lsblk | grep vdc | grep -c 16M") 3001 .unwrap() 3002 .trim() 3003 .parse::<u32>() 3004 .unwrap_or_default(), 3005 1 3006 ); 3007 3008 // Check both if /dev/vdc exists and if this block is RO. 3009 assert_eq!( 3010 guest 3011 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 3012 .unwrap() 3013 .trim() 3014 .parse::<u32>() 3015 .unwrap_or_default(), 3016 1 3017 ); 3018 3019 // Check if the number of queues is 4. 
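            // (Each hardware queue shows up as a numbered directory under
            //  /sys/block/vdc/mq/, e.g. 0..3 when num_queues=4, which is what
            //  the `grep ^d | wc -l` pipeline below counts.)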
3020 assert_eq!( 3021 guest 3022 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 3023 .unwrap() 3024 .trim() 3025 .parse::<u32>() 3026 .unwrap_or_default(), 3027 4 3028 ); 3029 }); 3030 3031 let _ = cloud_child.kill(); 3032 let output = cloud_child.wait_with_output().unwrap(); 3033 3034 handle_child_output(r, &output); 3035 } 3036 3037 #[test] 3038 fn test_virtio_block() { 3039 _test_virtio_block(FOCAL_IMAGE_NAME, false) 3040 } 3041 3042 #[test] 3043 fn test_virtio_block_disable_io_uring() { 3044 _test_virtio_block(FOCAL_IMAGE_NAME, true) 3045 } 3046 3047 #[test] 3048 fn test_virtio_block_qcow2() { 3049 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false) 3050 } 3051 3052 #[test] 3053 fn test_virtio_block_qcow2_backing_file() { 3054 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE, false) 3055 } 3056 3057 #[test] 3058 fn test_virtio_block_vhd() { 3059 let mut workload_path = dirs::home_dir().unwrap(); 3060 workload_path.push("workloads"); 3061 3062 let mut raw_file_path = workload_path.clone(); 3063 let mut vhd_file_path = workload_path; 3064 raw_file_path.push(FOCAL_IMAGE_NAME); 3065 vhd_file_path.push(FOCAL_IMAGE_NAME_VHD); 3066 3067 // Generate VHD file from RAW file 3068 std::process::Command::new("qemu-img") 3069 .arg("convert") 3070 .arg("-p") 3071 .args(["-f", "raw"]) 3072 .args(["-O", "vpc"]) 3073 .args(["-o", "subformat=fixed"]) 3074 .arg(raw_file_path.to_str().unwrap()) 3075 .arg(vhd_file_path.to_str().unwrap()) 3076 .output() 3077 .expect("Expect generating VHD image from RAW image"); 3078 3079 _test_virtio_block(FOCAL_IMAGE_NAME_VHD, false) 3080 } 3081 3082 #[test] 3083 fn test_virtio_block_vhdx() { 3084 let mut workload_path = dirs::home_dir().unwrap(); 3085 workload_path.push("workloads"); 3086 3087 let mut raw_file_path = workload_path.clone(); 3088 let mut vhdx_file_path = workload_path; 3089 raw_file_path.push(FOCAL_IMAGE_NAME); 3090 vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX); 3091 3092 // Generate dynamic VHDX file from RAW file 3093 std::process::Command::new("qemu-img") 3094 .arg("convert") 3095 .arg("-p") 3096 .args(["-f", "raw"]) 3097 .args(["-O", "vhdx"]) 3098 .arg(raw_file_path.to_str().unwrap()) 3099 .arg(vhdx_file_path.to_str().unwrap()) 3100 .output() 3101 .expect("Expect generating dynamic VHDx image from RAW image"); 3102 3103 _test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false) 3104 } 3105 3106 #[test] 3107 fn test_virtio_block_dynamic_vhdx_expand() { 3108 const VIRTUAL_DISK_SIZE: u64 = 100 << 20; 3109 const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20; 3110 const FULL_VHDX_FILE_SIZE: u64 = 112 << 20; 3111 const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx"; 3112 3113 let mut workload_path = dirs::home_dir().unwrap(); 3114 workload_path.push("workloads"); 3115 3116 let mut vhdx_file_path = workload_path; 3117 vhdx_file_path.push(DYNAMIC_VHDX_NAME); 3118 let vhdx_path = vhdx_file_path.to_str().unwrap(); 3119 3120 // Generate a 100 MiB dynamic VHDX file 3121 std::process::Command::new("qemu-img") 3122 .arg("create") 3123 .args(["-f", "vhdx"]) 3124 .arg(vhdx_path) 3125 .arg(VIRTUAL_DISK_SIZE.to_string()) 3126 .output() 3127 .expect("Expect generating dynamic VHDx image from RAW image"); 3128 3129 // Check if the size matches with empty VHDx file size 3130 assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE); 3131 3132 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3133 let guest = Guest::new(Box::new(focal)); 3134 let kernel_path = direct_kernel_boot_path(); 3135 3136 let mut cloud_child = GuestCommand::new(&guest) 3137 .args(["--cpus", 
"boot=1"]) 3138 .args(["--memory", "size=512M"]) 3139 .args(["--kernel", kernel_path.to_str().unwrap()]) 3140 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3141 .args([ 3142 "--disk", 3143 format!( 3144 "path={}", 3145 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3146 ) 3147 .as_str(), 3148 "--disk", 3149 format!( 3150 "path={}", 3151 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3152 ) 3153 .as_str(), 3154 "--disk", 3155 format!("path={vhdx_path}").as_str(), 3156 ]) 3157 .default_net() 3158 .capture_output() 3159 .spawn() 3160 .unwrap(); 3161 3162 let r = std::panic::catch_unwind(|| { 3163 guest.wait_vm_boot(None).unwrap(); 3164 3165 // Check both if /dev/vdc exists and if the block size is 100 MiB. 3166 assert_eq!( 3167 guest 3168 .ssh_command("lsblk | grep vdc | grep -c 100M") 3169 .unwrap() 3170 .trim() 3171 .parse::<u32>() 3172 .unwrap_or_default(), 3173 1 3174 ); 3175 3176 // Write 100 MB of data to the VHDx disk 3177 guest 3178 .ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100") 3179 .unwrap(); 3180 }); 3181 3182 // Check if the size matches with expected expanded VHDx file size 3183 assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE); 3184 3185 let _ = cloud_child.kill(); 3186 let output = cloud_child.wait_with_output().unwrap(); 3187 3188 handle_child_output(r, &output); 3189 } 3190 3191 fn vhdx_image_size(disk_name: &str) -> u64 { 3192 std::fs::File::open(disk_name) 3193 .unwrap() 3194 .seek(SeekFrom::End(0)) 3195 .unwrap() 3196 } 3197 3198 #[test] 3199 fn test_virtio_block_direct_and_firmware() { 3200 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3201 let guest = Guest::new(Box::new(focal)); 3202 3203 // The OS disk must be copied to a location that is not backed by 3204 // tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails 3205 // with EINVAL because tmpfs doesn't support this flag. 
3206 let mut workloads_path = dirs::home_dir().unwrap(); 3207 workloads_path.push("workloads"); 3208 let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap(); 3209 let mut os_path = os_dir.as_path().to_path_buf(); 3210 os_path.push("osdisk.img"); 3211 rate_limited_copy( 3212 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3213 os_path.as_path(), 3214 ) 3215 .expect("copying of OS disk failed"); 3216 3217 let mut child = GuestCommand::new(&guest) 3218 .args(["--cpus", "boot=1"]) 3219 .args(["--memory", "size=512M"]) 3220 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 3221 .args([ 3222 "--disk", 3223 format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(), 3224 "--disk", 3225 format!( 3226 "path={}", 3227 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3228 ) 3229 .as_str(), 3230 ]) 3231 .default_net() 3232 .capture_output() 3233 .spawn() 3234 .unwrap(); 3235 3236 let r = std::panic::catch_unwind(|| { 3237 guest.wait_vm_boot(Some(120)).unwrap(); 3238 }); 3239 3240 let _ = child.kill(); 3241 let output = child.wait_with_output().unwrap(); 3242 3243 handle_child_output(r, &output); 3244 } 3245 3246 #[test] 3247 fn test_vhost_user_net_default() { 3248 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false) 3249 } 3250 3251 #[test] 3252 fn test_vhost_user_net_named_tap() { 3253 test_vhost_user_net( 3254 Some("mytap0"), 3255 2, 3256 &prepare_vhost_user_net_daemon, 3257 false, 3258 false, 3259 ) 3260 } 3261 3262 #[test] 3263 fn test_vhost_user_net_existing_tap() { 3264 test_vhost_user_net( 3265 Some("vunet-tap0"), 3266 2, 3267 &prepare_vhost_user_net_daemon, 3268 false, 3269 false, 3270 ) 3271 } 3272 3273 #[test] 3274 fn test_vhost_user_net_multiple_queues() { 3275 test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false) 3276 } 3277 3278 #[test] 3279 fn test_vhost_user_net_tap_multiple_queues() { 3280 test_vhost_user_net( 3281 Some("vunet-tap1"), 3282 4, 3283 &prepare_vhost_user_net_daemon, 3284 false, 3285 false, 3286 ) 3287 } 3288 3289 #[test] 3290 fn test_vhost_user_net_host_mac() { 3291 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false) 3292 } 3293 3294 #[test] 3295 fn test_vhost_user_net_client_mode() { 3296 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true) 3297 } 3298 3299 #[test] 3300 fn test_vhost_user_blk_default() { 3301 test_vhost_user_blk(2, false, false, Some(&prepare_vubd)) 3302 } 3303 3304 #[test] 3305 fn test_vhost_user_blk_readonly() { 3306 test_vhost_user_blk(1, true, false, Some(&prepare_vubd)) 3307 } 3308 3309 #[test] 3310 fn test_vhost_user_blk_direct() { 3311 test_vhost_user_blk(1, false, true, Some(&prepare_vubd)) 3312 } 3313 3314 #[test] 3315 fn test_boot_from_vhost_user_blk_default() { 3316 test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd)) 3317 } 3318 3319 #[test] 3320 #[cfg(target_arch = "x86_64")] 3321 fn test_split_irqchip() { 3322 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3323 let guest = Guest::new(Box::new(focal)); 3324 3325 let mut child = GuestCommand::new(&guest) 3326 .args(["--cpus", "boot=1"]) 3327 .args(["--memory", "size=512M"]) 3328 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3329 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3330 .default_disks() 3331 .default_net() 3332 .capture_output() 3333 .spawn() 3334 .unwrap(); 3335 3336 let r = std::panic::catch_unwind(|| { 3337 guest.wait_vm_boot(None).unwrap(); 3338 3339 assert_eq!( 3340 guest 3341 
.ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true") 3342 .unwrap() 3343 .trim() 3344 .parse::<u32>() 3345 .unwrap_or(1), 3346 0 3347 ); 3348 assert_eq!( 3349 guest 3350 .ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true") 3351 .unwrap() 3352 .trim() 3353 .parse::<u32>() 3354 .unwrap_or(1), 3355 0 3356 ); 3357 }); 3358 3359 let _ = child.kill(); 3360 let output = child.wait_with_output().unwrap(); 3361 3362 handle_child_output(r, &output); 3363 } 3364 3365 #[test] 3366 #[cfg(target_arch = "x86_64")] 3367 fn test_dmi_serial_number() { 3368 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3369 let guest = Guest::new(Box::new(focal)); 3370 3371 let mut child = GuestCommand::new(&guest) 3372 .args(["--cpus", "boot=1"]) 3373 .args(["--memory", "size=512M"]) 3374 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3375 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3376 .args(["--platform", "serial_number=a=b;c=d"]) 3377 .default_disks() 3378 .default_net() 3379 .capture_output() 3380 .spawn() 3381 .unwrap(); 3382 3383 let r = std::panic::catch_unwind(|| { 3384 guest.wait_vm_boot(None).unwrap(); 3385 3386 assert_eq!( 3387 guest 3388 .ssh_command("sudo cat /sys/class/dmi/id/product_serial") 3389 .unwrap() 3390 .trim(), 3391 "a=b;c=d" 3392 ); 3393 }); 3394 3395 let _ = child.kill(); 3396 let output = child.wait_with_output().unwrap(); 3397 3398 handle_child_output(r, &output); 3399 } 3400 3401 #[test] 3402 #[cfg(target_arch = "x86_64")] 3403 fn test_dmi_uuid() { 3404 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3405 let guest = Guest::new(Box::new(focal)); 3406 3407 let mut child = GuestCommand::new(&guest) 3408 .args(["--cpus", "boot=1"]) 3409 .args(["--memory", "size=512M"]) 3410 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3411 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3412 .args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"]) 3413 .default_disks() 3414 .default_net() 3415 .capture_output() 3416 .spawn() 3417 .unwrap(); 3418 3419 let r = std::panic::catch_unwind(|| { 3420 guest.wait_vm_boot(None).unwrap(); 3421 3422 assert_eq!( 3423 guest 3424 .ssh_command("sudo cat /sys/class/dmi/id/product_uuid") 3425 .unwrap() 3426 .trim(), 3427 "1e8aa28a-435d-4027-87f4-40dceff1fa0a" 3428 ); 3429 }); 3430 3431 let _ = child.kill(); 3432 let output = child.wait_with_output().unwrap(); 3433 3434 handle_child_output(r, &output); 3435 } 3436 3437 #[test] 3438 #[cfg(target_arch = "x86_64")] 3439 fn test_dmi_oem_strings() { 3440 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3441 let guest = Guest::new(Box::new(focal)); 3442 3443 let s1 = "io.systemd.credential:xx=yy"; 3444 let s2 = "This is a test string"; 3445 3446 let oem_strings = format!("oem_strings=[{s1},{s2}]"); 3447 3448 let mut child = GuestCommand::new(&guest) 3449 .args(["--cpus", "boot=1"]) 3450 .args(["--memory", "size=512M"]) 3451 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3452 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3453 .args(["--platform", &oem_strings]) 3454 .default_disks() 3455 .default_net() 3456 .capture_output() 3457 .spawn() 3458 .unwrap(); 3459 3460 let r = std::panic::catch_unwind(|| { 3461 guest.wait_vm_boot(None).unwrap(); 3462 3463 assert_eq!( 3464 guest 3465 .ssh_command("sudo dmidecode --oem-string count") 3466 .unwrap() 3467 .trim(), 3468 "2" 3469 ); 3470 3471 assert_eq!( 3472 guest 3473 .ssh_command("sudo dmidecode --oem-string 1") 3474 .unwrap() 3475 .trim(), 3476 s1 
3477 ); 3478 3479 assert_eq!( 3480 guest 3481 .ssh_command("sudo dmidecode --oem-string 2") 3482 .unwrap() 3483 .trim(), 3484 s2 3485 ); 3486 }); 3487 3488 let _ = child.kill(); 3489 let output = child.wait_with_output().unwrap(); 3490 3491 handle_child_output(r, &output); 3492 } 3493 3494 #[test] 3495 fn test_virtio_fs() { 3496 _test_virtio_fs(&prepare_virtiofsd, false, None) 3497 } 3498 3499 #[test] 3500 fn test_virtio_fs_hotplug() { 3501 _test_virtio_fs(&prepare_virtiofsd, true, None) 3502 } 3503 3504 #[test] 3505 #[cfg(not(feature = "mshv"))] 3506 fn test_virtio_fs_multi_segment_hotplug() { 3507 _test_virtio_fs(&prepare_virtiofsd, true, Some(15)) 3508 } 3509 3510 #[test] 3511 #[cfg(not(feature = "mshv"))] 3512 fn test_virtio_fs_multi_segment() { 3513 _test_virtio_fs(&prepare_virtiofsd, false, Some(15)) 3514 } 3515 3516 #[test] 3517 fn test_virtio_pmem_persist_writes() { 3518 test_virtio_pmem(false, false) 3519 } 3520 3521 #[test] 3522 fn test_virtio_pmem_discard_writes() { 3523 test_virtio_pmem(true, false) 3524 } 3525 3526 #[test] 3527 fn test_virtio_pmem_with_size() { 3528 test_virtio_pmem(true, true) 3529 } 3530 3531 #[test] 3532 fn test_boot_from_virtio_pmem() { 3533 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3534 let guest = Guest::new(Box::new(focal)); 3535 3536 let kernel_path = direct_kernel_boot_path(); 3537 3538 let mut child = GuestCommand::new(&guest) 3539 .args(["--cpus", "boot=1"]) 3540 .args(["--memory", "size=512M"]) 3541 .args(["--kernel", kernel_path.to_str().unwrap()]) 3542 .args([ 3543 "--disk", 3544 format!( 3545 "path={}", 3546 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3547 ) 3548 .as_str(), 3549 ]) 3550 .default_net() 3551 .args([ 3552 "--pmem", 3553 format!( 3554 "file={},size={}", 3555 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3556 fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) 3557 .unwrap() 3558 .len() 3559 ) 3560 .as_str(), 3561 ]) 3562 .args([ 3563 "--cmdline", 3564 DIRECT_KERNEL_BOOT_CMDLINE 3565 .replace("vda1", "pmem0p1") 3566 .as_str(), 3567 ]) 3568 .capture_output() 3569 .spawn() 3570 .unwrap(); 3571 3572 let r = std::panic::catch_unwind(|| { 3573 guest.wait_vm_boot(None).unwrap(); 3574 3575 // Simple checks to validate the VM booted properly 3576 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3577 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3578 }); 3579 3580 let _ = child.kill(); 3581 let output = child.wait_with_output().unwrap(); 3582 3583 handle_child_output(r, &output); 3584 } 3585 3586 #[test] 3587 fn test_multiple_network_interfaces() { 3588 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3589 let guest = Guest::new(Box::new(focal)); 3590 3591 let kernel_path = direct_kernel_boot_path(); 3592 3593 let mut child = GuestCommand::new(&guest) 3594 .args(["--cpus", "boot=1"]) 3595 .args(["--memory", "size=512M"]) 3596 .args(["--kernel", kernel_path.to_str().unwrap()]) 3597 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3598 .default_disks() 3599 .args([ 3600 "--net", 3601 guest.default_net_string().as_str(), 3602 "--net", 3603 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 3604 "--net", 3605 "tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 3606 ]) 3607 .capture_output() 3608 .spawn() 3609 .unwrap(); 3610 3611 let r = std::panic::catch_unwind(|| { 3612 guest.wait_vm_boot(None).unwrap(); 3613 3614 let tap_count = exec_host_command_output("ip link | grep -c mytap1"); 3615 
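            // `mytap1` is named explicitly in the third `--net` argument, so the
            // VMM is expected to have created (or reused) a host tap with that
            // exact name; a single match on the host confirms the named-tap path.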
assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 3616 3617 // 3 network interfaces + default localhost ==> 4 interfaces 3618 assert_eq!( 3619 guest 3620 .ssh_command("ip -o link | wc -l") 3621 .unwrap() 3622 .trim() 3623 .parse::<u32>() 3624 .unwrap_or_default(), 3625 4 3626 ); 3627 }); 3628 3629 let _ = child.kill(); 3630 let output = child.wait_with_output().unwrap(); 3631 3632 handle_child_output(r, &output); 3633 } 3634 3635 #[test] 3636 #[cfg(target_arch = "aarch64")] 3637 fn test_pmu_on() { 3638 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3639 let guest = Guest::new(Box::new(focal)); 3640 let mut child = GuestCommand::new(&guest) 3641 .args(["--cpus", "boot=1"]) 3642 .args(["--memory", "size=512M"]) 3643 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3644 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3645 .default_disks() 3646 .default_net() 3647 .capture_output() 3648 .spawn() 3649 .unwrap(); 3650 3651 let r = std::panic::catch_unwind(|| { 3652 guest.wait_vm_boot(None).unwrap(); 3653 3654 // Test that PMU exists. 3655 assert_eq!( 3656 guest 3657 .ssh_command(GREP_PMU_IRQ_CMD) 3658 .unwrap() 3659 .trim() 3660 .parse::<u32>() 3661 .unwrap_or_default(), 3662 1 3663 ); 3664 }); 3665 3666 let _ = child.kill(); 3667 let output = child.wait_with_output().unwrap(); 3668 3669 handle_child_output(r, &output); 3670 } 3671 3672 #[test] 3673 fn test_serial_off() { 3674 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3675 let guest = Guest::new(Box::new(focal)); 3676 let mut child = GuestCommand::new(&guest) 3677 .args(["--cpus", "boot=1"]) 3678 .args(["--memory", "size=512M"]) 3679 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3680 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3681 .default_disks() 3682 .default_net() 3683 .args(["--serial", "off"]) 3684 .capture_output() 3685 .spawn() 3686 .unwrap(); 3687 3688 let r = std::panic::catch_unwind(|| { 3689 guest.wait_vm_boot(None).unwrap(); 3690 3691 // Test that there is no ttyS0 3692 assert_eq!( 3693 guest 3694 .ssh_command(GREP_SERIAL_IRQ_CMD) 3695 .unwrap() 3696 .trim() 3697 .parse::<u32>() 3698 .unwrap_or(1), 3699 0 3700 ); 3701 }); 3702 3703 let _ = child.kill(); 3704 let output = child.wait_with_output().unwrap(); 3705 3706 handle_child_output(r, &output); 3707 } 3708 3709 #[test] 3710 fn test_serial_null() { 3711 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3712 let guest = Guest::new(Box::new(focal)); 3713 let mut cmd = GuestCommand::new(&guest); 3714 #[cfg(target_arch = "x86_64")] 3715 let console_str: &str = "console=ttyS0"; 3716 #[cfg(target_arch = "aarch64")] 3717 let console_str: &str = "console=ttyAMA0"; 3718 3719 cmd.args(["--cpus", "boot=1"]) 3720 .args(["--memory", "size=512M"]) 3721 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3722 .args([ 3723 "--cmdline", 3724 DIRECT_KERNEL_BOOT_CMDLINE 3725 .replace("console=hvc0 ", console_str) 3726 .as_str(), 3727 ]) 3728 .default_disks() 3729 .default_net() 3730 .args(["--serial", "null"]) 3731 .args(["--console", "off"]) 3732 .capture_output(); 3733 3734 let mut child = cmd.spawn().unwrap(); 3735 3736 let r = std::panic::catch_unwind(|| { 3737 guest.wait_vm_boot(None).unwrap(); 3738 3739 // Test that there is a ttyS0 3740 assert_eq!( 3741 guest 3742 .ssh_command(GREP_SERIAL_IRQ_CMD) 3743 .unwrap() 3744 .trim() 3745 .parse::<u32>() 3746 .unwrap_or_default(), 3747 1 3748 ); 3749 }); 3750 3751 let _ = child.kill(); 3752 let output = 
child.wait_with_output().unwrap();
        handle_child_output(r, &output);

        let r = std::panic::catch_unwind(|| {
            assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING));
        });

        handle_child_output(r, &output);
    }

    #[test]
    fn test_serial_tty() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));

        let kernel_path = direct_kernel_boot_path();

        #[cfg(target_arch = "x86_64")]
        let console_str: &str = "console=ttyS0";
        #[cfg(target_arch = "aarch64")]
        let console_str: &str = "console=ttyAMA0";

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", "boot=1"])
            .args(["--memory", "size=512M"])
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args([
                "--cmdline",
                DIRECT_KERNEL_BOOT_CMDLINE
                    .replace("console=hvc0 ", console_str)
                    .as_str(),
            ])
            .default_disks()
            .default_net()
            .args(["--serial", "tty"])
            .args(["--console", "off"])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Test that there is a ttyS0
            assert_eq!(
                guest
                    .ssh_command(GREP_SERIAL_IRQ_CMD)
                    .unwrap()
                    .trim()
                    .parse::<u32>()
                    .unwrap_or_default(),
                1
            );
        });

        // This sleep is needed to wait for the login prompt
        thread::sleep(std::time::Duration::new(2, 0));

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);

        let r = std::panic::catch_unwind(|| {
            assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING));
        });

        handle_child_output(r, &output);
    }

    #[test]
    fn test_serial_file() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));

        let serial_path = guest.tmp_dir.as_path().join("serial-output");
        #[cfg(target_arch = "x86_64")]
        let console_str: &str = "console=ttyS0";
        #[cfg(target_arch = "aarch64")]
        let console_str: &str = "console=ttyAMA0";

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", "boot=1"])
            .args(["--memory", "size=512M"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args([
                "--cmdline",
                DIRECT_KERNEL_BOOT_CMDLINE
                    .replace("console=hvc0 ", console_str)
                    .as_str(),
            ])
            .default_disks()
            .default_net()
            .args([
                "--serial",
                format!("file={}", serial_path.to_str().unwrap()).as_str(),
            ])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Test that there is a ttyS0
            assert_eq!(
                guest
                    .ssh_command(GREP_SERIAL_IRQ_CMD)
                    .unwrap()
                    .trim()
                    .parse::<u32>()
                    .unwrap_or_default(),
                1
            );

            guest.ssh_command("sudo shutdown -h now").unwrap();
        });

        let _ = child.wait_timeout(std::time::Duration::from_secs(20));
        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);

        let r = std::panic::catch_unwind(|| {
            // Check that the cloud-hypervisor binary actually terminated
            assert!(output.status.success());

            // Do this check after shutdown of the VM as an easy way to
ensure 3879 // all writes are flushed to disk 3880 let mut f = std::fs::File::open(serial_path).unwrap(); 3881 let mut buf = String::new(); 3882 f.read_to_string(&mut buf).unwrap(); 3883 assert!(buf.contains(CONSOLE_TEST_STRING)); 3884 }); 3885 3886 handle_child_output(r, &output); 3887 } 3888 3889 #[test] 3890 fn test_pty_interaction() { 3891 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3892 let guest = Guest::new(Box::new(focal)); 3893 let api_socket = temp_api_path(&guest.tmp_dir); 3894 let serial_option = if cfg!(target_arch = "x86_64") { 3895 " console=ttyS0" 3896 } else { 3897 " console=ttyAMA0" 3898 }; 3899 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 3900 3901 let mut child = GuestCommand::new(&guest) 3902 .args(["--cpus", "boot=1"]) 3903 .args(["--memory", "size=512M"]) 3904 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3905 .args(["--cmdline", &cmdline]) 3906 .default_disks() 3907 .default_net() 3908 .args(["--serial", "null"]) 3909 .args(["--console", "pty"]) 3910 .args(["--api-socket", &api_socket]) 3911 .spawn() 3912 .unwrap(); 3913 3914 let r = std::panic::catch_unwind(|| { 3915 guest.wait_vm_boot(None).unwrap(); 3916 // Get pty fd for console 3917 let console_path = get_pty_path(&api_socket, "console"); 3918 // TODO: Get serial pty test working 3919 let mut cf = std::fs::OpenOptions::new() 3920 .write(true) 3921 .read(true) 3922 .open(console_path) 3923 .unwrap(); 3924 3925 // Some dumb sleeps but we don't want to write 3926 // before the console is up and we don't want 3927 // to try and write the next line before the 3928 // login process is ready. 3929 thread::sleep(std::time::Duration::new(5, 0)); 3930 assert_eq!(cf.write(b"cloud\n").unwrap(), 6); 3931 thread::sleep(std::time::Duration::new(2, 0)); 3932 assert_eq!(cf.write(b"cloud123\n").unwrap(), 9); 3933 thread::sleep(std::time::Duration::new(2, 0)); 3934 assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22); 3935 thread::sleep(std::time::Duration::new(2, 0)); 3936 3937 // read pty and ensure they have a login shell 3938 // some fairly hacky workarounds to avoid looping 3939 // forever in case the channel is blocked getting output 3940 let ptyc = pty_read(cf); 3941 let mut empty = 0; 3942 let mut prev = String::new(); 3943 loop { 3944 thread::sleep(std::time::Duration::new(2, 0)); 3945 match ptyc.try_recv() { 3946 Ok(line) => { 3947 empty = 0; 3948 prev = prev + &line; 3949 if prev.contains("test_pty_console") { 3950 break; 3951 } 3952 } 3953 Err(mpsc::TryRecvError::Empty) => { 3954 empty += 1; 3955 assert!(empty <= 5, "No login on pty"); 3956 } 3957 _ => panic!("No login on pty"), 3958 } 3959 } 3960 3961 guest.ssh_command("sudo shutdown -h now").unwrap(); 3962 }); 3963 3964 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 3965 let _ = child.kill(); 3966 let output = child.wait_with_output().unwrap(); 3967 handle_child_output(r, &output); 3968 3969 let r = std::panic::catch_unwind(|| { 3970 // Check that the cloud-hypervisor binary actually terminated 3971 assert!(output.status.success()) 3972 }); 3973 handle_child_output(r, &output); 3974 } 3975 3976 #[test] 3977 fn test_virtio_console() { 3978 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3979 let guest = Guest::new(Box::new(focal)); 3980 3981 let kernel_path = direct_kernel_boot_path(); 3982 3983 let mut child = GuestCommand::new(&guest) 3984 .args(["--cpus", "boot=1"]) 3985 .args(["--memory", "size=512M"]) 3986 .args(["--kernel", kernel_path.to_str().unwrap()]) 
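        // Note: `--console tty` below attaches the virtio-console to the VMM's
        // stdout, which `capture_output()` records, so the text echoed to
        // /dev/hvc0 later in this test can be asserted on in the child output.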
3987 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3988 .default_disks() 3989 .default_net() 3990 .args(["--console", "tty"]) 3991 .args(["--serial", "null"]) 3992 .capture_output() 3993 .spawn() 3994 .unwrap(); 3995 3996 let text = String::from("On a branch floating down river a cricket, singing."); 3997 let cmd = format!("echo {text} | sudo tee /dev/hvc0"); 3998 3999 let r = std::panic::catch_unwind(|| { 4000 guest.wait_vm_boot(None).unwrap(); 4001 4002 assert!(guest 4003 .does_device_vendor_pair_match("0x1043", "0x1af4") 4004 .unwrap_or_default()); 4005 4006 guest.ssh_command(&cmd).unwrap(); 4007 }); 4008 4009 let _ = child.kill(); 4010 let output = child.wait_with_output().unwrap(); 4011 handle_child_output(r, &output); 4012 4013 let r = std::panic::catch_unwind(|| { 4014 assert!(String::from_utf8_lossy(&output.stdout).contains(&text)); 4015 }); 4016 4017 handle_child_output(r, &output); 4018 } 4019 4020 #[test] 4021 fn test_console_file() { 4022 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4023 let guest = Guest::new(Box::new(focal)); 4024 4025 let console_path = guest.tmp_dir.as_path().join("/tmp/console-output"); 4026 let mut child = GuestCommand::new(&guest) 4027 .args(["--cpus", "boot=1"]) 4028 .args(["--memory", "size=512M"]) 4029 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4030 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4031 .default_disks() 4032 .default_net() 4033 .args([ 4034 "--console", 4035 format!("file={}", console_path.to_str().unwrap()).as_str(), 4036 ]) 4037 .capture_output() 4038 .spawn() 4039 .unwrap(); 4040 4041 guest.wait_vm_boot(None).unwrap(); 4042 4043 guest.ssh_command("sudo shutdown -h now").unwrap(); 4044 4045 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4046 let _ = child.kill(); 4047 let output = child.wait_with_output().unwrap(); 4048 4049 let r = std::panic::catch_unwind(|| { 4050 // Check that the cloud-hypervisor binary actually terminated 4051 assert!(output.status.success()); 4052 4053 // Do this check after shutdown of the VM as an easy way to ensure 4054 // all writes are flushed to disk 4055 let mut f = std::fs::File::open(console_path).unwrap(); 4056 let mut buf = String::new(); 4057 f.read_to_string(&mut buf).unwrap(); 4058 4059 if !buf.contains(CONSOLE_TEST_STRING) { 4060 eprintln!( 4061 "\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ====" 4062 ); 4063 } 4064 assert!(buf.contains(CONSOLE_TEST_STRING)); 4065 }); 4066 4067 handle_child_output(r, &output); 4068 } 4069 4070 #[test] 4071 #[cfg(target_arch = "x86_64")] 4072 #[cfg(not(feature = "mshv"))] 4073 #[ignore = "See #4324"] 4074 // The VFIO integration test starts cloud-hypervisor guest with 3 TAP 4075 // backed networking interfaces, bound through a simple bridge on the host. 4076 // So if the nested cloud-hypervisor succeeds in getting a directly 4077 // assigned interface from its cloud-hypervisor host, we should be able to 4078 // ssh into it, and verify that it's running with the right kernel command 4079 // line (We tag the command line from cloud-hypervisor for that purpose). 4080 // The third device is added to validate that hotplug works correctly since 4081 // it is being added to the L2 VM through hotplugging mechanism. 
4082 // Also, we pass-through a virtio-blk device to the L2 VM to test the 32-bit 4083 // vfio device support 4084 fn test_vfio() { 4085 setup_vfio_network_interfaces(); 4086 4087 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4088 let guest = Guest::new_from_ip_range(Box::new(focal), "172.18", 0); 4089 4090 let mut workload_path = dirs::home_dir().unwrap(); 4091 workload_path.push("workloads"); 4092 4093 let kernel_path = direct_kernel_boot_path(); 4094 4095 let mut vfio_path = workload_path.clone(); 4096 vfio_path.push("vfio"); 4097 4098 let mut cloud_init_vfio_base_path = vfio_path.clone(); 4099 cloud_init_vfio_base_path.push("cloudinit.img"); 4100 4101 // We copy our cloudinit into the vfio mount point, for the nested 4102 // cloud-hypervisor guest to use. 4103 rate_limited_copy( 4104 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 4105 &cloud_init_vfio_base_path, 4106 ) 4107 .expect("copying of cloud-init disk failed"); 4108 4109 let mut vfio_disk_path = workload_path.clone(); 4110 vfio_disk_path.push("vfio.img"); 4111 4112 // Create the vfio disk image 4113 let output = Command::new("mkfs.ext4") 4114 .arg("-d") 4115 .arg(vfio_path.to_str().unwrap()) 4116 .arg(vfio_disk_path.to_str().unwrap()) 4117 .arg("2g") 4118 .output() 4119 .unwrap(); 4120 if !output.status.success() { 4121 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 4122 panic!("mkfs.ext4 command generated an error"); 4123 } 4124 4125 let mut blk_file_path = workload_path; 4126 blk_file_path.push("blk.img"); 4127 4128 let vfio_tap0 = "vfio-tap0"; 4129 let vfio_tap1 = "vfio-tap1"; 4130 let vfio_tap2 = "vfio-tap2"; 4131 let vfio_tap3 = "vfio-tap3"; 4132 4133 let mut child = GuestCommand::new(&guest) 4134 .args(["--cpus", "boot=4"]) 4135 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 4136 .args(["--kernel", kernel_path.to_str().unwrap()]) 4137 .args([ 4138 "--disk", 4139 format!( 4140 "path={}", 4141 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 4142 ) 4143 .as_str(), 4144 "--disk", 4145 format!( 4146 "path={}", 4147 guest.disk_config.disk(DiskType::CloudInit).unwrap() 4148 ) 4149 .as_str(), 4150 "--disk", 4151 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 4152 "--disk", 4153 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 4154 ]) 4155 .args([ 4156 "--cmdline", 4157 format!( 4158 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 4159 ) 4160 .as_str(), 4161 ]) 4162 .args([ 4163 "--net", 4164 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 4165 "--net", 4166 format!( 4167 "tap={},mac={},iommu=on", 4168 vfio_tap1, guest.network.l2_guest_mac1 4169 ) 4170 .as_str(), 4171 "--net", 4172 format!( 4173 "tap={},mac={},iommu=on", 4174 vfio_tap2, guest.network.l2_guest_mac2 4175 ) 4176 .as_str(), 4177 "--net", 4178 format!( 4179 "tap={},mac={},iommu=on", 4180 vfio_tap3, guest.network.l2_guest_mac3 4181 ) 4182 .as_str(), 4183 ]) 4184 .capture_output() 4185 .spawn() 4186 .unwrap(); 4187 4188 thread::sleep(std::time::Duration::new(30, 0)); 4189 4190 let r = std::panic::catch_unwind(|| { 4191 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 4192 thread::sleep(std::time::Duration::new(120, 0)); 4193 4194 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 4195 // added to its kernel command line. 4196 // Let's ssh into it and verify that it's there. 
If it is it means 4197 // we're in the right guest (The L2 one) because the QEMU L1 guest 4198 // does not have this command line tag. 4199 assert_eq!( 4200 guest 4201 .ssh_command_l2_1("grep -c VFIOTAG /proc/cmdline") 4202 .unwrap() 4203 .trim() 4204 .parse::<u32>() 4205 .unwrap_or_default(), 4206 1 4207 ); 4208 4209 // Let's also verify from the second virtio-net device passed to 4210 // the L2 VM. 4211 assert_eq!( 4212 guest 4213 .ssh_command_l2_2("grep -c VFIOTAG /proc/cmdline") 4214 .unwrap() 4215 .trim() 4216 .parse::<u32>() 4217 .unwrap_or_default(), 4218 1 4219 ); 4220 4221 // Check the amount of PCI devices appearing in L2 VM. 4222 assert_eq!( 4223 guest 4224 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4225 .unwrap() 4226 .trim() 4227 .parse::<u32>() 4228 .unwrap_or_default(), 4229 8, 4230 ); 4231 4232 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 4233 assert_eq!( 4234 guest 4235 .ssh_command_l2_1("lsblk | grep vdc | grep -c 16M") 4236 .unwrap() 4237 .trim() 4238 .parse::<u32>() 4239 .unwrap_or_default(), 4240 1 4241 ); 4242 4243 // Hotplug an extra virtio-net device through L2 VM. 4244 guest 4245 .ssh_command_l1( 4246 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 4247 ) 4248 .unwrap(); 4249 guest 4250 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 4251 .unwrap(); 4252 let vfio_hotplug_output = guest 4253 .ssh_command_l1( 4254 "sudo /mnt/ch-remote \ 4255 --api-socket /tmp/ch_api.sock \ 4256 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 4257 ) 4258 .unwrap(); 4259 assert!(vfio_hotplug_output.contains("{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}")); 4260 4261 thread::sleep(std::time::Duration::new(10, 0)); 4262 4263 // Let's also verify from the third virtio-net device passed to 4264 // the L2 VM. This third device has been hotplugged through the L2 4265 // VM, so this is our way to validate hotplug works for VFIO PCI. 4266 assert_eq!( 4267 guest 4268 .ssh_command_l2_3("grep -c VFIOTAG /proc/cmdline") 4269 .unwrap() 4270 .trim() 4271 .parse::<u32>() 4272 .unwrap_or_default(), 4273 1 4274 ); 4275 4276 // Check the amount of PCI devices appearing in L2 VM. 4277 // There should be one more device than before, raising the count 4278 // up to 9 PCI devices. 4279 assert_eq!( 4280 guest 4281 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4282 .unwrap() 4283 .trim() 4284 .parse::<u32>() 4285 .unwrap_or_default(), 4286 9, 4287 ); 4288 4289 // Let's now verify that we can correctly remove the virtio-net 4290 // device through the "remove-device" command responsible for 4291 // unplugging VFIO devices. 4292 guest 4293 .ssh_command_l1( 4294 "sudo /mnt/ch-remote \ 4295 --api-socket /tmp/ch_api.sock \ 4296 remove-device vfio123", 4297 ) 4298 .unwrap(); 4299 thread::sleep(std::time::Duration::new(10, 0)); 4300 4301 // Check the amount of PCI devices appearing in L2 VM is back down 4302 // to 8 devices. 4303 assert_eq!( 4304 guest 4305 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4306 .unwrap() 4307 .trim() 4308 .parse::<u32>() 4309 .unwrap_or_default(), 4310 8, 4311 ); 4312 4313 // Perform memory hotplug in L2 and validate the memory is showing 4314 // up as expected. In order to check, we will use the virtio-net 4315 // device already passed through L2 as a VFIO device, this will 4316 // verify that VFIO devices are functional with memory hotplug. 
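        // The L2 guest has to auto-online hotplugged memory blocks (hence the
        // write to `auto_online_blocks` below); otherwise the RAM added with
        // `resize --memory` would stay offline and the total-memory check
        // would not change.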
4317 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 4318 guest 4319 .ssh_command_l2_1( 4320 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 4321 ) 4322 .unwrap(); 4323 guest 4324 .ssh_command_l1( 4325 "sudo /mnt/ch-remote \ 4326 --api-socket /tmp/ch_api.sock \ 4327 resize --memory 1073741824", 4328 ) 4329 .unwrap(); 4330 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 4331 }); 4332 4333 let _ = child.kill(); 4334 let output = child.wait_with_output().unwrap(); 4335 4336 cleanup_vfio_network_interfaces(); 4337 4338 handle_child_output(r, &output); 4339 } 4340 4341 #[test] 4342 fn test_direct_kernel_boot_noacpi() { 4343 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4344 let guest = Guest::new(Box::new(focal)); 4345 4346 let kernel_path = direct_kernel_boot_path(); 4347 4348 let mut child = GuestCommand::new(&guest) 4349 .args(["--cpus", "boot=1"]) 4350 .args(["--memory", "size=512M"]) 4351 .args(["--kernel", kernel_path.to_str().unwrap()]) 4352 .args([ 4353 "--cmdline", 4354 format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(), 4355 ]) 4356 .default_disks() 4357 .default_net() 4358 .capture_output() 4359 .spawn() 4360 .unwrap(); 4361 4362 let r = std::panic::catch_unwind(|| { 4363 guest.wait_vm_boot(None).unwrap(); 4364 4365 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 4366 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4367 }); 4368 4369 let _ = child.kill(); 4370 let output = child.wait_with_output().unwrap(); 4371 4372 handle_child_output(r, &output); 4373 } 4374 4375 #[test] 4376 fn test_virtio_vsock() { 4377 _test_virtio_vsock(false) 4378 } 4379 4380 #[test] 4381 fn test_virtio_vsock_hotplug() { 4382 _test_virtio_vsock(true); 4383 } 4384 4385 #[test] 4386 fn test_api_http_shutdown() { 4387 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4388 let guest = Guest::new(Box::new(focal)); 4389 4390 _test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest) 4391 } 4392 4393 #[test] 4394 fn test_api_http_delete() { 4395 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4396 let guest = Guest::new(Box::new(focal)); 4397 4398 _test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest); 4399 } 4400 4401 #[test] 4402 fn test_api_http_pause_resume() { 4403 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4404 let guest = Guest::new(Box::new(focal)); 4405 4406 _test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest) 4407 } 4408 4409 #[test] 4410 fn test_api_http_create_boot() { 4411 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4412 let guest = Guest::new(Box::new(focal)); 4413 4414 _test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest) 4415 } 4416 4417 #[test] 4418 fn test_virtio_iommu() { 4419 _test_virtio_iommu(cfg!(target_arch = "x86_64")) 4420 } 4421 4422 #[test] 4423 // We cannot force the software running in the guest to reprogram the BAR 4424 // with some different addresses, but we have a reliable way of testing it 4425 // with a standard Linux kernel. 4426 // By removing a device from the PCI tree, and then rescanning the tree, 4427 // Linux consistently chooses to reorganize the PCI device BARs to other 4428 // locations in the guest address space. 4429 // This test creates a dedicated PCI network device to be checked as being 4430 // properly probed first, then removing it, and adding it again by doing a 4431 // rescan. 
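// For reference, each line of /sys/bus/pci/devices/<bdf>/resource is
// "start end flags" for one BAR region, so `awk '{print $1; exit}'` picks the
// start address of the first region, e.g. (illustrative values only):
//
//   0x00000000fd000000 0x00000000fd003fff 0x0000000000040200
//
// The test records that address before and after the remove/rescan cycle and
// expects it to differ.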
4432 fn test_pci_bar_reprogramming() { 4433 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4434 let guest = Guest::new(Box::new(focal)); 4435 4436 #[cfg(target_arch = "x86_64")] 4437 let kernel_path = direct_kernel_boot_path(); 4438 #[cfg(target_arch = "aarch64")] 4439 let kernel_path = edk2_path(); 4440 4441 let mut child = GuestCommand::new(&guest) 4442 .args(["--cpus", "boot=1"]) 4443 .args(["--memory", "size=512M"]) 4444 .args(["--kernel", kernel_path.to_str().unwrap()]) 4445 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4446 .default_disks() 4447 .args([ 4448 "--net", 4449 guest.default_net_string().as_str(), 4450 "--net", 4451 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 4452 ]) 4453 .capture_output() 4454 .spawn() 4455 .unwrap(); 4456 4457 let r = std::panic::catch_unwind(|| { 4458 guest.wait_vm_boot(None).unwrap(); 4459 4460 // 2 network interfaces + default localhost ==> 3 interfaces 4461 assert_eq!( 4462 guest 4463 .ssh_command("ip -o link | wc -l") 4464 .unwrap() 4465 .trim() 4466 .parse::<u32>() 4467 .unwrap_or_default(), 4468 3 4469 ); 4470 4471 let init_bar_addr = guest 4472 .ssh_command( 4473 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource", 4474 ) 4475 .unwrap(); 4476 4477 // Remove the PCI device 4478 guest 4479 .ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove") 4480 .unwrap(); 4481 4482 // Only 1 network interface left + default localhost ==> 2 interfaces 4483 assert_eq!( 4484 guest 4485 .ssh_command("ip -o link | wc -l") 4486 .unwrap() 4487 .trim() 4488 .parse::<u32>() 4489 .unwrap_or_default(), 4490 2 4491 ); 4492 4493 // Remove the PCI device 4494 guest 4495 .ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan") 4496 .unwrap(); 4497 4498 // Back to 2 network interface + default localhost ==> 3 interfaces 4499 assert_eq!( 4500 guest 4501 .ssh_command("ip -o link | wc -l") 4502 .unwrap() 4503 .trim() 4504 .parse::<u32>() 4505 .unwrap_or_default(), 4506 3 4507 ); 4508 4509 let new_bar_addr = guest 4510 .ssh_command( 4511 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource", 4512 ) 4513 .unwrap(); 4514 4515 // Let's compare the BAR addresses for our virtio-net device. 4516 // They should be different as we expect the BAR reprogramming 4517 // to have happened. 
4518 assert_ne!(init_bar_addr, new_bar_addr); 4519 }); 4520 4521 let _ = child.kill(); 4522 let output = child.wait_with_output().unwrap(); 4523 4524 handle_child_output(r, &output); 4525 } 4526 4527 #[test] 4528 fn test_memory_mergeable_off() { 4529 test_memory_mergeable(false) 4530 } 4531 4532 #[test] 4533 #[cfg(target_arch = "x86_64")] 4534 fn test_cpu_hotplug() { 4535 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4536 let guest = Guest::new(Box::new(focal)); 4537 let api_socket = temp_api_path(&guest.tmp_dir); 4538 4539 let kernel_path = direct_kernel_boot_path(); 4540 4541 let mut child = GuestCommand::new(&guest) 4542 .args(["--cpus", "boot=2,max=4"]) 4543 .args(["--memory", "size=512M"]) 4544 .args(["--kernel", kernel_path.to_str().unwrap()]) 4545 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4546 .default_disks() 4547 .default_net() 4548 .args(["--api-socket", &api_socket]) 4549 .capture_output() 4550 .spawn() 4551 .unwrap(); 4552 4553 let r = std::panic::catch_unwind(|| { 4554 guest.wait_vm_boot(None).unwrap(); 4555 4556 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4557 4558 // Resize the VM 4559 let desired_vcpus = 4; 4560 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4561 4562 guest 4563 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4564 .unwrap(); 4565 guest 4566 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4567 .unwrap(); 4568 thread::sleep(std::time::Duration::new(10, 0)); 4569 assert_eq!( 4570 guest.get_cpu_count().unwrap_or_default(), 4571 u32::from(desired_vcpus) 4572 ); 4573 4574 guest.reboot_linux(0, None); 4575 4576 assert_eq!( 4577 guest.get_cpu_count().unwrap_or_default(), 4578 u32::from(desired_vcpus) 4579 ); 4580 4581 // Resize the VM 4582 let desired_vcpus = 2; 4583 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4584 4585 thread::sleep(std::time::Duration::new(10, 0)); 4586 assert_eq!( 4587 guest.get_cpu_count().unwrap_or_default(), 4588 u32::from(desired_vcpus) 4589 ); 4590 4591 // Resize the VM back up to 4 4592 let desired_vcpus = 4; 4593 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4594 4595 guest 4596 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4597 .unwrap(); 4598 guest 4599 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4600 .unwrap(); 4601 thread::sleep(std::time::Duration::new(10, 0)); 4602 assert_eq!( 4603 guest.get_cpu_count().unwrap_or_default(), 4604 u32::from(desired_vcpus) 4605 ); 4606 }); 4607 4608 let _ = child.kill(); 4609 let output = child.wait_with_output().unwrap(); 4610 4611 handle_child_output(r, &output); 4612 } 4613 4614 #[test] 4615 fn test_memory_hotplug() { 4616 #[cfg(target_arch = "aarch64")] 4617 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 4618 #[cfg(target_arch = "x86_64")] 4619 let focal_image = FOCAL_IMAGE_NAME.to_string(); 4620 let focal = UbuntuDiskConfig::new(focal_image); 4621 let guest = Guest::new(Box::new(focal)); 4622 let api_socket = temp_api_path(&guest.tmp_dir); 4623 4624 #[cfg(target_arch = "aarch64")] 4625 let kernel_path = edk2_path(); 4626 #[cfg(target_arch = "x86_64")] 4627 let kernel_path = direct_kernel_boot_path(); 4628 4629 let mut child = GuestCommand::new(&guest) 4630 .args(["--cpus", "boot=2,max=4"]) 4631 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4632 .args(["--kernel", kernel_path.to_str().unwrap()]) 4633 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4634 .default_disks() 4635 .default_net() 
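        // A balloon device is created with size=0 so the test can later grow
        // it through the resize API to take RAM away from the guest, and then
        // shrink it back to zero to hand the RAM back.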
4636 .args(["--balloon", "size=0"]) 4637 .args(["--api-socket", &api_socket]) 4638 .capture_output() 4639 .spawn() 4640 .unwrap(); 4641 4642 let r = std::panic::catch_unwind(|| { 4643 guest.wait_vm_boot(None).unwrap(); 4644 4645 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4646 4647 guest.enable_memory_hotplug(); 4648 4649 // Add RAM to the VM 4650 let desired_ram = 1024 << 20; 4651 resize_command(&api_socket, None, Some(desired_ram), None, None); 4652 4653 thread::sleep(std::time::Duration::new(10, 0)); 4654 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4655 4656 // Use balloon to remove RAM from the VM 4657 let desired_balloon = 512 << 20; 4658 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4659 4660 thread::sleep(std::time::Duration::new(10, 0)); 4661 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4662 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4663 4664 guest.reboot_linux(0, None); 4665 4666 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4667 4668 // Use balloon add RAM to the VM 4669 let desired_balloon = 0; 4670 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4671 4672 thread::sleep(std::time::Duration::new(10, 0)); 4673 4674 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4675 4676 guest.enable_memory_hotplug(); 4677 4678 // Add RAM to the VM 4679 let desired_ram = 2048 << 20; 4680 resize_command(&api_socket, None, Some(desired_ram), None, None); 4681 4682 thread::sleep(std::time::Duration::new(10, 0)); 4683 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4684 4685 // Remove RAM to the VM (only applies after reboot) 4686 let desired_ram = 1024 << 20; 4687 resize_command(&api_socket, None, Some(desired_ram), None, None); 4688 4689 guest.reboot_linux(1, None); 4690 4691 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4692 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4693 }); 4694 4695 let _ = child.kill(); 4696 let output = child.wait_with_output().unwrap(); 4697 4698 handle_child_output(r, &output); 4699 } 4700 4701 #[test] 4702 #[cfg(not(feature = "mshv"))] 4703 fn test_virtio_mem() { 4704 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4705 let guest = Guest::new(Box::new(focal)); 4706 let api_socket = temp_api_path(&guest.tmp_dir); 4707 4708 let kernel_path = direct_kernel_boot_path(); 4709 4710 let mut child = GuestCommand::new(&guest) 4711 .args(["--cpus", "boot=2,max=4"]) 4712 .args([ 4713 "--memory", 4714 "size=512M,hotplug_method=virtio-mem,hotplug_size=8192M", 4715 ]) 4716 .args(["--kernel", kernel_path.to_str().unwrap()]) 4717 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4718 .default_disks() 4719 .default_net() 4720 .args(["--api-socket", &api_socket]) 4721 .capture_output() 4722 .spawn() 4723 .unwrap(); 4724 4725 let r = std::panic::catch_unwind(|| { 4726 guest.wait_vm_boot(None).unwrap(); 4727 4728 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4729 4730 guest.enable_memory_hotplug(); 4731 4732 // Add RAM to the VM 4733 let desired_ram = 1024 << 20; 4734 resize_command(&api_socket, None, Some(desired_ram), None, None); 4735 4736 thread::sleep(std::time::Duration::new(10, 0)); 4737 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4738 4739 // Add RAM to the VM 4740 let desired_ram = 2048 << 20; 4741 resize_command(&api_socket, None, Some(desired_ram), None, None); 4742 4743 
thread::sleep(std::time::Duration::new(10, 0)); 4744 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4745 4746 // Remove RAM from the VM 4747 let desired_ram = 1024 << 20; 4748 resize_command(&api_socket, None, Some(desired_ram), None, None); 4749 4750 thread::sleep(std::time::Duration::new(10, 0)); 4751 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4752 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4753 4754 guest.reboot_linux(0, None); 4755 4756 // Check the amount of memory after reboot is 1GiB 4757 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4758 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4759 4760 // Check we can still resize to 512MiB 4761 let desired_ram = 512 << 20; 4762 resize_command(&api_socket, None, Some(desired_ram), None, None); 4763 thread::sleep(std::time::Duration::new(10, 0)); 4764 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4765 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4766 }); 4767 4768 let _ = child.kill(); 4769 let output = child.wait_with_output().unwrap(); 4770 4771 handle_child_output(r, &output); 4772 } 4773 4774 #[test] 4775 #[cfg(target_arch = "x86_64")] 4776 #[cfg(not(feature = "mshv"))] 4777 // Test both vCPU and memory resizing together 4778 fn test_resize() { 4779 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4780 let guest = Guest::new(Box::new(focal)); 4781 let api_socket = temp_api_path(&guest.tmp_dir); 4782 4783 let kernel_path = direct_kernel_boot_path(); 4784 4785 let mut child = GuestCommand::new(&guest) 4786 .args(["--cpus", "boot=2,max=4"]) 4787 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4788 .args(["--kernel", kernel_path.to_str().unwrap()]) 4789 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4790 .default_disks() 4791 .default_net() 4792 .args(["--api-socket", &api_socket]) 4793 .capture_output() 4794 .spawn() 4795 .unwrap(); 4796 4797 let r = std::panic::catch_unwind(|| { 4798 guest.wait_vm_boot(None).unwrap(); 4799 4800 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4801 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4802 4803 guest.enable_memory_hotplug(); 4804 4805 // Resize the VM 4806 let desired_vcpus = 4; 4807 let desired_ram = 1024 << 20; 4808 resize_command( 4809 &api_socket, 4810 Some(desired_vcpus), 4811 Some(desired_ram), 4812 None, 4813 None, 4814 ); 4815 4816 guest 4817 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4818 .unwrap(); 4819 guest 4820 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4821 .unwrap(); 4822 thread::sleep(std::time::Duration::new(10, 0)); 4823 assert_eq!( 4824 guest.get_cpu_count().unwrap_or_default(), 4825 u32::from(desired_vcpus) 4826 ); 4827 4828 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4829 }); 4830 4831 let _ = child.kill(); 4832 let output = child.wait_with_output().unwrap(); 4833 4834 handle_child_output(r, &output); 4835 } 4836 4837 #[test] 4838 fn test_memory_overhead() { 4839 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4840 let guest = Guest::new(Box::new(focal)); 4841 4842 let kernel_path = direct_kernel_boot_path(); 4843 4844 let guest_memory_size_kb = 512 * 1024; 4845 4846 let mut child = GuestCommand::new(&guest) 4847 .args(["--cpus", "boot=1"]) 4848 .args(["--memory", format!("size={guest_memory_size_kb}K").as_str()]) 4849 .args(["--kernel", kernel_path.to_str().unwrap()]) 4850 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE]) 4851 .default_disks() 4852 .capture_output() 4853 .spawn() 4854 .unwrap(); 4855 4856 thread::sleep(std::time::Duration::new(20, 0)); 4857 4858 let r = std::panic::catch_unwind(|| { 4859 let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb); 4860 eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}"); 4861 assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB); 4862 }); 4863 4864 let _ = child.kill(); 4865 let output = child.wait_with_output().unwrap(); 4866 4867 handle_child_output(r, &output); 4868 } 4869 4870 #[test] 4871 fn test_disk_hotplug() { 4872 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4873 let guest = Guest::new(Box::new(focal)); 4874 4875 #[cfg(target_arch = "x86_64")] 4876 let kernel_path = direct_kernel_boot_path(); 4877 #[cfg(target_arch = "aarch64")] 4878 let kernel_path = edk2_path(); 4879 4880 let api_socket = temp_api_path(&guest.tmp_dir); 4881 4882 let mut child = GuestCommand::new(&guest) 4883 .args(["--api-socket", &api_socket]) 4884 .args(["--cpus", "boot=1"]) 4885 .args(["--memory", "size=512M"]) 4886 .args(["--kernel", kernel_path.to_str().unwrap()]) 4887 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4888 .default_disks() 4889 .default_net() 4890 .capture_output() 4891 .spawn() 4892 .unwrap(); 4893 4894 let r = std::panic::catch_unwind(|| { 4895 guest.wait_vm_boot(None).unwrap(); 4896 4897 // Check /dev/vdc is not there 4898 assert_eq!( 4899 guest 4900 .ssh_command("lsblk | grep -c vdc.*16M || true") 4901 .unwrap() 4902 .trim() 4903 .parse::<u32>() 4904 .unwrap_or(1), 4905 0 4906 ); 4907 4908 // Now let's add the extra disk. 4909 let mut blk_file_path = dirs::home_dir().unwrap(); 4910 blk_file_path.push("workloads"); 4911 blk_file_path.push("blk.img"); 4912 let (cmd_success, cmd_output) = remote_command_w_output( 4913 &api_socket, 4914 "add-disk", 4915 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 4916 ); 4917 assert!(cmd_success); 4918 assert!(String::from_utf8_lossy(&cmd_output) 4919 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 4920 4921 thread::sleep(std::time::Duration::new(10, 0)); 4922 4923 // Check that /dev/vdc exists and the block size is 16M. 4924 assert_eq!( 4925 guest 4926 .ssh_command("lsblk | grep vdc | grep -c 16M") 4927 .unwrap() 4928 .trim() 4929 .parse::<u32>() 4930 .unwrap_or_default(), 4931 1 4932 ); 4933 // And check the block device can be read. 4934 guest 4935 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 4936 .unwrap(); 4937 4938 // Let's remove it the extra disk. 4939 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 4940 thread::sleep(std::time::Duration::new(5, 0)); 4941 // And check /dev/vdc is not there 4942 assert_eq!( 4943 guest 4944 .ssh_command("lsblk | grep -c vdc.*16M || true") 4945 .unwrap() 4946 .trim() 4947 .parse::<u32>() 4948 .unwrap_or(1), 4949 0 4950 ); 4951 4952 // And add it back to validate unplug did work correctly. 4953 let (cmd_success, cmd_output) = remote_command_w_output( 4954 &api_socket, 4955 "add-disk", 4956 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 4957 ); 4958 assert!(cmd_success); 4959 assert!(String::from_utf8_lossy(&cmd_output) 4960 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 4961 4962 thread::sleep(std::time::Duration::new(10, 0)); 4963 4964 // Check that /dev/vdc exists and the block size is 16M. 
4965 assert_eq!( 4966 guest 4967 .ssh_command("lsblk | grep vdc | grep -c 16M") 4968 .unwrap() 4969 .trim() 4970 .parse::<u32>() 4971 .unwrap_or_default(), 4972 1 4973 ); 4974 // And check the block device can be read. 4975 guest 4976 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 4977 .unwrap(); 4978 4979 // Reboot the VM. 4980 guest.reboot_linux(0, None); 4981 4982 // Check still there after reboot 4983 assert_eq!( 4984 guest 4985 .ssh_command("lsblk | grep vdc | grep -c 16M") 4986 .unwrap() 4987 .trim() 4988 .parse::<u32>() 4989 .unwrap_or_default(), 4990 1 4991 ); 4992 4993 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 4994 4995 thread::sleep(std::time::Duration::new(20, 0)); 4996 4997 // Check device has gone away 4998 assert_eq!( 4999 guest 5000 .ssh_command("lsblk | grep -c vdc.*16M || true") 5001 .unwrap() 5002 .trim() 5003 .parse::<u32>() 5004 .unwrap_or(1), 5005 0 5006 ); 5007 5008 guest.reboot_linux(1, None); 5009 5010 // Check device still absent 5011 assert_eq!( 5012 guest 5013 .ssh_command("lsblk | grep -c vdc.*16M || true") 5014 .unwrap() 5015 .trim() 5016 .parse::<u32>() 5017 .unwrap_or(1), 5018 0 5019 ); 5020 }); 5021 5022 let _ = child.kill(); 5023 let output = child.wait_with_output().unwrap(); 5024 5025 handle_child_output(r, &output); 5026 } 5027 5028 fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String { 5029 const LOOP_CONFIGURE: u64 = 0x4c0a; 5030 const LOOP_CTL_GET_FREE: u64 = 0x4c82; 5031 const LOOP_CTL_PATH: &str = "/dev/loop-control"; 5032 const LOOP_DEVICE_PREFIX: &str = "/dev/loop"; 5033 5034 #[repr(C)] 5035 struct LoopInfo64 { 5036 lo_device: u64, 5037 lo_inode: u64, 5038 lo_rdevice: u64, 5039 lo_offset: u64, 5040 lo_sizelimit: u64, 5041 lo_number: u32, 5042 lo_encrypt_type: u32, 5043 lo_encrypt_key_size: u32, 5044 lo_flags: u32, 5045 lo_file_name: [u8; 64], 5046 lo_crypt_name: [u8; 64], 5047 lo_encrypt_key: [u8; 32], 5048 lo_init: [u64; 2], 5049 } 5050 5051 impl Default for LoopInfo64 { 5052 fn default() -> Self { 5053 LoopInfo64 { 5054 lo_device: 0, 5055 lo_inode: 0, 5056 lo_rdevice: 0, 5057 lo_offset: 0, 5058 lo_sizelimit: 0, 5059 lo_number: 0, 5060 lo_encrypt_type: 0, 5061 lo_encrypt_key_size: 0, 5062 lo_flags: 0, 5063 lo_file_name: [0; 64], 5064 lo_crypt_name: [0; 64], 5065 lo_encrypt_key: [0; 32], 5066 lo_init: [0; 2], 5067 } 5068 } 5069 } 5070 5071 #[derive(Default)] 5072 #[repr(C)] 5073 struct LoopConfig { 5074 fd: u32, 5075 block_size: u32, 5076 info: LoopInfo64, 5077 _reserved: [u64; 8], 5078 } 5079 5080 // Open loop-control device 5081 let loop_ctl_file = OpenOptions::new() 5082 .read(true) 5083 .write(true) 5084 .open(LOOP_CTL_PATH) 5085 .unwrap(); 5086 5087 // Request a free loop device 5088 let loop_device_number = 5089 unsafe { libc::ioctl(loop_ctl_file.as_raw_fd(), LOOP_CTL_GET_FREE as _) }; 5090 5091 if loop_device_number < 0 { 5092 panic!("Couldn't find a free loop device"); 5093 } 5094 5095 // Create loop device path 5096 let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}"); 5097 5098 // Open loop device 5099 let loop_device_file = OpenOptions::new() 5100 .read(true) 5101 .write(true) 5102 .open(&loop_device_path) 5103 .unwrap(); 5104 5105 // Open backing file 5106 let backing_file = OpenOptions::new() 5107 .read(true) 5108 .write(true) 5109 .open(backing_file_path) 5110 .unwrap(); 5111 5112 let loop_config = LoopConfig { 5113 fd: backing_file.as_raw_fd() as u32, 5114 block_size, 5115 ..Default::default() 5116 }; 5117 5118 
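    // A rough, untested CLI equivalent of what this helper does via the
    // LOOP_CTL_GET_FREE / LOOP_CONFIGURE ioctls would be:
    //
    //   losetup --show --find --sector-size 4096 /path/to/backing.img
    //
    // Retrying below guards against the window where another process grabs
    // the free loop device between the "get free" and "configure" steps.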
for i in 0..num_retries { 5119 let ret = unsafe { 5120 libc::ioctl( 5121 loop_device_file.as_raw_fd(), 5122 LOOP_CONFIGURE as _, 5123 &loop_config, 5124 ) 5125 }; 5126 if ret != 0 { 5127 if i < num_retries - 1 { 5128 println!( 5129 "Iteration {}: Failed to configure the loop device {}: {}", 5130 i, 5131 loop_device_path, 5132 std::io::Error::last_os_error() 5133 ); 5134 } else { 5135 panic!( 5136 "Failed {} times trying to configure the loop device {}: {}", 5137 num_retries, 5138 loop_device_path, 5139 std::io::Error::last_os_error() 5140 ); 5141 } 5142 } else { 5143 break; 5144 } 5145 5146 // Wait for a bit before retrying 5147 thread::sleep(std::time::Duration::new(5, 0)); 5148 } 5149 5150 loop_device_path 5151 } 5152 5153 #[test] 5154 fn test_virtio_block_topology() { 5155 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5156 let guest = Guest::new(Box::new(focal)); 5157 5158 let kernel_path = direct_kernel_boot_path(); 5159 let test_disk_path = guest.tmp_dir.as_path().join("test.img"); 5160 5161 let output = exec_host_command_output( 5162 format!( 5163 "qemu-img create -f raw {} 16M", 5164 test_disk_path.to_str().unwrap() 5165 ) 5166 .as_str(), 5167 ); 5168 if !output.status.success() { 5169 let stdout = String::from_utf8_lossy(&output.stdout); 5170 let stderr = String::from_utf8_lossy(&output.stderr); 5171 panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}"); 5172 } 5173 5174 let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5); 5175 5176 let mut child = GuestCommand::new(&guest) 5177 .args(["--cpus", "boot=1"]) 5178 .args(["--memory", "size=512M"]) 5179 .args(["--kernel", kernel_path.to_str().unwrap()]) 5180 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5181 .args([ 5182 "--disk", 5183 format!( 5184 "path={}", 5185 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5186 ) 5187 .as_str(), 5188 "--disk", 5189 format!( 5190 "path={}", 5191 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5192 ) 5193 .as_str(), 5194 "--disk", 5195 format!("path={}", &loop_dev).as_str(), 5196 ]) 5197 .default_net() 5198 .capture_output() 5199 .spawn() 5200 .unwrap(); 5201 5202 let r = std::panic::catch_unwind(|| { 5203 guest.wait_vm_boot(None).unwrap(); 5204 5205 // MIN-IO column 5206 assert_eq!( 5207 guest 5208 .ssh_command("lsblk -t| grep vdc | awk '{print $3}'") 5209 .unwrap() 5210 .trim() 5211 .parse::<u32>() 5212 .unwrap_or_default(), 5213 4096 5214 ); 5215 // PHY-SEC column 5216 assert_eq!( 5217 guest 5218 .ssh_command("lsblk -t| grep vdc | awk '{print $5}'") 5219 .unwrap() 5220 .trim() 5221 .parse::<u32>() 5222 .unwrap_or_default(), 5223 4096 5224 ); 5225 // LOG-SEC column 5226 assert_eq!( 5227 guest 5228 .ssh_command("lsblk -t| grep vdc | awk '{print $6}'") 5229 .unwrap() 5230 .trim() 5231 .parse::<u32>() 5232 .unwrap_or_default(), 5233 4096 5234 ); 5235 }); 5236 5237 let _ = child.kill(); 5238 let output = child.wait_with_output().unwrap(); 5239 5240 handle_child_output(r, &output); 5241 5242 Command::new("losetup") 5243 .args(["-d", &loop_dev]) 5244 .output() 5245 .expect("loop device not found"); 5246 } 5247 5248 #[test] 5249 fn test_virtio_balloon_deflate_on_oom() { 5250 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5251 let guest = Guest::new(Box::new(focal)); 5252 5253 let kernel_path = direct_kernel_boot_path(); 5254 5255 let api_socket = temp_api_path(&guest.tmp_dir); 5256 5257 //Let's start a 4G guest with balloon occupied 2G memory 5258 let mut child = GuestCommand::new(&guest) 5259 
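        // `deflate_on_oom=on` lets the guest balloon driver give pages back to
        // the guest automatically when it hits an out-of-memory condition,
        // which is what the sysrq-triggered OOM below is expected to exercise.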
.args(["--api-socket", &api_socket]) 5260 .args(["--cpus", "boot=1"]) 5261 .args(["--memory", "size=4G"]) 5262 .args(["--kernel", kernel_path.to_str().unwrap()]) 5263 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5264 .args(["--balloon", "size=2G,deflate_on_oom=on"]) 5265 .default_disks() 5266 .default_net() 5267 .capture_output() 5268 .spawn() 5269 .unwrap(); 5270 5271 let r = std::panic::catch_unwind(|| { 5272 guest.wait_vm_boot(None).unwrap(); 5273 5274 // Wait for balloon memory's initialization and check its size. 5275 // The virtio-balloon driver might take a few seconds to report the 5276 // balloon effective size back to the VMM. 5277 thread::sleep(std::time::Duration::new(20, 0)); 5278 5279 let orig_balloon = balloon_size(&api_socket); 5280 println!("The original balloon memory size is {orig_balloon} bytes"); 5281 assert!(orig_balloon == 2147483648); 5282 5283 // Two steps to verify if the 'deflate_on_oom' parameter works. 5284 // 1st: run a command to trigger an OOM in the guest. 5285 guest 5286 .ssh_command("echo f | sudo tee /proc/sysrq-trigger") 5287 .unwrap(); 5288 5289 // Give some time for the OOM to happen in the guest and be reported 5290 // back to the host. 5291 thread::sleep(std::time::Duration::new(20, 0)); 5292 5293 // 2nd: check balloon_mem's value to verify balloon has been automatically deflated 5294 let deflated_balloon = balloon_size(&api_socket); 5295 println!("After deflating, balloon memory size is {deflated_balloon} bytes"); 5296 // Verify the balloon size deflated 5297 assert!(deflated_balloon < 2147483648); 5298 }); 5299 5300 let _ = child.kill(); 5301 let output = child.wait_with_output().unwrap(); 5302 5303 handle_child_output(r, &output); 5304 } 5305 5306 #[test] 5307 #[cfg(not(feature = "mshv"))] 5308 fn test_virtio_balloon_free_page_reporting() { 5309 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5310 let guest = Guest::new(Box::new(focal)); 5311 5312 //Let's start a 4G guest with balloon occupied 2G memory 5313 let mut child = GuestCommand::new(&guest) 5314 .args(["--cpus", "boot=1"]) 5315 .args(["--memory", "size=4G"]) 5316 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 5317 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5318 .args(["--balloon", "size=0,free_page_reporting=on"]) 5319 .default_disks() 5320 .default_net() 5321 .capture_output() 5322 .spawn() 5323 .unwrap(); 5324 5325 let pid = child.id(); 5326 let r = std::panic::catch_unwind(|| { 5327 guest.wait_vm_boot(None).unwrap(); 5328 5329 // Check the initial RSS is less than 1GiB 5330 let rss = process_rss_kib(pid); 5331 println!("RSS {rss} < 1048576"); 5332 assert!(rss < 1048576); 5333 5334 // Spawn a command inside the guest to consume 2GiB of RAM for 60 5335 // seconds 5336 let guest_ip = guest.network.guest_ip.clone(); 5337 thread::spawn(move || { 5338 ssh_command_ip( 5339 "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60", 5340 &guest_ip, 5341 DEFAULT_SSH_RETRIES, 5342 DEFAULT_SSH_TIMEOUT, 5343 ) 5344 .unwrap(); 5345 }); 5346 5347 // Wait for 50 seconds to make sure the stress command is consuming 5348 // the expected amount of memory. 5349 thread::sleep(std::time::Duration::new(50, 0)); 5350 let rss = process_rss_kib(pid); 5351 println!("RSS {rss} >= 2097152"); 5352 assert!(rss >= 2097152); 5353 5354 // Wait for an extra minute to make sure the stress command has 5355 // completed and that the guest reported the free pages to the VMM 5356 // through the virtio-balloon device. We expect the RSS to be under 5357 // 2GiB. 
5358 thread::sleep(std::time::Duration::new(60, 0)); 5359 let rss = process_rss_kib(pid); 5360 println!("RSS {rss} < 2097152"); 5361 assert!(rss < 2097152); 5362 }); 5363 5364 let _ = child.kill(); 5365 let output = child.wait_with_output().unwrap(); 5366 5367 handle_child_output(r, &output); 5368 } 5369 5370 #[test] 5371 fn test_pmem_hotplug() { 5372 _test_pmem_hotplug(None) 5373 } 5374 5375 #[test] 5376 fn test_pmem_multi_segment_hotplug() { 5377 _test_pmem_hotplug(Some(15)) 5378 } 5379 5380 fn _test_pmem_hotplug(pci_segment: Option<u16>) { 5381 #[cfg(target_arch = "aarch64")] 5382 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 5383 #[cfg(target_arch = "x86_64")] 5384 let focal_image = FOCAL_IMAGE_NAME.to_string(); 5385 let focal = UbuntuDiskConfig::new(focal_image); 5386 let guest = Guest::new(Box::new(focal)); 5387 5388 #[cfg(target_arch = "x86_64")] 5389 let kernel_path = direct_kernel_boot_path(); 5390 #[cfg(target_arch = "aarch64")] 5391 let kernel_path = edk2_path(); 5392 5393 let api_socket = temp_api_path(&guest.tmp_dir); 5394 5395 let mut cmd = GuestCommand::new(&guest); 5396 5397 cmd.args(["--api-socket", &api_socket]) 5398 .args(["--cpus", "boot=1"]) 5399 .args(["--memory", "size=512M"]) 5400 .args(["--kernel", kernel_path.to_str().unwrap()]) 5401 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5402 .default_disks() 5403 .default_net() 5404 .capture_output(); 5405 5406 if pci_segment.is_some() { 5407 cmd.args([ 5408 "--platform", 5409 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5410 ]); 5411 } 5412 5413 let mut child = cmd.spawn().unwrap(); 5414 5415 let r = std::panic::catch_unwind(|| { 5416 guest.wait_vm_boot(None).unwrap(); 5417 5418 // Check /dev/pmem0 is not there 5419 assert_eq!( 5420 guest 5421 .ssh_command("lsblk | grep -c pmem0 || true") 5422 .unwrap() 5423 .trim() 5424 .parse::<u32>() 5425 .unwrap_or(1), 5426 0 5427 ); 5428 5429 let pmem_temp_file = TempFile::new().unwrap(); 5430 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 5431 let (cmd_success, cmd_output) = remote_command_w_output( 5432 &api_socket, 5433 "add-pmem", 5434 Some(&format!( 5435 "file={},id=test0{}", 5436 pmem_temp_file.as_path().to_str().unwrap(), 5437 if let Some(pci_segment) = pci_segment { 5438 format!(",pci_segment={pci_segment}") 5439 } else { 5440 "".to_owned() 5441 } 5442 )), 5443 ); 5444 assert!(cmd_success); 5445 if let Some(pci_segment) = pci_segment { 5446 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5447 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5448 ))); 5449 } else { 5450 assert!(String::from_utf8_lossy(&cmd_output) 5451 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5452 } 5453 5454 // Check that /dev/pmem0 exists and the block size is 128M 5455 assert_eq!( 5456 guest 5457 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5458 .unwrap() 5459 .trim() 5460 .parse::<u32>() 5461 .unwrap_or_default(), 5462 1 5463 ); 5464 5465 guest.reboot_linux(0, None); 5466 5467 // Check still there after reboot 5468 assert_eq!( 5469 guest 5470 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5471 .unwrap() 5472 .trim() 5473 .parse::<u32>() 5474 .unwrap_or_default(), 5475 1 5476 ); 5477 5478 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5479 5480 thread::sleep(std::time::Duration::new(20, 0)); 5481 5482 // Check device has gone away 5483 assert_eq!( 5484 guest 5485 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5486 .unwrap() 5487 .trim() 5488 .parse::<u32>() 5489 .unwrap_or(1), 5490 0 5491 ); 
5492 5493 guest.reboot_linux(1, None); 5494 5495 // Check still absent after reboot 5496 assert_eq!( 5497 guest 5498 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5499 .unwrap() 5500 .trim() 5501 .parse::<u32>() 5502 .unwrap_or(1), 5503 0 5504 ); 5505 }); 5506 5507 let _ = child.kill(); 5508 let output = child.wait_with_output().unwrap(); 5509 5510 handle_child_output(r, &output); 5511 } 5512 5513 #[test] 5514 fn test_net_hotplug() { 5515 _test_net_hotplug(None) 5516 } 5517 5518 #[test] 5519 fn test_net_multi_segment_hotplug() { 5520 _test_net_hotplug(Some(15)) 5521 } 5522 5523 fn _test_net_hotplug(pci_segment: Option<u16>) { 5524 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5525 let guest = Guest::new(Box::new(focal)); 5526 5527 #[cfg(target_arch = "x86_64")] 5528 let kernel_path = direct_kernel_boot_path(); 5529 #[cfg(target_arch = "aarch64")] 5530 let kernel_path = edk2_path(); 5531 5532 let api_socket = temp_api_path(&guest.tmp_dir); 5533 5534 // Boot without network 5535 let mut cmd = GuestCommand::new(&guest); 5536 5537 cmd.args(["--api-socket", &api_socket]) 5538 .args(["--cpus", "boot=1"]) 5539 .args(["--memory", "size=512M"]) 5540 .args(["--kernel", kernel_path.to_str().unwrap()]) 5541 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5542 .default_disks() 5543 .capture_output(); 5544 5545 if pci_segment.is_some() { 5546 cmd.args([ 5547 "--platform", 5548 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5549 ]); 5550 } 5551 5552 let mut child = cmd.spawn().unwrap(); 5553 5554 thread::sleep(std::time::Duration::new(20, 0)); 5555 5556 let r = std::panic::catch_unwind(|| { 5557 // Add network 5558 let (cmd_success, cmd_output) = remote_command_w_output( 5559 &api_socket, 5560 "add-net", 5561 Some( 5562 format!( 5563 "{}{},id=test0", 5564 guest.default_net_string(), 5565 if let Some(pci_segment) = pci_segment { 5566 format!(",pci_segment={pci_segment}") 5567 } else { 5568 "".to_owned() 5569 } 5570 ) 5571 .as_str(), 5572 ), 5573 ); 5574 assert!(cmd_success); 5575 5576 if let Some(pci_segment) = pci_segment { 5577 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5578 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5579 ))); 5580 } else { 5581 assert!(String::from_utf8_lossy(&cmd_output) 5582 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}")); 5583 } 5584 5585 thread::sleep(std::time::Duration::new(5, 0)); 5586 5587 // 1 network interfaces + default localhost ==> 2 interfaces 5588 assert_eq!( 5589 guest 5590 .ssh_command("ip -o link | wc -l") 5591 .unwrap() 5592 .trim() 5593 .parse::<u32>() 5594 .unwrap_or_default(), 5595 2 5596 ); 5597 5598 // Remove network 5599 assert!(remote_command(&api_socket, "remove-device", Some("test0"),)); 5600 thread::sleep(std::time::Duration::new(5, 0)); 5601 5602 let (cmd_success, cmd_output) = remote_command_w_output( 5603 &api_socket, 5604 "add-net", 5605 Some( 5606 format!( 5607 "{}{},id=test1", 5608 guest.default_net_string(), 5609 if let Some(pci_segment) = pci_segment { 5610 format!(",pci_segment={pci_segment}") 5611 } else { 5612 "".to_owned() 5613 } 5614 ) 5615 .as_str(), 5616 ), 5617 ); 5618 assert!(cmd_success); 5619 5620 if let Some(pci_segment) = pci_segment { 5621 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5622 "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5623 ))); 5624 } else { 5625 assert!(String::from_utf8_lossy(&cmd_output) 5626 .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}")); 5627 } 5628 5629 
thread::sleep(std::time::Duration::new(5, 0)); 5630 5631 // 1 network interfaces + default localhost ==> 2 interfaces 5632 assert_eq!( 5633 guest 5634 .ssh_command("ip -o link | wc -l") 5635 .unwrap() 5636 .trim() 5637 .parse::<u32>() 5638 .unwrap_or_default(), 5639 2 5640 ); 5641 5642 guest.reboot_linux(0, None); 5643 5644 // Check still there after reboot 5645 // 1 network interfaces + default localhost ==> 2 interfaces 5646 assert_eq!( 5647 guest 5648 .ssh_command("ip -o link | wc -l") 5649 .unwrap() 5650 .trim() 5651 .parse::<u32>() 5652 .unwrap_or_default(), 5653 2 5654 ); 5655 }); 5656 5657 let _ = child.kill(); 5658 let output = child.wait_with_output().unwrap(); 5659 5660 handle_child_output(r, &output); 5661 } 5662 5663 #[test] 5664 fn test_initramfs() { 5665 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5666 let guest = Guest::new(Box::new(focal)); 5667 let mut workload_path = dirs::home_dir().unwrap(); 5668 workload_path.push("workloads"); 5669 5670 #[cfg(target_arch = "x86_64")] 5671 let mut kernels = vec![direct_kernel_boot_path()]; 5672 #[cfg(target_arch = "aarch64")] 5673 let kernels = vec![direct_kernel_boot_path()]; 5674 5675 #[cfg(target_arch = "x86_64")] 5676 { 5677 let mut pvh_kernel_path = workload_path.clone(); 5678 pvh_kernel_path.push("vmlinux"); 5679 kernels.push(pvh_kernel_path); 5680 } 5681 5682 let mut initramfs_path = workload_path; 5683 initramfs_path.push("alpine_initramfs.img"); 5684 5685 let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg"); 5686 let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}"); 5687 5688 kernels.iter().for_each(|k_path| { 5689 let mut child = GuestCommand::new(&guest) 5690 .args(["--kernel", k_path.to_str().unwrap()]) 5691 .args(["--initramfs", initramfs_path.to_str().unwrap()]) 5692 .args(["--cmdline", &cmdline]) 5693 .capture_output() 5694 .spawn() 5695 .unwrap(); 5696 5697 thread::sleep(std::time::Duration::new(20, 0)); 5698 5699 let _ = child.kill(); 5700 let output = child.wait_with_output().unwrap(); 5701 5702 let r = std::panic::catch_unwind(|| { 5703 let s = String::from_utf8_lossy(&output.stdout); 5704 5705 assert_ne!(s.lines().position(|line| line == test_string), None); 5706 }); 5707 5708 handle_child_output(r, &output); 5709 }); 5710 } 5711 5712 // One thing to note about this test. The virtio-net device is heavily used 5713 // through each ssh command. There's no need to perform a dedicated test to 5714 // verify the migration went well for virtio-net. 
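// The flow exercised below is: boot a source VM, pause it, snapshot it to a
// directory, kill the source, then start a fresh cloud-hypervisor with
// `--restore source_url=file://<dir>` and resume the restored VM.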
5715 #[test] 5716 #[cfg(not(feature = "mshv"))] 5717 fn test_snapshot_restore_hotplug_virtiomem() { 5718 _test_snapshot_restore(true); 5719 } 5720 5721 #[test] 5722 fn test_snapshot_restore_basic() { 5723 _test_snapshot_restore(false); 5724 } 5725 5726 fn _test_snapshot_restore(use_hotplug: bool) { 5727 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5728 let guest = Guest::new(Box::new(focal)); 5729 let kernel_path = direct_kernel_boot_path(); 5730 5731 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 5732 5733 let net_id = "net123"; 5734 let net_params = format!( 5735 "id={},tap=,mac={},ip={},mask=255.255.255.0", 5736 net_id, guest.network.guest_mac, guest.network.host_ip 5737 ); 5738 let mut mem_params = "size=4G"; 5739 5740 if use_hotplug { 5741 mem_params = "size=4G,hotplug_method=virtio-mem,hotplug_size=32G" 5742 } 5743 5744 let cloudinit_params = format!( 5745 "path={},iommu=on", 5746 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5747 ); 5748 5749 let socket = temp_vsock_path(&guest.tmp_dir); 5750 let event_path = temp_event_monitor_path(&guest.tmp_dir); 5751 5752 let mut child = GuestCommand::new(&guest) 5753 .args(["--api-socket", &api_socket_source]) 5754 .args(["--event-monitor", format!("path={event_path}").as_str()]) 5755 .args(["--cpus", "boot=4"]) 5756 .args(["--memory", mem_params]) 5757 .args(["--balloon", "size=0"]) 5758 .args(["--kernel", kernel_path.to_str().unwrap()]) 5759 .args([ 5760 "--disk", 5761 format!( 5762 "path={}", 5763 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5764 ) 5765 .as_str(), 5766 "--disk", 5767 cloudinit_params.as_str(), 5768 ]) 5769 .args(["--net", net_params.as_str()]) 5770 .args(["--vsock", format!("cid=3,socket={socket}").as_str()]) 5771 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5772 .capture_output() 5773 .spawn() 5774 .unwrap(); 5775 5776 let console_text = String::from("On a branch floating down river a cricket, singing."); 5777 // Create the snapshot directory 5778 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 5779 5780 let r = std::panic::catch_unwind(|| { 5781 guest.wait_vm_boot(None).unwrap(); 5782 5783 // Check the number of vCPUs 5784 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 5785 // Check the guest RAM 5786 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 5787 if use_hotplug { 5788 // Increase guest RAM with virtio-mem 5789 resize_command( 5790 &api_socket_source, 5791 None, 5792 Some(6 << 30), 5793 None, 5794 Some(&event_path), 5795 ); 5796 thread::sleep(std::time::Duration::new(5, 0)); 5797 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 5798 // Use balloon to remove RAM from the VM 5799 resize_command( 5800 &api_socket_source, 5801 None, 5802 None, 5803 Some(1 << 30), 5804 Some(&event_path), 5805 ); 5806 thread::sleep(std::time::Duration::new(5, 0)); 5807 let total_memory = guest.get_total_memory().unwrap_or_default(); 5808 assert!(total_memory > 4_800_000); 5809 assert!(total_memory < 5_760_000); 5810 } 5811 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 5812 guest.check_devices_common(Some(&socket), Some(&console_text), None); 5813 5814 // x86_64: We check that removing and adding back the virtio-net device 5815 // does not break the snapshot/restore support for virtio-pci. 5816 // This is an important thing to test as the hotplug will 5817 // trigger a PCI BAR reprogramming, which is a good way of 5818 // checking if the stored resources are correctly restored. 
5819 // Unplug the virtio-net device 5820 // AArch64: Device hotplug is currently not supported, skipping here. 5821 #[cfg(target_arch = "x86_64")] 5822 { 5823 assert!(remote_command( 5824 &api_socket_source, 5825 "remove-device", 5826 Some(net_id), 5827 )); 5828 thread::sleep(std::time::Duration::new(10, 0)); 5829 let latest_events = [&MetaEvent { 5830 event: "device-removed".to_string(), 5831 device_id: Some(net_id.to_string()), 5832 }]; 5833 assert!(check_latest_events_exact(&latest_events, &event_path)); 5834 5835 // Plug the virtio-net device again 5836 assert!(remote_command( 5837 &api_socket_source, 5838 "add-net", 5839 Some(net_params.as_str()), 5840 )); 5841 thread::sleep(std::time::Duration::new(10, 0)); 5842 } 5843 5844 // Pause the VM 5845 assert!(remote_command(&api_socket_source, "pause", None)); 5846 let latest_events = [ 5847 &MetaEvent { 5848 event: "pausing".to_string(), 5849 device_id: None, 5850 }, 5851 &MetaEvent { 5852 event: "paused".to_string(), 5853 device_id: None, 5854 }, 5855 ]; 5856 assert!(check_latest_events_exact(&latest_events, &event_path)); 5857 5858 // Take a snapshot from the VM 5859 assert!(remote_command( 5860 &api_socket_source, 5861 "snapshot", 5862 Some(format!("file://{snapshot_dir}").as_str()), 5863 )); 5864 5865 // Wait to make sure the snapshot is completed 5866 thread::sleep(std::time::Duration::new(10, 0)); 5867 5868 let latest_events = [ 5869 &MetaEvent { 5870 event: "snapshotting".to_string(), 5871 device_id: None, 5872 }, 5873 &MetaEvent { 5874 event: "snapshotted".to_string(), 5875 device_id: None, 5876 }, 5877 ]; 5878 assert!(check_latest_events_exact(&latest_events, &event_path)); 5879 }); 5880 5881 // Shutdown the source VM and check console output 5882 let _ = child.kill(); 5883 let output = child.wait_with_output().unwrap(); 5884 handle_child_output(r, &output); 5885 5886 let r = std::panic::catch_unwind(|| { 5887 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 5888 }); 5889 5890 handle_child_output(r, &output); 5891 5892 // Remove the vsock socket file. 
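// (The restored VM below re-creates the vsock backend at the very same socket
// path, so the stale UNIX socket left behind by the source VM is cleaned up
// first.)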
5893 Command::new("rm") 5894 .arg("-f") 5895 .arg(socket.as_str()) 5896 .output() 5897 .unwrap(); 5898 5899 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 5900 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 5901 5902 // Restore the VM from the snapshot 5903 let mut child = GuestCommand::new(&guest) 5904 .args(["--api-socket", &api_socket_restored]) 5905 .args([ 5906 "--event-monitor", 5907 format!("path={event_path_restored}").as_str(), 5908 ]) 5909 .args([ 5910 "--restore", 5911 format!("source_url=file://{snapshot_dir}").as_str(), 5912 ]) 5913 .capture_output() 5914 .spawn() 5915 .unwrap(); 5916 5917 // Wait for the VM to be restored 5918 thread::sleep(std::time::Duration::new(10, 0)); 5919 let expected_events = [ 5920 &MetaEvent { 5921 event: "starting".to_string(), 5922 device_id: None, 5923 }, 5924 &MetaEvent { 5925 event: "activated".to_string(), 5926 device_id: Some("__console".to_string()), 5927 }, 5928 &MetaEvent { 5929 event: "activated".to_string(), 5930 device_id: Some("__rng".to_string()), 5931 }, 5932 &MetaEvent { 5933 event: "restoring".to_string(), 5934 device_id: None, 5935 }, 5936 ]; 5937 assert!(check_sequential_events( 5938 &expected_events, 5939 &event_path_restored 5940 )); 5941 let latest_events = [&MetaEvent { 5942 event: "restored".to_string(), 5943 device_id: None, 5944 }]; 5945 assert!(check_latest_events_exact( 5946 &latest_events, 5947 &event_path_restored 5948 )); 5949 5950 let r = std::panic::catch_unwind(|| { 5951 // Resume the VM 5952 assert!(remote_command(&api_socket_restored, "resume", None)); 5953 let latest_events = [ 5954 &MetaEvent { 5955 event: "resuming".to_string(), 5956 device_id: None, 5957 }, 5958 &MetaEvent { 5959 event: "resumed".to_string(), 5960 device_id: None, 5961 }, 5962 ]; 5963 assert!(check_latest_events_exact( 5964 &latest_events, 5965 &event_path_restored 5966 )); 5967 5968 // Perform same checks to validate VM has been properly restored 5969 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 5970 let total_memory = guest.get_total_memory().unwrap_or_default(); 5971 if !use_hotplug { 5972 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 5973 } else { 5974 assert!(total_memory > 4_800_000); 5975 assert!(total_memory < 5_760_000); 5976 // Deflate balloon to restore entire RAM to the VM 5977 resize_command(&api_socket_restored, None, None, Some(0), None); 5978 thread::sleep(std::time::Duration::new(5, 0)); 5979 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 5980 // Decrease guest RAM with virtio-mem 5981 resize_command(&api_socket_restored, None, Some(5 << 30), None, None); 5982 thread::sleep(std::time::Duration::new(5, 0)); 5983 let total_memory = guest.get_total_memory().unwrap_or_default(); 5984 assert!(total_memory > 4_800_000); 5985 assert!(total_memory < 5_760_000); 5986 } 5987 5988 guest.check_devices_common(Some(&socket), Some(&console_text), None); 5989 }); 5990 // Shutdown the target VM and check console output 5991 let _ = child.kill(); 5992 let output = child.wait_with_output().unwrap(); 5993 handle_child_output(r, &output); 5994 5995 let r = std::panic::catch_unwind(|| { 5996 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 5997 }); 5998 5999 handle_child_output(r, &output); 6000 } 6001 6002 #[test] 6003 fn test_counters() { 6004 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6005 let guest = Guest::new(Box::new(focal)); 6006 let api_socket = temp_api_path(&guest.tmp_dir); 
6007 6008 let mut cmd = GuestCommand::new(&guest); 6009 cmd.args(["--cpus", "boot=1"]) 6010 .args(["--memory", "size=512M"]) 6011 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 6012 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6013 .default_disks() 6014 .args(["--net", guest.default_net_string().as_str()]) 6015 .args(["--api-socket", &api_socket]) 6016 .capture_output(); 6017 6018 let mut child = cmd.spawn().unwrap(); 6019 6020 let r = std::panic::catch_unwind(|| { 6021 guest.wait_vm_boot(None).unwrap(); 6022 6023 let orig_counters = get_counters(&api_socket); 6024 guest 6025 .ssh_command("dd if=/dev/zero of=test count=8 bs=1M") 6026 .unwrap(); 6027 6028 let new_counters = get_counters(&api_socket); 6029 6030 // Check that all the counters have increased 6031 assert!(new_counters > orig_counters); 6032 }); 6033 6034 let _ = child.kill(); 6035 let output = child.wait_with_output().unwrap(); 6036 6037 handle_child_output(r, &output); 6038 } 6039 6040 #[test] 6041 #[cfg(feature = "guest_debug")] 6042 fn test_coredump() { 6043 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6044 let guest = Guest::new(Box::new(focal)); 6045 let api_socket = temp_api_path(&guest.tmp_dir); 6046 6047 let mut cmd = GuestCommand::new(&guest); 6048 cmd.args(["--cpus", "boot=4"]) 6049 .args(["--memory", "size=4G"]) 6050 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6051 .default_disks() 6052 .args(["--net", guest.default_net_string().as_str()]) 6053 .args(["--api-socket", &api_socket]) 6054 .capture_output(); 6055 6056 let mut child = cmd.spawn().unwrap(); 6057 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6058 6059 let r = std::panic::catch_unwind(|| { 6060 guest.wait_vm_boot(None).unwrap(); 6061 6062 assert!(remote_command(&api_socket, "pause", None)); 6063 6064 assert!(remote_command( 6065 &api_socket, 6066 "coredump", 6067 Some(format!("file://{vmcore_file}").as_str()), 6068 )); 6069 6070 // The number of CORE notes should equal the number of vCPUs 6071 let readelf_core_num_cmd = 6072 format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l"); 6073 let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd); 6074 assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4"); 6075 6076 // The number of QEMU notes should equal the number of vCPUs 6077 let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l"); 6078 let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd); 6079 assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4"); 6080 }); 6081 6082 let _ = child.kill(); 6083 let output = child.wait_with_output().unwrap(); 6084 6085 handle_child_output(r, &output); 6086 } 6087 6088 #[test] 6089 #[cfg(feature = "guest_debug")] 6090 fn test_coredump_no_pause() { 6091 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6092 let guest = Guest::new(Box::new(focal)); 6093 let api_socket = temp_api_path(&guest.tmp_dir); 6094 6095 let mut cmd = GuestCommand::new(&guest); 6096 cmd.args(["--cpus", "boot=4"]) 6097 .args(["--memory", "size=4G"]) 6098 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6099 .default_disks() 6100 .args(["--net", guest.default_net_string().as_str()]) 6101 .args(["--api-socket", &api_socket]) 6102 .capture_output(); 6103 6104 let mut child = cmd.spawn().unwrap(); 6105 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6106 6107 let r = std::panic::catch_unwind(|| { 6108 guest.wait_vm_boot(None).unwrap(); 6109 6110
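// Unlike test_coredump() above, no explicit pause is issued before requesting
// the coredump; the check below verifies the VM is still reported as "Running"
// once the coredump request has completed.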
assert!(remote_command( 6111 &api_socket, 6112 "coredump", 6113 Some(format!("file://{vmcore_file}").as_str()), 6114 )); 6115 6116 assert_eq!(vm_state(&api_socket), "Running"); 6117 }); 6118 6119 let _ = child.kill(); 6120 let output = child.wait_with_output().unwrap(); 6121 6122 handle_child_output(r, &output); 6123 } 6124 6125 #[test] 6126 fn test_watchdog() { 6127 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6128 let guest = Guest::new(Box::new(focal)); 6129 let api_socket = temp_api_path(&guest.tmp_dir); 6130 6131 let kernel_path = direct_kernel_boot_path(); 6132 6133 let mut cmd = GuestCommand::new(&guest); 6134 cmd.args(["--cpus", "boot=1"]) 6135 .args(["--memory", "size=512M"]) 6136 .args(["--kernel", kernel_path.to_str().unwrap()]) 6137 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6138 .default_disks() 6139 .args(["--net", guest.default_net_string().as_str()]) 6140 .args(["--watchdog"]) 6141 .args(["--api-socket", &api_socket]) 6142 .capture_output(); 6143 6144 let mut child = cmd.spawn().unwrap(); 6145 6146 let r = std::panic::catch_unwind(|| { 6147 guest.wait_vm_boot(None).unwrap(); 6148 6149 let mut expected_reboot_count = 1; 6150 6151 // Enable the watchdog with a 15s timeout 6152 enable_guest_watchdog(&guest, 15); 6153 6154 // Reboot and check that systemd has activated the watchdog 6155 guest.ssh_command("sudo reboot").unwrap(); 6156 guest.wait_vm_boot(None).unwrap(); 6157 expected_reboot_count += 1; 6158 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6159 assert_eq!( 6160 guest 6161 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 6162 .unwrap() 6163 .trim() 6164 .parse::<u32>() 6165 .unwrap_or_default(), 6166 2 6167 ); 6168 6169 // Allow some normal time to elapse to check we don't get spurious reboots 6170 thread::sleep(std::time::Duration::new(40, 0)); 6171 // Check no reboot 6172 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6173 6174 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 
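// Writing 's' to /proc/sysrq-trigger syncs the filesystems and 'c' then forces
// a crash; once the guest has crashed it stops refreshing the watchdog, which
// is expected to reset the VM within its 15s timeout (waited for below with a
// generous 50s margin).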
6175 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 6176 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 6177 guest.wait_vm_boot(Some(50)).unwrap(); 6178 // Check a reboot is triggered by the watchdog 6179 expected_reboot_count += 1; 6180 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6181 6182 #[cfg(target_arch = "x86_64")] 6183 { 6184 // Now pause the VM and remain offline for 30s 6185 assert!(remote_command(&api_socket, "pause", None)); 6186 thread::sleep(std::time::Duration::new(30, 0)); 6187 assert!(remote_command(&api_socket, "resume", None)); 6188 6189 // Check no reboot 6190 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6191 } 6192 }); 6193 6194 let _ = child.kill(); 6195 let output = child.wait_with_output().unwrap(); 6196 6197 handle_child_output(r, &output); 6198 } 6199 6200 #[test] 6201 fn test_pvpanic() { 6202 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 6203 let guest = Guest::new(Box::new(jammy)); 6204 let api_socket = temp_api_path(&guest.tmp_dir); 6205 let event_path = temp_event_monitor_path(&guest.tmp_dir); 6206 6207 let kernel_path = direct_kernel_boot_path(); 6208 6209 let mut cmd = GuestCommand::new(&guest); 6210 cmd.args(["--cpus", "boot=1"]) 6211 .args(["--memory", "size=512M"]) 6212 .args(["--kernel", kernel_path.to_str().unwrap()]) 6213 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6214 .default_disks() 6215 .args(["--net", guest.default_net_string().as_str()]) 6216 .args(["--pvpanic"]) 6217 .args(["--api-socket", &api_socket]) 6218 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6219 .capture_output(); 6220 6221 let mut child = cmd.spawn().unwrap(); 6222 6223 let r = std::panic::catch_unwind(|| { 6224 guest.wait_vm_boot(None).unwrap(); 6225 6226 // Trigger guest a panic 6227 make_guest_panic(&guest); 6228 6229 // Wait a while for guest 6230 thread::sleep(std::time::Duration::new(10, 0)); 6231 6232 let expected_sequential_events = [&MetaEvent { 6233 event: "panic".to_string(), 6234 device_id: None, 6235 }]; 6236 assert!(check_latest_events_exact( 6237 &expected_sequential_events, 6238 &event_path 6239 )); 6240 }); 6241 6242 let _ = child.kill(); 6243 let output = child.wait_with_output().unwrap(); 6244 6245 handle_child_output(r, &output); 6246 } 6247 6248 #[test] 6249 fn test_tap_from_fd() { 6250 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6251 let guest = Guest::new(Box::new(focal)); 6252 let kernel_path = direct_kernel_boot_path(); 6253 6254 // Create a TAP interface with multi-queue enabled 6255 let num_queue_pairs: usize = 2; 6256 6257 use std::str::FromStr; 6258 let taps = net_util::open_tap( 6259 Some("chtap0"), 6260 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 6261 None, 6262 &mut None, 6263 None, 6264 num_queue_pairs, 6265 Some(libc::O_RDWR | libc::O_NONBLOCK), 6266 ) 6267 .unwrap(); 6268 6269 let mut child = GuestCommand::new(&guest) 6270 .args(["--cpus", &format!("boot={num_queue_pairs}")]) 6271 .args(["--memory", "size=512M"]) 6272 .args(["--kernel", kernel_path.to_str().unwrap()]) 6273 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6274 .default_disks() 6275 .args([ 6276 "--net", 6277 &format!( 6278 "fd=[{},{}],mac={},num_queues={}", 6279 taps[0].as_raw_fd(), 6280 taps[1].as_raw_fd(), 6281 guest.network.guest_mac, 6282 num_queue_pairs * 2 6283 ), 6284 ]) 6285 .capture_output() 6286 .spawn() 6287 .unwrap(); 6288 
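// The two pre-opened TAP fds passed above back a single multi-queue virtio-net
// device (2 queue pairs, hence num_queues=4), so the guest should see exactly
// one NIC plus the loopback interface (2 links), both before and after the
// reboot below.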
6289 let r = std::panic::catch_unwind(|| { 6290 guest.wait_vm_boot(None).unwrap(); 6291 6292 assert_eq!( 6293 guest 6294 .ssh_command("ip -o link | wc -l") 6295 .unwrap() 6296 .trim() 6297 .parse::<u32>() 6298 .unwrap_or_default(), 6299 2 6300 ); 6301 6302 guest.reboot_linux(0, None); 6303 6304 assert_eq!( 6305 guest 6306 .ssh_command("ip -o link | wc -l") 6307 .unwrap() 6308 .trim() 6309 .parse::<u32>() 6310 .unwrap_or_default(), 6311 2 6312 ); 6313 }); 6314 6315 let _ = child.kill(); 6316 let output = child.wait_with_output().unwrap(); 6317 6318 handle_child_output(r, &output); 6319 } 6320 6321 // By design, a guest VM won't be able to connect to the host 6322 // machine when using a macvtap network interface (while it can 6323 // communicate externally). As a workaround, this integration 6324 // test creates two macvtap interfaces in 'bridge' mode on the 6325 // same physical net interface, one for the guest and one for 6326 // the host. With additional setup on the IP address and the 6327 // routing table, it enables the communications between the 6328 // guest VM and the host machine. 6329 // Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail 6330 fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) { 6331 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6332 let guest = Guest::new(Box::new(focal)); 6333 let api_socket = temp_api_path(&guest.tmp_dir); 6334 6335 #[cfg(target_arch = "x86_64")] 6336 let kernel_path = direct_kernel_boot_path(); 6337 #[cfg(target_arch = "aarch64")] 6338 let kernel_path = edk2_path(); 6339 6340 let phy_net = "eth0"; 6341 6342 // Create a macvtap interface for the guest VM to use 6343 assert!(exec_host_command_status(&format!( 6344 "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mod bridge" 6345 )) 6346 .success()); 6347 assert!(exec_host_command_status(&format!( 6348 "sudo ip link set {} address {} up", 6349 guest_macvtap_name, guest.network.guest_mac 6350 )) 6351 .success()); 6352 assert!( 6353 exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success() 6354 ); 6355 6356 let tap_index = 6357 fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap(); 6358 let tap_device = format!("/dev/tap{}", tap_index.trim()); 6359 6360 assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success()); 6361 6362 let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap(); 6363 let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6364 assert!(tap_fd1 > 0); 6365 let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6366 assert!(tap_fd2 > 0); 6367 6368 // Create a macvtap on the same physical net interface for 6369 // the host machine to use 6370 assert!(exec_host_command_status(&format!( 6371 "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mod bridge" 6372 )) 6373 .success()); 6374 // Use default mask "255.255.255.0" 6375 assert!(exec_host_command_status(&format!( 6376 "sudo ip address add {}/24 dev {}", 6377 guest.network.host_ip, host_macvtap_name 6378 )) 6379 .success()); 6380 assert!( 6381 exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up")) 6382 .success() 6383 ); 6384 6385 let mut guest_command = GuestCommand::new(&guest); 6386 guest_command 6387 .args(["--cpus", "boot=2"]) 6388 .args(["--memory", "size=512M"]) 6389 .args(["--kernel", kernel_path.to_str().unwrap()]) 6390 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE]) 6391 .default_disks() 6392 .args(["--api-socket", &api_socket]); 6393 6394 let net_params = format!( 6395 "fd=[{},{}],mac={},num_queues=4", 6396 tap_fd1, tap_fd2, guest.network.guest_mac 6397 ); 6398 6399 if !hotplug { 6400 guest_command.args(["--net", &net_params]); 6401 } 6402 6403 let mut child = guest_command.capture_output().spawn().unwrap(); 6404 6405 if hotplug { 6406 // Give some time to the VMM process to listen to the API 6407 // socket. This is the only requirement to avoid the following 6408 // call to ch-remote from failing. 6409 thread::sleep(std::time::Duration::new(10, 0)); 6410 // Hotplug the virtio-net device 6411 let (cmd_success, cmd_output) = 6412 remote_command_w_output(&api_socket, "add-net", Some(&net_params)); 6413 assert!(cmd_success); 6414 #[cfg(target_arch = "x86_64")] 6415 assert!(String::from_utf8_lossy(&cmd_output) 6416 .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}")); 6417 #[cfg(target_arch = "aarch64")] 6418 assert!(String::from_utf8_lossy(&cmd_output) 6419 .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}")); 6420 } 6421 6422 // The functional connectivity provided by the virtio-net device 6423 // gets tested through wait_vm_boot() as it expects to receive a 6424 // HTTP request, and through the SSH command as well. 6425 let r = std::panic::catch_unwind(|| { 6426 guest.wait_vm_boot(None).unwrap(); 6427 6428 assert_eq!( 6429 guest 6430 .ssh_command("ip -o link | wc -l") 6431 .unwrap() 6432 .trim() 6433 .parse::<u32>() 6434 .unwrap_or_default(), 6435 2 6436 ); 6437 6438 guest.reboot_linux(0, None); 6439 6440 assert_eq!( 6441 guest 6442 .ssh_command("ip -o link | wc -l") 6443 .unwrap() 6444 .trim() 6445 .parse::<u32>() 6446 .unwrap_or_default(), 6447 2 6448 ); 6449 }); 6450 6451 let _ = child.kill(); 6452 6453 exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}")); 6454 exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}")); 6455 6456 let output = child.wait_with_output().unwrap(); 6457 6458 handle_child_output(r, &output); 6459 } 6460 6461 #[test] 6462 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6463 fn test_macvtap() { 6464 _test_macvtap(false, "guestmacvtap0", "hostmacvtap0") 6465 } 6466 6467 #[test] 6468 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6469 fn test_macvtap_hotplug() { 6470 _test_macvtap(true, "guestmacvtap1", "hostmacvtap1") 6471 } 6472 6473 #[test] 6474 #[cfg(not(feature = "mshv"))] 6475 fn test_ovs_dpdk() { 6476 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6477 let guest1 = Guest::new(Box::new(focal1)); 6478 6479 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6480 let guest2 = Guest::new(Box::new(focal2)); 6481 let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir)); 6482 6483 let (mut child1, mut child2) = 6484 setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false); 6485 6486 // Create the snapshot directory 6487 let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir); 6488 6489 let r = std::panic::catch_unwind(|| { 6490 // Remove one of the two ports from the OVS bridge 6491 assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success()); 6492 6493 // Spawn a new netcat listener in the first VM 6494 let guest_ip = guest1.network.guest_ip.clone(); 6495 thread::spawn(move || { 6496 ssh_command_ip( 6497 "nc -l 12345", 6498 &guest_ip, 6499 DEFAULT_SSH_RETRIES, 6500 DEFAULT_SSH_TIMEOUT, 6501 ) 6502 .unwrap(); 6503 }); 6504 6505 // Wait for the 
server to be listening 6506 thread::sleep(std::time::Duration::new(5, 0)); 6507 6508 // Check the connection fails this time 6509 assert!(guest2.ssh_command("nc -vz 172.100.0.1 12345").is_err()); 6510 6511 // Add the OVS port back 6512 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 6513 6514 // And finally check the connection is functional again 6515 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6516 6517 // Pause the VM 6518 assert!(remote_command(&api_socket_source, "pause", None)); 6519 6520 // Take a snapshot from the VM 6521 assert!(remote_command( 6522 &api_socket_source, 6523 "snapshot", 6524 Some(format!("file://{snapshot_dir}").as_str()), 6525 )); 6526 6527 // Wait to make sure the snapshot is completed 6528 thread::sleep(std::time::Duration::new(10, 0)); 6529 }); 6530 6531 // Shutdown the source VM 6532 let _ = child2.kill(); 6533 let output = child2.wait_with_output().unwrap(); 6534 handle_child_output(r, &output); 6535 6536 // Remove the vhost-user socket file. 6537 Command::new("rm") 6538 .arg("-f") 6539 .arg("/tmp/dpdkvhostclient2") 6540 .output() 6541 .unwrap(); 6542 6543 let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir)); 6544 // Restore the VM from the snapshot 6545 let mut child2 = GuestCommand::new(&guest2) 6546 .args(["--api-socket", &api_socket_restored]) 6547 .args([ 6548 "--restore", 6549 format!("source_url=file://{snapshot_dir}").as_str(), 6550 ]) 6551 .capture_output() 6552 .spawn() 6553 .unwrap(); 6554 6555 // Wait for the VM to be restored 6556 thread::sleep(std::time::Duration::new(10, 0)); 6557 6558 let r = std::panic::catch_unwind(|| { 6559 // Resume the VM 6560 assert!(remote_command(&api_socket_restored, "resume", None)); 6561 6562 // Spawn a new netcat listener in the first VM 6563 let guest_ip = guest1.network.guest_ip.clone(); 6564 thread::spawn(move || { 6565 ssh_command_ip( 6566 "nc -l 12345", 6567 &guest_ip, 6568 DEFAULT_SSH_RETRIES, 6569 DEFAULT_SSH_TIMEOUT, 6570 ) 6571 .unwrap(); 6572 }); 6573 6574 // Wait for the server to be listening 6575 thread::sleep(std::time::Duration::new(5, 0)); 6576 6577 // And check the connection is still functional after restore 6578 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6579 }); 6580 6581 let _ = child1.kill(); 6582 let _ = child2.kill(); 6583 6584 let output = child1.wait_with_output().unwrap(); 6585 child2.wait().unwrap(); 6586 6587 cleanup_ovs_dpdk(); 6588 6589 handle_child_output(r, &output); 6590 } 6591 6592 fn setup_spdk_nvme(nvme_dir: &std::path::Path) { 6593 cleanup_spdk_nvme(); 6594 6595 assert!(exec_host_command_status(&format!( 6596 "mkdir -p {}", 6597 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6598 )) 6599 .success()); 6600 assert!(exec_host_command_status(&format!( 6601 "truncate {} -s 128M", 6602 nvme_dir.join("test-disk.raw").to_str().unwrap() 6603 )) 6604 .success()); 6605 assert!(exec_host_command_status(&format!( 6606 "mkfs.ext4 {}", 6607 nvme_dir.join("test-disk.raw").to_str().unwrap() 6608 )) 6609 .success()); 6610 6611 // Start the SPDK nvmf_tgt daemon to present NVMe device as a VFIO user device 6612 Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt") 6613 .args(["-i", "0", "-m", "0x1"]) 6614 .spawn() 6615 .unwrap(); 6616 thread::sleep(std::time::Duration::new(2, 0)); 6617 6618 assert!(exec_host_command_status( 6619 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER" 6620 ) 6621 
.success()); 6622 assert!(exec_host_command_status(&format!( 6623 "/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512", 6624 nvme_dir.join("test-disk.raw").to_str().unwrap() 6625 )) 6626 .success()); 6627 assert!(exec_host_command_status( 6628 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test" 6629 ) 6630 .success()); 6631 assert!(exec_host_command_status( 6632 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode test" 6633 ) 6634 .success()); 6635 assert!(exec_host_command_status(&format!( 6636 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0", 6637 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6638 )) 6639 .success()); 6640 } 6641 6642 fn cleanup_spdk_nvme() { 6643 exec_host_command_status("pkill -f nvmf_tgt"); 6644 } 6645 6646 #[test] 6647 fn test_vfio_user() { 6648 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 6649 let jammy = UbuntuDiskConfig::new(jammy_image); 6650 let guest = Guest::new(Box::new(jammy)); 6651 6652 let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user"); 6653 setup_spdk_nvme(spdk_nvme_dir.as_path()); 6654 6655 let api_socket = temp_api_path(&guest.tmp_dir); 6656 let mut child = GuestCommand::new(&guest) 6657 .args(["--api-socket", &api_socket]) 6658 .args(["--cpus", "boot=1"]) 6659 .args(["--memory", "size=512M,shared=on,hugepages=on"]) 6660 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6661 .args(["--serial", "tty", "--console", "off"]) 6662 .default_disks() 6663 .default_net() 6664 .capture_output() 6665 .spawn() 6666 .unwrap(); 6667 6668 let r = std::panic::catch_unwind(|| { 6669 guest.wait_vm_boot(None).unwrap(); 6670 6671 // Hotplug the SPDK-NVMe device to the VM 6672 let (cmd_success, cmd_output) = remote_command_w_output( 6673 &api_socket, 6674 "add-user-device", 6675 Some(&format!( 6676 "socket={},id=vfio_user0", 6677 spdk_nvme_dir 6678 .as_path() 6679 .join("nvme-vfio-user/cntrl") 6680 .to_str() 6681 .unwrap(), 6682 )), 6683 ); 6684 assert!(cmd_success); 6685 assert!(String::from_utf8_lossy(&cmd_output) 6686 .contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}")); 6687 6688 thread::sleep(std::time::Duration::new(10, 0)); 6689 6690 // Check both if /dev/nvme exists and if the block size is 128M. 6691 assert_eq!( 6692 guest 6693 .ssh_command("lsblk | grep nvme0n1 | grep -c 128M") 6694 .unwrap() 6695 .trim() 6696 .parse::<u32>() 6697 .unwrap_or_default(), 6698 1 6699 ); 6700 6701 // Check changes persist after reboot 6702 assert_eq!( 6703 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6704 "" 6705 ); 6706 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 6707 guest 6708 .ssh_command("echo test123 | sudo tee /mnt/test") 6709 .unwrap(); 6710 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 6711 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 6712 6713 guest.reboot_linux(0, None); 6714 assert_eq!( 6715 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6716 "" 6717 ); 6718 assert_eq!( 6719 guest.ssh_command("sudo cat /mnt/test").unwrap().trim(), 6720 "test123" 6721 ); 6722 }); 6723 6724 cleanup_spdk_nvme(); 6725 6726 let _ = child.kill(); 6727 let output = child.wait_with_output().unwrap(); 6728 6729 handle_child_output(r, &output); 6730 } 6731 6732 #[test] 6733 #[cfg(target_arch = "x86_64")] 6734 fn test_vdpa_block() { 6735 // Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded. 
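// (The /dev/vhost-vdpa-* nodes used below are expected to have been provisioned
// on the host beforehand; the vdpa_sim_blk simulator exposes 128 MiB block
// devices, which is what the size checks in this test rely on.)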
6736 if !exec_host_command_status("lsmod | grep vdpa_sim_blk").success() { 6737 return; 6738 } 6739 6740 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6741 let guest = Guest::new(Box::new(focal)); 6742 let api_socket = temp_api_path(&guest.tmp_dir); 6743 6744 let kernel_path = direct_kernel_boot_path(); 6745 6746 let mut child = GuestCommand::new(&guest) 6747 .args(["--cpus", "boot=2"]) 6748 .args(["--memory", "size=512M,hugepages=on"]) 6749 .args(["--kernel", kernel_path.to_str().unwrap()]) 6750 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6751 .default_disks() 6752 .default_net() 6753 .args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"]) 6754 .args(["--platform", "num_pci_segments=2,iommu_segments=1"]) 6755 .args(["--api-socket", &api_socket]) 6756 .capture_output() 6757 .spawn() 6758 .unwrap(); 6759 6760 let r = std::panic::catch_unwind(|| { 6761 guest.wait_vm_boot(None).unwrap(); 6762 6763 // Check both if /dev/vdc exists and if the block size is 128M. 6764 assert_eq!( 6765 guest 6766 .ssh_command("lsblk | grep vdc | grep -c 128M") 6767 .unwrap() 6768 .trim() 6769 .parse::<u32>() 6770 .unwrap_or_default(), 6771 1 6772 ); 6773 6774 // Check the content of the block device after we wrote to it. 6775 // The vpda-sim-blk should let us read what we previously wrote. 6776 guest 6777 .ssh_command("sudo bash -c 'echo foobar > /dev/vdc'") 6778 .unwrap(); 6779 assert_eq!( 6780 guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(), 6781 "foobar" 6782 ); 6783 6784 // Hotplug an extra vDPA block device behind the vIOMMU 6785 // Add a new vDPA device to the VM 6786 let (cmd_success, cmd_output) = remote_command_w_output( 6787 &api_socket, 6788 "add-vdpa", 6789 Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"), 6790 ); 6791 assert!(cmd_success); 6792 assert!(String::from_utf8_lossy(&cmd_output) 6793 .contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}")); 6794 6795 thread::sleep(std::time::Duration::new(10, 0)); 6796 6797 // Check IOMMU setup 6798 assert!(guest 6799 .does_device_vendor_pair_match("0x1057", "0x1af4") 6800 .unwrap_or_default()); 6801 assert_eq!( 6802 guest 6803 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 6804 .unwrap() 6805 .trim(), 6806 "0001:00:01.0" 6807 ); 6808 6809 // Check both if /dev/vdd exists and if the block size is 128M. 6810 assert_eq!( 6811 guest 6812 .ssh_command("lsblk | grep vdd | grep -c 128M") 6813 .unwrap() 6814 .trim() 6815 .parse::<u32>() 6816 .unwrap_or_default(), 6817 1 6818 ); 6819 6820 // Write some content to the block device we've just plugged. 6821 guest 6822 .ssh_command("sudo bash -c 'echo foobar > /dev/vdd'") 6823 .unwrap(); 6824 6825 // Check we can read the content back. 
6826 assert_eq!( 6827 guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(), 6828 "foobar" 6829 ); 6830 6831 // Unplug the device 6832 let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0")); 6833 assert!(cmd_success); 6834 thread::sleep(std::time::Duration::new(10, 0)); 6835 6836 // Check /dev/vdd doesn't exist anymore 6837 assert_eq!( 6838 guest 6839 .ssh_command("lsblk | grep -c vdd || true") 6840 .unwrap() 6841 .trim() 6842 .parse::<u32>() 6843 .unwrap_or(1), 6844 0 6845 ); 6846 }); 6847 6848 let _ = child.kill(); 6849 let output = child.wait_with_output().unwrap(); 6850 6851 handle_child_output(r, &output); 6852 } 6853 6854 #[test] 6855 #[cfg(target_arch = "x86_64")] 6856 #[ignore = "See #5756"] 6857 fn test_vdpa_net() { 6858 // Before trying to run the test, verify the vdpa_sim_net module is correctly loaded. 6859 if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() { 6860 return; 6861 } 6862 6863 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6864 let guest = Guest::new(Box::new(focal)); 6865 6866 let kernel_path = direct_kernel_boot_path(); 6867 6868 let mut child = GuestCommand::new(&guest) 6869 .args(["--cpus", "boot=2"]) 6870 .args(["--memory", "size=512M,hugepages=on"]) 6871 .args(["--kernel", kernel_path.to_str().unwrap()]) 6872 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6873 .default_disks() 6874 .default_net() 6875 .args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"]) 6876 .capture_output() 6877 .spawn() 6878 .unwrap(); 6879 6880 let r = std::panic::catch_unwind(|| { 6881 guest.wait_vm_boot(None).unwrap(); 6882 6883 // Check we can find network interface related to vDPA device 6884 assert_eq!( 6885 guest 6886 .ssh_command("ip -o link | grep -c ens6") 6887 .unwrap() 6888 .trim() 6889 .parse::<u32>() 6890 .unwrap_or(0), 6891 1 6892 ); 6893 6894 guest 6895 .ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6") 6896 .unwrap(); 6897 guest.ssh_command("sudo ip link set up dev ens6").unwrap(); 6898 6899 // Check there is no packet yet on both TX/RX of the network interface 6900 assert_eq!( 6901 guest 6902 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'") 6903 .unwrap() 6904 .trim() 6905 .parse::<u32>() 6906 .unwrap_or(0), 6907 2 6908 ); 6909 6910 // Send 6 packets with ping command 6911 guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap(); 6912 6913 // Check we can find 6 packets on both TX/RX of the network interface 6914 assert_eq!( 6915 guest 6916 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'") 6917 .unwrap() 6918 .trim() 6919 .parse::<u32>() 6920 .unwrap_or(0), 6921 2 6922 ); 6923 6924 // No need to check for hotplug as we already tested it through 6925 // test_vdpa_block() 6926 }); 6927 6928 let _ = child.kill(); 6929 let output = child.wait_with_output().unwrap(); 6930 6931 handle_child_output(r, &output); 6932 } 6933 6934 #[test] 6935 #[cfg(target_arch = "x86_64")] 6936 fn test_tpm() { 6937 let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 6938 let guest = Guest::new(Box::new(focal)); 6939 6940 let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir); 6941 6942 let mut guest_cmd = GuestCommand::new(&guest); 6943 guest_cmd 6944 .args(["--cpus", "boot=1"]) 6945 .args(["--memory", "size=512M"]) 6946 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6947 .args(["--tpm", &format!("socket={swtpm_socket_path}")]) 6948 .capture_output() 6949 .default_disks() 6950 .default_net(); 6951 6952 // Start swtpm 
daemon 6953 let mut swtpm_child = swtpm_command.spawn().unwrap(); 6954 thread::sleep(std::time::Duration::new(10, 0)); 6955 let mut child = guest_cmd.spawn().unwrap(); 6956 let r = std::panic::catch_unwind(|| { 6957 guest.wait_vm_boot(None).unwrap(); 6958 assert_eq!( 6959 guest.ssh_command("ls /dev/tpm0").unwrap().trim(), 6960 "/dev/tpm0" 6961 ); 6962 guest.ssh_command("sudo tpm2_selftest -f").unwrap(); 6963 guest 6964 .ssh_command("echo 'hello' > /tmp/checksum_test; ") 6965 .unwrap(); 6966 guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap(); 6967 }); 6968 6969 let _ = swtpm_child.kill(); 6970 let _d_out = swtpm_child.wait_with_output().unwrap(); 6971 6972 let _ = child.kill(); 6973 let output = child.wait_with_output().unwrap(); 6974 6975 handle_child_output(r, &output); 6976 } 6977 } 6978 6979 mod dbus_api { 6980 use crate::*; 6981 6982 // Start cloud-hypervisor with no VM parameters, running both the HTTP 6983 // and DBus APIs. Alternate calls to the external APIs (HTTP and DBus) 6984 // to create a VM, boot it, and verify that it can be shut down and then 6985 // booted again. 6986 #[test] 6987 fn test_api_dbus_and_http_interleaved() { 6988 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6989 let guest = Guest::new(Box::new(focal)); 6990 let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir); 6991 let http_api = TargetApi::new_http_api(&guest.tmp_dir); 6992 6993 let mut child = GuestCommand::new(&guest) 6994 .args(dbus_api.guest_args()) 6995 .args(http_api.guest_args()) 6996 .capture_output() 6997 .spawn() 6998 .unwrap(); 6999 7000 thread::sleep(std::time::Duration::new(1, 0)); 7001 7002 // Verify API servers are running 7003 assert!(dbus_api.remote_command("ping", None)); 7004 assert!(http_api.remote_command("ping", None)); 7005 7006 // Create the VM first 7007 let cpu_count: u8 = 4; 7008 let request_body = guest.api_create_body( 7009 cpu_count, 7010 direct_kernel_boot_path().to_str().unwrap(), 7011 DIRECT_KERNEL_BOOT_CMDLINE, 7012 ); 7013 7014 let temp_config_path = guest.tmp_dir.as_path().join("config"); 7015 std::fs::write(&temp_config_path, request_body).unwrap(); 7016 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 7017 7018 let r = std::panic::catch_unwind(|| { 7019 // Create the VM 7020 assert!(dbus_api.remote_command("create", Some(create_config),)); 7021 7022 // Then boot it 7023 assert!(http_api.remote_command("boot", None)); 7024 guest.wait_vm_boot(None).unwrap(); 7025 7026 // Check that the VM booted as expected 7027 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7028 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7029 7030 // Sync and shutdown without powering off to prevent filesystem 7031 // corruption. 
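// After the guest has halted, the VM itself is shut down through the D-Bus API
// and booted again through the HTTP API, exercising both servers on the same
// VMM instance.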
7032 guest.ssh_command("sync").unwrap(); 7033 guest.ssh_command("sudo shutdown -H now").unwrap(); 7034 7035 // Wait for the guest to be fully shutdown 7036 thread::sleep(std::time::Duration::new(20, 0)); 7037 7038 // Then shutdown the VM 7039 assert!(dbus_api.remote_command("shutdown", None)); 7040 7041 // Then boot it again 7042 assert!(http_api.remote_command("boot", None)); 7043 guest.wait_vm_boot(None).unwrap(); 7044 7045 // Check that the VM booted as expected 7046 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7047 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7048 }); 7049 7050 let _ = child.kill(); 7051 let output = child.wait_with_output().unwrap(); 7052 7053 handle_child_output(r, &output); 7054 } 7055 7056 #[test] 7057 fn test_api_dbus_create_boot() { 7058 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7059 let guest = Guest::new(Box::new(focal)); 7060 7061 _test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7062 } 7063 7064 #[test] 7065 fn test_api_dbus_shutdown() { 7066 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7067 let guest = Guest::new(Box::new(focal)); 7068 7069 _test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7070 } 7071 7072 #[test] 7073 fn test_api_dbus_delete() { 7074 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7075 let guest = Guest::new(Box::new(focal)); 7076 7077 _test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest); 7078 } 7079 7080 #[test] 7081 fn test_api_dbus_pause_resume() { 7082 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7083 let guest = Guest::new(Box::new(focal)); 7084 7085 _test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7086 } 7087 } 7088 7089 mod common_sequential { 7090 #[cfg(not(feature = "mshv"))] 7091 use crate::*; 7092 7093 #[test] 7094 #[cfg(not(feature = "mshv"))] 7095 fn test_memory_mergeable_on() { 7096 test_memory_mergeable(true) 7097 } 7098 } 7099 7100 mod windows { 7101 use crate::*; 7102 use once_cell::sync::Lazy; 7103 7104 static NEXT_DISK_ID: Lazy<Mutex<u8>> = Lazy::new(|| Mutex::new(1)); 7105 7106 struct WindowsGuest { 7107 guest: Guest, 7108 auth: PasswordAuth, 7109 } 7110 7111 trait FsType { 7112 const FS_FAT: u8; 7113 const FS_NTFS: u8; 7114 } 7115 impl FsType for WindowsGuest { 7116 const FS_FAT: u8 = 0; 7117 const FS_NTFS: u8 = 1; 7118 } 7119 7120 impl WindowsGuest { 7121 fn new() -> Self { 7122 let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string()); 7123 let guest = Guest::new(Box::new(disk)); 7124 let auth = PasswordAuth { 7125 username: String::from("administrator"), 7126 password: String::from("Admin123"), 7127 }; 7128 7129 WindowsGuest { guest, auth } 7130 } 7131 7132 fn guest(&self) -> &Guest { 7133 &self.guest 7134 } 7135 7136 fn ssh_cmd(&self, cmd: &str) -> String { 7137 ssh_command_ip_with_auth( 7138 cmd, 7139 &self.auth, 7140 &self.guest.network.guest_ip, 7141 DEFAULT_SSH_RETRIES, 7142 DEFAULT_SSH_TIMEOUT, 7143 ) 7144 .unwrap() 7145 } 7146 7147 fn cpu_count(&self) -> u8 { 7148 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"") 7149 .trim() 7150 .parse::<u8>() 7151 .unwrap_or(0) 7152 } 7153 7154 fn ram_size(&self) -> usize { 7155 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"") 7156 .trim() 7157 .parse::<usize>() 7158 .unwrap_or(0) 7159 } 7160 7161 fn netdev_count(&self) -> u8 { 7162 self.ssh_cmd("powershell -Command 
\"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"") 7163 .trim() 7164 .parse::<u8>() 7165 .unwrap_or(0) 7166 } 7167 7168 fn disk_count(&self) -> u8 { 7169 self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"") 7170 .trim() 7171 .parse::<u8>() 7172 .unwrap_or(0) 7173 } 7174 7175 fn reboot(&self) { 7176 let _ = self.ssh_cmd("shutdown /r /t 0"); 7177 } 7178 7179 fn shutdown(&self) { 7180 let _ = self.ssh_cmd("shutdown /s /t 0"); 7181 } 7182 7183 fn run_dnsmasq(&self) -> std::process::Child { 7184 let listen_address = format!("--listen-address={}", self.guest.network.host_ip); 7185 let dhcp_host = format!( 7186 "--dhcp-host={},{}", 7187 self.guest.network.guest_mac, self.guest.network.guest_ip 7188 ); 7189 let dhcp_range = format!( 7190 "--dhcp-range=eth,{},{}", 7191 self.guest.network.guest_ip, self.guest.network.guest_ip 7192 ); 7193 7194 Command::new("dnsmasq") 7195 .arg("--no-daemon") 7196 .arg("--log-queries") 7197 .arg(listen_address.as_str()) 7198 .arg("--except-interface=lo") 7199 .arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet. 7200 .arg("--conf-file=/dev/null") 7201 .arg(dhcp_host.as_str()) 7202 .arg(dhcp_range.as_str()) 7203 .spawn() 7204 .unwrap() 7205 } 7206 7207 // TODO Cleanup image file explicitly after test, if there's some space issues. 7208 fn disk_new(&self, fs: u8, sz: usize) -> String { 7209 let mut guard = NEXT_DISK_ID.lock().unwrap(); 7210 let id = *guard; 7211 *guard = id + 1; 7212 7213 let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw")); 7214 let _ = fs::remove_file(&img); 7215 7216 // Create an image file 7217 let out = Command::new("qemu-img") 7218 .args([ 7219 "create", 7220 "-f", 7221 "raw", 7222 img.to_str().unwrap(), 7223 format!("{sz}m").as_str(), 7224 ]) 7225 .output() 7226 .expect("qemu-img command failed") 7227 .stdout; 7228 println!("{out:?}"); 7229 7230 // Associate image to a loop device 7231 let out = Command::new("losetup") 7232 .args(["--show", "-f", img.to_str().unwrap()]) 7233 .output() 7234 .expect("failed to create loop device") 7235 .stdout; 7236 let _tmp = String::from_utf8_lossy(&out); 7237 let loop_dev = _tmp.trim(); 7238 println!("{out:?}"); 7239 7240 // Create a partition table 7241 // echo 'type=7' | sudo sfdisk "${LOOP}" 7242 let mut child = Command::new("sfdisk") 7243 .args([loop_dev]) 7244 .stdin(Stdio::piped()) 7245 .spawn() 7246 .unwrap(); 7247 let stdin = child.stdin.as_mut().expect("failed to open stdin"); 7248 stdin 7249 .write_all("type=7".as_bytes()) 7250 .expect("failed to write stdin"); 7251 let out = child.wait_with_output().expect("sfdisk failed").stdout; 7252 println!("{out:?}"); 7253 7254 // Disengage the loop device 7255 let out = Command::new("losetup") 7256 .args(["-d", loop_dev]) 7257 .output() 7258 .expect("loop device not found") 7259 .stdout; 7260 println!("{out:?}"); 7261 7262 // Re-associate loop device pointing to the partition only 7263 let out = Command::new("losetup") 7264 .args([ 7265 "--show", 7266 "--offset", 7267 (512 * 2048).to_string().as_str(), 7268 "-f", 7269 img.to_str().unwrap(), 7270 ]) 7271 .output() 7272 .expect("failed to create loop device") 7273 .stdout; 7274 let _tmp = String::from_utf8_lossy(&out); 7275 let loop_dev = _tmp.trim(); 7276 println!("{out:?}"); 7277 7278 // Create filesystem. 
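// (The loop device was re-attached above at byte offset 512 * 2048 = 1 MiB,
// i.e. at the first partition created by sfdisk with its default 1 MiB
// alignment, so the filesystem is created on the partition rather than on the
// raw image.)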
7279 let fs_cmd = match fs { 7280 WindowsGuest::FS_FAT => "mkfs.msdos", 7281 WindowsGuest::FS_NTFS => "mkfs.ntfs", 7282 _ => panic!("Unknown filesystem type '{fs}'"), 7283 }; 7284 let out = Command::new(fs_cmd) 7285 .args([&loop_dev]) 7286 .output() 7287 .unwrap_or_else(|_| panic!("{fs_cmd} failed")) 7288 .stdout; 7289 println!("{out:?}"); 7290 7291 // Disengage the loop device 7292 let out = Command::new("losetup") 7293 .args(["-d", loop_dev]) 7294 .output() 7295 .unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found")) 7296 .stdout; 7297 println!("{out:?}"); 7298 7299 img.to_str().unwrap().to_string() 7300 } 7301 7302 fn disks_set_rw(&self) { 7303 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\""); 7304 } 7305 7306 fn disks_online(&self) { 7307 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\""); 7308 } 7309 7310 fn disk_file_put(&self, fname: &str, data: &str) { 7311 let _ = self.ssh_cmd(&format!( 7312 "powershell -Command \"'{data}' | Set-Content -Path {fname}\"" 7313 )); 7314 } 7315 7316 fn disk_file_read(&self, fname: &str) -> String { 7317 self.ssh_cmd(&format!( 7318 "powershell -Command \"Get-Content -Path {fname}\"" 7319 )) 7320 } 7321 7322 fn wait_for_boot(&self) -> bool { 7323 let cmd = "dir /b c:\\ | find \"Windows\""; 7324 let tmo_max = 180; 7325 // The timeout increase by n*1+n*2+n*3+..., therefore the initial 7326 // interval must be small. 7327 let tmo_int = 2; 7328 let out = ssh_command_ip_with_auth( 7329 cmd, 7330 &self.auth, 7331 &self.guest.network.guest_ip, 7332 { 7333 let mut ret = 1; 7334 let mut tmo_acc = 0; 7335 loop { 7336 tmo_acc += tmo_int * ret; 7337 if tmo_acc >= tmo_max { 7338 break; 7339 } 7340 ret += 1; 7341 } 7342 ret 7343 }, 7344 tmo_int, 7345 ) 7346 .unwrap(); 7347 7348 if "Windows" == out.trim() { 7349 return true; 7350 } 7351 7352 false 7353 } 7354 } 7355 7356 fn vcpu_threads_count(pid: u32) -> u8 { 7357 // ps -T -p 12345 | grep vcpu | wc -l 7358 let out = Command::new("ps") 7359 .args(["-T", "-p", format!("{pid}").as_str()]) 7360 .output() 7361 .expect("ps command failed") 7362 .stdout; 7363 return String::from_utf8_lossy(&out).matches("vcpu").count() as u8; 7364 } 7365 7366 fn netdev_ctrl_threads_count(pid: u32) -> u8 { 7367 // ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l 7368 let out = Command::new("ps") 7369 .args(["-T", "-p", format!("{pid}").as_str()]) 7370 .output() 7371 .expect("ps command failed") 7372 .stdout; 7373 let mut n = 0; 7374 String::from_utf8_lossy(&out) 7375 .split_whitespace() 7376 .for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl 7377 n 7378 } 7379 7380 fn disk_ctrl_threads_count(pid: u32) -> u8 { 7381 // ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l 7382 let out = Command::new("ps") 7383 .args(["-T", "-p", format!("{pid}").as_str()]) 7384 .output() 7385 .expect("ps command failed") 7386 .stdout; 7387 let mut n = 0; 7388 String::from_utf8_lossy(&out) 7389 .split_whitespace() 7390 .for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // _disk0_q0, don't care about multiple queues as they're related to the same hdd 7391 n 7392 } 7393 7394 #[test] 7395 fn test_windows_guest() { 7396 let windows_guest = WindowsGuest::new(); 7397 7398 let mut child = GuestCommand::new(windows_guest.guest()) 7399 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7400 .args(["--memory", "size=4G"]) 7401 .args(["--kernel", edk2_path().to_str().unwrap()]) 
7402 .args(["--serial", "tty"]) 7403 .args(["--console", "off"]) 7404 .default_disks() 7405 .default_net() 7406 .capture_output() 7407 .spawn() 7408 .unwrap(); 7409 7410 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7411 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7412 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7413 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7414 7415 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7416 7417 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7418 7419 let r = std::panic::catch_unwind(|| { 7420 // Wait to make sure Windows boots up 7421 assert!(windows_guest.wait_for_boot()); 7422 7423 windows_guest.shutdown(); 7424 }); 7425 7426 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7427 let _ = child.kill(); 7428 let output = child.wait_with_output().unwrap(); 7429 7430 let _ = child_dnsmasq.kill(); 7431 let _ = child_dnsmasq.wait(); 7432 7433 handle_child_output(r, &output); 7434 } 7435 7436 #[test] 7437 fn test_windows_guest_multiple_queues() { 7438 let windows_guest = WindowsGuest::new(); 7439 7440 let mut ovmf_path = dirs::home_dir().unwrap(); 7441 ovmf_path.push("workloads"); 7442 ovmf_path.push(OVMF_NAME); 7443 7444 let mut child = GuestCommand::new(windows_guest.guest()) 7445 .args(["--cpus", "boot=4,kvm_hyperv=on"]) 7446 .args(["--memory", "size=4G"]) 7447 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7448 .args(["--serial", "tty"]) 7449 .args(["--console", "off"]) 7450 .args([ 7451 "--disk", 7452 format!( 7453 "path={},num_queues=4", 7454 windows_guest 7455 .guest() 7456 .disk_config 7457 .disk(DiskType::OperatingSystem) 7458 .unwrap() 7459 ) 7460 .as_str(), 7461 ]) 7462 .args([ 7463 "--net", 7464 format!( 7465 "tap=,mac={},ip={},mask=255.255.255.0,num_queues=8", 7466 windows_guest.guest().network.guest_mac, 7467 windows_guest.guest().network.host_ip 7468 ) 7469 .as_str(), 7470 ]) 7471 .capture_output() 7472 .spawn() 7473 .unwrap(); 7474 7475 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7476 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7477 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7478 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7479 7480 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7481 7482 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7483 7484 let r = std::panic::catch_unwind(|| { 7485 // Wait to make sure Windows boots up 7486 assert!(windows_guest.wait_for_boot()); 7487 7488 windows_guest.shutdown(); 7489 }); 7490 7491 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7492 let _ = child.kill(); 7493 let output = child.wait_with_output().unwrap(); 7494 7495 let _ = child_dnsmasq.kill(); 7496 let _ = child_dnsmasq.wait(); 7497 7498 handle_child_output(r, &output); 7499 } 7500 7501 #[test] 7502 #[cfg(not(feature = "mshv"))] 7503 #[ignore = "See #4327"] 7504 fn test_windows_guest_snapshot_restore() { 7505 let windows_guest = WindowsGuest::new(); 7506 7507 let mut ovmf_path = dirs::home_dir().unwrap(); 7508 ovmf_path.push("workloads"); 7509 ovmf_path.push(OVMF_NAME); 7510 7511 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7512 let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir)); 7513 7514 let mut child = GuestCommand::new(windows_guest.guest()) 7515 .args(["--api-socket", &api_socket_source]) 7516 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7517 .args(["--memory", "size=4G"]) 7518 .args(["--kernel", 
ovmf_path.to_str().unwrap()]) 7519 .args(["--serial", "tty"]) 7520 .args(["--console", "off"]) 7521 .default_disks() 7522 .default_net() 7523 .capture_output() 7524 .spawn() 7525 .unwrap(); 7526 7527 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7528 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7529 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7530 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7531 7532 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7533 7534 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7535 7536 // Wait to make sure Windows boots up 7537 assert!(windows_guest.wait_for_boot()); 7538 7539 let snapshot_dir = temp_snapshot_dir_path(&tmp_dir); 7540 7541 // Pause the VM 7542 assert!(remote_command(&api_socket_source, "pause", None)); 7543 7544 // Take a snapshot from the VM 7545 assert!(remote_command( 7546 &api_socket_source, 7547 "snapshot", 7548 Some(format!("file://{snapshot_dir}").as_str()), 7549 )); 7550 7551 // Wait to make sure the snapshot is completed 7552 thread::sleep(std::time::Duration::new(30, 0)); 7553 7554 let _ = child.kill(); 7555 child.wait().unwrap(); 7556 7557 let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir)); 7558 7559 // Restore the VM from the snapshot 7560 let mut child = GuestCommand::new(windows_guest.guest()) 7561 .args(["--api-socket", &api_socket_restored]) 7562 .args([ 7563 "--restore", 7564 format!("source_url=file://{snapshot_dir}").as_str(), 7565 ]) 7566 .capture_output() 7567 .spawn() 7568 .unwrap(); 7569 7570 // Wait for the VM to be restored 7571 thread::sleep(std::time::Duration::new(20, 0)); 7572 7573 let r = std::panic::catch_unwind(|| { 7574 // Resume the VM 7575 assert!(remote_command(&api_socket_restored, "resume", None)); 7576 7577 windows_guest.shutdown(); 7578 }); 7579 7580 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7581 let _ = child.kill(); 7582 let output = child.wait_with_output().unwrap(); 7583 7584 let _ = child_dnsmasq.kill(); 7585 let _ = child_dnsmasq.wait(); 7586 7587 handle_child_output(r, &output); 7588 } 7589 7590 #[test] 7591 #[cfg(not(feature = "mshv"))] 7592 #[cfg(not(target_arch = "aarch64"))] 7593 fn test_windows_guest_cpu_hotplug() { 7594 let windows_guest = WindowsGuest::new(); 7595 7596 let mut ovmf_path = dirs::home_dir().unwrap(); 7597 ovmf_path.push("workloads"); 7598 ovmf_path.push(OVMF_NAME); 7599 7600 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7601 let api_socket = temp_api_path(&tmp_dir); 7602 7603 let mut child = GuestCommand::new(windows_guest.guest()) 7604 .args(["--api-socket", &api_socket]) 7605 .args(["--cpus", "boot=2,max=8,kvm_hyperv=on"]) 7606 .args(["--memory", "size=4G"]) 7607 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7608 .args(["--serial", "tty"]) 7609 .args(["--console", "off"]) 7610 .default_disks() 7611 .default_net() 7612 .capture_output() 7613 .spawn() 7614 .unwrap(); 7615 7616 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7617 7618 let r = std::panic::catch_unwind(|| { 7619 // Wait to make sure Windows boots up 7620 assert!(windows_guest.wait_for_boot()); 7621 7622 let vcpu_num = 2; 7623 // Check the initial number of CPUs the guest sees 7624 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7625 // Check the initial number of vcpu threads in the CH process 7626 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7627 7628 let vcpu_num = 6; 7629 // Hotplug some CPUs 7630 resize_command(&api_socket, Some(vcpu_num), None, None, 
None); 7631 // Wait to make sure CPUs are added 7632 thread::sleep(std::time::Duration::new(10, 0)); 7633 // Check the guest sees the correct number 7634 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7635 // Check the CH process has the correct number of vcpu threads 7636 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7637 7638 let vcpu_num = 4; 7639 // Remove some CPUs. Note that Windows doesn't support hot-remove. 7640 resize_command(&api_socket, Some(vcpu_num), None, None, None); 7641 // Wait to make sure the resize request has been processed 7642 thread::sleep(std::time::Duration::new(10, 0)); 7643 // Reboot to let Windows catch up 7644 windows_guest.reboot(); 7645 // Wait to make sure Windows completely rebooted 7646 thread::sleep(std::time::Duration::new(60, 0)); 7647 // Check the guest sees the correct number 7648 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7649 // Check the CH process has the correct number of vcpu threads 7650 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7651 7652 windows_guest.shutdown(); 7653 }); 7654 7655 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7656 let _ = child.kill(); 7657 let output = child.wait_with_output().unwrap(); 7658 7659 let _ = child_dnsmasq.kill(); 7660 let _ = child_dnsmasq.wait(); 7661 7662 handle_child_output(r, &output); 7663 } 7664 7665 #[test] 7666 #[cfg(not(feature = "mshv"))] 7667 #[cfg(not(target_arch = "aarch64"))] 7668 fn test_windows_guest_ram_hotplug() { 7669 let windows_guest = WindowsGuest::new(); 7670 7671 let mut ovmf_path = dirs::home_dir().unwrap(); 7672 ovmf_path.push("workloads"); 7673 ovmf_path.push(OVMF_NAME); 7674 7675 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7676 let api_socket = temp_api_path(&tmp_dir); 7677 7678 let mut child = GuestCommand::new(windows_guest.guest()) 7679 .args(["--api-socket", &api_socket]) 7680 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7681 .args(["--memory", "size=2G,hotplug_size=5G"]) 7682 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7683 .args(["--serial", "tty"]) 7684 .args(["--console", "off"]) 7685 .default_disks() 7686 .default_net() 7687 .capture_output() 7688 .spawn() 7689 .unwrap(); 7690 7691 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7692 7693 let r = std::panic::catch_unwind(|| { 7694 // Wait to make sure Windows boots up 7695 assert!(windows_guest.wait_for_boot()); 7696 7697 let ram_size = 2 * 1024 * 1024 * 1024; 7698 // Check the initial amount of RAM the guest sees 7699 let current_ram_size = windows_guest.ram_size(); 7700 // This size seems to be reserved by the system and thus the 7701 // reported amount differs by this constant value. 7702 let reserved_ram_size = ram_size - current_ram_size; 7703 // Verify that no more than a constant 4 MiB is wasted by 7704 // the reserved RAM. 7705 assert!(reserved_ram_size < 4 * 1024 * 1024); 7706 7707 let ram_size = 4 * 1024 * 1024 * 1024; 7708 // Hotplug some RAM 7709 resize_command(&api_socket, None, Some(ram_size), None, None); 7710 // Wait to make sure RAM has been added 7711 thread::sleep(std::time::Duration::new(10, 0)); 7712 // Check the guest sees the correct amount 7713 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 7714 7715 let ram_size = 3 * 1024 * 1024 * 1024; 7716 // Unplug some RAM. Note that hot-remove most likely won't work.
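// The resize below only lowers the requested size; Windows is not expected to
// give the memory back until the reboot that follows, which is why the
// ram_size() assertion only comes after the reboot.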
7717 resize_command(&api_socket, None, Some(ram_size), None, None); 7718 // Wait to make sure RAM has been added 7719 thread::sleep(std::time::Duration::new(10, 0)); 7720 // Reboot to let Windows catch up 7721 windows_guest.reboot(); 7722 // Wait to make sure guest completely rebooted 7723 thread::sleep(std::time::Duration::new(60, 0)); 7724 // Check the guest sees the correct number 7725 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 7726 7727 windows_guest.shutdown(); 7728 }); 7729 7730 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7731 let _ = child.kill(); 7732 let output = child.wait_with_output().unwrap(); 7733 7734 let _ = child_dnsmasq.kill(); 7735 let _ = child_dnsmasq.wait(); 7736 7737 handle_child_output(r, &output); 7738 } 7739 7740 #[test] 7741 #[cfg(not(feature = "mshv"))] 7742 fn test_windows_guest_netdev_hotplug() { 7743 let windows_guest = WindowsGuest::new(); 7744 7745 let mut ovmf_path = dirs::home_dir().unwrap(); 7746 ovmf_path.push("workloads"); 7747 ovmf_path.push(OVMF_NAME); 7748 7749 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7750 let api_socket = temp_api_path(&tmp_dir); 7751 7752 let mut child = GuestCommand::new(windows_guest.guest()) 7753 .args(["--api-socket", &api_socket]) 7754 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7755 .args(["--memory", "size=4G"]) 7756 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7757 .args(["--serial", "tty"]) 7758 .args(["--console", "off"]) 7759 .default_disks() 7760 .default_net() 7761 .capture_output() 7762 .spawn() 7763 .unwrap(); 7764 7765 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7766 7767 let r = std::panic::catch_unwind(|| { 7768 // Wait to make sure Windows boots up 7769 assert!(windows_guest.wait_for_boot()); 7770 7771 // Initially present network device 7772 let netdev_num = 1; 7773 assert_eq!(windows_guest.netdev_count(), netdev_num); 7774 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 7775 7776 // Hotplug network device 7777 let (cmd_success, cmd_output) = remote_command_w_output( 7778 &api_socket, 7779 "add-net", 7780 Some(windows_guest.guest().default_net_string().as_str()), 7781 ); 7782 assert!(cmd_success); 7783 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\"")); 7784 thread::sleep(std::time::Duration::new(5, 0)); 7785 // Verify the device is on the system 7786 let netdev_num = 2; 7787 assert_eq!(windows_guest.netdev_count(), netdev_num); 7788 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 7789 7790 // Remove network device 7791 let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2")); 7792 assert!(cmd_success); 7793 thread::sleep(std::time::Duration::new(5, 0)); 7794 // Verify the device has been removed 7795 let netdev_num = 1; 7796 assert_eq!(windows_guest.netdev_count(), netdev_num); 7797 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 7798 7799 windows_guest.shutdown(); 7800 }); 7801 7802 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7803 let _ = child.kill(); 7804 let output = child.wait_with_output().unwrap(); 7805 7806 let _ = child_dnsmasq.kill(); 7807 let _ = child_dnsmasq.wait(); 7808 7809 handle_child_output(r, &output); 7810 } 7811 7812 #[test] 7813 #[cfg(not(feature = "mshv"))] 7814 #[cfg(not(target_arch = "aarch64"))] 7815 fn test_windows_guest_disk_hotplug() { 7816 let windows_guest = WindowsGuest::new(); 7817 7818 let mut ovmf_path = dirs::home_dir().unwrap(); 7819 ovmf_path.push("workloads"); 7820 
ovmf_path.push(OVMF_NAME); 7821 7822 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7823 let api_socket = temp_api_path(&tmp_dir); 7824 7825 let mut child = GuestCommand::new(windows_guest.guest()) 7826 .args(["--api-socket", &api_socket]) 7827 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7828 .args(["--memory", "size=4G"]) 7829 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7830 .args(["--serial", "tty"]) 7831 .args(["--console", "off"]) 7832 .default_disks() 7833 .default_net() 7834 .capture_output() 7835 .spawn() 7836 .unwrap(); 7837 7838 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7839 7840 let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100); 7841 7842 let r = std::panic::catch_unwind(|| { 7843 // Wait to make sure Windows boots up 7844 assert!(windows_guest.wait_for_boot()); 7845 7846 // Initially present disk device 7847 let disk_num = 1; 7848 assert_eq!(windows_guest.disk_count(), disk_num); 7849 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7850 7851 // Hotplug disk device 7852 let (cmd_success, cmd_output) = remote_command_w_output( 7853 &api_socket, 7854 "add-disk", 7855 Some(format!("path={disk},readonly=off").as_str()), 7856 ); 7857 assert!(cmd_success); 7858 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\"")); 7859 thread::sleep(std::time::Duration::new(5, 0)); 7860 // Online disk device 7861 windows_guest.disks_set_rw(); 7862 windows_guest.disks_online(); 7863 // Verify the device is on the system 7864 let disk_num = 2; 7865 assert_eq!(windows_guest.disk_count(), disk_num); 7866 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7867 7868 let data = "hello"; 7869 let fname = "d:\\world"; 7870 windows_guest.disk_file_put(fname, data); 7871 7872 // Unmount disk device 7873 let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2")); 7874 assert!(cmd_success); 7875 thread::sleep(std::time::Duration::new(5, 0)); 7876 // Verify the device has been removed 7877 let disk_num = 1; 7878 assert_eq!(windows_guest.disk_count(), disk_num); 7879 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7880 7881 // Remount and check the file exists with the expected contents 7882 let (cmd_success, _cmd_output) = remote_command_w_output( 7883 &api_socket, 7884 "add-disk", 7885 Some(format!("path={disk},readonly=off").as_str()), 7886 ); 7887 assert!(cmd_success); 7888 thread::sleep(std::time::Duration::new(5, 0)); 7889 let out = windows_guest.disk_file_read(fname); 7890 assert_eq!(data, out.trim()); 7891 7892 // Intentionally no unmount, it'll happen at shutdown. 
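            // The remove-device/add-disk cycle above already proved that data
            // written to the hotplugged disk survives a detach and re-attach,
            // so skipping the unmount does not lose any coverage.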
7893 7894 windows_guest.shutdown(); 7895 }); 7896 7897 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7898 let _ = child.kill(); 7899 let output = child.wait_with_output().unwrap(); 7900 7901 let _ = child_dnsmasq.kill(); 7902 let _ = child_dnsmasq.wait(); 7903 7904 handle_child_output(r, &output); 7905 } 7906 7907 #[test] 7908 #[cfg(not(feature = "mshv"))] 7909 #[cfg(not(target_arch = "aarch64"))] 7910 fn test_windows_guest_disk_hotplug_multi() { 7911 let windows_guest = WindowsGuest::new(); 7912 7913 let mut ovmf_path = dirs::home_dir().unwrap(); 7914 ovmf_path.push("workloads"); 7915 ovmf_path.push(OVMF_NAME); 7916 7917 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7918 let api_socket = temp_api_path(&tmp_dir); 7919 7920 let mut child = GuestCommand::new(windows_guest.guest()) 7921 .args(["--api-socket", &api_socket]) 7922 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7923 .args(["--memory", "size=2G"]) 7924 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7925 .args(["--serial", "tty"]) 7926 .args(["--console", "off"]) 7927 .default_disks() 7928 .default_net() 7929 .capture_output() 7930 .spawn() 7931 .unwrap(); 7932 7933 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7934 7935 // Predefined data to used at various test stages 7936 let disk_test_data: [[String; 4]; 2] = [ 7937 [ 7938 "_disk2".to_string(), 7939 windows_guest.disk_new(WindowsGuest::FS_FAT, 123), 7940 "d:\\world".to_string(), 7941 "hello".to_string(), 7942 ], 7943 [ 7944 "_disk3".to_string(), 7945 windows_guest.disk_new(WindowsGuest::FS_NTFS, 333), 7946 "e:\\hello".to_string(), 7947 "world".to_string(), 7948 ], 7949 ]; 7950 7951 let r = std::panic::catch_unwind(|| { 7952 // Wait to make sure Windows boots up 7953 assert!(windows_guest.wait_for_boot()); 7954 7955 // Initially present disk device 7956 let disk_num = 1; 7957 assert_eq!(windows_guest.disk_count(), disk_num); 7958 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7959 7960 for it in &disk_test_data { 7961 let disk_id = it[0].as_str(); 7962 let disk = it[1].as_str(); 7963 // Hotplug disk device 7964 let (cmd_success, cmd_output) = remote_command_w_output( 7965 &api_socket, 7966 "add-disk", 7967 Some(format!("path={disk},readonly=off").as_str()), 7968 ); 7969 assert!(cmd_success); 7970 assert!(String::from_utf8_lossy(&cmd_output) 7971 .contains(format!("\"id\":\"{disk_id}\"").as_str())); 7972 thread::sleep(std::time::Duration::new(5, 0)); 7973 // Online disk devices 7974 windows_guest.disks_set_rw(); 7975 windows_guest.disks_online(); 7976 } 7977 // Verify the devices are on the system 7978 let disk_num = (disk_test_data.len() + 1) as u8; 7979 assert_eq!(windows_guest.disk_count(), disk_num); 7980 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 7981 7982 // Put test data 7983 for it in &disk_test_data { 7984 let fname = it[2].as_str(); 7985 let data = it[3].as_str(); 7986 windows_guest.disk_file_put(fname, data); 7987 } 7988 7989 // Unmount disk devices 7990 for it in &disk_test_data { 7991 let disk_id = it[0].as_str(); 7992 let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id)); 7993 assert!(cmd_success); 7994 thread::sleep(std::time::Duration::new(5, 0)); 7995 } 7996 7997 // Verify the devices have been removed 7998 let disk_num = 1; 7999 assert_eq!(windows_guest.disk_count(), disk_num); 8000 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8001 8002 // Remount 8003 for it in &disk_test_data { 8004 let disk = it[1].as_str(); 8005 let (cmd_success, _cmd_output) = 
remote_command_w_output( 8006 &api_socket, 8007 "add-disk", 8008 Some(format!("path={disk},readonly=off").as_str()), 8009 ); 8010 assert!(cmd_success); 8011 thread::sleep(std::time::Duration::new(5, 0)); 8012 } 8013 8014 // Check the files exists with the expected contents 8015 for it in &disk_test_data { 8016 let fname = it[2].as_str(); 8017 let data = it[3].as_str(); 8018 let out = windows_guest.disk_file_read(fname); 8019 assert_eq!(data, out.trim()); 8020 } 8021 8022 // Intentionally no unmount, it'll happen at shutdown. 8023 8024 windows_guest.shutdown(); 8025 }); 8026 8027 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8028 let _ = child.kill(); 8029 let output = child.wait_with_output().unwrap(); 8030 8031 let _ = child_dnsmasq.kill(); 8032 let _ = child_dnsmasq.wait(); 8033 8034 handle_child_output(r, &output); 8035 } 8036 8037 #[test] 8038 #[cfg(not(feature = "mshv"))] 8039 #[cfg(not(target_arch = "aarch64"))] 8040 fn test_windows_guest_netdev_multi() { 8041 let windows_guest = WindowsGuest::new(); 8042 8043 let mut ovmf_path = dirs::home_dir().unwrap(); 8044 ovmf_path.push("workloads"); 8045 ovmf_path.push(OVMF_NAME); 8046 8047 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8048 let api_socket = temp_api_path(&tmp_dir); 8049 8050 let mut child = GuestCommand::new(windows_guest.guest()) 8051 .args(["--api-socket", &api_socket]) 8052 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8053 .args(["--memory", "size=4G"]) 8054 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8055 .args(["--serial", "tty"]) 8056 .args(["--console", "off"]) 8057 .default_disks() 8058 // The multi net dev config is borrowed from test_multiple_network_interfaces 8059 .args([ 8060 "--net", 8061 windows_guest.guest().default_net_string().as_str(), 8062 "--net", 8063 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 8064 "--net", 8065 "tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 8066 ]) 8067 .capture_output() 8068 .spawn() 8069 .unwrap(); 8070 8071 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8072 8073 let r = std::panic::catch_unwind(|| { 8074 // Wait to make sure Windows boots up 8075 assert!(windows_guest.wait_for_boot()); 8076 8077 let netdev_num = 3; 8078 assert_eq!(windows_guest.netdev_count(), netdev_num); 8079 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8080 8081 let tap_count = exec_host_command_output("ip link | grep -c mytap42"); 8082 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 8083 8084 windows_guest.shutdown(); 8085 }); 8086 8087 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8088 let _ = child.kill(); 8089 let output = child.wait_with_output().unwrap(); 8090 8091 let _ = child_dnsmasq.kill(); 8092 let _ = child_dnsmasq.wait(); 8093 8094 handle_child_output(r, &output); 8095 } 8096 } 8097 8098 #[cfg(target_arch = "x86_64")] 8099 mod sgx { 8100 use crate::*; 8101 8102 #[test] 8103 fn test_sgx() { 8104 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 8105 let jammy = UbuntuDiskConfig::new(jammy_image); 8106 let guest = Guest::new(Box::new(jammy)); 8107 8108 let mut child = GuestCommand::new(&guest) 8109 .args(["--cpus", "boot=1"]) 8110 .args(["--memory", "size=512M"]) 8111 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8112 .default_disks() 8113 .default_net() 8114 .args(["--sgx-epc", "id=epc0,size=64M"]) 8115 .capture_output() 8116 .spawn() 8117 .unwrap(); 8118 8119 let r = std::panic::catch_unwind(|| { 8120 guest.wait_vm_boot(None).unwrap(); 8121 
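            // For reference: cpuid leaf 0x12, sub-leaf 2 enumerates the EPC
            // section, and 0x0000000004000000 bytes is exactly 64 MiB, matching
            // the `--sgx-epc id=epc0,size=64M` option used above.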
            // Check if SGX is correctly detected in the guest.
            guest.check_sgx_support().unwrap();

            // Validate the SGX EPC section is 64MiB.
            assert_eq!(
                guest
                    .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2")
                    .unwrap()
                    .trim(),
                "0x0000000004000000"
            );
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }
}

#[cfg(target_arch = "x86_64")]
mod vfio {
    use crate::*;

    #[test]
    // The VFIO integration test starts a cloud-hypervisor guest with 3 TAP
    // backed networking interfaces, bound through a simple bridge on the host.
    // So if the nested cloud-hypervisor succeeds in getting a directly
    // assigned interface from its cloud-hypervisor host, we should be able to
    // ssh into it, and verify that it's running with the right kernel command
    // line (we tag the command line from cloud-hypervisor for that purpose).
    // The third device is added to validate that hotplug works correctly,
    // since it is added to the L2 VM through the hotplug mechanism.
    // Also, we pass through a virtio-blk device to the L2 VM to test the
    // 32-bit VFIO device support.
    fn test_vfio() {
        setup_vfio_network_interfaces();

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new_from_ip_range(Box::new(focal), "172.18", 0);

        let mut workload_path = dirs::home_dir().unwrap();
        workload_path.push("workloads");

        let kernel_path = direct_kernel_boot_path();

        let mut vfio_path = workload_path.clone();
        vfio_path.push("vfio");

        let mut cloud_init_vfio_base_path = vfio_path.clone();
        cloud_init_vfio_base_path.push("cloudinit.img");

        // We copy our cloudinit into the vfio mount point, for the nested
        // cloud-hypervisor guest to use.
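        // This directory is then packed into the ext4 `vfio.img` built just
        // below and exposed to the L1 guest as a virtio-blk disk, which is
        // presumably mounted at /mnt inside L1 (see the `/mnt/ch-remote`
        // invocations later in this test).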
8176 rate_limited_copy( 8177 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 8178 &cloud_init_vfio_base_path, 8179 ) 8180 .expect("copying of cloud-init disk failed"); 8181 8182 let mut vfio_disk_path = workload_path.clone(); 8183 vfio_disk_path.push("vfio.img"); 8184 8185 // Create the vfio disk image 8186 let output = Command::new("mkfs.ext4") 8187 .arg("-d") 8188 .arg(vfio_path.to_str().unwrap()) 8189 .arg(vfio_disk_path.to_str().unwrap()) 8190 .arg("2g") 8191 .output() 8192 .unwrap(); 8193 if !output.status.success() { 8194 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 8195 panic!("mkfs.ext4 command generated an error"); 8196 } 8197 8198 let mut blk_file_path = workload_path; 8199 blk_file_path.push("blk.img"); 8200 8201 let vfio_tap0 = "vfio-tap0"; 8202 let vfio_tap1 = "vfio-tap1"; 8203 let vfio_tap2 = "vfio-tap2"; 8204 let vfio_tap3 = "vfio-tap3"; 8205 8206 let mut child = GuestCommand::new(&guest) 8207 .args(["--cpus", "boot=4"]) 8208 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 8209 .args(["--kernel", kernel_path.to_str().unwrap()]) 8210 .args([ 8211 "--disk", 8212 format!( 8213 "path={}", 8214 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 8215 ) 8216 .as_str(), 8217 "--disk", 8218 format!( 8219 "path={}", 8220 guest.disk_config.disk(DiskType::CloudInit).unwrap() 8221 ) 8222 .as_str(), 8223 "--disk", 8224 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 8225 "--disk", 8226 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 8227 ]) 8228 .args([ 8229 "--cmdline", 8230 format!( 8231 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 8232 ) 8233 .as_str(), 8234 ]) 8235 .args([ 8236 "--net", 8237 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 8238 "--net", 8239 format!( 8240 "tap={},mac={},iommu=on", 8241 vfio_tap1, guest.network.l2_guest_mac1 8242 ) 8243 .as_str(), 8244 "--net", 8245 format!( 8246 "tap={},mac={},iommu=on", 8247 vfio_tap2, guest.network.l2_guest_mac2 8248 ) 8249 .as_str(), 8250 "--net", 8251 format!( 8252 "tap={},mac={},iommu=on", 8253 vfio_tap3, guest.network.l2_guest_mac3 8254 ) 8255 .as_str(), 8256 ]) 8257 .capture_output() 8258 .spawn() 8259 .unwrap(); 8260 8261 thread::sleep(std::time::Duration::new(30, 0)); 8262 8263 let r = std::panic::catch_unwind(|| { 8264 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 8265 thread::sleep(std::time::Duration::new(120, 0)); 8266 8267 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 8268 // added to its kernel command line. 8269 // Let's ssh into it and verify that it's there. If it is it means 8270 // we're in the right guest (The L2 one) because the QEMU L1 guest 8271 // does not have this command line tag. 8272 assert_eq!( 8273 guest 8274 .ssh_command_l2_1("grep -c VFIOTAG /proc/cmdline") 8275 .unwrap() 8276 .trim() 8277 .parse::<u32>() 8278 .unwrap_or_default(), 8279 1 8280 ); 8281 8282 // Let's also verify from the second virtio-net device passed to 8283 // the L2 VM. 8284 assert_eq!( 8285 guest 8286 .ssh_command_l2_2("grep -c VFIOTAG /proc/cmdline") 8287 .unwrap() 8288 .trim() 8289 .parse::<u32>() 8290 .unwrap_or_default(), 8291 1 8292 ); 8293 8294 // Check the amount of PCI devices appearing in L2 VM. 
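            // The expected count of 8 covers the full set of PCI devices the
            // nested (L2) guest is booted with; the same check is repeated
            // below after the VFIO hotplug (expecting 9) and after the unplug
            // (back to 8).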
8295 assert_eq!( 8296 guest 8297 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8298 .unwrap() 8299 .trim() 8300 .parse::<u32>() 8301 .unwrap_or_default(), 8302 8, 8303 ); 8304 8305 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 8306 assert_eq!( 8307 guest 8308 .ssh_command_l2_1("lsblk | grep vdc | grep -c 16M") 8309 .unwrap() 8310 .trim() 8311 .parse::<u32>() 8312 .unwrap_or_default(), 8313 1 8314 ); 8315 8316 // Hotplug an extra virtio-net device through L2 VM. 8317 guest 8318 .ssh_command_l1( 8319 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 8320 ) 8321 .unwrap(); 8322 guest 8323 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 8324 .unwrap(); 8325 let vfio_hotplug_output = guest 8326 .ssh_command_l1( 8327 "sudo /mnt/ch-remote \ 8328 --api-socket /tmp/ch_api.sock \ 8329 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 8330 ) 8331 .unwrap(); 8332 assert!(vfio_hotplug_output.contains("{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}")); 8333 8334 thread::sleep(std::time::Duration::new(10, 0)); 8335 8336 // Let's also verify from the third virtio-net device passed to 8337 // the L2 VM. This third device has been hotplugged through the L2 8338 // VM, so this is our way to validate hotplug works for VFIO PCI. 8339 assert_eq!( 8340 guest 8341 .ssh_command_l2_3("grep -c VFIOTAG /proc/cmdline") 8342 .unwrap() 8343 .trim() 8344 .parse::<u32>() 8345 .unwrap_or_default(), 8346 1 8347 ); 8348 8349 // Check the amount of PCI devices appearing in L2 VM. 8350 // There should be one more device than before, raising the count 8351 // up to 9 PCI devices. 8352 assert_eq!( 8353 guest 8354 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8355 .unwrap() 8356 .trim() 8357 .parse::<u32>() 8358 .unwrap_or_default(), 8359 9, 8360 ); 8361 8362 // Let's now verify that we can correctly remove the virtio-net 8363 // device through the "remove-device" command responsible for 8364 // unplugging VFIO devices. 8365 guest 8366 .ssh_command_l1( 8367 "sudo /mnt/ch-remote \ 8368 --api-socket /tmp/ch_api.sock \ 8369 remove-device vfio123", 8370 ) 8371 .unwrap(); 8372 thread::sleep(std::time::Duration::new(10, 0)); 8373 8374 // Check the amount of PCI devices appearing in L2 VM is back down 8375 // to 8 devices. 8376 assert_eq!( 8377 guest 8378 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8379 .unwrap() 8380 .trim() 8381 .parse::<u32>() 8382 .unwrap_or_default(), 8383 8, 8384 ); 8385 8386 // Perform memory hotplug in L2 and validate the memory is showing 8387 // up as expected. In order to check, we will use the virtio-net 8388 // device already passed through L2 as a VFIO device, this will 8389 // verify that VFIO devices are functional with memory hotplug. 
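            // The resize below grows the L2 guest to 1 GiB (1073741824 bytes)
            // through the L1 cloud-hypervisor's API socket; the 480_000 and
            // 960_000 figures are lower bounds expressed in KiB.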
8390 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 8391 guest 8392 .ssh_command_l2_1( 8393 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 8394 ) 8395 .unwrap(); 8396 guest 8397 .ssh_command_l1( 8398 "sudo /mnt/ch-remote \ 8399 --api-socket /tmp/ch_api.sock \ 8400 resize --memory 1073741824", 8401 ) 8402 .unwrap(); 8403 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 8404 }); 8405 8406 let _ = child.kill(); 8407 let output = child.wait_with_output().unwrap(); 8408 8409 cleanup_vfio_network_interfaces(); 8410 8411 handle_child_output(r, &output); 8412 } 8413 8414 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) { 8415 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8416 let guest = Guest::new(Box::new(jammy)); 8417 let api_socket = temp_api_path(&guest.tmp_dir); 8418 8419 let mut child = GuestCommand::new(&guest) 8420 .args(["--cpus", "boot=4"]) 8421 .args([ 8422 "--memory", 8423 format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(), 8424 ]) 8425 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8426 .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"]) 8427 .args(["--api-socket", &api_socket]) 8428 .default_disks() 8429 .default_net() 8430 .capture_output() 8431 .spawn() 8432 .unwrap(); 8433 8434 let r = std::panic::catch_unwind(|| { 8435 guest.wait_vm_boot(None).unwrap(); 8436 8437 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8438 8439 guest.enable_memory_hotplug(); 8440 8441 // Add RAM to the VM 8442 let desired_ram = 6 << 30; 8443 resize_command(&api_socket, None, Some(desired_ram), None, None); 8444 thread::sleep(std::time::Duration::new(30, 0)); 8445 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8446 8447 // Check the VFIO device works when RAM is increased to 6GiB 8448 guest.check_nvidia_gpu(); 8449 }); 8450 8451 let _ = child.kill(); 8452 let output = child.wait_with_output().unwrap(); 8453 8454 handle_child_output(r, &output); 8455 } 8456 8457 #[test] 8458 fn test_nvidia_card_memory_hotplug_acpi() { 8459 test_nvidia_card_memory_hotplug("acpi") 8460 } 8461 8462 #[test] 8463 fn test_nvidia_card_memory_hotplug_virtio_mem() { 8464 test_nvidia_card_memory_hotplug("virtio-mem") 8465 } 8466 8467 #[test] 8468 fn test_nvidia_card_pci_hotplug() { 8469 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8470 let guest = Guest::new(Box::new(jammy)); 8471 let api_socket = temp_api_path(&guest.tmp_dir); 8472 8473 let mut child = GuestCommand::new(&guest) 8474 .args(["--cpus", "boot=4"]) 8475 .args(["--memory", "size=4G"]) 8476 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8477 .args(["--api-socket", &api_socket]) 8478 .default_disks() 8479 .default_net() 8480 .capture_output() 8481 .spawn() 8482 .unwrap(); 8483 8484 let r = std::panic::catch_unwind(|| { 8485 guest.wait_vm_boot(None).unwrap(); 8486 8487 // Hotplug the card to the VM 8488 let (cmd_success, cmd_output) = remote_command_w_output( 8489 &api_socket, 8490 "add-device", 8491 Some("id=vfio0,path=/sys/bus/pci/devices/0000:31:00.0/"), 8492 ); 8493 assert!(cmd_success); 8494 assert!(String::from_utf8_lossy(&cmd_output) 8495 .contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}")); 8496 8497 thread::sleep(std::time::Duration::new(10, 0)); 8498 8499 // Check the VFIO device works after hotplug 8500 guest.check_nvidia_gpu(); 8501 }); 8502 8503 let _ = child.kill(); 8504 let output = 
            child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }

    #[test]
    fn test_nvidia_card_reboot() {
        let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(jammy));
        let api_socket = temp_api_path(&guest.tmp_dir);

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", "boot=4"])
            .args(["--memory", "size=4G"])
            .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
            .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"])
            .args(["--api-socket", &api_socket])
            .default_disks()
            .default_net()
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Check the VFIO device works after boot
            guest.check_nvidia_gpu();

            guest.reboot_linux(0, None);

            // Check the VFIO device works after reboot
            guest.check_nvidia_gpu();
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }
}

mod live_migration {
    use crate::*;

    fn start_live_migration(
        migration_socket: &str,
        src_api_socket: &str,
        dest_api_socket: &str,
        local: bool,
    ) -> bool {
        // Start receiving the migration on the destination VM
        let mut receive_migration = Command::new(clh_command("ch-remote"))
            .args([
                "--api-socket",
                dest_api_socket,
                "receive-migration",
                &format! {"unix:{migration_socket}"},
            ])
            .stderr(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()
            .unwrap();
        // Give it '1s' to make sure the 'migration_socket' file is properly created
        thread::sleep(std::time::Duration::new(1, 0));
        // Start sending the migration from the source VM

        let mut args = [
            "--api-socket".to_string(),
            src_api_socket.to_string(),
            "send-migration".to_string(),
            format!
{"unix:{migration_socket}"}, 8576 ] 8577 .to_vec(); 8578 8579 if local { 8580 args.insert(3, "--local".to_string()); 8581 } 8582 8583 let mut send_migration = Command::new(clh_command("ch-remote")) 8584 .args(&args) 8585 .stderr(Stdio::piped()) 8586 .stdout(Stdio::piped()) 8587 .spawn() 8588 .unwrap(); 8589 8590 // The 'send-migration' command should be executed successfully within the given timeout 8591 let send_success = if let Some(status) = send_migration 8592 .wait_timeout(std::time::Duration::from_secs(30)) 8593 .unwrap() 8594 { 8595 status.success() 8596 } else { 8597 false 8598 }; 8599 8600 if !send_success { 8601 let _ = send_migration.kill(); 8602 let output = send_migration.wait_with_output().unwrap(); 8603 eprintln!("\n\n==== Start 'send_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'send_migration' output ====\n\n", 8604 String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr)); 8605 } 8606 8607 // The 'receive-migration' command should be executed successfully within the given timeout 8608 let receive_success = if let Some(status) = receive_migration 8609 .wait_timeout(std::time::Duration::from_secs(30)) 8610 .unwrap() 8611 { 8612 status.success() 8613 } else { 8614 false 8615 }; 8616 8617 if !receive_success { 8618 let _ = receive_migration.kill(); 8619 let output = receive_migration.wait_with_output().unwrap(); 8620 eprintln!("\n\n==== Start 'receive_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'receive_migration' output ====\n\n", 8621 String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr)); 8622 } 8623 8624 send_success && receive_success 8625 } 8626 8627 fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! { 8628 let mut src_vm = src_vm; 8629 let mut dest_vm = dest_vm; 8630 8631 let _ = src_vm.kill(); 8632 let src_output = src_vm.wait_with_output().unwrap(); 8633 eprintln!( 8634 "\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====", 8635 String::from_utf8_lossy(&src_output.stdout) 8636 ); 8637 eprintln!( 8638 "\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====", 8639 String::from_utf8_lossy(&src_output.stderr) 8640 ); 8641 let _ = dest_vm.kill(); 8642 let dest_output = dest_vm.wait_with_output().unwrap(); 8643 eprintln!( 8644 "\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====", 8645 String::from_utf8_lossy(&dest_output.stdout) 8646 ); 8647 eprintln!( 8648 "\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====", 8649 String::from_utf8_lossy(&dest_output.stderr) 8650 ); 8651 8652 if let Some(ovs_vm) = ovs_vm { 8653 let mut ovs_vm = ovs_vm; 8654 let _ = ovs_vm.kill(); 8655 let ovs_output = ovs_vm.wait_with_output().unwrap(); 8656 eprintln!( 8657 "\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====", 8658 String::from_utf8_lossy(&ovs_output.stdout) 8659 ); 8660 eprintln!( 8661 "\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====", 8662 String::from_utf8_lossy(&ovs_output.stderr) 8663 ); 8664 8665 cleanup_ovs_dpdk(); 8666 } 8667 8668 panic!("Test failed: {message}") 8669 } 8670 8671 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 8672 // same host. It ensures the following behaviors: 8673 // 1. The source VM is up and functional (including various virtio-devices are working properly); 8674 // 2. 
The 'send-migration' and 'receive-migration' command finished successfully; 8675 // 3. The source VM terminated gracefully after live migration; 8676 // 4. The destination VM is functional (including various virtio-devices are working properly) after 8677 // live migration; 8678 // Note: This test does not use vsock as we can't create two identical vsock on the same host. 8679 fn _test_live_migration(upgrade_test: bool, local: bool) { 8680 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 8681 let guest = Guest::new(Box::new(focal)); 8682 let kernel_path = direct_kernel_boot_path(); 8683 let console_text = String::from("On a branch floating down river a cricket, singing."); 8684 let net_id = "net123"; 8685 let net_params = format!( 8686 "id={},tap=,mac={},ip={},mask=255.255.255.0", 8687 net_id, guest.network.guest_mac, guest.network.host_ip 8688 ); 8689 8690 let memory_param: &[&str] = if local { 8691 &["--memory", "size=4G,shared=on"] 8692 } else { 8693 &["--memory", "size=4G"] 8694 }; 8695 8696 let boot_vcpus = 2; 8697 let max_vcpus = 4; 8698 8699 let pmem_temp_file = TempFile::new().unwrap(); 8700 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 8701 std::process::Command::new("mkfs.ext4") 8702 .arg(pmem_temp_file.as_path()) 8703 .output() 8704 .expect("Expect creating disk image to succeed"); 8705 let pmem_path = String::from("/dev/pmem0"); 8706 8707 // Start the source VM 8708 let src_vm_path = if !upgrade_test { 8709 clh_command("cloud-hypervisor") 8710 } else { 8711 cloud_hypervisor_release_path() 8712 }; 8713 let src_api_socket = temp_api_path(&guest.tmp_dir); 8714 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 8715 src_vm_cmd 8716 .args([ 8717 "--cpus", 8718 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 8719 ]) 8720 .args(memory_param) 8721 .args(["--kernel", kernel_path.to_str().unwrap()]) 8722 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 8723 .default_disks() 8724 .args(["--net", net_params.as_str()]) 8725 .args(["--api-socket", &src_api_socket]) 8726 .args([ 8727 "--pmem", 8728 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 8729 ]); 8730 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 8731 8732 // Start the destination VM 8733 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 8734 dest_api_socket.push_str(".dest"); 8735 let mut dest_child = GuestCommand::new(&guest) 8736 .args(["--api-socket", &dest_api_socket]) 8737 .capture_output() 8738 .spawn() 8739 .unwrap(); 8740 8741 let r = std::panic::catch_unwind(|| { 8742 guest.wait_vm_boot(None).unwrap(); 8743 8744 // Make sure the source VM is functaionl 8745 // Check the number of vCPUs 8746 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8747 8748 // Check the guest RAM 8749 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8750 8751 // Check the guest virtio-devices, e.g. block, rng, console, and net 8752 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8753 8754 // x86_64: Following what's done in the `test_snapshot_restore`, we need 8755 // to make sure that removing and adding back the virtio-net device does 8756 // not break the live-migration support for virtio-pci. 
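            // remote_command() is assumed to invoke ch-remote against the given
            // API socket, i.e. roughly `ch-remote --api-socket <src_api_socket>
            // remove-device net123` followed by `ch-remote --api-socket
            // <src_api_socket> add-net <net_params>`.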
8757 #[cfg(target_arch = "x86_64")] 8758 { 8759 assert!(remote_command( 8760 &src_api_socket, 8761 "remove-device", 8762 Some(net_id), 8763 )); 8764 thread::sleep(std::time::Duration::new(10, 0)); 8765 8766 // Plug the virtio-net device again 8767 assert!(remote_command( 8768 &src_api_socket, 8769 "add-net", 8770 Some(net_params.as_str()), 8771 )); 8772 thread::sleep(std::time::Duration::new(10, 0)); 8773 } 8774 8775 // Start the live-migration 8776 let migration_socket = String::from( 8777 guest 8778 .tmp_dir 8779 .as_path() 8780 .join("live-migration.sock") 8781 .to_str() 8782 .unwrap(), 8783 ); 8784 8785 assert!( 8786 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 8787 "Unsuccessful command: 'send-migration' or 'receive-migration'." 8788 ); 8789 }); 8790 8791 // Check and report any errors occurred during the live-migration 8792 if r.is_err() { 8793 print_and_panic( 8794 src_child, 8795 dest_child, 8796 None, 8797 "Error occurred during live-migration", 8798 ); 8799 } 8800 8801 // Check the source vm has been terminated successful (give it '3s' to settle) 8802 thread::sleep(std::time::Duration::new(3, 0)); 8803 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 8804 print_and_panic( 8805 src_child, 8806 dest_child, 8807 None, 8808 "source VM was not terminated successfully.", 8809 ); 8810 }; 8811 8812 // Post live-migration check to make sure the destination VM is funcational 8813 let r = std::panic::catch_unwind(|| { 8814 // Perform same checks to validate VM has been properly migrated 8815 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8816 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8817 8818 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8819 }); 8820 8821 // Clean-up the destination VM and make sure it terminated correctly 8822 let _ = dest_child.kill(); 8823 let dest_output = dest_child.wait_with_output().unwrap(); 8824 handle_child_output(r, &dest_output); 8825 8826 // Check the destination VM has the expected 'concole_text' from its output 8827 let r = std::panic::catch_unwind(|| { 8828 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 8829 }); 8830 handle_child_output(r, &dest_output); 8831 } 8832 8833 fn _test_live_migration_balloon(upgrade_test: bool, local: bool) { 8834 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 8835 let guest = Guest::new(Box::new(focal)); 8836 let kernel_path = direct_kernel_boot_path(); 8837 let console_text = String::from("On a branch floating down river a cricket, singing."); 8838 let net_id = "net123"; 8839 let net_params = format!( 8840 "id={},tap=,mac={},ip={},mask=255.255.255.0", 8841 net_id, guest.network.guest_mac, guest.network.host_ip 8842 ); 8843 8844 let memory_param: &[&str] = if local { 8845 &[ 8846 "--memory", 8847 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on", 8848 "--balloon", 8849 "size=0", 8850 ] 8851 } else { 8852 &[ 8853 "--memory", 8854 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G", 8855 "--balloon", 8856 "size=0", 8857 ] 8858 }; 8859 8860 let boot_vcpus = 2; 8861 let max_vcpus = 4; 8862 8863 let pmem_temp_file = TempFile::new().unwrap(); 8864 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 8865 std::process::Command::new("mkfs.ext4") 8866 .arg(pmem_temp_file.as_path()) 8867 .output() 8868 .expect("Expect creating disk image to succeed"); 8869 let pmem_path = String::from("/dev/pmem0"); 8870 8871 // Start the source VM 8872 let 
src_vm_path = if !upgrade_test { 8873 clh_command("cloud-hypervisor") 8874 } else { 8875 cloud_hypervisor_release_path() 8876 }; 8877 let src_api_socket = temp_api_path(&guest.tmp_dir); 8878 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 8879 src_vm_cmd 8880 .args([ 8881 "--cpus", 8882 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 8883 ]) 8884 .args(memory_param) 8885 .args(["--kernel", kernel_path.to_str().unwrap()]) 8886 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 8887 .default_disks() 8888 .args(["--net", net_params.as_str()]) 8889 .args(["--api-socket", &src_api_socket]) 8890 .args([ 8891 "--pmem", 8892 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 8893 ]); 8894 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 8895 8896 // Start the destination VM 8897 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 8898 dest_api_socket.push_str(".dest"); 8899 let mut dest_child = GuestCommand::new(&guest) 8900 .args(["--api-socket", &dest_api_socket]) 8901 .capture_output() 8902 .spawn() 8903 .unwrap(); 8904 8905 let r = std::panic::catch_unwind(|| { 8906 guest.wait_vm_boot(None).unwrap(); 8907 8908 // Make sure the source VM is functaionl 8909 // Check the number of vCPUs 8910 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8911 8912 // Check the guest RAM 8913 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8914 // Increase the guest RAM 8915 resize_command(&src_api_socket, None, Some(6 << 30), None, None); 8916 thread::sleep(std::time::Duration::new(5, 0)); 8917 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8918 // Use balloon to remove RAM from the VM 8919 resize_command(&src_api_socket, None, None, Some(1 << 30), None); 8920 thread::sleep(std::time::Duration::new(5, 0)); 8921 let total_memory = guest.get_total_memory().unwrap_or_default(); 8922 assert!(total_memory > 4_800_000); 8923 assert!(total_memory < 5_760_000); 8924 8925 // Check the guest virtio-devices, e.g. block, rng, console, and net 8926 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8927 8928 // x86_64: Following what's done in the `test_snapshot_restore`, we need 8929 // to make sure that removing and adding back the virtio-net device does 8930 // not break the live-migration support for virtio-pci. 8931 #[cfg(target_arch = "x86_64")] 8932 { 8933 assert!(remote_command( 8934 &src_api_socket, 8935 "remove-device", 8936 Some(net_id), 8937 )); 8938 thread::sleep(std::time::Duration::new(10, 0)); 8939 8940 // Plug the virtio-net device again 8941 assert!(remote_command( 8942 &src_api_socket, 8943 "add-net", 8944 Some(net_params.as_str()), 8945 )); 8946 thread::sleep(std::time::Duration::new(10, 0)); 8947 } 8948 8949 // Start the live-migration 8950 let migration_socket = String::from( 8951 guest 8952 .tmp_dir 8953 .as_path() 8954 .join("live-migration.sock") 8955 .to_str() 8956 .unwrap(), 8957 ); 8958 8959 assert!( 8960 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 8961 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
8962 ); 8963 }); 8964 8965 // Check and report any errors occurred during the live-migration 8966 if r.is_err() { 8967 print_and_panic( 8968 src_child, 8969 dest_child, 8970 None, 8971 "Error occurred during live-migration", 8972 ); 8973 } 8974 8975 // Check the source vm has been terminated successful (give it '3s' to settle) 8976 thread::sleep(std::time::Duration::new(3, 0)); 8977 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 8978 print_and_panic( 8979 src_child, 8980 dest_child, 8981 None, 8982 "source VM was not terminated successfully.", 8983 ); 8984 }; 8985 8986 // Post live-migration check to make sure the destination VM is funcational 8987 let r = std::panic::catch_unwind(|| { 8988 // Perform same checks to validate VM has been properly migrated 8989 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8990 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8991 8992 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8993 8994 // Perform checks on guest RAM using balloon 8995 let total_memory = guest.get_total_memory().unwrap_or_default(); 8996 assert!(total_memory > 4_800_000); 8997 assert!(total_memory < 5_760_000); 8998 // Deflate balloon to restore entire RAM to the VM 8999 resize_command(&dest_api_socket, None, None, Some(0), None); 9000 thread::sleep(std::time::Duration::new(5, 0)); 9001 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9002 // Decrease guest RAM with virtio-mem 9003 resize_command(&dest_api_socket, None, Some(5 << 30), None, None); 9004 thread::sleep(std::time::Duration::new(5, 0)); 9005 let total_memory = guest.get_total_memory().unwrap_or_default(); 9006 assert!(total_memory > 4_800_000); 9007 assert!(total_memory < 5_760_000); 9008 }); 9009 9010 // Clean-up the destination VM and make sure it terminated correctly 9011 let _ = dest_child.kill(); 9012 let dest_output = dest_child.wait_with_output().unwrap(); 9013 handle_child_output(r, &dest_output); 9014 9015 // Check the destination VM has the expected 'concole_text' from its output 9016 let r = std::panic::catch_unwind(|| { 9017 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9018 }); 9019 handle_child_output(r, &dest_output); 9020 } 9021 9022 fn _test_live_migration_numa(upgrade_test: bool, local: bool) { 9023 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9024 let guest = Guest::new(Box::new(focal)); 9025 let kernel_path = direct_kernel_boot_path(); 9026 let console_text = String::from("On a branch floating down river a cricket, singing."); 9027 let net_id = "net123"; 9028 let net_params = format!( 9029 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9030 net_id, guest.network.guest_mac, guest.network.host_ip 9031 ); 9032 9033 let memory_param: &[&str] = if local { 9034 &[ 9035 "--memory", 9036 "size=0,hotplug_method=virtio-mem,shared=on", 9037 "--memory-zone", 9038 "id=mem0,size=1G,hotplug_size=4G,shared=on", 9039 "--memory-zone", 9040 "id=mem1,size=1G,hotplug_size=4G,shared=on", 9041 "--memory-zone", 9042 "id=mem2,size=2G,hotplug_size=4G,shared=on", 9043 "--numa", 9044 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9045 "--numa", 9046 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9047 "--numa", 9048 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9049 ] 9050 } else { 9051 &[ 9052 "--memory", 9053 "size=0,hotplug_method=virtio-mem", 9054 "--memory-zone", 9055 
"id=mem0,size=1G,hotplug_size=4G", 9056 "--memory-zone", 9057 "id=mem1,size=1G,hotplug_size=4G", 9058 "--memory-zone", 9059 "id=mem2,size=2G,hotplug_size=4G", 9060 "--numa", 9061 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9062 "--numa", 9063 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9064 "--numa", 9065 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9066 ] 9067 }; 9068 9069 let boot_vcpus = 6; 9070 let max_vcpus = 12; 9071 9072 let pmem_temp_file = TempFile::new().unwrap(); 9073 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9074 std::process::Command::new("mkfs.ext4") 9075 .arg(pmem_temp_file.as_path()) 9076 .output() 9077 .expect("Expect creating disk image to succeed"); 9078 let pmem_path = String::from("/dev/pmem0"); 9079 9080 // Start the source VM 9081 let src_vm_path = if !upgrade_test { 9082 clh_command("cloud-hypervisor") 9083 } else { 9084 cloud_hypervisor_release_path() 9085 }; 9086 let src_api_socket = temp_api_path(&guest.tmp_dir); 9087 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9088 src_vm_cmd 9089 .args([ 9090 "--cpus", 9091 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9092 ]) 9093 .args(memory_param) 9094 .args(["--kernel", kernel_path.to_str().unwrap()]) 9095 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9096 .default_disks() 9097 .args(["--net", net_params.as_str()]) 9098 .args(["--api-socket", &src_api_socket]) 9099 .args([ 9100 "--pmem", 9101 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9102 ]); 9103 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9104 9105 // Start the destination VM 9106 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9107 dest_api_socket.push_str(".dest"); 9108 let mut dest_child = GuestCommand::new(&guest) 9109 .args(["--api-socket", &dest_api_socket]) 9110 .capture_output() 9111 .spawn() 9112 .unwrap(); 9113 9114 let r = std::panic::catch_unwind(|| { 9115 guest.wait_vm_boot(None).unwrap(); 9116 9117 // Make sure the source VM is functaionl 9118 // Check the number of vCPUs 9119 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9120 9121 // Check the guest RAM 9122 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 9123 9124 // Check the guest virtio-devices, e.g. block, rng, console, and net 9125 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9126 9127 // Check the NUMA parameters are applied correctly and resize 9128 // each zone to test the case where we migrate a VM with the 9129 // virtio-mem regions being used. 9130 { 9131 guest.check_numa_common( 9132 Some(&[960_000, 960_000, 1_920_000]), 9133 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9134 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9135 ); 9136 9137 // AArch64 currently does not support hotplug, and therefore we only 9138 // test hotplug-related function on x86_64 here. 9139 #[cfg(target_arch = "x86_64")] 9140 { 9141 guest.enable_memory_hotplug(); 9142 9143 // Resize every memory zone and check each associated NUMA node 9144 // has been assigned the right amount of memory. 
9145 resize_zone_command(&src_api_socket, "mem0", "2G"); 9146 resize_zone_command(&src_api_socket, "mem1", "2G"); 9147 resize_zone_command(&src_api_socket, "mem2", "3G"); 9148 thread::sleep(std::time::Duration::new(5, 0)); 9149 9150 guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None); 9151 } 9152 } 9153 9154 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9155 // to make sure that removing and adding back the virtio-net device does 9156 // not break the live-migration support for virtio-pci. 9157 #[cfg(target_arch = "x86_64")] 9158 { 9159 assert!(remote_command( 9160 &src_api_socket, 9161 "remove-device", 9162 Some(net_id), 9163 )); 9164 thread::sleep(std::time::Duration::new(10, 0)); 9165 9166 // Plug the virtio-net device again 9167 assert!(remote_command( 9168 &src_api_socket, 9169 "add-net", 9170 Some(net_params.as_str()), 9171 )); 9172 thread::sleep(std::time::Duration::new(10, 0)); 9173 } 9174 9175 // Start the live-migration 9176 let migration_socket = String::from( 9177 guest 9178 .tmp_dir 9179 .as_path() 9180 .join("live-migration.sock") 9181 .to_str() 9182 .unwrap(), 9183 ); 9184 9185 assert!( 9186 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9187 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9188 ); 9189 }); 9190 9191 // Check and report any errors occurred during the live-migration 9192 if r.is_err() { 9193 print_and_panic( 9194 src_child, 9195 dest_child, 9196 None, 9197 "Error occurred during live-migration", 9198 ); 9199 } 9200 9201 // Check the source vm has been terminated successful (give it '3s' to settle) 9202 thread::sleep(std::time::Duration::new(3, 0)); 9203 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9204 print_and_panic( 9205 src_child, 9206 dest_child, 9207 None, 9208 "source VM was not terminated successfully.", 9209 ); 9210 }; 9211 9212 // Post live-migration check to make sure the destination VM is funcational 9213 let r = std::panic::catch_unwind(|| { 9214 // Perform same checks to validate VM has been properly migrated 9215 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9216 #[cfg(target_arch = "x86_64")] 9217 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 9218 #[cfg(target_arch = "aarch64")] 9219 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9220 9221 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9222 9223 // Perform NUMA related checks 9224 { 9225 #[cfg(target_arch = "aarch64")] 9226 { 9227 guest.check_numa_common( 9228 Some(&[960_000, 960_000, 1_920_000]), 9229 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9230 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9231 ); 9232 } 9233 9234 // AArch64 currently does not support hotplug, and therefore we only 9235 // test hotplug-related function on x86_64 here. 9236 #[cfg(target_arch = "x86_64")] 9237 { 9238 guest.check_numa_common( 9239 Some(&[1_920_000, 1_920_000, 2_880_000]), 9240 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9241 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9242 ); 9243 9244 guest.enable_memory_hotplug(); 9245 9246 // Resize every memory zone and check each associated NUMA node 9247 // has been assigned the right amount of memory. 
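                    // resize_zone_command() is assumed to wrap ch-remote's
                    // `resize-zone` call for the given zone id. On the
                    // destination the zones are grown to 4G each and the vCPUs
                    // are resized up to max_vcpus (12), so the NUMA check below
                    // expects the hotplugged CPUs (9, 6-8 and 10-11) to show up
                    // in their assigned nodes.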
9248 resize_zone_command(&dest_api_socket, "mem0", "4G"); 9249 resize_zone_command(&dest_api_socket, "mem1", "4G"); 9250 resize_zone_command(&dest_api_socket, "mem2", "4G"); 9251 // Resize to the maximum amount of CPUs and check each NUMA 9252 // node has been assigned the right CPUs set. 9253 resize_command(&dest_api_socket, Some(max_vcpus), None, None, None); 9254 thread::sleep(std::time::Duration::new(5, 0)); 9255 9256 guest.check_numa_common( 9257 Some(&[3_840_000, 3_840_000, 3_840_000]), 9258 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 9259 None, 9260 ); 9261 } 9262 } 9263 }); 9264 9265 // Clean-up the destination VM and make sure it terminated correctly 9266 let _ = dest_child.kill(); 9267 let dest_output = dest_child.wait_with_output().unwrap(); 9268 handle_child_output(r, &dest_output); 9269 9270 // Check the destination VM has the expected 'concole_text' from its output 9271 let r = std::panic::catch_unwind(|| { 9272 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9273 }); 9274 handle_child_output(r, &dest_output); 9275 } 9276 9277 fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) { 9278 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9279 let guest = Guest::new(Box::new(focal)); 9280 let kernel_path = direct_kernel_boot_path(); 9281 let console_text = String::from("On a branch floating down river a cricket, singing."); 9282 let net_id = "net123"; 9283 let net_params = format!( 9284 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9285 net_id, guest.network.guest_mac, guest.network.host_ip 9286 ); 9287 9288 let memory_param: &[&str] = if local { 9289 &["--memory", "size=4G,shared=on"] 9290 } else { 9291 &["--memory", "size=4G"] 9292 }; 9293 9294 let boot_vcpus = 2; 9295 let max_vcpus = 4; 9296 9297 let pmem_temp_file = TempFile::new().unwrap(); 9298 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9299 std::process::Command::new("mkfs.ext4") 9300 .arg(pmem_temp_file.as_path()) 9301 .output() 9302 .expect("Expect creating disk image to succeed"); 9303 let pmem_path = String::from("/dev/pmem0"); 9304 9305 // Start the source VM 9306 let src_vm_path = if !upgrade_test { 9307 clh_command("cloud-hypervisor") 9308 } else { 9309 cloud_hypervisor_release_path() 9310 }; 9311 let src_api_socket = temp_api_path(&guest.tmp_dir); 9312 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9313 src_vm_cmd 9314 .args([ 9315 "--cpus", 9316 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9317 ]) 9318 .args(memory_param) 9319 .args(["--kernel", kernel_path.to_str().unwrap()]) 9320 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9321 .default_disks() 9322 .args(["--net", net_params.as_str()]) 9323 .args(["--api-socket", &src_api_socket]) 9324 .args([ 9325 "--pmem", 9326 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9327 ]) 9328 .args(["--watchdog"]); 9329 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9330 9331 // Start the destination VM 9332 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9333 dest_api_socket.push_str(".dest"); 9334 let mut dest_child = GuestCommand::new(&guest) 9335 .args(["--api-socket", &dest_api_socket]) 9336 .capture_output() 9337 .spawn() 9338 .unwrap(); 9339 9340 let r = std::panic::catch_unwind(|| { 9341 guest.wait_vm_boot(None).unwrap(); 9342 9343 // Make sure the source VM is functaionl 9344 // Check the number of vCPUs 9345 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9346 // 
Check the guest RAM 9347 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9348 // Check the guest virtio-devices, e.g. block, rng, console, and net 9349 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9350 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9351 // to make sure that removing and adding back the virtio-net device does 9352 // not break the live-migration support for virtio-pci. 9353 #[cfg(target_arch = "x86_64")] 9354 { 9355 assert!(remote_command( 9356 &src_api_socket, 9357 "remove-device", 9358 Some(net_id), 9359 )); 9360 thread::sleep(std::time::Duration::new(10, 0)); 9361 9362 // Plug the virtio-net device again 9363 assert!(remote_command( 9364 &src_api_socket, 9365 "add-net", 9366 Some(net_params.as_str()), 9367 )); 9368 thread::sleep(std::time::Duration::new(10, 0)); 9369 } 9370 9371 // Enable watchdog and ensure its functional 9372 let mut expected_reboot_count = 1; 9373 // Enable the watchdog with a 15s timeout 9374 enable_guest_watchdog(&guest, 15); 9375 // Reboot and check that systemd has activated the watchdog 9376 guest.ssh_command("sudo reboot").unwrap(); 9377 guest.wait_vm_boot(None).unwrap(); 9378 expected_reboot_count += 1; 9379 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9380 assert_eq!( 9381 guest 9382 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 9383 .unwrap() 9384 .trim() 9385 .parse::<u32>() 9386 .unwrap_or_default(), 9387 2 9388 ); 9389 // Allow some normal time to elapse to check we don't get spurious reboots 9390 thread::sleep(std::time::Duration::new(40, 0)); 9391 // Check no reboot 9392 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9393 9394 // Start the live-migration 9395 let migration_socket = String::from( 9396 guest 9397 .tmp_dir 9398 .as_path() 9399 .join("live-migration.sock") 9400 .to_str() 9401 .unwrap(), 9402 ); 9403 9404 assert!( 9405 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9406 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9407 ); 9408 }); 9409 9410 // Check and report any errors occurred during the live-migration 9411 if r.is_err() { 9412 print_and_panic( 9413 src_child, 9414 dest_child, 9415 None, 9416 "Error occurred during live-migration", 9417 ); 9418 } 9419 9420 // Check the source vm has been terminated successful (give it '3s' to settle) 9421 thread::sleep(std::time::Duration::new(3, 0)); 9422 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9423 print_and_panic( 9424 src_child, 9425 dest_child, 9426 None, 9427 "source VM was not terminated successfully.", 9428 ); 9429 }; 9430 9431 // Post live-migration check to make sure the destination VM is funcational 9432 let r = std::panic::catch_unwind(|| { 9433 // Perform same checks to validate VM has been properly migrated 9434 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9435 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9436 9437 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9438 9439 // Perform checks on watchdog 9440 let mut expected_reboot_count = 2; 9441 9442 // Allow some normal time to elapse to check we don't get spurious reboots 9443 thread::sleep(std::time::Duration::new(40, 0)); 9444 // Check no reboot 9445 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9446 9447 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 
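            // In the sysrq sequence below, 's' syncs the filesystems and 'c'
            // forces a kernel crash; with the 15s guest watchdog armed earlier,
            // the crash is expected to end in a watchdog-triggered reboot.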
            guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap();
            // Allow some time for the watchdog to trigger (max 30s) and reboot to happen
            guest.wait_vm_boot(Some(50)).unwrap();
            // Check a reboot is triggered by the watchdog
            expected_reboot_count += 1;
            assert_eq!(get_reboot_count(&guest), expected_reboot_count);

            #[cfg(target_arch = "x86_64")]
            {
                // Now pause the VM and remain offline for 30s
                assert!(remote_command(&dest_api_socket, "pause", None));
                thread::sleep(std::time::Duration::new(30, 0));
                assert!(remote_command(&dest_api_socket, "resume", None));

                // Check no reboot
                assert_eq!(get_reboot_count(&guest), expected_reboot_count);
            }
        });

        // Clean-up the destination VM and make sure it terminated correctly
        let _ = dest_child.kill();
        let dest_output = dest_child.wait_with_output().unwrap();
        handle_child_output(r, &dest_output);

        // Check the destination VM has the expected 'console_text' from its output
        let r = std::panic::catch_unwind(|| {
            assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
        });
        handle_child_output(r, &dest_output);
    }

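    // Live-migrate a guest that is connected to another guest through ovs-dpdk, then
    // verify (with a netcat listener/client pair) that the connection is still usable
    // from the destination VM once the migration has completed.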
9517 ); 9518 }); 9519 9520 // Check and report any errors occurred during the live-migration 9521 if r.is_err() { 9522 print_and_panic( 9523 src_child, 9524 dest_child, 9525 Some(ovs_child), 9526 "Error occurred during live-migration", 9527 ); 9528 } 9529 9530 // Check the source vm has been terminated successful (give it '3s' to settle) 9531 thread::sleep(std::time::Duration::new(3, 0)); 9532 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9533 print_and_panic( 9534 src_child, 9535 dest_child, 9536 Some(ovs_child), 9537 "source VM was not terminated successfully.", 9538 ); 9539 }; 9540 9541 // Post live-migration check to make sure the destination VM is funcational 9542 let r = std::panic::catch_unwind(|| { 9543 // Perform same checks to validate VM has been properly migrated 9544 // Spawn a new netcat listener in the OVS VM 9545 let guest_ip = ovs_guest.network.guest_ip.clone(); 9546 thread::spawn(move || { 9547 ssh_command_ip( 9548 "nc -l 12345", 9549 &guest_ip, 9550 DEFAULT_SSH_RETRIES, 9551 DEFAULT_SSH_TIMEOUT, 9552 ) 9553 .unwrap(); 9554 }); 9555 9556 // Wait for the server to be listening 9557 thread::sleep(std::time::Duration::new(5, 0)); 9558 9559 // And check the connection is still functional after live-migration 9560 migration_guest 9561 .ssh_command("nc -vz 172.100.0.1 12345") 9562 .unwrap(); 9563 }); 9564 9565 // Clean-up the destination VM and OVS VM, and make sure they terminated correctly 9566 let _ = dest_child.kill(); 9567 let _ = ovs_child.kill(); 9568 let dest_output = dest_child.wait_with_output().unwrap(); 9569 let ovs_output = ovs_child.wait_with_output().unwrap(); 9570 9571 cleanup_ovs_dpdk(); 9572 9573 handle_child_output(r, &dest_output); 9574 handle_child_output(Ok(()), &ovs_output); 9575 } 9576 9577 mod live_migration_parallel { 9578 use super::*; 9579 #[test] 9580 fn test_live_migration_basic() { 9581 _test_live_migration(false, false) 9582 } 9583 9584 #[test] 9585 fn test_live_migration_local() { 9586 _test_live_migration(false, true) 9587 } 9588 9589 #[test] 9590 #[cfg(not(feature = "mshv"))] 9591 fn test_live_migration_numa() { 9592 _test_live_migration_numa(false, false) 9593 } 9594 9595 #[test] 9596 #[cfg(not(feature = "mshv"))] 9597 fn test_live_migration_numa_local() { 9598 _test_live_migration_numa(false, true) 9599 } 9600 9601 #[test] 9602 fn test_live_migration_watchdog() { 9603 _test_live_migration_watchdog(false, false) 9604 } 9605 9606 #[test] 9607 fn test_live_migration_watchdog_local() { 9608 _test_live_migration_watchdog(false, true) 9609 } 9610 9611 #[test] 9612 fn test_live_migration_balloon() { 9613 _test_live_migration_balloon(false, false) 9614 } 9615 9616 #[test] 9617 fn test_live_migration_balloon_local() { 9618 _test_live_migration_balloon(false, true) 9619 } 9620 9621 #[test] 9622 fn test_live_upgrade_basic() { 9623 _test_live_migration(true, false) 9624 } 9625 9626 #[test] 9627 fn test_live_upgrade_local() { 9628 _test_live_migration(true, true) 9629 } 9630 9631 #[test] 9632 #[cfg(not(feature = "mshv"))] 9633 fn test_live_upgrade_numa() { 9634 _test_live_migration_numa(true, false) 9635 } 9636 9637 #[test] 9638 #[cfg(not(feature = "mshv"))] 9639 fn test_live_upgrade_numa_local() { 9640 _test_live_migration_numa(true, true) 9641 } 9642 9643 #[test] 9644 fn test_live_upgrade_watchdog() { 9645 _test_live_migration_watchdog(true, false) 9646 } 9647 9648 #[test] 9649 fn test_live_upgrade_watchdog_local() { 9650 _test_live_migration_watchdog(true, true) 9651 } 9652 9653 #[test] 9654 fn test_live_upgrade_balloon() { 9655 
    mod live_migration_parallel {
        use super::*;
        #[test]
        fn test_live_migration_basic() {
            _test_live_migration(false, false)
        }

        #[test]
        fn test_live_migration_local() {
            _test_live_migration(false, true)
        }

        #[test]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_numa() {
            _test_live_migration_numa(false, false)
        }

        #[test]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_numa_local() {
            _test_live_migration_numa(false, true)
        }

        #[test]
        fn test_live_migration_watchdog() {
            _test_live_migration_watchdog(false, false)
        }

        #[test]
        fn test_live_migration_watchdog_local() {
            _test_live_migration_watchdog(false, true)
        }

        #[test]
        fn test_live_migration_balloon() {
            _test_live_migration_balloon(false, false)
        }

        #[test]
        fn test_live_migration_balloon_local() {
            _test_live_migration_balloon(false, true)
        }

        #[test]
        fn test_live_upgrade_basic() {
            _test_live_migration(true, false)
        }

        #[test]
        fn test_live_upgrade_local() {
            _test_live_migration(true, true)
        }

        #[test]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_numa() {
            _test_live_migration_numa(true, false)
        }

        #[test]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_numa_local() {
            _test_live_migration_numa(true, true)
        }

        #[test]
        fn test_live_upgrade_watchdog() {
            _test_live_migration_watchdog(true, false)
        }

        #[test]
        fn test_live_upgrade_watchdog_local() {
            _test_live_migration_watchdog(true, true)
        }

        #[test]
        fn test_live_upgrade_balloon() {
            _test_live_migration_balloon(true, false)
        }

        #[test]
        fn test_live_upgrade_balloon_local() {
            _test_live_migration_balloon(true, true)
        }
    }

    mod live_migration_sequential {
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        use super::*;

        // The ovs-dpdk tests need to run sequentially because they rely on the same
        // ovs-dpdk setup
        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_ovs_dpdk() {
            _test_live_migration_ovs_dpdk(false, false);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_ovs_dpdk_local() {
            _test_live_migration_ovs_dpdk(false, true);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_ovs_dpdk() {
            _test_live_migration_ovs_dpdk(true, false);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_ovs_dpdk_local() {
            _test_live_migration_ovs_dpdk(true, true);
        }
    }
}

#[cfg(target_arch = "aarch64")]
mod aarch64_acpi {
    use crate::*;

    #[test]
    fn test_simple_launch_acpi() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());

        vec![Box::new(focal)].drain(..).for_each(|disk_config| {
            let guest = Guest::new(disk_config);

            let mut child = GuestCommand::new(&guest)
                .args(["--cpus", "boot=1"])
                .args(["--memory", "size=512M"])
                .args(["--kernel", edk2_path().to_str().unwrap()])
                .default_disks()
                .default_net()
                .args(["--serial", "tty", "--console", "off"])
                .capture_output()
                .spawn()
                .unwrap();

            let r = std::panic::catch_unwind(|| {
                guest.wait_vm_boot(Some(120)).unwrap();

                assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
                assert!(guest.get_total_memory().unwrap_or_default() > 400_000);
                assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000");
            });

            let _ = child.kill();
            let output = child.wait_with_output().unwrap();

            handle_child_output(r, &output);
        });
    }

    #[test]
    fn test_guest_numa_nodes_acpi() {
        _test_guest_numa_nodes(true);
    }

    #[test]
    fn test_cpu_topology_421_acpi() {
        test_cpu_topology(4, 2, 1, true);
    }

    #[test]
    fn test_cpu_topology_142_acpi() {
        test_cpu_topology(1, 4, 2, true);
    }

    #[test]
    fn test_cpu_topology_262_acpi() {
        test_cpu_topology(2, 6, 2, true);
    }

    #[test]
    fn test_power_button_acpi() {
        _test_power_button(true);
    }

    #[test]
    fn test_virtio_iommu() {
        _test_virtio_iommu(true)
    }
}

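// The tests below configure the virtio-net and virtio-block rate limiters
// (bw_size/bw_refill_time for bandwidth, ops_size/ops_refill_time for IOPS) and verify
// that the rate measured from inside the guest stays within 10% of the configured limit.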
mod rate_limiter {
    use super::*;

    // Check if the 'measured' rate is within the expected 'difference' (expressed as a
    // fraction, e.g. 0.1 for 10%) of the given 'limit' rate.
    fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool {
        let upper_limit = limit * (1_f64 + difference);
        let lower_limit = limit * (1_f64 - difference);

        if measured > lower_limit && measured < upper_limit {
            return true;
        }

        eprintln!(
            "\n\n==== check_rate_limit failed! ====\n\nmeasured={measured}, lower_limit={lower_limit}, upper_limit={upper_limit}\n\n"
        );

        false
    }

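    // A minimal, host-only sanity check of `check_rate_limit` itself (an illustrative
    // addition, not required by the guest tests below). With `difference = 0.1` the
    // accepted window around a limit of 100.0 is (90.0, 110.0); for comparison, the
    // virtio-net test below allows bw_size = 10 MiB per bw_refill_time = 100 ms, i.e.
    // 10_485_760 * 8 * 1000 / 100 ≈ 838.9 Mbit/s, with the same ±10% tolerance.
    #[test]
    fn test_check_rate_limit_window() {
        assert!(check_rate_limit(105.0, 100.0, 0.1));
        assert!(check_rate_limit(95.0, 100.0, 0.1));
        // Out-of-window values are rejected (the helper logs the failure to stderr).
        assert!(!check_rate_limit(120.0, 100.0, 0.1));
    }
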
"--disk", 9899 test_blk_params.as_str(), 9900 ]) 9901 .default_net() 9902 .args(["--api-socket", &api_socket]) 9903 .capture_output() 9904 .spawn() 9905 .unwrap(); 9906 9907 let r = std::panic::catch_unwind(|| { 9908 guest.wait_vm_boot(None).unwrap(); 9909 9910 let fio_command = format!( 9911 "sudo fio --filename=/dev/vdc --name=test --output-format=json \ 9912 --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \ 9913 --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}" 9914 ); 9915 let output = guest.ssh_command(&fio_command).unwrap(); 9916 9917 // Parse fio output 9918 let measured_rate = if bandwidth { 9919 parse_fio_output(&output, &fio_ops, num_queues).unwrap() 9920 } else { 9921 parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap() 9922 }; 9923 assert!(check_rate_limit(measured_rate, limit_rate, 0.1)); 9924 }); 9925 9926 let _ = child.kill(); 9927 let output = child.wait_with_output().unwrap(); 9928 handle_child_output(r, &output); 9929 } 9930 9931 #[test] 9932 fn test_rate_limiter_block_bandwidth() { 9933 _test_rate_limiter_block(true) 9934 } 9935 9936 #[test] 9937 fn test_rate_limiter_block_iops() { 9938 _test_rate_limiter_block(false) 9939 } 9940 } 9941