// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::undocumented_unsafe_blocks)]
// When enabling the `mshv` feature, we skip quite a few tests and
// hence have known dead code. This annotation silences dead-code
// related warnings for our quality workflow to pass.
#![allow(dead_code)]

extern crate test_infra;

use std::collections::HashMap;
use std::fs;
use std::io;
use std::io::BufRead;
use std::io::Read;
use std::io::Seek;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::Receiver;
use std::sync::Mutex;
use std::thread;

use net_util::MacAddr;
use test_infra::*;
use vmm_sys_util::{tempdir::TempDir, tempfile::TempFile};
use wait_timeout::ChildExt;

// Constant taken from the VMM crate.
const MAX_NUM_PCI_SEGMENTS: u16 = 96;

#[cfg(target_arch = "x86_64")]
mod x86_64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
    pub const JAMMY_NVIDIA_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-nvidia.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20230119-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw";
    pub const OVMF_NAME: &str = "CLOUDHV.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
}

#[cfg(target_arch = "x86_64")]
use x86_64::*;

#[cfg(target_arch = "aarch64")]
mod aarch64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
    pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw";
    pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
    pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true";
}

#[cfg(target_arch = "aarch64")]
use aarch64::*;

const DIRECT_KERNEL_BOOT_CMDLINE: &str =
    "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1";

const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server";

// This enum exists to make it more convenient to
// implement tests against both the D-Bus and REST APIs.
enum TargetApi {
    // API socket
    HttpApi(String),
    // well known service name, object path
    DBusApi(String, String),
}

impl TargetApi {
    fn new_http_api(tmp_dir: &TempDir) -> Self {
        Self::HttpApi(temp_api_path(tmp_dir))
    }

    fn new_dbus_api(tmp_dir: &TempDir) -> Self {
        // `tmp_dir` is in the form of "/tmp/chXXXXXX"
        // and we take the `chXXXXXX` part as a unique identifier for the guest
        let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap();

        Self::DBusApi(
            format!("org.cloudhypervisor.{id}"),
            format!("/org/cloudhypervisor/{id}"),
        )
    }

    fn guest_args(&self) -> Vec<String> {
        match self {
            TargetApi::HttpApi(api_socket) => {
                vec![format!("--api-socket={}", api_socket.as_str())]
            }
            TargetApi::DBusApi(service_name, object_path) => {
                vec![
                    format!("--dbus-service-name={}", service_name.as_str()),
                    format!("--dbus-object-path={}", object_path.as_str()),
                ]
            }
        }
    }

    fn remote_args(&self) -> Vec<String> {
        // `guest_args` and `remote_args` are consistent with each other
        self.guest_args()
    }

    fn remote_command(&self, command: &str, arg: Option<&str>) -> bool {
        let mut cmd = Command::new(clh_command("ch-remote"));
        cmd.args(self.remote_args());
        cmd.arg(command);

        if let Some(arg) = arg {
            cmd.arg(arg);
        }

        let output = cmd.output().unwrap();
        if output.status.success() {
            true
        } else {
            eprintln!("Error running ch-remote command: {:?}", &cmd);
            let stderr = String::from_utf8_lossy(&output.stderr);
            eprintln!("stderr: {stderr}");
            false
        }
    }
}
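
// Illustrative sketch (not called by any test): the arguments produced by
// `guest_args()` for the two API flavours. The socket path and the D-Bus
// names below are hypothetical values following the patterns used by
// `new_http_api()` and `new_dbus_api()`.
fn _example_target_api_args() {
    let http = TargetApi::HttpApi("/tmp/ch1a2b3c/cloud-hypervisor.sock".to_string());
    assert_eq!(
        http.guest_args(),
        vec!["--api-socket=/tmp/ch1a2b3c/cloud-hypervisor.sock".to_string()]
    );

    let dbus = TargetApi::DBusApi(
        "org.cloudhypervisor.ch1a2b3c".to_string(),
        "/org/cloudhypervisor/ch1a2b3c".to_string(),
    );
    assert_eq!(
        dbus.guest_args(),
        vec![
            "--dbus-service-name=org.cloudhypervisor.ch1a2b3c".to_string(),
            "--dbus-object-path=/org/cloudhypervisor/ch1a2b3c".to_string(),
        ]
    );
}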
// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
fn _test_api_create_boot(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config)));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be shut down and then
// booted again.
fn _test_api_shutdown(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shutdown
        thread::sleep(std::time::Duration::new(20, 0));

        // Then shut it down
        assert!(target_api.remote_command("shutdown", None));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be deleted, then
// recreated and booted again.
fn _test_api_delete(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );
    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shutdown
        thread::sleep(std::time::Duration::new(20, 0));

        // Then delete it
        assert!(target_api.remote_command("delete", None));

        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
// Then we pause the VM and check that it can no longer be reached over SSH.
// Finally we resume the VM and check that it is reachable again.
fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config)));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // We now pause the VM
        assert!(target_api.remote_command("pause", None));

        // Check pausing again fails
        assert!(!target_api.remote_command("pause", None));

        thread::sleep(std::time::Duration::new(2, 0));

        // SSH into the VM should fail
        assert!(ssh_command_ip(
            "grep -c processor /proc/cpuinfo",
            &guest.network.guest_ip,
            2,
            5
        )
        .is_err());

        // Resume the VM
        assert!(target_api.remote_command("resume", None));

        // Check resuming again fails
        assert!(!target_api.remote_command("resume", None));

        thread::sleep(std::time::Duration::new(2, 0));

        // Now we should be able to SSH back in and get the right number of CPUs
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

fn _test_pty_interaction(pty_path: PathBuf) {
    let mut cf = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .open(pty_path)
        .unwrap();

    // These sleeps are crude, but we don't want to write before the console
    // is up, and we don't want to try writing the next line before the login
    // process is ready.
    thread::sleep(std::time::Duration::new(5, 0));
    assert_eq!(cf.write(b"cloud\n").unwrap(), 6);
    thread::sleep(std::time::Duration::new(2, 0));
    assert_eq!(cf.write(b"cloud123\n").unwrap(), 9);
    thread::sleep(std::time::Duration::new(2, 0));
    assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22);
    thread::sleep(std::time::Duration::new(2, 0));

    // Read the PTY and ensure we got a login shell. The workarounds below are
    // fairly hacky, but they avoid looping forever in case the channel blocks
    // while getting output.
    let ptyc = pty_read(cf);
    let mut empty = 0;
    let mut prev = String::new();
    loop {
        thread::sleep(std::time::Duration::new(2, 0));
        match ptyc.try_recv() {
            Ok(line) => {
                empty = 0;
                prev = prev + &line;
                if prev.contains("test_pty_console") {
                    break;
                }
            }
            Err(mpsc::TryRecvError::Empty) => {
                empty += 1;
                assert!(empty <= 5, "No login on pty");
            }
            _ => {
                panic!("No login on pty")
            }
        }
    }
}

fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut virtiofsd_path = workload_path;
    virtiofsd_path.push("virtiofsd");
    let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap());

    let virtiofsd_socket_path =
        String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap());

    // Start the daemon
    let child = Command::new(virtiofsd_path.as_str())
        .args(["--shared-dir", shared_dir])
        .args(["--socket-path", virtiofsd_socket_path.as_str()])
        .args(["--cache", "never"])
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(10, 0));

    (child, virtiofsd_socket_path)
}

fn prepare_vubd(
    tmp_dir: &TempDir,
    blk_img: &str,
    num_queues: usize,
    rdonly: bool,
    direct: bool,
) -> (std::process::Child, String) {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut blk_file_path = workload_path;
    blk_file_path.push(blk_img);
    let blk_file_path = String::from(blk_file_path.to_str().unwrap());

    let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap());

    // Start the daemon
    let child = Command::new(clh_command("vhost_user_block"))
        .args([
            "--block-backend",
            format!(
                "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}"
            )
            .as_str(),
        ])
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(10, 0));

    (child, vubd_socket_path)
}

fn temp_vsock_path(tmp_dir: &TempDir) -> String {
    String::from(tmp_dir.as_path().join("vsock").to_str().unwrap())
}

fn temp_api_path(tmp_dir: &TempDir) -> String {
    String::from(
        tmp_dir
            .as_path()
            .join("cloud-hypervisor.sock")
            .to_str()
            .unwrap(),
    )
}

fn temp_event_monitor_path(tmp_dir: &TempDir) -> String {
    String::from(tmp_dir.as_path().join("event.json").to_str().unwrap())
}

// Creates the directory and returns the path.
fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String {
    let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap());
    std::fs::create_dir(&snapshot_dir).unwrap();
    snapshot_dir
}

fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String {
    let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap());
    vmcore_file
}

// Creates the path for direct kernel boot and returns it.
// For x86_64, this function returns the vmlinux kernel path.
// For AArch64, this function returns the PE kernel path.
fn direct_kernel_boot_path() -> PathBuf {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut kernel_path = workload_path;
    #[cfg(target_arch = "x86_64")]
    kernel_path.push("vmlinux");
    #[cfg(target_arch = "aarch64")]
    kernel_path.push("Image");

    kernel_path
}

fn edk2_path() -> PathBuf {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");
    let mut edk2_path = workload_path;
    edk2_path.push(OVMF_NAME);

    edk2_path
}

fn cloud_hypervisor_release_path() -> String {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut ch_release_path = workload_path;
    #[cfg(target_arch = "x86_64")]
    ch_release_path.push("cloud-hypervisor-static");
    #[cfg(target_arch = "aarch64")]
    ch_release_path.push("cloud-hypervisor-static-aarch64");

    ch_release_path.into_os_string().into_string().unwrap()
}

fn prepare_vhost_user_net_daemon(
    tmp_dir: &TempDir,
    ip: &str,
    tap: Option<&str>,
    mtu: Option<u16>,
    num_queues: usize,
    client_mode: bool,
) -> (std::process::Command, String) {
    let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap());

    // Start the daemon
    let mut net_params = format!(
        "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}"
    );

    if let Some(tap) = tap {
        net_params.push_str(format!(",tap={tap}").as_str());
    }

    if let Some(mtu) = mtu {
        net_params.push_str(format!(",mtu={mtu}").as_str());
    }

    let mut command = Command::new(clh_command("vhost_user_net"));
    command.args(["--net-backend", net_params.as_str()]);

    (command, vunet_socket_path)
}
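
// For reference, a hypothetical call such as
// prepare_vhost_user_net_daemon(&tmp_dir, "192.168.100.1", Some("tap0"), Some(3000), 4, false)
// builds a `vhost_user_net` command whose backend argument looks like:
//   --net-backend ip=192.168.100.1,mask=255.255.255.0,socket=<tmp_dir>/vunet.sock,\
//     num_queues=4,queue_size=1024,client=false,tap=tap0,mtu=3000
// The IP, tap name and MTU above are made-up example values; the string layout
// simply follows the format! call in the helper above.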
fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) {
    let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap());
    let swtpm_socket_path = String::from(
        tmp_dir
            .as_path()
            .join("swtpm")
            .join("swtpm.sock")
            .to_str()
            .unwrap(),
    );
    std::fs::create_dir(&swtpm_tpm_dir).unwrap();

    let mut swtpm_command = Command::new("swtpm");
    let swtpm_args = [
        "socket",
        "--tpmstate",
        &format!("dir={swtpm_tpm_dir}"),
        "--ctrl",
        &format!("type=unixio,path={swtpm_socket_path}"),
        "--flags",
        "startup-clear",
        "--tpm2",
    ];
    swtpm_command.args(swtpm_args);

    (swtpm_command, swtpm_socket_path)
}

fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args([&format!("--api-socket={api_socket}"), command]);

    if let Some(arg) = arg {
        cmd.arg(arg);
    }
    let output = cmd.output().unwrap();
    if output.status.success() {
        true
    } else {
        eprintln!("Error running ch-remote command: {:?}", &cmd);
        let stderr = String::from_utf8_lossy(&output.stderr);
        eprintln!("stderr: {stderr}");
        false
    }
}

fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args([&format!("--api-socket={api_socket}"), command]);

    if let Some(arg) = arg {
        cmd.arg(arg);
    }

    let output = cmd.output().expect("Failed to launch ch-remote");

    (output.status.success(), output.stdout)
}

fn resize_command(
    api_socket: &str,
    desired_vcpus: Option<u8>,
    desired_ram: Option<usize>,
    desired_balloon: Option<usize>,
    event_file: Option<&str>,
) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args([&format!("--api-socket={api_socket}"), "resize"]);

    if let Some(desired_vcpus) = desired_vcpus {
        cmd.arg(format!("--cpus={desired_vcpus}"));
    }

    if let Some(desired_ram) = desired_ram {
        cmd.arg(format!("--memory={desired_ram}"));
    }

    if let Some(desired_balloon) = desired_balloon {
        cmd.arg(format!("--balloon={desired_balloon}"));
    }

    let ret = cmd.status().expect("Failed to launch ch-remote").success();

    if let Some(event_path) = event_file {
        let latest_events = [
            &MetaEvent {
                event: "resizing".to_string(),
                device_id: None,
            },
            &MetaEvent {
                event: "resized".to_string(),
                device_id: None,
            },
        ];
        // See: #5938
        thread::sleep(std::time::Duration::new(1, 0));
        assert!(check_latest_events_exact(&latest_events, event_path));
    }

    ret
}

fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args([
        &format!("--api-socket={api_socket}"),
        "resize-zone",
        &format!("--id={id}"),
        &format!("--size={desired_size}"),
    ]);

    cmd.status().expect("Failed to launch ch-remote").success()
}
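
// For reference, the helpers above shell out to `ch-remote`. With hypothetical
// argument values, resize_command(&api_socket, Some(4), Some(1 << 30), None, None) runs:
//   ch-remote --api-socket=<api_socket> resize --cpus=4 --memory=1073741824
// and resize_zone_command(&api_socket, "mem0", "4G") runs:
//   ch-remote --api-socket=<api_socket> resize-zone --id=mem0 --size=4G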
// setup OVS-DPDK bridge and ports
fn setup_ovs_dpdk() {
    // setup OVS-DPDK
    assert!(exec_host_command_status("service openvswitch-switch start").success());
    assert!(exec_host_command_status("ovs-vsctl init").success());
    assert!(
        exec_host_command_status("ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true")
            .success()
    );
    assert!(exec_host_command_status("service openvswitch-switch restart").success());

    // Create OVS-DPDK bridge and ports
    assert!(exec_host_command_status(
        "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev",
    )
    .success());
    assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success());
    assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success());
    assert!(exec_host_command_status("ip link set up dev ovsbr0").success());
    assert!(exec_host_command_status("service openvswitch-switch restart").success());
}

fn cleanup_ovs_dpdk() {
    assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success());
    exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2");
}

// Setup two guests and ensure they are connected through ovs-dpdk
fn setup_ovs_dpdk_guests(
    guest1: &Guest,
    guest2: &Guest,
    api_socket: &str,
    release_binary: bool,
) -> (Child, Child) {
    setup_ovs_dpdk();

    let clh_path = if !release_binary {
        clh_command("cloud-hypervisor")
    } else {
        cloud_hypervisor_release_path()
    };

    let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path)
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=0,shared=on"])
        .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest1.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"])
        .capture_output()
        .spawn()
        .unwrap();

    #[cfg(target_arch = "x86_64")]
    let guest_net_iface = "ens5";
    #[cfg(target_arch = "aarch64")]
    let guest_net_iface = "enp0s5";

    let r = std::panic::catch_unwind(|| {
        guest1.wait_vm_boot(None).unwrap();

        guest1
            .ssh_command(&format!(
                "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}"
            ))
            .unwrap();
        guest1
            .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
            .unwrap();

        let guest_ip = guest1.network.guest_ip.clone();
        thread::spawn(move || {
            ssh_command_ip(
                "nc -l 12345",
                &guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap();
        });
    });
    if r.is_err() {
        cleanup_ovs_dpdk();

        let _ = child1.kill();
        let output = child1.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // Explicitly mark that this block never returns
    }

    let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path)
        .args(["--api-socket", api_socket])
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=0,shared=on"])
        .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest2.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest2.wait_vm_boot(None).unwrap();

        guest2
            .ssh_command(&format!(
                "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}"
            ))
            .unwrap();
        guest2
            .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
            .unwrap();

        // Check the connection works properly between the two VMs
        guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap();
    });
    if r.is_err() {
        cleanup_ovs_dpdk();

        let _ = child1.kill();
        let _ = child2.kill();
        let output = child2.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // Explicitly mark that this block never returns
    }

    (child1, child2)
}

enum FwType {
    Ovmf,
    RustHypervisorFirmware,
}

fn fw_path(_fw_type: FwType) -> String {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut fw_path = workload_path;
    #[cfg(target_arch = "aarch64")]
    fw_path.push("CLOUDHV_EFI.fd");
    #[cfg(target_arch = "x86_64")]
    {
        match _fw_type {
            FwType::Ovmf => fw_path.push(OVMF_NAME),
            FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"),
        }
    }

    fw_path.to_str().unwrap().to_string()
}

#[derive(Debug)]
struct MetaEvent {
    event: String,
    device_id: Option<String>,
}

impl MetaEvent {
    pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool {
        let mut matched = false;
        if v["event"].as_str().unwrap() == self.event {
            if let Some(device_id) = &self.device_id {
                if v["properties"]["id"].as_str().unwrap() == device_id {
                    matched = true
                }
            } else {
                matched = true;
            }
        }
        matched
    }
}

// Parse the event_monitor file, which stores each event as a JSON object
// followed by a double newline.
fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> {
    let content = fs::read(event_file).unwrap();
    let mut ret = Vec::new();
    for entry in String::from_utf8_lossy(&content)
        .trim()
        .split("\n\n")
        .collect::<Vec<&str>>()
    {
        ret.push(serde_json::from_str(entry).unwrap());
    }

    ret
}

// Return true if all events from the input 'expected_events' are matched sequentially
// with events from the 'event_file'
fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    let len = expected_events.len();
    let mut idx = 0;
    for e in &json_events {
        if idx == len {
            break;
        }
        if expected_events[idx].match_with_json_event(e) {
            idx += 1;
        }
    }

    let ret = idx == len;

    if !ret {
        eprintln!(
            "\n\n==== Start 'check_sequential_events' failed ==== \
            \n\nexpected_events={:?}\nactual_events={:?} \
            \n\n==== End 'check_sequential_events' failed ====",
            expected_events, json_events,
        );
    }

    ret
}

// Return true if all events from the input 'expected_events' are matched exactly
// with events from the 'event_file'
fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    assert!(expected_events.len() <= json_events.len());
    let json_events = &json_events[..expected_events.len()];

    for (idx, e) in json_events.iter().enumerate() {
        if !expected_events[idx].match_with_json_event(e) {
            eprintln!(
                "\n\n==== Start 'check_sequential_events_exact' failed ==== \
                \n\nexpected_events={:?}\nactual_events={:?} \
                \n\n==== End 'check_sequential_events_exact' failed ====",
                expected_events, json_events,
            );

            return false;
        }
    }

    true
}

// Return true if events from the input 'latest_events' are matched exactly
// with the most recent events from the 'event_file'
fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    assert!(latest_events.len() <= json_events.len());
    let json_events = &json_events[(json_events.len() - latest_events.len())..];

    for (idx, e) in json_events.iter().enumerate() {
        if !latest_events[idx].match_with_json_event(e) {
            eprintln!(
                "\n\n==== Start 'check_latest_events_exact' failed ==== \
                \n\nexpected_events={:?}\nactual_events={:?} \
                \n\n==== End 'check_latest_events_exact' failed ====",
                latest_events, json_events,
            );

            return false;
        }
    }

    true
}
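
// Illustrative sketch (not called by any test): how a `MetaEvent` is matched
// against one JSON event parsed from the event-monitor file. The event name
// and device id below are hypothetical examples; the file itself stores one
// such JSON object per event, separated by a blank line (see
// `parse_event_file`).
fn _example_meta_event_matching() {
    let event = serde_json::json!({
        "event": "device-added",
        "properties": { "id": "myfs0" }
    });

    // A `MetaEvent` without a device id matches on the event name alone...
    let by_name = MetaEvent {
        event: "device-added".to_string(),
        device_id: None,
    };
    assert!(by_name.match_with_json_event(&event));

    // ...while one carrying a device id also requires `properties.id` to match.
    let by_name_and_id = MetaEvent {
        event: "device-added".to_string(),
        device_id: Some("myfs0".to_string()),
    };
    assert!(by_name_and_id.match_with_json_event(&event));
}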

fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let total_vcpus = threads_per_core * cores_per_package * packages;
    let direct_kernel_boot_path = direct_kernel_boot_path();
    let mut kernel_path = direct_kernel_boot_path.to_str().unwrap();
    let fw_path = fw_path(FwType::RustHypervisorFirmware);
    if use_fw {
        kernel_path = fw_path.as_str();
    }

    let mut child = GuestCommand::new(&guest)
        .args([
            "--cpus",
            &format!(
                "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}"
            ),
        ])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();
        assert_eq!(
            guest.get_cpu_count().unwrap_or_default(),
            u32::from(total_vcpus)
        );
        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            threads_per_core
        );

        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            cores_per_package
        );

        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            packages
        );

        #[cfg(target_arch = "x86_64")]
        {
            let mut cpu_id = 0;
            for package_id in 0..packages {
                for core_id in 0..cores_per_package {
                    for _ in 0..threads_per_core {
                        assert_eq!(
                            guest
                                .ssh_command(&format!("cat /sys/devices/system/cpu/cpu{cpu_id}/topology/physical_package_id"))
                                .unwrap()
                                .trim()
                                .parse::<u8>()
                                .unwrap_or(0),
                            package_id
                        );

                        assert_eq!(
                            guest
                                .ssh_command(&format!(
                                    "cat /sys/devices/system/cpu/cpu{cpu_id}/topology/core_id"
                                ))
                                .unwrap()
                                .trim()
                                .parse::<u8>()
                                .unwrap_or(0),
                            core_id
                        );

                        cpu_id += 1;
                    }
                }
            }
        }
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[allow(unused_variables)]
fn _test_guest_numa_nodes(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);
    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=6,max=12"])
        .args(["--memory", "size=0,hotplug_method=virtio-mem"])
        .args([
            "--memory-zone",
            "id=mem0,size=1G,hotplug_size=3G",
            "id=mem1,size=2G,hotplug_size=3G",
            "id=mem2,size=3G,hotplug_size=3G",
        ])
        .args([
            "--numa",
            "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
            "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
            "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
        ])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--api-socket", &api_socket])
        .capture_output()
        .default_disks()
        .default_net()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        guest.check_numa_common(
            Some(&[960_000, 1_920_000, 2_880_000]),
            Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
            Some(&["10 15 20", "20 10 25", "25 30 10"]),
        );

        // AArch64 currently does not support hotplug, and therefore we only
        // test the hotplug-related functionality on x86_64 here.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Resize every memory zone and check each associated NUMA node
            // has been assigned the right amount of memory.
            resize_zone_command(&api_socket, "mem0", "4G");
            resize_zone_command(&api_socket, "mem1", "4G");
            resize_zone_command(&api_socket, "mem2", "4G");
            // Resize to the maximum number of CPUs and check each NUMA
            // node has been assigned the right CPU set.
            resize_command(&api_socket, Some(12), None, None, None);
            thread::sleep(std::time::Duration::new(5, 0));

            guest.check_numa_common(
                Some(&[3_840_000, 3_840_000, 3_840_000]),
                Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]),
                None,
            );
        }
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}
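
// For reference, the distance matrix reported by the guest in the test above
// follows directly from the `--numa` arguments: each node reports 10 for
// itself (the conventional local distance) and the configured `distances`
// values for its peers, which is why `check_numa_common` expects
// "10 15 20", "20 10 25" and "25 30 10" for nodes 0, 1 and 2 respectively.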

#[allow(unused_variables)]
fn _test_power_button(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let mut cmd = GuestCommand::new(&guest);
    let api_socket = temp_api_path(&guest.tmp_dir);

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    cmd.args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .capture_output()
        .default_disks()
        .default_net()
        .args(["--api-socket", &api_socket]);

    let child = cmd.spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();
        assert!(remote_command(&api_socket, "power-button", None));
    });

    let output = child.wait_with_output().unwrap();
    assert!(output.status.success());
    handle_child_output(r, &output);
}

type PrepareNetDaemon = dyn Fn(
    &TempDir,
    &str,
    Option<&str>,
    Option<u16>,
    usize,
    bool,
) -> (std::process::Command, String);

fn test_vhost_user_net(
    tap: Option<&str>,
    num_queues: usize,
    prepare_daemon: &PrepareNetDaemon,
    generate_host_mac: bool,
    client_mode_daemon: bool,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();

    let host_mac = if generate_host_mac {
        Some(MacAddr::local_random())
    } else {
        None
    };

    let mtu = Some(3000);

    let (mut daemon_command, vunet_socket_path) = prepare_daemon(
        &guest.tmp_dir,
        &guest.network.host_ip,
        tap,
        mtu,
        num_queues,
        client_mode_daemon,
    );

    let net_params = format!(
        "vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000",
        guest.network.guest_mac,
        vunet_socket_path,
        num_queues,
        if let Some(host_mac) = host_mac {
            format!(",host_mac={host_mac}")
        } else {
            "".to_owned()
        },
        if client_mode_daemon {
            "server"
        } else {
            "client"
        },
    );

    let mut ch_command = GuestCommand::new(&guest);
    ch_command
        .args(["--cpus", format!("boot={}", num_queues / 2).as_str()])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", net_params.as_str()])
        .args(["--api-socket", &api_socket])
        .capture_output();

    let mut daemon_child: std::process::Child;
    let mut child: std::process::Child;

    if client_mode_daemon {
        child = ch_command.spawn().unwrap();
        // Make sure the VMM is waiting for the backend to connect
        thread::sleep(std::time::Duration::new(10, 0));
        daemon_child = daemon_command.spawn().unwrap();
    } else {
        daemon_child = daemon_command.spawn().unwrap();
        // Make sure the backend is waiting for the VMM to connect
        thread::sleep(std::time::Duration::new(10, 0));
        child = ch_command.spawn().unwrap();
    }

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if let Some(tap_name) = tap {
            let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}"));
            assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
        }

        if let Some(host_mac) = host_mac {
            let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}"));
            assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
        }

        #[cfg(target_arch = "aarch64")]
        let iface = "enp0s4";
        #[cfg(target_arch = "x86_64")]
        let iface = "ens4";

        assert_eq!(
            guest
                .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
                .unwrap()
                .trim(),
            "3000"
        );

        // 1 network interface + default localhost ==> 2 interfaces
        // It's important to note that this test is fully exercising the
        // vhost-user-net implementation and the associated backend since
        // it does not define any --net network interface. That means all
        // the ssh communication in that test happens through the network
        // interface backed by vhost-user-net.
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // The following PCI devices will appear on the guest with PCI-MSI
        // interrupt vectors assigned:
        // 1 virtio-console with 3 vectors: config, Rx, Tx
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-rng with 2 vectors: config, Request
        // Since virtio-net has 2 queue pairs, its vectors are as follows:
        // 1 virtio-net with 5 vectors: config, Rx (2), Tx (2)
        // Based on the above, the total number of vectors should be 14,
        // i.e. 10 + num_queues.
        #[cfg(target_arch = "x86_64")]
        let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
        #[cfg(target_arch = "aarch64")]
        let grep_cmd = "grep -c ITS-MSI /proc/interrupts";
        assert_eq!(
            guest
                .ssh_command(grep_cmd)
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            10 + (num_queues as u32)
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            // Here by simply checking the size (through ssh), we validate
            // the connection is still working, which means vhost-user-net
            // keeps working after the resize.
            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
        }
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    thread::sleep(std::time::Duration::new(5, 0));
    let _ = daemon_child.kill();
    let _ = daemon_child.wait();

    handle_child_output(r, &output);
}

type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String);

fn test_vhost_user_blk(
    num_queues: usize,
    readonly: bool,
    direct: bool,
    prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();

    let (blk_params, daemon_child) = {
        let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
        // Start the daemon
        let (daemon_child, vubd_socket_path) =
            prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct);

        (
            format!(
                "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128",
            ),
            Some(daemon_child),
        )
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", format!("boot={num_queues}").as_str()])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
            )
            .as_str(),
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
            blk_params.as_str(),
        ])
        .default_net()
        .args(["--api-socket", &api_socket])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check that /dev/vdc exists and that the block size is 16M.
        assert_eq!(
            guest
                .ssh_command("lsblk | grep vdc | grep -c 16M")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            1
        );

        // Check if this block is RO or RW.
        assert_eq!(
            guest
                .ssh_command("lsblk | grep vdc | awk '{print $5}'")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            readonly as u32
        );

        // Check if the number of queues in /sys/block/vdc/mq matches the
        // expected num_queues.
        assert_eq!(
            guest
                .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            num_queues as u32
        );

        // Mount the device
        let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" };
        guest.ssh_command("mkdir mount_image").unwrap();
        guest
            .ssh_command(
                format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(),
            )
            .unwrap();

        // Check the content of the block device. The file "foo" should
        // contain "bar".
        assert_eq!(
            guest.ssh_command("cat mount_image/foo").unwrap().trim(),
            "bar"
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

            // Check again the content of the block device after the resize
            // has been performed.
            assert_eq!(
                guest.ssh_command("cat mount_image/foo").unwrap().trim(),
                "bar"
            );
        }

        // Unmount the device
        guest.ssh_command("sudo umount /dev/vdc").unwrap();
        guest.ssh_command("rm -r mount_image").unwrap();
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    if let Some(mut daemon_child) = daemon_child {
        thread::sleep(std::time::Duration::new(5, 0));
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn test_boot_from_vhost_user_blk(
    num_queues: usize,
    readonly: bool,
    direct: bool,
    prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    let kernel_path = direct_kernel_boot_path();

    let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap();

    let (blk_boot_params, daemon_child) = {
        let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
        // Start the daemon
        let (daemon_child, vubd_socket_path) = prepare_daemon(
            &guest.tmp_dir,
            disk_path.as_str(),
            num_queues,
            readonly,
            direct,
        );

        (
            format!(
                "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128",
            ),
            Some(daemon_child),
        )
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", format!("boot={num_queues}").as_str()])
        .args(["--memory", "size=512M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            blk_boot_params.as_str(),
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
        ])
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Just check the VM booted correctly.
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });
    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    if let Some(mut daemon_child) = daemon_child {
        thread::sleep(std::time::Duration::new(5, 0));
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn _test_virtio_fs(
    prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String),
    hotplug: bool,
    pci_segment: Option<u16>,
) {
    #[cfg(target_arch = "aarch64")]
    let focal_image = if hotplug {
        FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string()
    } else {
        FOCAL_IMAGE_NAME.to_string()
    };
    #[cfg(target_arch = "x86_64")]
    let focal_image = FOCAL_IMAGE_NAME.to_string();
    let focal = UbuntuDiskConfig::new(focal_image);
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut shared_dir = workload_path;
    shared_dir.push("shared_dir");

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if hotplug {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let (mut daemon_child, virtiofsd_socket_path) =
        prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());

    let mut guest_command = GuestCommand::new(&guest);
    guest_command
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .args(["--api-socket", &api_socket]);
    if pci_segment.is_some() {
        guest_command.args([
            "--platform",
            &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
        ]);
    }

    let fs_params = format!(
        "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
        virtiofsd_socket_path,
        if let Some(pci_segment) = pci_segment {
            format!(",pci_segment={pci_segment}")
        } else {
            "".to_owned()
        }
    );

    if !hotplug {
        guest_command.args(["--fs", fs_params.as_str()]);
    }

    let mut child = guest_command.capture_output().spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if hotplug {
            // Add fs to the VM
            let (cmd_success, cmd_output) =
                remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
            assert!(cmd_success);

            if let Some(pci_segment) = pci_segment {
                assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                    "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
                )));
            } else {
                assert!(String::from_utf8_lossy(&cmd_output)
                    .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
            }

            thread::sleep(std::time::Duration::new(10, 0));
        }

        // Mount shared directory through virtio_fs filesystem
        guest
            .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
            .unwrap();

        // Check file1 exists and its content is "foo"
        assert_eq!(
            guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
            "foo"
        );
        // Check file2 does not exist
        guest
            .ssh_command("[ ! -f 'mount_dir/file2' ] || true")
            .unwrap();

        // Check file3 exists and its content is "bar"
        assert_eq!(
            guest.ssh_command("cat mount_dir/file3").unwrap().trim(),
            "bar"
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(30, 0));
            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

            // After the resize, check again that file1 exists and its
            // content is "foo".
            assert_eq!(
                guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
                "foo"
            );
        }

        if hotplug {
            // Remove from VM
            guest.ssh_command("sudo umount mount_dir").unwrap();
            assert!(remote_command(&api_socket, "remove-device", Some("myfs0")));
        }
    });

    let (r, hotplug_daemon_child) = if r.is_ok() && hotplug {
        thread::sleep(std::time::Duration::new(10, 0));
        let (daemon_child, virtiofsd_socket_path) =
            prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());

        let r = std::panic::catch_unwind(|| {
            thread::sleep(std::time::Duration::new(10, 0));
            let fs_params = format!(
                "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
                virtiofsd_socket_path,
                if let Some(pci_segment) = pci_segment {
                    format!(",pci_segment={pci_segment}")
                } else {
                    "".to_owned()
                }
            );

            // Add back and check it works
            let (cmd_success, cmd_output) =
                remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
            assert!(cmd_success);
            if let Some(pci_segment) = pci_segment {
                assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                    "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
                )));
            } else {
                assert!(String::from_utf8_lossy(&cmd_output)
                    .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
            }

            thread::sleep(std::time::Duration::new(10, 0));
            // Mount shared directory through virtio_fs filesystem
            guest
                .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
                .unwrap();

            // Check file1 exists and its content is "foo"
            assert_eq!(
                guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
                "foo"
            );
        });

        (r, Some(daemon_child))
    } else {
        (r, None)
    };

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    let _ = daemon_child.kill();
    let _ = daemon_child.wait();

    if let Some(mut daemon_child) = hotplug_daemon_child {
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn test_virtio_pmem(discard_writes: bool, specify_size: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    let kernel_path = direct_kernel_boot_path();

    let pmem_temp_file = TempFile::new().unwrap();
    pmem_temp_file.as_file().set_len(128 << 20).unwrap();

    std::process::Command::new("mkfs.ext4")
        .arg(pmem_temp_file.as_path())
        .output()
        .expect("Expect creating disk image to succeed");

    let mut child = GuestCommand::new(&guest)
.args(["--cpus", "boot=1"]) 1797 .args(["--memory", "size=512M"]) 1798 .args(["--kernel", kernel_path.to_str().unwrap()]) 1799 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1800 .default_disks() 1801 .default_net() 1802 .args([ 1803 "--pmem", 1804 format!( 1805 "file={}{}{}", 1806 pmem_temp_file.as_path().to_str().unwrap(), 1807 if specify_size { ",size=128M" } else { "" }, 1808 if discard_writes { 1809 ",discard_writes=on" 1810 } else { 1811 "" 1812 } 1813 ) 1814 .as_str(), 1815 ]) 1816 .capture_output() 1817 .spawn() 1818 .unwrap(); 1819 1820 let r = std::panic::catch_unwind(|| { 1821 guest.wait_vm_boot(None).unwrap(); 1822 1823 // Check for the presence of /dev/pmem0 1824 assert_eq!( 1825 guest.ssh_command("ls /dev/pmem0").unwrap().trim(), 1826 "/dev/pmem0" 1827 ); 1828 1829 // Check changes persist after reboot 1830 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1831 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 1832 guest 1833 .ssh_command("echo test123 | sudo tee /mnt/test") 1834 .unwrap(); 1835 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 1836 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 1837 1838 guest.reboot_linux(0, None); 1839 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1840 assert_eq!( 1841 guest 1842 .ssh_command("sudo cat /mnt/test || true") 1843 .unwrap() 1844 .trim(), 1845 if discard_writes { "" } else { "test123" } 1846 ); 1847 }); 1848 1849 kill_child(&mut child); 1850 let output = child.wait_with_output().unwrap(); 1851 1852 handle_child_output(r, &output); 1853 } 1854 1855 fn get_fd_count(pid: u32) -> usize { 1856 fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count() 1857 } 1858 1859 fn _test_virtio_vsock(hotplug: bool) { 1860 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1861 let guest = Guest::new(Box::new(focal)); 1862 1863 #[cfg(target_arch = "x86_64")] 1864 let kernel_path = direct_kernel_boot_path(); 1865 #[cfg(target_arch = "aarch64")] 1866 let kernel_path = if hotplug { 1867 edk2_path() 1868 } else { 1869 direct_kernel_boot_path() 1870 }; 1871 1872 let socket = temp_vsock_path(&guest.tmp_dir); 1873 let api_socket = temp_api_path(&guest.tmp_dir); 1874 1875 let mut cmd = GuestCommand::new(&guest); 1876 cmd.args(["--api-socket", &api_socket]); 1877 cmd.args(["--cpus", "boot=1"]); 1878 cmd.args(["--memory", "size=512M"]); 1879 cmd.args(["--kernel", kernel_path.to_str().unwrap()]); 1880 cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]); 1881 cmd.default_disks(); 1882 cmd.default_net(); 1883 1884 if !hotplug { 1885 cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]); 1886 } 1887 1888 let mut child = cmd.capture_output().spawn().unwrap(); 1889 1890 let r = std::panic::catch_unwind(|| { 1891 guest.wait_vm_boot(None).unwrap(); 1892 1893 if hotplug { 1894 let (cmd_success, cmd_output) = remote_command_w_output( 1895 &api_socket, 1896 "add-vsock", 1897 Some(format!("cid=3,socket={socket},id=test0").as_str()), 1898 ); 1899 assert!(cmd_success); 1900 assert!(String::from_utf8_lossy(&cmd_output) 1901 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 1902 thread::sleep(std::time::Duration::new(10, 0)); 1903 // Check adding a second one fails 1904 assert!(!remote_command( 1905 &api_socket, 1906 "add-vsock", 1907 Some("cid=1234,socket=/tmp/fail") 1908 )); 1909 } 1910 1911 // Validate vsock works as expected. 
1912 guest.check_vsock(socket.as_str()); 1913 guest.reboot_linux(0, None); 1914 // Validate vsock still works after a reboot. 1915 guest.check_vsock(socket.as_str()); 1916 1917 if hotplug { 1918 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 1919 } 1920 }); 1921 1922 kill_child(&mut child); 1923 let output = child.wait_with_output().unwrap(); 1924 1925 handle_child_output(r, &output); 1926 } 1927 1928 fn get_ksm_pages_shared() -> u32 { 1929 fs::read_to_string("/sys/kernel/mm/ksm/pages_shared") 1930 .unwrap() 1931 .trim() 1932 .parse::<u32>() 1933 .unwrap() 1934 } 1935 1936 fn test_memory_mergeable(mergeable: bool) { 1937 let memory_param = if mergeable { 1938 "mergeable=on" 1939 } else { 1940 "mergeable=off" 1941 }; 1942 1943 // We are assuming the rest of the system in our CI is not using mergeable memory 1944 let ksm_ps_init = get_ksm_pages_shared(); 1945 assert!(ksm_ps_init == 0); 1946 1947 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1948 let guest1 = Guest::new(Box::new(focal1)); 1949 let mut child1 = GuestCommand::new(&guest1) 1950 .args(["--cpus", "boot=1"]) 1951 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1952 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1953 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1954 .default_disks() 1955 .args(["--net", guest1.default_net_string().as_str()]) 1956 .args(["--serial", "tty", "--console", "off"]) 1957 .capture_output() 1958 .spawn() 1959 .unwrap(); 1960 1961 let r = std::panic::catch_unwind(|| { 1962 guest1.wait_vm_boot(None).unwrap(); 1963 }); 1964 if r.is_err() { 1965 kill_child(&mut child1); 1966 let output = child1.wait_with_output().unwrap(); 1967 handle_child_output(r, &output); 1968 panic!("Test should have already failed/panicked"); // Explicitly mark that this block never returns 1969 } 1970 1971 let ksm_ps_guest1 = get_ksm_pages_shared(); 1972 1973 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1974 let guest2 = Guest::new(Box::new(focal2)); 1975 let mut child2 = GuestCommand::new(&guest2) 1976 .args(["--cpus", "boot=1"]) 1977 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1978 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1979 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1980 .default_disks() 1981 .args(["--net", guest2.default_net_string().as_str()]) 1982 .args(["--serial", "tty", "--console", "off"]) 1983 .capture_output() 1984 .spawn() 1985 .unwrap(); 1986 1987 let r = std::panic::catch_unwind(|| { 1988 guest2.wait_vm_boot(None).unwrap(); 1989 let ksm_ps_guest2 = get_ksm_pages_shared(); 1990 1991 if mergeable { 1992 println!( 1993 "ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'" 1994 ); 1995 // We expect the number of shared pages to increase as the number of VMs increases 1996 assert!(ksm_ps_guest1 < ksm_ps_guest2); 1997 } else { 1998 assert!(ksm_ps_guest1 == 0); 1999 assert!(ksm_ps_guest2 == 0); 2000 } 2001 }); 2002 2003 kill_child(&mut child1); 2004 kill_child(&mut child2); 2005 2006 let output = child1.wait_with_output().unwrap(); 2007 child2.wait().unwrap(); 2008 2009 handle_child_output(r, &output); 2010 } 2011 2012 fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> { 2013 let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap(); 2014 let reader = io::BufReader::new(smaps); 2015 2016 let mut skip_map: bool = false; 2017 let mut region_name: String = "".to_string(); 2018 let mut
region_maps = HashMap::new(); 2019 for line in reader.lines() { 2020 let l = line.unwrap(); 2021 2022 if l.contains('-') { 2023 let values: Vec<&str> = l.split_whitespace().collect(); 2024 region_name = values.last().unwrap().trim().to_string(); 2025 if region_name == "0" { 2026 region_name = "anonymous".to_string() 2027 } 2028 } 2029 2030 // Each section begins with something that looks like: 2031 // Size: 2184 kB 2032 if l.starts_with("Size:") { 2033 let values: Vec<&str> = l.split_whitespace().collect(); 2034 let map_size = values[1].parse::<u32>().unwrap(); 2035 // We skip the assigned guest RAM map, its RSS is only 2036 // dependent on the guest actual memory usage. 2037 // Everything else can be added to the VMM overhead. 2038 skip_map = map_size >= guest_memory_size; 2039 continue; 2040 } 2041 2042 // If this is a map we're taking into account, then we only 2043 // count the RSS. The sum of all counted RSS is the VMM overhead. 2044 if !skip_map && l.starts_with("Rss:") { 2045 let values: Vec<&str> = l.split_whitespace().collect(); 2046 let value = values[1].trim().parse::<u32>().unwrap(); 2047 *region_maps.entry(region_name.clone()).or_insert(0) += value; 2048 } 2049 } 2050 2051 region_maps 2052 } 2053 2054 fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 { 2055 let mut total = 0; 2056 2057 for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) { 2058 eprintln!("{region_name}: {value}"); 2059 total += value; 2060 } 2061 2062 total 2063 } 2064 2065 fn process_rss_kib(pid: u32) -> usize { 2066 let command = format!("ps -q {pid} -o rss="); 2067 let rss = exec_host_command_output(&command); 2068 String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap() 2069 } 2070 2071 // 10MB is our maximum accepted overhead. 2072 const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024; 2073 2074 #[derive(PartialEq, Eq, PartialOrd)] 2075 struct Counters { 2076 rx_bytes: u64, 2077 rx_frames: u64, 2078 tx_bytes: u64, 2079 tx_frames: u64, 2080 read_bytes: u64, 2081 write_bytes: u64, 2082 read_ops: u64, 2083 write_ops: u64, 2084 } 2085 2086 fn get_counters(api_socket: &str) -> Counters { 2087 // Get counters 2088 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None); 2089 assert!(cmd_success); 2090 2091 let counters: HashMap<&str, HashMap<&str, u64>> = 2092 serde_json::from_slice(&cmd_output).unwrap_or_default(); 2093 2094 let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap(); 2095 let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap(); 2096 let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap(); 2097 let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap(); 2098 2099 let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap(); 2100 let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap(); 2101 let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap(); 2102 let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap(); 2103 2104 Counters { 2105 rx_bytes, 2106 rx_frames, 2107 tx_bytes, 2108 tx_frames, 2109 read_bytes, 2110 write_bytes, 2111 read_ops, 2112 write_ops, 2113 } 2114 } 2115 2116 fn pty_read(mut pty: std::fs::File) -> Receiver<String> { 2117 let (tx, rx) = mpsc::channel::<String>(); 2118 thread::spawn(move || loop { 2119 thread::sleep(std::time::Duration::new(1, 0)); 2120 let mut buf = [0; 512]; 2121 match pty.read(&mut buf) { 2122 Ok(_bytes) => { 2123 let output = 
std::str::from_utf8(&buf).unwrap().to_string(); 2124 match tx.send(output) { 2125 Ok(_) => (), 2126 Err(_) => break, 2127 } 2128 } 2129 Err(_) => break, 2130 } 2131 }); 2132 rx 2133 } 2134 2135 fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf { 2136 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2137 assert!(cmd_success); 2138 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2139 assert_eq!("Pty", info["config"][pty_type]["mode"]); 2140 PathBuf::from( 2141 info["config"][pty_type]["file"] 2142 .as_str() 2143 .expect("Missing pty path"), 2144 ) 2145 } 2146 2147 // VFIO test network setup. 2148 // We reserve a different IP class for it: 172.18.0.0/24. 2149 #[cfg(target_arch = "x86_64")] 2150 fn setup_vfio_network_interfaces() { 2151 // 'vfio-br0' 2152 assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success()); 2153 assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success()); 2154 assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success()); 2155 // 'vfio-tap0' 2156 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success()); 2157 assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success()); 2158 assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success()); 2159 // 'vfio-tap1' 2160 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success()); 2161 assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success()); 2162 assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success()); 2163 // 'vfio-tap2' 2164 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success()); 2165 assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success()); 2166 assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success()); 2167 // 'vfio-tap3' 2168 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success()); 2169 assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success()); 2170 assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success()); 2171 } 2172 2173 // Tear VFIO test network down 2174 #[cfg(target_arch = "x86_64")] 2175 fn cleanup_vfio_network_interfaces() { 2176 assert!(exec_host_command_status("sudo ip link del vfio-br0").success()); 2177 assert!(exec_host_command_status("sudo ip link del vfio-tap0").success()); 2178 assert!(exec_host_command_status("sudo ip link del vfio-tap1").success()); 2179 assert!(exec_host_command_status("sudo ip link del vfio-tap2").success()); 2180 assert!(exec_host_command_status("sudo ip link del vfio-tap3").success()); 2181 } 2182 2183 fn balloon_size(api_socket: &str) -> u64 { 2184 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2185 assert!(cmd_success); 2186 2187 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2188 let total_mem = &info["config"]["memory"]["size"] 2189 .to_string() 2190 .parse::<u64>() 2191 .unwrap(); 2192 let actual_mem = &info["memory_actual_size"] 2193 .to_string() 2194 .parse::<u64>() 2195 .unwrap(); 2196 total_mem - actual_mem 2197 } 2198 2199 fn vm_state(api_socket: &str) -> String { 2200 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2201 assert!(cmd_success); 2202 2203 let info: serde_json::Value = 
serde_json::from_slice(&cmd_output).unwrap_or_default(); 2204 let state = &info["state"].as_str().unwrap(); 2205 2206 state.to_string() 2207 } 2208 2209 // This test validates that it can find the virtio-iommu device at first. 2210 // It also verifies that both disks and the network card are attached to 2211 // the virtual IOMMU by looking at /sys/kernel/iommu_groups directory. 2212 // The last interesting part of this test is that it exercises the network 2213 // interface attached to the virtual IOMMU since this is the one used to 2214 // send all commands through SSH. 2215 fn _test_virtio_iommu(acpi: bool) { 2216 // Virtio-iommu support is ready in recent kernel (v5.14). But the kernel in 2217 // Focal image is still old. 2218 // So if ACPI is enabled on AArch64, we use a modified Focal image in which 2219 // the kernel binary has been updated. 2220 #[cfg(target_arch = "aarch64")] 2221 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 2222 #[cfg(target_arch = "x86_64")] 2223 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2224 let focal = UbuntuDiskConfig::new(focal_image); 2225 let guest = Guest::new(Box::new(focal)); 2226 2227 #[cfg(target_arch = "x86_64")] 2228 let kernel_path = direct_kernel_boot_path(); 2229 #[cfg(target_arch = "aarch64")] 2230 let kernel_path = if acpi { 2231 edk2_path() 2232 } else { 2233 direct_kernel_boot_path() 2234 }; 2235 2236 let mut child = GuestCommand::new(&guest) 2237 .args(["--cpus", "boot=1"]) 2238 .args(["--memory", "size=512M"]) 2239 .args(["--kernel", kernel_path.to_str().unwrap()]) 2240 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2241 .args([ 2242 "--disk", 2243 format!( 2244 "path={},iommu=on", 2245 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2246 ) 2247 .as_str(), 2248 format!( 2249 "path={},iommu=on", 2250 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2251 ) 2252 .as_str(), 2253 ]) 2254 .args(["--net", guest.default_net_string_w_iommu().as_str()]) 2255 .capture_output() 2256 .spawn() 2257 .unwrap(); 2258 2259 let r = std::panic::catch_unwind(|| { 2260 guest.wait_vm_boot(None).unwrap(); 2261 2262 // Verify the virtio-iommu device is present. 2263 assert!(guest 2264 .does_device_vendor_pair_match("0x1057", "0x1af4") 2265 .unwrap_or_default()); 2266 2267 // On AArch64, if the guest system boots from FDT, the behavior of IOMMU is a bit 2268 // different with ACPI. 2269 // All devices on the PCI bus will be attached to the virtual IOMMU, except the 2270 // virtio-iommu device itself. So these devices will all be added to IOMMU groups, 2271 // and appear under folder '/sys/kernel/iommu_groups/'. 2272 // The result is, in the case of FDT, IOMMU group '0' contains "0000:00:01.0" 2273 // which is the console. The first disk "0000:00:02.0" is in group '1'. 2274 // While on ACPI, console device is not attached to IOMMU. So the IOMMU group '0' 2275 // contains "0000:00:02.0" which is the first disk. 2276 // 2277 // Verify the iommu group of the first disk. 2278 let iommu_group = !acpi as i32; 2279 assert_eq!( 2280 guest 2281 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2282 .unwrap() 2283 .trim(), 2284 "0000:00:02.0" 2285 ); 2286 2287 // Verify the iommu group of the second disk. 2288 let iommu_group = if acpi { 1 } else { 2 }; 2289 assert_eq!( 2290 guest 2291 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2292 .unwrap() 2293 .trim(), 2294 "0000:00:03.0" 2295 ); 2296 2297 // Verify the iommu group of the network card. 
2298 let iommu_group = if acpi { 2 } else { 3 }; 2299 assert_eq!( 2300 guest 2301 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2302 .unwrap() 2303 .trim(), 2304 "0000:00:04.0" 2305 ); 2306 }); 2307 2308 kill_child(&mut child); 2309 let output = child.wait_with_output().unwrap(); 2310 2311 handle_child_output(r, &output); 2312 } 2313 2314 fn get_reboot_count(guest: &Guest) -> u32 { 2315 guest 2316 .ssh_command("sudo last | grep -c reboot") 2317 .unwrap() 2318 .trim() 2319 .parse::<u32>() 2320 .unwrap_or_default() 2321 } 2322 2323 fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) { 2324 // Check for PCI device 2325 assert!(guest 2326 .does_device_vendor_pair_match("0x1063", "0x1af4") 2327 .unwrap_or_default()); 2328 2329 // Enable systemd watchdog 2330 guest 2331 .ssh_command(&format!( 2332 "echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf" 2333 )) 2334 .unwrap(); 2335 2336 guest.ssh_command("sudo systemctl daemon-reexec").unwrap(); 2337 } 2338 2339 fn make_guest_panic(guest: &Guest) { 2340 // Check for pvpanic device 2341 assert!(guest 2342 .does_device_vendor_pair_match("0x0011", "0x1b36") 2343 .unwrap_or_default()); 2344 2345 // Trigger guest a panic 2346 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 2347 } 2348 2349 mod common_parallel { 2350 use std::{fs::OpenOptions, io::SeekFrom}; 2351 2352 use crate::*; 2353 2354 #[test] 2355 #[cfg(target_arch = "x86_64")] 2356 fn test_focal_hypervisor_fw() { 2357 test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME) 2358 } 2359 2360 #[test] 2361 #[cfg(target_arch = "x86_64")] 2362 fn test_focal_ovmf() { 2363 test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME) 2364 } 2365 2366 #[cfg(target_arch = "x86_64")] 2367 fn test_simple_launch(fw_path: String, disk_path: &str) { 2368 let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string())); 2369 let guest = Guest::new(disk_config); 2370 let event_path = temp_event_monitor_path(&guest.tmp_dir); 2371 2372 let mut child = GuestCommand::new(&guest) 2373 .args(["--cpus", "boot=1"]) 2374 .args(["--memory", "size=512M"]) 2375 .args(["--kernel", fw_path.as_str()]) 2376 .default_disks() 2377 .default_net() 2378 .args(["--serial", "tty", "--console", "off"]) 2379 .args(["--event-monitor", format!("path={event_path}").as_str()]) 2380 .capture_output() 2381 .spawn() 2382 .unwrap(); 2383 2384 let r = std::panic::catch_unwind(|| { 2385 guest.wait_vm_boot(Some(120)).unwrap(); 2386 2387 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2388 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2389 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 2390 2391 let expected_sequential_events = [ 2392 &MetaEvent { 2393 event: "starting".to_string(), 2394 device_id: None, 2395 }, 2396 &MetaEvent { 2397 event: "booting".to_string(), 2398 device_id: None, 2399 }, 2400 &MetaEvent { 2401 event: "booted".to_string(), 2402 device_id: None, 2403 }, 2404 &MetaEvent { 2405 event: "activated".to_string(), 2406 device_id: Some("_disk0".to_string()), 2407 }, 2408 &MetaEvent { 2409 event: "reset".to_string(), 2410 device_id: Some("_disk0".to_string()), 2411 }, 2412 ]; 2413 assert!(check_sequential_events( 2414 &expected_sequential_events, 2415 &event_path 2416 )); 2417 2418 // It's been observed on the Bionic image that udev and snapd 2419 // services can cause some delay in the VM's 
shutdown. Disabling 2420 // them improves the reliability of this test. 2421 let _ = guest.ssh_command("sudo systemctl disable udev"); 2422 let _ = guest.ssh_command("sudo systemctl stop udev"); 2423 let _ = guest.ssh_command("sudo systemctl disable snapd"); 2424 let _ = guest.ssh_command("sudo systemctl stop snapd"); 2425 2426 guest.ssh_command("sudo poweroff").unwrap(); 2427 thread::sleep(std::time::Duration::new(20, 0)); 2428 let latest_events = [ 2429 &MetaEvent { 2430 event: "shutdown".to_string(), 2431 device_id: None, 2432 }, 2433 &MetaEvent { 2434 event: "deleted".to_string(), 2435 device_id: None, 2436 }, 2437 &MetaEvent { 2438 event: "shutdown".to_string(), 2439 device_id: None, 2440 }, 2441 ]; 2442 assert!(check_latest_events_exact(&latest_events, &event_path)); 2443 }); 2444 2445 kill_child(&mut child); 2446 let output = child.wait_with_output().unwrap(); 2447 2448 handle_child_output(r, &output); 2449 } 2450 2451 #[test] 2452 fn test_multi_cpu() { 2453 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 2454 let jammy = UbuntuDiskConfig::new(jammy_image); 2455 let guest = Guest::new(Box::new(jammy)); 2456 2457 let mut cmd = GuestCommand::new(&guest); 2458 cmd.args(["--cpus", "boot=2,max=4"]) 2459 .args(["--memory", "size=512M"]) 2460 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2461 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2462 .capture_output() 2463 .default_disks() 2464 .default_net(); 2465 2466 let mut child = cmd.spawn().unwrap(); 2467 2468 let r = std::panic::catch_unwind(|| { 2469 guest.wait_vm_boot(Some(120)).unwrap(); 2470 2471 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 2472 2473 assert_eq!( 2474 guest 2475 .ssh_command( 2476 r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""# 2477 ) 2478 .unwrap() 2479 .trim(), 2480 "smp: Brought up 1 node, 2 CPUs" 2481 ); 2482 }); 2483 2484 kill_child(&mut child); 2485 let output = child.wait_with_output().unwrap(); 2486 2487 handle_child_output(r, &output); 2488 } 2489 2490 #[test] 2491 fn test_cpu_topology_421() { 2492 test_cpu_topology(4, 2, 1, false); 2493 } 2494 2495 #[test] 2496 fn test_cpu_topology_142() { 2497 test_cpu_topology(1, 4, 2, false); 2498 } 2499 2500 #[test] 2501 fn test_cpu_topology_262() { 2502 test_cpu_topology(2, 6, 2, false); 2503 } 2504 2505 #[test] 2506 #[cfg(target_arch = "x86_64")] 2507 #[cfg(not(feature = "mshv"))] 2508 fn test_cpu_physical_bits() { 2509 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2510 let guest = Guest::new(Box::new(focal)); 2511 let max_phys_bits: u8 = 36; 2512 let mut child = GuestCommand::new(&guest) 2513 .args(["--cpus", &format!("max_phys_bits={max_phys_bits}")]) 2514 .args(["--memory", "size=512M"]) 2515 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2516 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2517 .default_disks() 2518 .default_net() 2519 .capture_output() 2520 .spawn() 2521 .unwrap(); 2522 2523 let r = std::panic::catch_unwind(|| { 2524 guest.wait_vm_boot(None).unwrap(); 2525 2526 assert!( 2527 guest 2528 .ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"") 2529 .unwrap() 2530 .trim() 2531 .parse::<u8>() 2532 .unwrap_or(max_phys_bits + 1) <= max_phys_bits, 2533 ); 2534 }); 2535 2536 kill_child(&mut child); 2537 let output = child.wait_with_output().unwrap(); 2538 2539 handle_child_output(r, &output); 2540 } 2541 2542 #[test] 2543 fn test_cpu_affinity() { 2544 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 
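        // The `--cpus` affinity syntax used below is `<vcpu>@[<host CPUs>]`:
        // `affinity=[0@[0,2],1@[1,3]]` pins vCPU0 to host CPUs {0,2} and vCPU1 to
        // {1,3}, and the assertions later read the effective mask back with
        // `taskset -pc` on the vCPU threads. A minimal sketch (hypothetical helper,
        // not used by this test) of building such a string from a vCPU -> host-CPU
        // mapping:
        #[allow(dead_code)]
        fn affinity_arg(pins: &[(u8, &[u8])]) -> String {
            let entries: Vec<String> = pins
                .iter()
                .map(|(vcpu, cpus)| {
                    let cpus: Vec<String> = cpus.iter().map(|c| c.to_string()).collect();
                    format!("{vcpu}@[{}]", cpus.join(","))
                })
                .collect();
            format!("boot={},affinity=[{}]", pins.len(), entries.join(","))
        }
        // e.g. affinity_arg(&[(0, &[0, 2]), (1, &[1, 3])]) yields
        // "boot=2,affinity=[0@[0,2],1@[1,3]]", matching the literal argument below.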
2545 let guest = Guest::new(Box::new(focal)); 2546 2547 // We need the host to have at least 4 CPUs if we want to be able 2548 // to run this test. 2549 let host_cpus_count = exec_host_command_output("nproc"); 2550 assert!( 2551 String::from_utf8_lossy(&host_cpus_count.stdout) 2552 .trim() 2553 .parse::<u16>() 2554 .unwrap_or(0) 2555 >= 4 2556 ); 2557 2558 let mut child = GuestCommand::new(&guest) 2559 .args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"]) 2560 .args(["--memory", "size=512M"]) 2561 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2562 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2563 .default_disks() 2564 .default_net() 2565 .capture_output() 2566 .spawn() 2567 .unwrap(); 2568 2569 let r = std::panic::catch_unwind(|| { 2570 guest.wait_vm_boot(None).unwrap(); 2571 let pid = child.id(); 2572 let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2573 assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2"); 2574 let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2575 assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3"); 2576 }); 2577 2578 kill_child(&mut child); 2579 let output = child.wait_with_output().unwrap(); 2580 handle_child_output(r, &output); 2581 } 2582 2583 #[test] 2584 fn test_virtio_queue_affinity() { 2585 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2586 let guest = Guest::new(Box::new(focal)); 2587 2588 // We need the host to have at least 4 CPUs if we want to be able 2589 // to run this test. 2590 let host_cpus_count = exec_host_command_output("nproc"); 2591 assert!( 2592 String::from_utf8_lossy(&host_cpus_count.stdout) 2593 .trim() 2594 .parse::<u16>() 2595 .unwrap_or(0) 2596 >= 4 2597 ); 2598 2599 let mut child = GuestCommand::new(&guest) 2600 .args(["--cpus", "boot=4"]) 2601 .args(["--memory", "size=512M"]) 2602 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2603 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2604 .args([ 2605 "--disk", 2606 format!( 2607 "path={}", 2608 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2609 ) 2610 .as_str(), 2611 format!( 2612 "path={},num_queues=4,queue_affinity=[0@[0,2],1@[1,3],2@[1],3@[3]]", 2613 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2614 ) 2615 .as_str(), 2616 ]) 2617 .default_net() 2618 .capture_output() 2619 .spawn() 2620 .unwrap(); 2621 2622 let r = std::panic::catch_unwind(|| { 2623 guest.wait_vm_boot(None).unwrap(); 2624 let pid = child.id(); 2625 let taskset_q0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2626 assert_eq!(String::from_utf8_lossy(&taskset_q0.stdout).trim(), "0,2"); 2627 let taskset_q1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2628 assert_eq!(String::from_utf8_lossy(&taskset_q1.stdout).trim(), "1,3"); 2629 let taskset_q2 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q2 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2630 assert_eq!(String::from_utf8_lossy(&taskset_q2.stdout).trim(), "1"); 2631 let taskset_q3 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q3 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" 
\"").as_str()); 2632 assert_eq!(String::from_utf8_lossy(&taskset_q3.stdout).trim(), "3"); 2633 }); 2634 2635 kill_child(&mut child); 2636 let output = child.wait_with_output().unwrap(); 2637 handle_child_output(r, &output); 2638 } 2639 2640 #[test] 2641 #[cfg(not(feature = "mshv"))] 2642 fn test_large_vm() { 2643 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2644 let guest = Guest::new(Box::new(focal)); 2645 let mut cmd = GuestCommand::new(&guest); 2646 cmd.args(["--cpus", "boot=48"]) 2647 .args(["--memory", "size=5120M"]) 2648 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2649 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2650 .args(["--serial", "tty"]) 2651 .args(["--console", "off"]) 2652 .capture_output() 2653 .default_disks() 2654 .default_net(); 2655 2656 let mut child = cmd.spawn().unwrap(); 2657 2658 guest.wait_vm_boot(None).unwrap(); 2659 2660 let r = std::panic::catch_unwind(|| { 2661 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48); 2662 assert_eq!( 2663 guest 2664 .ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"") 2665 .unwrap() 2666 .trim(), 2667 "0-47" 2668 ); 2669 2670 assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000); 2671 }); 2672 2673 kill_child(&mut child); 2674 let output = child.wait_with_output().unwrap(); 2675 2676 handle_child_output(r, &output); 2677 } 2678 2679 #[test] 2680 #[cfg(not(feature = "mshv"))] 2681 fn test_huge_memory() { 2682 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2683 let guest = Guest::new(Box::new(focal)); 2684 let mut cmd = GuestCommand::new(&guest); 2685 cmd.args(["--cpus", "boot=1"]) 2686 .args(["--memory", "size=128G"]) 2687 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2688 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2689 .capture_output() 2690 .default_disks() 2691 .default_net(); 2692 2693 let mut child = cmd.spawn().unwrap(); 2694 2695 guest.wait_vm_boot(Some(120)).unwrap(); 2696 2697 let r = std::panic::catch_unwind(|| { 2698 assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000); 2699 }); 2700 2701 kill_child(&mut child); 2702 let output = child.wait_with_output().unwrap(); 2703 2704 handle_child_output(r, &output); 2705 } 2706 2707 #[test] 2708 fn test_power_button() { 2709 _test_power_button(false); 2710 } 2711 2712 #[test] 2713 #[cfg(not(feature = "mshv"))] 2714 fn test_user_defined_memory_regions() { 2715 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2716 let guest = Guest::new(Box::new(focal)); 2717 let api_socket = temp_api_path(&guest.tmp_dir); 2718 2719 let kernel_path = direct_kernel_boot_path(); 2720 2721 let mut child = GuestCommand::new(&guest) 2722 .args(["--cpus", "boot=1"]) 2723 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 2724 .args([ 2725 "--memory-zone", 2726 "id=mem0,size=1G,hotplug_size=2G", 2727 "id=mem1,size=1G,shared=on", 2728 "id=mem2,size=1G,host_numa_node=0,hotplug_size=2G", 2729 ]) 2730 .args(["--kernel", kernel_path.to_str().unwrap()]) 2731 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2732 .args(["--api-socket", &api_socket]) 2733 .capture_output() 2734 .default_disks() 2735 .default_net() 2736 .spawn() 2737 .unwrap(); 2738 2739 let r = std::panic::catch_unwind(|| { 2740 guest.wait_vm_boot(None).unwrap(); 2741 2742 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 2743 2744 guest.enable_memory_hotplug(); 2745 2746 resize_zone_command(&api_socket, "mem0", "3G"); 2747 thread::sleep(std::time::Duration::new(5, 
0)); 2748 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2749 resize_zone_command(&api_socket, "mem2", "3G"); 2750 thread::sleep(std::time::Duration::new(5, 0)); 2751 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 2752 resize_zone_command(&api_socket, "mem0", "2G"); 2753 thread::sleep(std::time::Duration::new(5, 0)); 2754 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 2755 resize_zone_command(&api_socket, "mem2", "2G"); 2756 thread::sleep(std::time::Duration::new(5, 0)); 2757 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2758 2759 guest.reboot_linux(0, None); 2760 2761 // Check the amount of RAM after reboot 2762 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2763 assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000); 2764 2765 // Check if we can still resize down to the initial 'boot'size 2766 resize_zone_command(&api_socket, "mem0", "1G"); 2767 thread::sleep(std::time::Duration::new(5, 0)); 2768 assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000); 2769 resize_zone_command(&api_socket, "mem2", "1G"); 2770 thread::sleep(std::time::Duration::new(5, 0)); 2771 assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000); 2772 }); 2773 2774 kill_child(&mut child); 2775 let output = child.wait_with_output().unwrap(); 2776 2777 handle_child_output(r, &output); 2778 } 2779 2780 #[test] 2781 #[cfg(not(feature = "mshv"))] 2782 fn test_guest_numa_nodes() { 2783 _test_guest_numa_nodes(false); 2784 } 2785 2786 #[test] 2787 #[cfg(target_arch = "x86_64")] 2788 fn test_iommu_segments() { 2789 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2790 let focal = UbuntuDiskConfig::new(focal_image); 2791 let guest = Guest::new(Box::new(focal)); 2792 2793 // Prepare another disk file for the virtio-disk device 2794 let test_disk_path = String::from( 2795 guest 2796 .tmp_dir 2797 .as_path() 2798 .join("test-disk.raw") 2799 .to_str() 2800 .unwrap(), 2801 ); 2802 assert!( 2803 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2804 ); 2805 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2806 2807 let api_socket = temp_api_path(&guest.tmp_dir); 2808 let mut cmd = GuestCommand::new(&guest); 2809 2810 cmd.args(["--cpus", "boot=1"]) 2811 .args(["--api-socket", &api_socket]) 2812 .args(["--memory", "size=512M"]) 2813 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2814 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2815 .args([ 2816 "--platform", 2817 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"), 2818 ]) 2819 .default_disks() 2820 .capture_output() 2821 .default_net(); 2822 2823 let mut child = cmd.spawn().unwrap(); 2824 2825 guest.wait_vm_boot(None).unwrap(); 2826 2827 let r = std::panic::catch_unwind(|| { 2828 let (cmd_success, cmd_output) = remote_command_w_output( 2829 &api_socket, 2830 "add-disk", 2831 Some( 2832 format!( 2833 "path={},id=test0,pci_segment=1,iommu=on", 2834 test_disk_path.as_str() 2835 ) 2836 .as_str(), 2837 ), 2838 ); 2839 assert!(cmd_success); 2840 assert!(String::from_utf8_lossy(&cmd_output) 2841 .contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}")); 2842 2843 // Check IOMMU setup 2844 assert!(guest 2845 .does_device_vendor_pair_match("0x1057", "0x1af4") 2846 .unwrap_or_default()); 2847 assert_eq!( 2848 guest 2849 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 2850 .unwrap() 2851 .trim(), 2852 "0001:00:01.0" 2853 ); 2854 }); 2855 
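        // The `bdf` checked above has the form `<segment>:<bus>:<device>.<function>`,
        // so the leading "0001" confirms the hot-added disk landed on PCI segment 1,
        // the segment placed behind the virtual IOMMU via `iommu_segments=[1]`.
        // A minimal sketch (hypothetical helper, not used here) for extracting the
        // segment from such a string:
        #[allow(dead_code)]
        fn bdf_segment(bdf: &str) -> Option<u16> {
            u16::from_str_radix(bdf.split(':').next()?, 16).ok()
        }
        // e.g. bdf_segment("0001:00:01.0") == Some(1)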
2856 kill_child(&mut child); 2857 let output = child.wait_with_output().unwrap(); 2858 2859 handle_child_output(r, &output); 2860 } 2861 2862 #[test] 2863 fn test_pci_msi() { 2864 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2865 let guest = Guest::new(Box::new(focal)); 2866 let mut cmd = GuestCommand::new(&guest); 2867 cmd.args(["--cpus", "boot=1"]) 2868 .args(["--memory", "size=512M"]) 2869 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2870 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2871 .capture_output() 2872 .default_disks() 2873 .default_net(); 2874 2875 let mut child = cmd.spawn().unwrap(); 2876 2877 guest.wait_vm_boot(None).unwrap(); 2878 2879 #[cfg(target_arch = "x86_64")] 2880 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 2881 #[cfg(target_arch = "aarch64")] 2882 let grep_cmd = "grep -c ITS-MSI /proc/interrupts"; 2883 2884 let r = std::panic::catch_unwind(|| { 2885 assert_eq!( 2886 guest 2887 .ssh_command(grep_cmd) 2888 .unwrap() 2889 .trim() 2890 .parse::<u32>() 2891 .unwrap_or_default(), 2892 12 2893 ); 2894 }); 2895 2896 kill_child(&mut child); 2897 let output = child.wait_with_output().unwrap(); 2898 2899 handle_child_output(r, &output); 2900 } 2901 2902 #[test] 2903 fn test_virtio_net_ctrl_queue() { 2904 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2905 let guest = Guest::new(Box::new(focal)); 2906 let mut cmd = GuestCommand::new(&guest); 2907 cmd.args(["--cpus", "boot=1"]) 2908 .args(["--memory", "size=512M"]) 2909 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2910 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2911 .args(["--net", guest.default_net_string_w_mtu(3000).as_str()]) 2912 .capture_output() 2913 .default_disks(); 2914 2915 let mut child = cmd.spawn().unwrap(); 2916 2917 guest.wait_vm_boot(None).unwrap(); 2918 2919 #[cfg(target_arch = "aarch64")] 2920 let iface = "enp0s4"; 2921 #[cfg(target_arch = "x86_64")] 2922 let iface = "ens4"; 2923 2924 let r = std::panic::catch_unwind(|| { 2925 assert_eq!( 2926 guest 2927 .ssh_command( 2928 format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str() 2929 ) 2930 .unwrap() 2931 .trim(), 2932 "success" 2933 ); 2934 assert_eq!( 2935 guest 2936 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str()) 2937 .unwrap() 2938 .trim(), 2939 "3000" 2940 ); 2941 }); 2942 2943 kill_child(&mut child); 2944 let output = child.wait_with_output().unwrap(); 2945 2946 handle_child_output(r, &output); 2947 } 2948 2949 #[test] 2950 fn test_pci_multiple_segments() { 2951 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2952 let guest = Guest::new(Box::new(focal)); 2953 2954 // Prepare another disk file for the virtio-disk device 2955 let test_disk_path = String::from( 2956 guest 2957 .tmp_dir 2958 .as_path() 2959 .join("test-disk.raw") 2960 .to_str() 2961 .unwrap(), 2962 ); 2963 assert!( 2964 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2965 ); 2966 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2967 2968 let mut cmd = GuestCommand::new(&guest); 2969 cmd.args(["--cpus", "boot=1"]) 2970 .args(["--memory", "size=512M"]) 2971 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2972 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2973 .args([ 2974 "--platform", 2975 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 2976 ]) 2977 .args([ 2978 "--disk", 2979 format!( 2980 "path={}", 2981 
guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2982 ) 2983 .as_str(), 2984 format!( 2985 "path={}", 2986 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2987 ) 2988 .as_str(), 2989 format!("path={test_disk_path},pci_segment=15").as_str(), 2990 ]) 2991 .capture_output() 2992 .default_net(); 2993 2994 let mut child = cmd.spawn().unwrap(); 2995 2996 guest.wait_vm_boot(None).unwrap(); 2997 2998 let grep_cmd = "lspci | grep \"Host bridge\" | wc -l"; 2999 3000 let r = std::panic::catch_unwind(|| { 3001 // There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest. 3002 assert_eq!( 3003 guest 3004 .ssh_command(grep_cmd) 3005 .unwrap() 3006 .trim() 3007 .parse::<u16>() 3008 .unwrap_or_default(), 3009 MAX_NUM_PCI_SEGMENTS 3010 ); 3011 3012 // Check both if /dev/vdc exists and if the block size is 4M. 3013 assert_eq!( 3014 guest 3015 .ssh_command("lsblk | grep vdc | grep -c 4M") 3016 .unwrap() 3017 .trim() 3018 .parse::<u32>() 3019 .unwrap_or_default(), 3020 1 3021 ); 3022 3023 // Mount the device. 3024 guest.ssh_command("mkdir mount_image").unwrap(); 3025 guest 3026 .ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/") 3027 .unwrap(); 3028 // Grant all users with write permission. 3029 guest.ssh_command("sudo chmod a+w mount_image/").unwrap(); 3030 3031 // Write something to the device. 3032 guest 3033 .ssh_command("sudo echo \"bar\" >> mount_image/foo") 3034 .unwrap(); 3035 3036 // Check the content of the block device. The file "foo" should 3037 // contain "bar". 3038 assert_eq!( 3039 guest 3040 .ssh_command("sudo cat mount_image/foo") 3041 .unwrap() 3042 .trim(), 3043 "bar" 3044 ); 3045 }); 3046 3047 kill_child(&mut child); 3048 let output = child.wait_with_output().unwrap(); 3049 3050 handle_child_output(r, &output); 3051 } 3052 3053 #[test] 3054 fn test_pci_multiple_segments_numa_node() { 3055 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3056 let guest = Guest::new(Box::new(focal)); 3057 let api_socket = temp_api_path(&guest.tmp_dir); 3058 #[cfg(target_arch = "x86_64")] 3059 let kernel_path = direct_kernel_boot_path(); 3060 #[cfg(target_arch = "aarch64")] 3061 let kernel_path = edk2_path(); 3062 3063 // Prepare another disk file for the virtio-disk device 3064 let test_disk_path = String::from( 3065 guest 3066 .tmp_dir 3067 .as_path() 3068 .join("test-disk.raw") 3069 .to_str() 3070 .unwrap(), 3071 ); 3072 assert!( 3073 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 3074 ); 3075 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 3076 const TEST_DISK_NODE: u16 = 1; 3077 3078 let mut child = GuestCommand::new(&guest) 3079 .args(["--platform", "num_pci_segments=2"]) 3080 .args(["--cpus", "boot=2"]) 3081 .args(["--memory", "size=0"]) 3082 .args(["--memory-zone", "id=mem0,size=256M", "id=mem1,size=256M"]) 3083 .args([ 3084 "--numa", 3085 "guest_numa_id=0,cpus=[0],distances=[1@20],memory_zones=mem0,pci_segments=[0]", 3086 "guest_numa_id=1,cpus=[1],distances=[0@20],memory_zones=mem1,pci_segments=[1]", 3087 ]) 3088 .args(["--kernel", kernel_path.to_str().unwrap()]) 3089 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3090 .args(["--api-socket", &api_socket]) 3091 .capture_output() 3092 .args([ 3093 "--disk", 3094 format!( 3095 "path={}", 3096 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3097 ) 3098 .as_str(), 3099 format!( 3100 "path={}", 3101 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3102 ) 3103 .as_str(), 3104 
format!("path={test_disk_path},pci_segment={TEST_DISK_NODE}").as_str(), 3105 ]) 3106 .default_net() 3107 .spawn() 3108 .unwrap(); 3109 3110 let cmd = "cat /sys/block/vdc/device/../numa_node"; 3111 3112 let r = std::panic::catch_unwind(|| { 3113 guest.wait_vm_boot(None).unwrap(); 3114 3115 assert_eq!( 3116 guest 3117 .ssh_command(cmd) 3118 .unwrap() 3119 .trim() 3120 .parse::<u16>() 3121 .unwrap_or_default(), 3122 TEST_DISK_NODE 3123 ); 3124 }); 3125 3126 kill_child(&mut child); 3127 let output = child.wait_with_output().unwrap(); 3128 3129 handle_child_output(r, &output); 3130 } 3131 3132 #[test] 3133 fn test_direct_kernel_boot() { 3134 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3135 let guest = Guest::new(Box::new(focal)); 3136 3137 let kernel_path = direct_kernel_boot_path(); 3138 3139 let mut child = GuestCommand::new(&guest) 3140 .args(["--cpus", "boot=1"]) 3141 .args(["--memory", "size=512M"]) 3142 .args(["--kernel", kernel_path.to_str().unwrap()]) 3143 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3144 .default_disks() 3145 .default_net() 3146 .capture_output() 3147 .spawn() 3148 .unwrap(); 3149 3150 let r = std::panic::catch_unwind(|| { 3151 guest.wait_vm_boot(None).unwrap(); 3152 3153 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3154 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3155 3156 let grep_cmd = if cfg!(target_arch = "x86_64") { 3157 "grep -c PCI-MSI /proc/interrupts" 3158 } else { 3159 "grep -c ITS-MSI /proc/interrupts" 3160 }; 3161 assert_eq!( 3162 guest 3163 .ssh_command(grep_cmd) 3164 .unwrap() 3165 .trim() 3166 .parse::<u32>() 3167 .unwrap_or_default(), 3168 12 3169 ); 3170 }); 3171 3172 kill_child(&mut child); 3173 let output = child.wait_with_output().unwrap(); 3174 3175 handle_child_output(r, &output); 3176 } 3177 3178 #[test] 3179 #[cfg(target_arch = "x86_64")] 3180 fn test_direct_kernel_boot_bzimage() { 3181 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3182 let guest = Guest::new(Box::new(focal)); 3183 3184 let mut kernel_path = direct_kernel_boot_path(); 3185 // Replace the default kernel with the bzImage. 
3186 kernel_path.pop(); 3187 kernel_path.push("bzImage"); 3188 3189 let mut child = GuestCommand::new(&guest) 3190 .args(["--cpus", "boot=1"]) 3191 .args(["--memory", "size=512M"]) 3192 .args(["--kernel", kernel_path.to_str().unwrap()]) 3193 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3194 .default_disks() 3195 .default_net() 3196 .capture_output() 3197 .spawn() 3198 .unwrap(); 3199 3200 let r = std::panic::catch_unwind(|| { 3201 guest.wait_vm_boot(None).unwrap(); 3202 3203 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3204 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3205 3206 let grep_cmd = if cfg!(target_arch = "x86_64") { 3207 "grep -c PCI-MSI /proc/interrupts" 3208 } else { 3209 "grep -c ITS-MSI /proc/interrupts" 3210 }; 3211 assert_eq!( 3212 guest 3213 .ssh_command(grep_cmd) 3214 .unwrap() 3215 .trim() 3216 .parse::<u32>() 3217 .unwrap_or_default(), 3218 12 3219 ); 3220 }); 3221 3222 kill_child(&mut child); 3223 let output = child.wait_with_output().unwrap(); 3224 3225 handle_child_output(r, &output); 3226 } 3227 3228 fn _test_virtio_block(image_name: &str, disable_io_uring: bool, disable_aio: bool) { 3229 let focal = UbuntuDiskConfig::new(image_name.to_string()); 3230 let guest = Guest::new(Box::new(focal)); 3231 3232 let mut workload_path = dirs::home_dir().unwrap(); 3233 workload_path.push("workloads"); 3234 3235 let mut blk_file_path = workload_path; 3236 blk_file_path.push("blk.img"); 3237 3238 let kernel_path = direct_kernel_boot_path(); 3239 3240 let mut cloud_child = GuestCommand::new(&guest) 3241 .args(["--cpus", "boot=4"]) 3242 .args(["--memory", "size=512M,shared=on"]) 3243 .args(["--kernel", kernel_path.to_str().unwrap()]) 3244 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3245 .args([ 3246 "--disk", 3247 format!( 3248 "path={}", 3249 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3250 ) 3251 .as_str(), 3252 format!( 3253 "path={}", 3254 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3255 ) 3256 .as_str(), 3257 format!( 3258 "path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={},_disable_aio={}", 3259 blk_file_path.to_str().unwrap(), 3260 disable_io_uring, 3261 disable_aio, 3262 ) 3263 .as_str(), 3264 ]) 3265 .default_net() 3266 .capture_output() 3267 .spawn() 3268 .unwrap(); 3269 3270 let r = std::panic::catch_unwind(|| { 3271 guest.wait_vm_boot(None).unwrap(); 3272 3273 // Check both if /dev/vdc exists and if the block size is 16M. 3274 assert_eq!( 3275 guest 3276 .ssh_command("lsblk | grep vdc | grep -c 16M") 3277 .unwrap() 3278 .trim() 3279 .parse::<u32>() 3280 .unwrap_or_default(), 3281 1 3282 ); 3283 3284 // Check both if /dev/vdc exists and if this block is RO. 3285 assert_eq!( 3286 guest 3287 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 3288 .unwrap() 3289 .trim() 3290 .parse::<u32>() 3291 .unwrap_or_default(), 3292 1 3293 ); 3294 3295 // Check if the number of queues is 4. 
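            // (blk-mq exposes one sub-directory per hardware queue under
            // /sys/block/<dev>/mq/, e.g. /sys/block/vdc/mq/0 through .../mq/3 here,
            // so counting directories reflects the `num_queues=4` setting above.)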
3296 assert_eq!( 3297 guest 3298 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 3299 .unwrap() 3300 .trim() 3301 .parse::<u32>() 3302 .unwrap_or_default(), 3303 4 3304 ); 3305 }); 3306 3307 let _ = cloud_child.kill(); 3308 let output = cloud_child.wait_with_output().unwrap(); 3309 3310 handle_child_output(r, &output); 3311 } 3312 3313 #[test] 3314 fn test_virtio_block_io_uring() { 3315 _test_virtio_block(FOCAL_IMAGE_NAME, false, true) 3316 } 3317 3318 #[test] 3319 fn test_virtio_block_aio() { 3320 _test_virtio_block(FOCAL_IMAGE_NAME, true, false) 3321 } 3322 3323 #[test] 3324 fn test_virtio_block_sync() { 3325 _test_virtio_block(FOCAL_IMAGE_NAME, true, true) 3326 } 3327 3328 #[test] 3329 fn test_virtio_block_qcow2() { 3330 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false, false) 3331 } 3332 3333 #[test] 3334 fn test_virtio_block_qcow2_backing_file() { 3335 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE, false, false) 3336 } 3337 3338 #[test] 3339 fn test_virtio_block_vhd() { 3340 let mut workload_path = dirs::home_dir().unwrap(); 3341 workload_path.push("workloads"); 3342 3343 let mut raw_file_path = workload_path.clone(); 3344 let mut vhd_file_path = workload_path; 3345 raw_file_path.push(FOCAL_IMAGE_NAME); 3346 vhd_file_path.push(FOCAL_IMAGE_NAME_VHD); 3347 3348 // Generate VHD file from RAW file 3349 std::process::Command::new("qemu-img") 3350 .arg("convert") 3351 .arg("-p") 3352 .args(["-f", "raw"]) 3353 .args(["-O", "vpc"]) 3354 .args(["-o", "subformat=fixed"]) 3355 .arg(raw_file_path.to_str().unwrap()) 3356 .arg(vhd_file_path.to_str().unwrap()) 3357 .output() 3358 .expect("Expect generating VHD image from RAW image"); 3359 3360 _test_virtio_block(FOCAL_IMAGE_NAME_VHD, false, false) 3361 } 3362 3363 #[test] 3364 fn test_virtio_block_vhdx() { 3365 let mut workload_path = dirs::home_dir().unwrap(); 3366 workload_path.push("workloads"); 3367 3368 let mut raw_file_path = workload_path.clone(); 3369 let mut vhdx_file_path = workload_path; 3370 raw_file_path.push(FOCAL_IMAGE_NAME); 3371 vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX); 3372 3373 // Generate dynamic VHDX file from RAW file 3374 std::process::Command::new("qemu-img") 3375 .arg("convert") 3376 .arg("-p") 3377 .args(["-f", "raw"]) 3378 .args(["-O", "vhdx"]) 3379 .arg(raw_file_path.to_str().unwrap()) 3380 .arg(vhdx_file_path.to_str().unwrap()) 3381 .output() 3382 .expect("Expect generating dynamic VHDx image from RAW image"); 3383 3384 _test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false, false) 3385 } 3386 3387 #[test] 3388 fn test_virtio_block_dynamic_vhdx_expand() { 3389 const VIRTUAL_DISK_SIZE: u64 = 100 << 20; 3390 const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20; 3391 const FULL_VHDX_FILE_SIZE: u64 = 112 << 20; 3392 const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx"; 3393 3394 let mut workload_path = dirs::home_dir().unwrap(); 3395 workload_path.push("workloads"); 3396 3397 let mut vhdx_file_path = workload_path; 3398 vhdx_file_path.push(DYNAMIC_VHDX_NAME); 3399 let vhdx_path = vhdx_file_path.to_str().unwrap(); 3400 3401 // Generate a 100 MiB dynamic VHDX file 3402 std::process::Command::new("qemu-img") 3403 .arg("create") 3404 .args(["-f", "vhdx"]) 3405 .arg(vhdx_path) 3406 .arg(VIRTUAL_DISK_SIZE.to_string()) 3407 .output() 3408 .expect("Expect generating dynamic VHDx image from RAW image"); 3409 3410 // Check if the size matches with empty VHDx file size 3411 assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE); 3412 3413 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3414 let guest = 
Guest::new(Box::new(focal)); 3415 let kernel_path = direct_kernel_boot_path(); 3416 3417 let mut cloud_child = GuestCommand::new(&guest) 3418 .args(["--cpus", "boot=1"]) 3419 .args(["--memory", "size=512M"]) 3420 .args(["--kernel", kernel_path.to_str().unwrap()]) 3421 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3422 .args([ 3423 "--disk", 3424 format!( 3425 "path={}", 3426 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3427 ) 3428 .as_str(), 3429 format!( 3430 "path={}", 3431 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3432 ) 3433 .as_str(), 3434 format!("path={vhdx_path}").as_str(), 3435 ]) 3436 .default_net() 3437 .capture_output() 3438 .spawn() 3439 .unwrap(); 3440 3441 let r = std::panic::catch_unwind(|| { 3442 guest.wait_vm_boot(None).unwrap(); 3443 3444 // Check both if /dev/vdc exists and if the block size is 100 MiB. 3445 assert_eq!( 3446 guest 3447 .ssh_command("lsblk | grep vdc | grep -c 100M") 3448 .unwrap() 3449 .trim() 3450 .parse::<u32>() 3451 .unwrap_or_default(), 3452 1 3453 ); 3454 3455 // Write 100 MB of data to the VHDx disk 3456 guest 3457 .ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100") 3458 .unwrap(); 3459 }); 3460 3461 // Check if the size matches with expected expanded VHDx file size 3462 assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE); 3463 3464 kill_child(&mut cloud_child); 3465 let output = cloud_child.wait_with_output().unwrap(); 3466 3467 handle_child_output(r, &output); 3468 } 3469 3470 fn vhdx_image_size(disk_name: &str) -> u64 { 3471 std::fs::File::open(disk_name) 3472 .unwrap() 3473 .seek(SeekFrom::End(0)) 3474 .unwrap() 3475 } 3476 3477 #[test] 3478 fn test_virtio_block_direct_and_firmware() { 3479 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3480 let guest = Guest::new(Box::new(focal)); 3481 3482 // The OS disk must be copied to a location that is not backed by 3483 // tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails 3484 // with EINVAL because tmpfs doesn't support this flag. 
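        // A minimal sketch (hypothetical helper, not used by this test) of how the
        // tmpfs case could be detected up front, by finding the longest mount point
        // in /proc/mounts that prefixes the path and checking its filesystem type:
        #[allow(dead_code)]
        fn backed_by_tmpfs(path: &str) -> bool {
            std::fs::read_to_string("/proc/mounts")
                .unwrap_or_default()
                .lines()
                .filter_map(|line| {
                    let mut fields = line.split_whitespace();
                    let _device = fields.next()?;
                    let mount_point = fields.next()?;
                    let fs_type = fields.next()?;
                    if path.starts_with(mount_point) {
                        Some((mount_point.len(), fs_type == "tmpfs"))
                    } else {
                        None
                    }
                })
                .max_by_key(|(prefix_len, _)| *prefix_len)
                .map(|(_, is_tmpfs)| is_tmpfs)
                .unwrap_or(false)
        }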
3485 let mut workloads_path = dirs::home_dir().unwrap(); 3486 workloads_path.push("workloads"); 3487 let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap(); 3488 let mut os_path = os_dir.as_path().to_path_buf(); 3489 os_path.push("osdisk.img"); 3490 rate_limited_copy( 3491 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3492 os_path.as_path(), 3493 ) 3494 .expect("copying of OS disk failed"); 3495 3496 let mut child = GuestCommand::new(&guest) 3497 .args(["--cpus", "boot=1"]) 3498 .args(["--memory", "size=512M"]) 3499 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 3500 .args([ 3501 "--disk", 3502 format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(), 3503 format!( 3504 "path={}", 3505 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3506 ) 3507 .as_str(), 3508 ]) 3509 .default_net() 3510 .capture_output() 3511 .spawn() 3512 .unwrap(); 3513 3514 let r = std::panic::catch_unwind(|| { 3515 guest.wait_vm_boot(Some(120)).unwrap(); 3516 }); 3517 3518 kill_child(&mut child); 3519 let output = child.wait_with_output().unwrap(); 3520 3521 handle_child_output(r, &output); 3522 } 3523 3524 #[test] 3525 fn test_vhost_user_net_default() { 3526 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false) 3527 } 3528 3529 #[test] 3530 fn test_vhost_user_net_named_tap() { 3531 test_vhost_user_net( 3532 Some("mytap0"), 3533 2, 3534 &prepare_vhost_user_net_daemon, 3535 false, 3536 false, 3537 ) 3538 } 3539 3540 #[test] 3541 fn test_vhost_user_net_existing_tap() { 3542 test_vhost_user_net( 3543 Some("vunet-tap0"), 3544 2, 3545 &prepare_vhost_user_net_daemon, 3546 false, 3547 false, 3548 ) 3549 } 3550 3551 #[test] 3552 fn test_vhost_user_net_multiple_queues() { 3553 test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false) 3554 } 3555 3556 #[test] 3557 fn test_vhost_user_net_tap_multiple_queues() { 3558 test_vhost_user_net( 3559 Some("vunet-tap1"), 3560 4, 3561 &prepare_vhost_user_net_daemon, 3562 false, 3563 false, 3564 ) 3565 } 3566 3567 #[test] 3568 fn test_vhost_user_net_host_mac() { 3569 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false) 3570 } 3571 3572 #[test] 3573 fn test_vhost_user_net_client_mode() { 3574 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true) 3575 } 3576 3577 #[test] 3578 #[cfg(not(target_arch = "aarch64"))] 3579 fn test_vhost_user_blk_default() { 3580 test_vhost_user_blk(2, false, false, Some(&prepare_vubd)) 3581 } 3582 3583 #[test] 3584 #[cfg(not(target_arch = "aarch64"))] 3585 fn test_vhost_user_blk_readonly() { 3586 test_vhost_user_blk(1, true, false, Some(&prepare_vubd)) 3587 } 3588 3589 #[test] 3590 #[cfg(not(target_arch = "aarch64"))] 3591 fn test_vhost_user_blk_direct() { 3592 test_vhost_user_blk(1, false, true, Some(&prepare_vubd)) 3593 } 3594 3595 #[test] 3596 fn test_boot_from_vhost_user_blk_default() { 3597 test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd)) 3598 } 3599 3600 #[test] 3601 #[cfg(target_arch = "x86_64")] 3602 fn test_split_irqchip() { 3603 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3604 let guest = Guest::new(Box::new(focal)); 3605 3606 let mut child = GuestCommand::new(&guest) 3607 .args(["--cpus", "boot=1"]) 3608 .args(["--memory", "size=512M"]) 3609 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3610 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3611 .default_disks() 3612 .default_net() 3613 .capture_output() 3614 .spawn() 3615 .unwrap(); 3616 3617 let r = 
std::panic::catch_unwind(|| { 3618 guest.wait_vm_boot(None).unwrap(); 3619 3620 assert_eq!( 3621 guest 3622 .ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true") 3623 .unwrap() 3624 .trim() 3625 .parse::<u32>() 3626 .unwrap_or(1), 3627 0 3628 ); 3629 assert_eq!( 3630 guest 3631 .ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true") 3632 .unwrap() 3633 .trim() 3634 .parse::<u32>() 3635 .unwrap_or(1), 3636 0 3637 ); 3638 }); 3639 3640 kill_child(&mut child); 3641 let output = child.wait_with_output().unwrap(); 3642 3643 handle_child_output(r, &output); 3644 } 3645 3646 #[test] 3647 #[cfg(target_arch = "x86_64")] 3648 fn test_dmi_serial_number() { 3649 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3650 let guest = Guest::new(Box::new(focal)); 3651 3652 let mut child = GuestCommand::new(&guest) 3653 .args(["--cpus", "boot=1"]) 3654 .args(["--memory", "size=512M"]) 3655 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3656 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3657 .args(["--platform", "serial_number=a=b;c=d"]) 3658 .default_disks() 3659 .default_net() 3660 .capture_output() 3661 .spawn() 3662 .unwrap(); 3663 3664 let r = std::panic::catch_unwind(|| { 3665 guest.wait_vm_boot(None).unwrap(); 3666 3667 assert_eq!( 3668 guest 3669 .ssh_command("sudo cat /sys/class/dmi/id/product_serial") 3670 .unwrap() 3671 .trim(), 3672 "a=b;c=d" 3673 ); 3674 }); 3675 3676 kill_child(&mut child); 3677 let output = child.wait_with_output().unwrap(); 3678 3679 handle_child_output(r, &output); 3680 } 3681 3682 #[test] 3683 #[cfg(target_arch = "x86_64")] 3684 fn test_dmi_uuid() { 3685 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3686 let guest = Guest::new(Box::new(focal)); 3687 3688 let mut child = GuestCommand::new(&guest) 3689 .args(["--cpus", "boot=1"]) 3690 .args(["--memory", "size=512M"]) 3691 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3692 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3693 .args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"]) 3694 .default_disks() 3695 .default_net() 3696 .capture_output() 3697 .spawn() 3698 .unwrap(); 3699 3700 let r = std::panic::catch_unwind(|| { 3701 guest.wait_vm_boot(None).unwrap(); 3702 3703 assert_eq!( 3704 guest 3705 .ssh_command("sudo cat /sys/class/dmi/id/product_uuid") 3706 .unwrap() 3707 .trim(), 3708 "1e8aa28a-435d-4027-87f4-40dceff1fa0a" 3709 ); 3710 }); 3711 3712 kill_child(&mut child); 3713 let output = child.wait_with_output().unwrap(); 3714 3715 handle_child_output(r, &output); 3716 } 3717 3718 #[test] 3719 #[cfg(target_arch = "x86_64")] 3720 fn test_dmi_oem_strings() { 3721 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3722 let guest = Guest::new(Box::new(focal)); 3723 3724 let s1 = "io.systemd.credential:xx=yy"; 3725 let s2 = "This is a test string"; 3726 3727 let oem_strings = format!("oem_strings=[{s1},{s2}]"); 3728 3729 let mut child = GuestCommand::new(&guest) 3730 .args(["--cpus", "boot=1"]) 3731 .args(["--memory", "size=512M"]) 3732 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3733 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3734 .args(["--platform", &oem_strings]) 3735 .default_disks() 3736 .default_net() 3737 .capture_output() 3738 .spawn() 3739 .unwrap(); 3740 3741 let r = std::panic::catch_unwind(|| { 3742 guest.wait_vm_boot(None).unwrap(); 3743 3744 assert_eq!( 3745 guest 3746 .ssh_command("sudo dmidecode --oem-string count") 3747 .unwrap() 3748 .trim(), 3749 "2" 3750 ); 3751 
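            // dmidecode numbers OEM strings from 1, in the order they were passed via
            // `oem_strings=[...]`, so string 1 should be `s1` and string 2 should be
            // `s2`. (`s1` uses the io.systemd.credential:<name>=<value> form that
            // systemd can import as an SMBIOS-provided credential.)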
3752 assert_eq!( 3753 guest 3754 .ssh_command("sudo dmidecode --oem-string 1") 3755 .unwrap() 3756 .trim(), 3757 s1 3758 ); 3759 3760 assert_eq!( 3761 guest 3762 .ssh_command("sudo dmidecode --oem-string 2") 3763 .unwrap() 3764 .trim(), 3765 s2 3766 ); 3767 }); 3768 3769 kill_child(&mut child); 3770 let output = child.wait_with_output().unwrap(); 3771 3772 handle_child_output(r, &output); 3773 } 3774 3775 #[test] 3776 fn test_virtio_fs() { 3777 _test_virtio_fs(&prepare_virtiofsd, false, None) 3778 } 3779 3780 #[test] 3781 fn test_virtio_fs_hotplug() { 3782 _test_virtio_fs(&prepare_virtiofsd, true, None) 3783 } 3784 3785 #[test] 3786 #[cfg(not(feature = "mshv"))] 3787 fn test_virtio_fs_multi_segment_hotplug() { 3788 _test_virtio_fs(&prepare_virtiofsd, true, Some(15)) 3789 } 3790 3791 #[test] 3792 #[cfg(not(feature = "mshv"))] 3793 fn test_virtio_fs_multi_segment() { 3794 _test_virtio_fs(&prepare_virtiofsd, false, Some(15)) 3795 } 3796 3797 #[test] 3798 fn test_virtio_pmem_persist_writes() { 3799 test_virtio_pmem(false, false) 3800 } 3801 3802 #[test] 3803 fn test_virtio_pmem_discard_writes() { 3804 test_virtio_pmem(true, false) 3805 } 3806 3807 #[test] 3808 fn test_virtio_pmem_with_size() { 3809 test_virtio_pmem(true, true) 3810 } 3811 3812 #[test] 3813 fn test_boot_from_virtio_pmem() { 3814 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3815 let guest = Guest::new(Box::new(focal)); 3816 3817 let kernel_path = direct_kernel_boot_path(); 3818 3819 let mut child = GuestCommand::new(&guest) 3820 .args(["--cpus", "boot=1"]) 3821 .args(["--memory", "size=512M"]) 3822 .args(["--kernel", kernel_path.to_str().unwrap()]) 3823 .args([ 3824 "--disk", 3825 format!( 3826 "path={}", 3827 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3828 ) 3829 .as_str(), 3830 ]) 3831 .default_net() 3832 .args([ 3833 "--pmem", 3834 format!( 3835 "file={},size={}", 3836 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3837 fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) 3838 .unwrap() 3839 .len() 3840 ) 3841 .as_str(), 3842 ]) 3843 .args([ 3844 "--cmdline", 3845 DIRECT_KERNEL_BOOT_CMDLINE 3846 .replace("vda1", "pmem0p1") 3847 .as_str(), 3848 ]) 3849 .capture_output() 3850 .spawn() 3851 .unwrap(); 3852 3853 let r = std::panic::catch_unwind(|| { 3854 guest.wait_vm_boot(None).unwrap(); 3855 3856 // Simple checks to validate the VM booted properly 3857 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3858 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3859 }); 3860 3861 kill_child(&mut child); 3862 let output = child.wait_with_output().unwrap(); 3863 3864 handle_child_output(r, &output); 3865 } 3866 3867 #[test] 3868 fn test_multiple_network_interfaces() { 3869 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3870 let guest = Guest::new(Box::new(focal)); 3871 3872 let kernel_path = direct_kernel_boot_path(); 3873 3874 let mut child = GuestCommand::new(&guest) 3875 .args(["--cpus", "boot=1"]) 3876 .args(["--memory", "size=512M"]) 3877 .args(["--kernel", kernel_path.to_str().unwrap()]) 3878 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3879 .default_disks() 3880 .args([ 3881 "--net", 3882 guest.default_net_string().as_str(), 3883 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 3884 "tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 3885 ]) 3886 .capture_output() 3887 .spawn() 3888 .unwrap(); 3889 3890 let r = std::panic::catch_unwind(|| { 3891 guest.wait_vm_boot(None).unwrap(); 3892 3893 
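            // Of the three --net entries above, only the last names its tap
            // (`mytap1`); the other two let the VMM pick the tap names. Seeing exactly
            // one `mytap1` in the host's `ip link` output therefore confirms the
            // explicitly named tap was created as requested.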
let tap_count = exec_host_command_output("ip link | grep -c mytap1"); 3894 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 3895 3896 // 3 network interfaces + default localhost ==> 4 interfaces 3897 assert_eq!( 3898 guest 3899 .ssh_command("ip -o link | wc -l") 3900 .unwrap() 3901 .trim() 3902 .parse::<u32>() 3903 .unwrap_or_default(), 3904 4 3905 ); 3906 }); 3907 3908 kill_child(&mut child); 3909 let output = child.wait_with_output().unwrap(); 3910 3911 handle_child_output(r, &output); 3912 } 3913 3914 #[test] 3915 #[cfg(target_arch = "aarch64")] 3916 fn test_pmu_on() { 3917 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3918 let guest = Guest::new(Box::new(focal)); 3919 let mut child = GuestCommand::new(&guest) 3920 .args(["--cpus", "boot=1"]) 3921 .args(["--memory", "size=512M"]) 3922 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3923 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3924 .default_disks() 3925 .default_net() 3926 .capture_output() 3927 .spawn() 3928 .unwrap(); 3929 3930 let r = std::panic::catch_unwind(|| { 3931 guest.wait_vm_boot(None).unwrap(); 3932 3933 // Test that PMU exists. 3934 assert_eq!( 3935 guest 3936 .ssh_command(GREP_PMU_IRQ_CMD) 3937 .unwrap() 3938 .trim() 3939 .parse::<u32>() 3940 .unwrap_or_default(), 3941 1 3942 ); 3943 }); 3944 3945 kill_child(&mut child); 3946 let output = child.wait_with_output().unwrap(); 3947 3948 handle_child_output(r, &output); 3949 } 3950 3951 #[test] 3952 fn test_serial_off() { 3953 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3954 let guest = Guest::new(Box::new(focal)); 3955 let mut child = GuestCommand::new(&guest) 3956 .args(["--cpus", "boot=1"]) 3957 .args(["--memory", "size=512M"]) 3958 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3959 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3960 .default_disks() 3961 .default_net() 3962 .args(["--serial", "off"]) 3963 .capture_output() 3964 .spawn() 3965 .unwrap(); 3966 3967 let r = std::panic::catch_unwind(|| { 3968 guest.wait_vm_boot(None).unwrap(); 3969 3970 // Test that there is no ttyS0 3971 assert_eq!( 3972 guest 3973 .ssh_command(GREP_SERIAL_IRQ_CMD) 3974 .unwrap() 3975 .trim() 3976 .parse::<u32>() 3977 .unwrap_or(1), 3978 0 3979 ); 3980 }); 3981 3982 kill_child(&mut child); 3983 let output = child.wait_with_output().unwrap(); 3984 3985 handle_child_output(r, &output); 3986 } 3987 3988 #[test] 3989 fn test_serial_null() { 3990 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3991 let guest = Guest::new(Box::new(focal)); 3992 let mut cmd = GuestCommand::new(&guest); 3993 #[cfg(target_arch = "x86_64")] 3994 let console_str: &str = "console=ttyS0"; 3995 #[cfg(target_arch = "aarch64")] 3996 let console_str: &str = "console=ttyAMA0"; 3997 3998 cmd.args(["--cpus", "boot=1"]) 3999 .args(["--memory", "size=512M"]) 4000 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4001 .args([ 4002 "--cmdline", 4003 DIRECT_KERNEL_BOOT_CMDLINE 4004 .replace("console=hvc0 ", console_str) 4005 .as_str(), 4006 ]) 4007 .default_disks() 4008 .default_net() 4009 .args(["--serial", "null"]) 4010 .args(["--console", "off"]) 4011 .capture_output(); 4012 4013 let mut child = cmd.spawn().unwrap(); 4014 4015 let r = std::panic::catch_unwind(|| { 4016 guest.wait_vm_boot(None).unwrap(); 4017 4018 // Test that there is a ttyS0 4019 assert_eq!( 4020 guest 4021 .ssh_command(GREP_SERIAL_IRQ_CMD) 4022 .unwrap() 4023 .trim() 4024 .parse::<u32>() 4025 .unwrap_or_default(), 4026 1 4027 ); 
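        // With `--serial null` the serial device is still emulated, so the
        // guest keeps registering its serial IRQ; only the host-side output
        // is discarded, which is checked against the captured stdout below.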
4028 }); 4029 4030 kill_child(&mut child); 4031 let output = child.wait_with_output().unwrap(); 4032 handle_child_output(r, &output); 4033 4034 let r = std::panic::catch_unwind(|| { 4035 assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 4036 }); 4037 4038 handle_child_output(r, &output); 4039 } 4040 4041 #[test] 4042 fn test_serial_tty() { 4043 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4044 let guest = Guest::new(Box::new(focal)); 4045 4046 let kernel_path = direct_kernel_boot_path(); 4047 4048 #[cfg(target_arch = "x86_64")] 4049 let console_str: &str = "console=ttyS0"; 4050 #[cfg(target_arch = "aarch64")] 4051 let console_str: &str = "console=ttyAMA0"; 4052 4053 let mut child = GuestCommand::new(&guest) 4054 .args(["--cpus", "boot=1"]) 4055 .args(["--memory", "size=512M"]) 4056 .args(["--kernel", kernel_path.to_str().unwrap()]) 4057 .args([ 4058 "--cmdline", 4059 DIRECT_KERNEL_BOOT_CMDLINE 4060 .replace("console=hvc0 ", console_str) 4061 .as_str(), 4062 ]) 4063 .default_disks() 4064 .default_net() 4065 .args(["--serial", "tty"]) 4066 .args(["--console", "off"]) 4067 .capture_output() 4068 .spawn() 4069 .unwrap(); 4070 4071 let r = std::panic::catch_unwind(|| { 4072 guest.wait_vm_boot(None).unwrap(); 4073 4074 // Test that there is a ttyS0 4075 assert_eq!( 4076 guest 4077 .ssh_command(GREP_SERIAL_IRQ_CMD) 4078 .unwrap() 4079 .trim() 4080 .parse::<u32>() 4081 .unwrap_or_default(), 4082 1 4083 ); 4084 }); 4085 4086 // This sleep is needed to wait for the login prompt 4087 thread::sleep(std::time::Duration::new(2, 0)); 4088 4089 kill_child(&mut child); 4090 let output = child.wait_with_output().unwrap(); 4091 handle_child_output(r, &output); 4092 4093 let r = std::panic::catch_unwind(|| { 4094 assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 4095 }); 4096 4097 handle_child_output(r, &output); 4098 } 4099 4100 #[test] 4101 fn test_serial_file() { 4102 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4103 let guest = Guest::new(Box::new(focal)); 4104 4105 let serial_path = guest.tmp_dir.as_path().join("serial-output"); 4106 #[cfg(target_arch = "x86_64")] 4107 let console_str: &str = "console=ttyS0"; 4108 #[cfg(target_arch = "aarch64")] 4109 let console_str: &str = "console=ttyAMA0"; 4110 4111 let mut child = GuestCommand::new(&guest) 4112 .args(["--cpus", "boot=1"]) 4113 .args(["--memory", "size=512M"]) 4114 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4115 .args([ 4116 "--cmdline", 4117 DIRECT_KERNEL_BOOT_CMDLINE 4118 .replace("console=hvc0 ", console_str) 4119 .as_str(), 4120 ]) 4121 .default_disks() 4122 .default_net() 4123 .args([ 4124 "--serial", 4125 format!("file={}", serial_path.to_str().unwrap()).as_str(), 4126 ]) 4127 .capture_output() 4128 .spawn() 4129 .unwrap(); 4130 4131 let r = std::panic::catch_unwind(|| { 4132 guest.wait_vm_boot(None).unwrap(); 4133 4134 // Test that there is a ttyS0 4135 assert_eq!( 4136 guest 4137 .ssh_command(GREP_SERIAL_IRQ_CMD) 4138 .unwrap() 4139 .trim() 4140 .parse::<u32>() 4141 .unwrap_or_default(), 4142 1 4143 ); 4144 4145 guest.ssh_command("sudo shutdown -h now").unwrap(); 4146 }); 4147 4148 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4149 kill_child(&mut child); 4150 let output = child.wait_with_output().unwrap(); 4151 handle_child_output(r, &output); 4152 4153 let r = std::panic::catch_unwind(|| { 4154 // Check that the cloud-hypervisor binary actually terminated 4155 assert!(output.status.success()); 4156 4157 
// Do this check after shutdown of the VM as an easy way to ensure 4158 // all writes are flushed to disk 4159 let mut f = std::fs::File::open(serial_path).unwrap(); 4160 let mut buf = String::new(); 4161 f.read_to_string(&mut buf).unwrap(); 4162 assert!(buf.contains(CONSOLE_TEST_STRING)); 4163 }); 4164 4165 handle_child_output(r, &output); 4166 } 4167 4168 #[test] 4169 fn test_pty_interaction() { 4170 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4171 let guest = Guest::new(Box::new(focal)); 4172 let api_socket = temp_api_path(&guest.tmp_dir); 4173 let serial_option = if cfg!(target_arch = "x86_64") { 4174 " console=ttyS0" 4175 } else { 4176 " console=ttyAMA0" 4177 }; 4178 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4179 4180 let mut child = GuestCommand::new(&guest) 4181 .args(["--cpus", "boot=1"]) 4182 .args(["--memory", "size=512M"]) 4183 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4184 .args(["--cmdline", &cmdline]) 4185 .default_disks() 4186 .default_net() 4187 .args(["--serial", "null"]) 4188 .args(["--console", "pty"]) 4189 .args(["--api-socket", &api_socket]) 4190 .spawn() 4191 .unwrap(); 4192 4193 let r = std::panic::catch_unwind(|| { 4194 guest.wait_vm_boot(None).unwrap(); 4195 // Get pty fd for console 4196 let console_path = get_pty_path(&api_socket, "console"); 4197 _test_pty_interaction(console_path); 4198 4199 guest.ssh_command("sudo shutdown -h now").unwrap(); 4200 }); 4201 4202 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4203 let _ = child.kill(); 4204 let output = child.wait_with_output().unwrap(); 4205 handle_child_output(r, &output); 4206 4207 let r = std::panic::catch_unwind(|| { 4208 // Check that the cloud-hypervisor binary actually terminated 4209 assert!(output.status.success()) 4210 }); 4211 handle_child_output(r, &output); 4212 } 4213 4214 #[test] 4215 fn test_serial_socket_interaction() { 4216 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4217 let guest = Guest::new(Box::new(focal)); 4218 let serial_socket = guest.tmp_dir.as_path().join("serial.socket"); 4219 let serial_socket_pty = guest.tmp_dir.as_path().join("serial.pty"); 4220 let serial_option = if cfg!(target_arch = "x86_64") { 4221 " console=ttyS0" 4222 } else { 4223 " console=ttyAMA0" 4224 }; 4225 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4226 4227 let mut child = GuestCommand::new(&guest) 4228 .args(["--cpus", "boot=1"]) 4229 .args(["--memory", "size=512M"]) 4230 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4231 .args(["--cmdline", &cmdline]) 4232 .default_disks() 4233 .default_net() 4234 .args(["--console", "null"]) 4235 .args([ 4236 "--serial", 4237 format!("socket={}", serial_socket.to_str().unwrap()).as_str(), 4238 ]) 4239 .spawn() 4240 .unwrap(); 4241 4242 let _ = std::panic::catch_unwind(|| { 4243 guest.wait_vm_boot(None).unwrap(); 4244 }); 4245 4246 let mut socat_command = Command::new("socat"); 4247 let socat_args = [ 4248 &format!("pty,link={},raw", serial_socket_pty.display()), 4249 &format!("UNIX-CONNECT:{}", serial_socket.display()), 4250 ]; 4251 socat_command.args(socat_args); 4252 4253 let mut socat_child = socat_command.spawn().unwrap(); 4254 thread::sleep(std::time::Duration::new(1, 0)); 4255 4256 let _ = std::panic::catch_unwind(|| { 4257 _test_pty_interaction(serial_socket_pty); 4258 }); 4259 4260 let _ = socat_child.kill(); 4261 4262 let r = std::panic::catch_unwind(|| { 4263 guest.ssh_command("sudo shutdown -h now").unwrap(); 4264 }); 
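    // Allow up to 20 seconds for the guest shutdown to bring the VMM down
    // before forcefully killing the child process.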
4265 4266 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4267 kill_child(&mut child); 4268 let output = child.wait_with_output().unwrap(); 4269 handle_child_output(r, &output); 4270 4271 let r = std::panic::catch_unwind(|| { 4272 // Check that the cloud-hypervisor binary actually terminated 4273 if !output.status.success() { 4274 panic!( 4275 "Cloud Hypervisor process failed to terminate gracefully: {:?}", 4276 output.status 4277 ); 4278 } 4279 }); 4280 handle_child_output(r, &output); 4281 } 4282 4283 #[test] 4284 fn test_virtio_console() { 4285 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4286 let guest = Guest::new(Box::new(focal)); 4287 4288 let kernel_path = direct_kernel_boot_path(); 4289 4290 let mut child = GuestCommand::new(&guest) 4291 .args(["--cpus", "boot=1"]) 4292 .args(["--memory", "size=512M"]) 4293 .args(["--kernel", kernel_path.to_str().unwrap()]) 4294 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4295 .default_disks() 4296 .default_net() 4297 .args(["--console", "tty"]) 4298 .args(["--serial", "null"]) 4299 .capture_output() 4300 .spawn() 4301 .unwrap(); 4302 4303 let text = String::from("On a branch floating down river a cricket, singing."); 4304 let cmd = format!("echo {text} | sudo tee /dev/hvc0"); 4305 4306 let r = std::panic::catch_unwind(|| { 4307 guest.wait_vm_boot(None).unwrap(); 4308 4309 assert!(guest 4310 .does_device_vendor_pair_match("0x1043", "0x1af4") 4311 .unwrap_or_default()); 4312 4313 guest.ssh_command(&cmd).unwrap(); 4314 }); 4315 4316 kill_child(&mut child); 4317 let output = child.wait_with_output().unwrap(); 4318 handle_child_output(r, &output); 4319 4320 let r = std::panic::catch_unwind(|| { 4321 assert!(String::from_utf8_lossy(&output.stdout).contains(&text)); 4322 }); 4323 4324 handle_child_output(r, &output); 4325 } 4326 4327 #[test] 4328 fn test_console_file() { 4329 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4330 let guest = Guest::new(Box::new(focal)); 4331 4332 let console_path = guest.tmp_dir.as_path().join("console-output"); 4333 let mut child = GuestCommand::new(&guest) 4334 .args(["--cpus", "boot=1"]) 4335 .args(["--memory", "size=512M"]) 4336 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4337 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4338 .default_disks() 4339 .default_net() 4340 .args([ 4341 "--console", 4342 format!("file={}", console_path.to_str().unwrap()).as_str(), 4343 ]) 4344 .capture_output() 4345 .spawn() 4346 .unwrap(); 4347 4348 guest.wait_vm_boot(None).unwrap(); 4349 4350 guest.ssh_command("sudo shutdown -h now").unwrap(); 4351 4352 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4353 kill_child(&mut child); 4354 let output = child.wait_with_output().unwrap(); 4355 4356 let r = std::panic::catch_unwind(|| { 4357 // Check that the cloud-hypervisor binary actually terminated 4358 assert!(output.status.success()); 4359 4360 // Do this check after shutdown of the VM as an easy way to ensure 4361 // all writes are flushed to disk 4362 let mut f = std::fs::File::open(console_path).unwrap(); 4363 let mut buf = String::new(); 4364 f.read_to_string(&mut buf).unwrap(); 4365 4366 if !buf.contains(CONSOLE_TEST_STRING) { 4367 eprintln!( 4368 "\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ====" 4369 ); 4370 } 4371 assert!(buf.contains(CONSOLE_TEST_STRING)); 4372 }); 4373 4374 handle_child_output(r, &output); 4375 } 4376 4377 #[test] 4378 #[cfg(target_arch = "x86_64")] 4379 #[cfg(not(feature = "mshv"))] 4380 
// The VFIO integration test starts cloud-hypervisor guest with 3 TAP 4381 // backed networking interfaces, bound through a simple bridge on the host. 4382 // So if the nested cloud-hypervisor succeeds in getting a directly 4383 // assigned interface from its cloud-hypervisor host, we should be able to 4384 // ssh into it, and verify that it's running with the right kernel command 4385 // line (We tag the command line from cloud-hypervisor for that purpose). 4386 // The third device is added to validate that hotplug works correctly since 4387 // it is being added to the L2 VM through hotplugging mechanism. 4388 // Also, we pass-through a virtio-blk device to the L2 VM to test the 32-bit 4389 // vfio device support 4390 fn test_vfio() { 4391 setup_vfio_network_interfaces(); 4392 4393 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 4394 let guest = Guest::new_from_ip_range(Box::new(jammy), "172.18", 0); 4395 4396 let mut workload_path = dirs::home_dir().unwrap(); 4397 workload_path.push("workloads"); 4398 4399 let kernel_path = direct_kernel_boot_path(); 4400 4401 let mut vfio_path = workload_path.clone(); 4402 vfio_path.push("vfio"); 4403 4404 let mut cloud_init_vfio_base_path = vfio_path.clone(); 4405 cloud_init_vfio_base_path.push("cloudinit.img"); 4406 4407 // We copy our cloudinit into the vfio mount point, for the nested 4408 // cloud-hypervisor guest to use. 4409 rate_limited_copy( 4410 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 4411 &cloud_init_vfio_base_path, 4412 ) 4413 .expect("copying of cloud-init disk failed"); 4414 4415 let mut vfio_disk_path = workload_path.clone(); 4416 vfio_disk_path.push("vfio.img"); 4417 4418 // Create the vfio disk image 4419 let output = Command::new("mkfs.ext4") 4420 .arg("-d") 4421 .arg(vfio_path.to_str().unwrap()) 4422 .arg(vfio_disk_path.to_str().unwrap()) 4423 .arg("2g") 4424 .output() 4425 .unwrap(); 4426 if !output.status.success() { 4427 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 4428 panic!("mkfs.ext4 command generated an error"); 4429 } 4430 4431 let mut blk_file_path = workload_path; 4432 blk_file_path.push("blk.img"); 4433 4434 let vfio_tap0 = "vfio-tap0"; 4435 let vfio_tap1 = "vfio-tap1"; 4436 let vfio_tap2 = "vfio-tap2"; 4437 let vfio_tap3 = "vfio-tap3"; 4438 4439 let mut child = GuestCommand::new(&guest) 4440 .args(["--cpus", "boot=4"]) 4441 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 4442 .args(["--kernel", kernel_path.to_str().unwrap()]) 4443 .args([ 4444 "--disk", 4445 format!( 4446 "path={}", 4447 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 4448 ) 4449 .as_str(), 4450 format!( 4451 "path={}", 4452 guest.disk_config.disk(DiskType::CloudInit).unwrap() 4453 ) 4454 .as_str(), 4455 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 4456 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 4457 ]) 4458 .args([ 4459 "--cmdline", 4460 format!( 4461 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 4462 ) 4463 .as_str(), 4464 ]) 4465 .args([ 4466 "--net", 4467 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 4468 format!( 4469 "tap={},mac={},iommu=on", 4470 vfio_tap1, guest.network.l2_guest_mac1 4471 ) 4472 .as_str(), 4473 format!( 4474 "tap={},mac={},iommu=on", 4475 vfio_tap2, guest.network.l2_guest_mac2 4476 ) 4477 .as_str(), 4478 format!( 4479 "tap={},mac={},iommu=on", 4480 vfio_tap3, guest.network.l2_guest_mac3 4481 ) 4482 .as_str(), 4483 ]) 4484 .capture_output() 4485 .spawn() 
4486 .unwrap(); 4487 4488 thread::sleep(std::time::Duration::new(30, 0)); 4489 4490 let r = std::panic::catch_unwind(|| { 4491 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 4492 thread::sleep(std::time::Duration::new(120, 0)); 4493 4494 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 4495 // added to its kernel command line. 4496 // Let's ssh into it and verify that it's there. If it is it means 4497 // we're in the right guest (The L2 one) because the QEMU L1 guest 4498 // does not have this command line tag. 4499 assert!(check_matched_lines_count( 4500 guest.ssh_command_l2_1("cat /proc/cmdline").unwrap().trim(), 4501 vec!["VFIOTAG"], 4502 1 4503 )); 4504 4505 // Let's also verify from the second virtio-net device passed to 4506 // the L2 VM. 4507 assert!(check_matched_lines_count( 4508 guest.ssh_command_l2_2("cat /proc/cmdline").unwrap().trim(), 4509 vec!["VFIOTAG"], 4510 1 4511 )); 4512 4513 // Check the amount of PCI devices appearing in L2 VM. 4514 assert!(check_lines_count( 4515 guest 4516 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4517 .unwrap() 4518 .trim(), 4519 8 4520 )); 4521 4522 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 4523 assert!(check_matched_lines_count( 4524 guest.ssh_command_l2_1("lsblk").unwrap().trim(), 4525 vec!["vdc", "16M"], 4526 1 4527 )); 4528 4529 // Hotplug an extra virtio-net device through L2 VM. 4530 guest 4531 .ssh_command_l1( 4532 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 4533 ) 4534 .unwrap(); 4535 guest 4536 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 4537 .unwrap(); 4538 let vfio_hotplug_output = guest 4539 .ssh_command_l1( 4540 "sudo /mnt/ch-remote \ 4541 --api-socket=/tmp/ch_api.sock \ 4542 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 4543 ) 4544 .unwrap(); 4545 assert!(check_matched_lines_count( 4546 vfio_hotplug_output.trim(), 4547 vec!["{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}"], 4548 1 4549 )); 4550 4551 thread::sleep(std::time::Duration::new(10, 0)); 4552 4553 // Let's also verify from the third virtio-net device passed to 4554 // the L2 VM. This third device has been hotplugged through the L2 4555 // VM, so this is our way to validate hotplug works for VFIO PCI. 4556 assert!(check_matched_lines_count( 4557 guest.ssh_command_l2_3("cat /proc/cmdline").unwrap().trim(), 4558 vec!["VFIOTAG"], 4559 1 4560 )); 4561 4562 // Check the amount of PCI devices appearing in L2 VM. 4563 // There should be one more device than before, raising the count 4564 // up to 9 PCI devices. 4565 assert!(check_lines_count( 4566 guest 4567 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4568 .unwrap() 4569 .trim(), 4570 9 4571 )); 4572 4573 // Let's now verify that we can correctly remove the virtio-net 4574 // device through the "remove-device" command responsible for 4575 // unplugging VFIO devices. 4576 guest 4577 .ssh_command_l1( 4578 "sudo /mnt/ch-remote \ 4579 --api-socket=/tmp/ch_api.sock \ 4580 remove-device vfio123", 4581 ) 4582 .unwrap(); 4583 thread::sleep(std::time::Duration::new(10, 0)); 4584 4585 // Check the amount of PCI devices appearing in L2 VM is back down 4586 // to 8 devices. 4587 assert!(check_lines_count( 4588 guest 4589 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4590 .unwrap() 4591 .trim(), 4592 8 4593 )); 4594 4595 // Perform memory hotplug in L2 and validate the memory is showing 4596 // up as expected. 
In order to check, we will use the virtio-net 4597 // device already passed through L2 as a VFIO device, this will 4598 // verify that VFIO devices are functional with memory hotplug. 4599 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 4600 guest 4601 .ssh_command_l2_1( 4602 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 4603 ) 4604 .unwrap(); 4605 guest 4606 .ssh_command_l1( 4607 "sudo /mnt/ch-remote \ 4608 --api-socket=/tmp/ch_api.sock \ 4609 resize --memory=1073741824", 4610 ) 4611 .unwrap(); 4612 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 4613 }); 4614 4615 kill_child(&mut child); 4616 let output = child.wait_with_output().unwrap(); 4617 4618 cleanup_vfio_network_interfaces(); 4619 4620 handle_child_output(r, &output); 4621 } 4622 4623 #[test] 4624 fn test_direct_kernel_boot_noacpi() { 4625 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4626 let guest = Guest::new(Box::new(focal)); 4627 4628 let kernel_path = direct_kernel_boot_path(); 4629 4630 let mut child = GuestCommand::new(&guest) 4631 .args(["--cpus", "boot=1"]) 4632 .args(["--memory", "size=512M"]) 4633 .args(["--kernel", kernel_path.to_str().unwrap()]) 4634 .args([ 4635 "--cmdline", 4636 format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(), 4637 ]) 4638 .default_disks() 4639 .default_net() 4640 .capture_output() 4641 .spawn() 4642 .unwrap(); 4643 4644 let r = std::panic::catch_unwind(|| { 4645 guest.wait_vm_boot(None).unwrap(); 4646 4647 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 4648 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4649 }); 4650 4651 kill_child(&mut child); 4652 let output = child.wait_with_output().unwrap(); 4653 4654 handle_child_output(r, &output); 4655 } 4656 4657 #[test] 4658 fn test_virtio_vsock() { 4659 _test_virtio_vsock(false) 4660 } 4661 4662 #[test] 4663 fn test_virtio_vsock_hotplug() { 4664 _test_virtio_vsock(true); 4665 } 4666 4667 #[test] 4668 fn test_api_http_shutdown() { 4669 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4670 let guest = Guest::new(Box::new(focal)); 4671 4672 _test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest) 4673 } 4674 4675 #[test] 4676 fn test_api_http_delete() { 4677 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4678 let guest = Guest::new(Box::new(focal)); 4679 4680 _test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest); 4681 } 4682 4683 #[test] 4684 fn test_api_http_pause_resume() { 4685 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4686 let guest = Guest::new(Box::new(focal)); 4687 4688 _test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest) 4689 } 4690 4691 #[test] 4692 fn test_api_http_create_boot() { 4693 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4694 let guest = Guest::new(Box::new(focal)); 4695 4696 _test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest) 4697 } 4698 4699 #[test] 4700 fn test_virtio_iommu() { 4701 _test_virtio_iommu(cfg!(target_arch = "x86_64")) 4702 } 4703 4704 #[test] 4705 // We cannot force the software running in the guest to reprogram the BAR 4706 // with some different addresses, but we have a reliable way of testing it 4707 // with a standard Linux kernel. 4708 // By removing a device from the PCI tree, and then rescanning the tree, 4709 // Linux consistently chooses to reorganize the PCI device BARs to other 4710 // locations in the guest address space. 
// This test creates a dedicated PCI network device, checks that it is
// properly probed first, then removes it and adds it back by triggering a
// PCI bus rescan.
fn test_pci_bar_reprogramming() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = edk2_path();

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args([
            "--net",
            guest.default_net_string().as_str(),
            "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
        ])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // 2 network interfaces + default localhost ==> 3 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            3
        );

        let init_bar_addr = guest
            .ssh_command(
                "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
            )
            .unwrap();

        // Remove the PCI device
        guest
            .ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove")
            .unwrap();

        // Only 1 network interface left + default localhost ==> 2 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // Rescan the PCI bus to rediscover the removed device
        guest
            .ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan")
            .unwrap();

        // Back to 2 network interfaces + default localhost ==> 3 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            3
        );

        let new_bar_addr = guest
            .ssh_command(
                "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
            )
            .unwrap();

        // Let's compare the BAR addresses for our virtio-net device.
        // They should be different as we expect the BAR reprogramming
        // to have happened.
4799 assert_ne!(init_bar_addr, new_bar_addr); 4800 }); 4801 4802 kill_child(&mut child); 4803 let output = child.wait_with_output().unwrap(); 4804 4805 handle_child_output(r, &output); 4806 } 4807 4808 #[test] 4809 fn test_memory_mergeable_off() { 4810 test_memory_mergeable(false) 4811 } 4812 4813 #[test] 4814 #[cfg(target_arch = "x86_64")] 4815 fn test_cpu_hotplug() { 4816 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4817 let guest = Guest::new(Box::new(focal)); 4818 let api_socket = temp_api_path(&guest.tmp_dir); 4819 4820 let kernel_path = direct_kernel_boot_path(); 4821 4822 let mut child = GuestCommand::new(&guest) 4823 .args(["--cpus", "boot=2,max=4"]) 4824 .args(["--memory", "size=512M"]) 4825 .args(["--kernel", kernel_path.to_str().unwrap()]) 4826 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4827 .default_disks() 4828 .default_net() 4829 .args(["--api-socket", &api_socket]) 4830 .capture_output() 4831 .spawn() 4832 .unwrap(); 4833 4834 let r = std::panic::catch_unwind(|| { 4835 guest.wait_vm_boot(None).unwrap(); 4836 4837 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4838 4839 // Resize the VM 4840 let desired_vcpus = 4; 4841 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4842 4843 guest 4844 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4845 .unwrap(); 4846 guest 4847 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4848 .unwrap(); 4849 thread::sleep(std::time::Duration::new(10, 0)); 4850 assert_eq!( 4851 guest.get_cpu_count().unwrap_or_default(), 4852 u32::from(desired_vcpus) 4853 ); 4854 4855 guest.reboot_linux(0, None); 4856 4857 assert_eq!( 4858 guest.get_cpu_count().unwrap_or_default(), 4859 u32::from(desired_vcpus) 4860 ); 4861 4862 // Resize the VM 4863 let desired_vcpus = 2; 4864 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4865 4866 thread::sleep(std::time::Duration::new(10, 0)); 4867 assert_eq!( 4868 guest.get_cpu_count().unwrap_or_default(), 4869 u32::from(desired_vcpus) 4870 ); 4871 4872 // Resize the VM back up to 4 4873 let desired_vcpus = 4; 4874 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4875 4876 guest 4877 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4878 .unwrap(); 4879 guest 4880 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4881 .unwrap(); 4882 thread::sleep(std::time::Duration::new(10, 0)); 4883 assert_eq!( 4884 guest.get_cpu_count().unwrap_or_default(), 4885 u32::from(desired_vcpus) 4886 ); 4887 }); 4888 4889 kill_child(&mut child); 4890 let output = child.wait_with_output().unwrap(); 4891 4892 handle_child_output(r, &output); 4893 } 4894 4895 #[test] 4896 fn test_memory_hotplug() { 4897 #[cfg(target_arch = "aarch64")] 4898 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 4899 #[cfg(target_arch = "x86_64")] 4900 let focal_image = FOCAL_IMAGE_NAME.to_string(); 4901 let focal = UbuntuDiskConfig::new(focal_image); 4902 let guest = Guest::new(Box::new(focal)); 4903 let api_socket = temp_api_path(&guest.tmp_dir); 4904 4905 #[cfg(target_arch = "aarch64")] 4906 let kernel_path = edk2_path(); 4907 #[cfg(target_arch = "x86_64")] 4908 let kernel_path = direct_kernel_boot_path(); 4909 4910 let mut child = GuestCommand::new(&guest) 4911 .args(["--cpus", "boot=2,max=4"]) 4912 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4913 .args(["--kernel", kernel_path.to_str().unwrap()]) 4914 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4915 .default_disks() 4916 
.default_net() 4917 .args(["--balloon", "size=0"]) 4918 .args(["--api-socket", &api_socket]) 4919 .capture_output() 4920 .spawn() 4921 .unwrap(); 4922 4923 let r = std::panic::catch_unwind(|| { 4924 guest.wait_vm_boot(None).unwrap(); 4925 4926 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4927 4928 guest.enable_memory_hotplug(); 4929 4930 // Add RAM to the VM 4931 let desired_ram = 1024 << 20; 4932 resize_command(&api_socket, None, Some(desired_ram), None, None); 4933 4934 thread::sleep(std::time::Duration::new(10, 0)); 4935 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4936 4937 // Use balloon to remove RAM from the VM 4938 let desired_balloon = 512 << 20; 4939 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4940 4941 thread::sleep(std::time::Duration::new(10, 0)); 4942 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4943 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4944 4945 guest.reboot_linux(0, None); 4946 4947 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4948 4949 // Use balloon add RAM to the VM 4950 let desired_balloon = 0; 4951 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4952 4953 thread::sleep(std::time::Duration::new(10, 0)); 4954 4955 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4956 4957 guest.enable_memory_hotplug(); 4958 4959 // Add RAM to the VM 4960 let desired_ram = 2048 << 20; 4961 resize_command(&api_socket, None, Some(desired_ram), None, None); 4962 4963 thread::sleep(std::time::Duration::new(10, 0)); 4964 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4965 4966 // Remove RAM to the VM (only applies after reboot) 4967 let desired_ram = 1024 << 20; 4968 resize_command(&api_socket, None, Some(desired_ram), None, None); 4969 4970 guest.reboot_linux(1, None); 4971 4972 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4973 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4974 }); 4975 4976 kill_child(&mut child); 4977 let output = child.wait_with_output().unwrap(); 4978 4979 handle_child_output(r, &output); 4980 } 4981 4982 #[test] 4983 #[cfg(not(feature = "mshv"))] 4984 fn test_virtio_mem() { 4985 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4986 let guest = Guest::new(Box::new(focal)); 4987 let api_socket = temp_api_path(&guest.tmp_dir); 4988 4989 let kernel_path = direct_kernel_boot_path(); 4990 4991 let mut child = GuestCommand::new(&guest) 4992 .args(["--cpus", "boot=2,max=4"]) 4993 .args([ 4994 "--memory", 4995 "size=512M,hotplug_method=virtio-mem,hotplug_size=8192M", 4996 ]) 4997 .args(["--kernel", kernel_path.to_str().unwrap()]) 4998 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4999 .default_disks() 5000 .default_net() 5001 .args(["--api-socket", &api_socket]) 5002 .capture_output() 5003 .spawn() 5004 .unwrap(); 5005 5006 let r = std::panic::catch_unwind(|| { 5007 guest.wait_vm_boot(None).unwrap(); 5008 5009 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5010 5011 guest.enable_memory_hotplug(); 5012 5013 // Add RAM to the VM 5014 let desired_ram = 1024 << 20; 5015 resize_command(&api_socket, None, Some(desired_ram), None, None); 5016 5017 thread::sleep(std::time::Duration::new(10, 0)); 5018 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5019 5020 // Add RAM to the VM 5021 let desired_ram = 2048 << 20; 5022 resize_command(&api_socket, None, Some(desired_ram), None, None); 5023 5024 
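        // Memory added through virtio-mem has to be plugged and onlined by
        // the guest, so give it a few seconds before checking the new total.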
thread::sleep(std::time::Duration::new(10, 0)); 5025 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 5026 5027 // Remove RAM from the VM 5028 let desired_ram = 1024 << 20; 5029 resize_command(&api_socket, None, Some(desired_ram), None, None); 5030 5031 thread::sleep(std::time::Duration::new(10, 0)); 5032 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5033 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 5034 5035 guest.reboot_linux(0, None); 5036 5037 // Check the amount of memory after reboot is 1GiB 5038 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5039 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 5040 5041 // Check we can still resize to 512MiB 5042 let desired_ram = 512 << 20; 5043 resize_command(&api_socket, None, Some(desired_ram), None, None); 5044 thread::sleep(std::time::Duration::new(10, 0)); 5045 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5046 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 5047 }); 5048 5049 kill_child(&mut child); 5050 let output = child.wait_with_output().unwrap(); 5051 5052 handle_child_output(r, &output); 5053 } 5054 5055 #[test] 5056 #[cfg(target_arch = "x86_64")] 5057 #[cfg(not(feature = "mshv"))] 5058 // Test both vCPU and memory resizing together 5059 fn test_resize() { 5060 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5061 let guest = Guest::new(Box::new(focal)); 5062 let api_socket = temp_api_path(&guest.tmp_dir); 5063 5064 let kernel_path = direct_kernel_boot_path(); 5065 5066 let mut child = GuestCommand::new(&guest) 5067 .args(["--cpus", "boot=2,max=4"]) 5068 .args(["--memory", "size=512M,hotplug_size=8192M"]) 5069 .args(["--kernel", kernel_path.to_str().unwrap()]) 5070 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5071 .default_disks() 5072 .default_net() 5073 .args(["--api-socket", &api_socket]) 5074 .capture_output() 5075 .spawn() 5076 .unwrap(); 5077 5078 let r = std::panic::catch_unwind(|| { 5079 guest.wait_vm_boot(None).unwrap(); 5080 5081 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 5082 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5083 5084 guest.enable_memory_hotplug(); 5085 5086 // Resize the VM 5087 let desired_vcpus = 4; 5088 let desired_ram = 1024 << 20; 5089 resize_command( 5090 &api_socket, 5091 Some(desired_vcpus), 5092 Some(desired_ram), 5093 None, 5094 None, 5095 ); 5096 5097 guest 5098 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 5099 .unwrap(); 5100 guest 5101 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 5102 .unwrap(); 5103 thread::sleep(std::time::Duration::new(10, 0)); 5104 assert_eq!( 5105 guest.get_cpu_count().unwrap_or_default(), 5106 u32::from(desired_vcpus) 5107 ); 5108 5109 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5110 }); 5111 5112 kill_child(&mut child); 5113 let output = child.wait_with_output().unwrap(); 5114 5115 handle_child_output(r, &output); 5116 } 5117 5118 #[test] 5119 fn test_memory_overhead() { 5120 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5121 let guest = Guest::new(Box::new(focal)); 5122 5123 let kernel_path = direct_kernel_boot_path(); 5124 5125 let guest_memory_size_kb = 512 * 1024; 5126 5127 let mut child = GuestCommand::new(&guest) 5128 .args(["--cpus", "boot=1"]) 5129 .args(["--memory", format!("size={guest_memory_size_kb}K").as_str()]) 5130 .args(["--kernel", kernel_path.to_str().unwrap()]) 5131 .args(["--cmdline", 
            DIRECT_KERNEL_BOOT_CMDLINE])
        .default_net()
        .default_disks()
        .capture_output()
        .spawn()
        .unwrap();

    guest.wait_vm_boot(None).unwrap();

    let r = std::panic::catch_unwind(|| {
        let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb);
        eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}");
        assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB);
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
#[cfg(target_arch = "x86_64")]
// This test runs a guest with Landlock enabled and hotplugs a new disk. As
// the path of the hotplug disk is not pre-added to the Landlock rules, the
// hotplug request is expected to fail.
fn test_landlock() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = edk2_path();

    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut child = GuestCommand::new(&guest)
        .args(["--api-socket", &api_socket])
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--landlock"])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check /dev/vdc is not there
        assert_eq!(
            guest
                .ssh_command("lsblk | grep -c vdc.*16M || true")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or(1),
            0
        );

        // Now let's try to add the extra disk.
        let mut blk_file_path = dirs::home_dir().unwrap();
        blk_file_path.push("workloads");
        blk_file_path.push("blk.img");
        // As the path to the hotplug disk is not pre-added, this remote
        // command will fail.
5201 assert!(!remote_command( 5202 &api_socket, 5203 "add-disk", 5204 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5205 )); 5206 }); 5207 5208 let _ = child.kill(); 5209 let output = child.wait_with_output().unwrap(); 5210 5211 handle_child_output(r, &output); 5212 } 5213 5214 fn _test_disk_hotplug(landlock_enabled: bool) { 5215 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5216 let guest = Guest::new(Box::new(focal)); 5217 5218 #[cfg(target_arch = "x86_64")] 5219 let kernel_path = direct_kernel_boot_path(); 5220 #[cfg(target_arch = "aarch64")] 5221 let kernel_path = edk2_path(); 5222 5223 let api_socket = temp_api_path(&guest.tmp_dir); 5224 5225 let mut blk_file_path = dirs::home_dir().unwrap(); 5226 blk_file_path.push("workloads"); 5227 blk_file_path.push("blk.img"); 5228 5229 let mut cmd = GuestCommand::new(&guest); 5230 if landlock_enabled { 5231 cmd.args(["--landlock"]).args([ 5232 "--landlock-rules", 5233 format!("path={:?},access=rw", blk_file_path).as_str(), 5234 ]); 5235 } 5236 5237 cmd.args(["--api-socket", &api_socket]) 5238 .args(["--cpus", "boot=1"]) 5239 .args(["--memory", "size=512M"]) 5240 .args(["--kernel", kernel_path.to_str().unwrap()]) 5241 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5242 .default_disks() 5243 .default_net() 5244 .capture_output(); 5245 5246 let mut child = cmd.spawn().unwrap(); 5247 5248 let r = std::panic::catch_unwind(|| { 5249 guest.wait_vm_boot(None).unwrap(); 5250 5251 // Check /dev/vdc is not there 5252 assert_eq!( 5253 guest 5254 .ssh_command("lsblk | grep -c vdc.*16M || true") 5255 .unwrap() 5256 .trim() 5257 .parse::<u32>() 5258 .unwrap_or(1), 5259 0 5260 ); 5261 5262 // Now let's add the extra disk. 5263 let (cmd_success, cmd_output) = remote_command_w_output( 5264 &api_socket, 5265 "add-disk", 5266 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5267 ); 5268 assert!(cmd_success); 5269 assert!(String::from_utf8_lossy(&cmd_output) 5270 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5271 5272 thread::sleep(std::time::Duration::new(10, 0)); 5273 5274 // Check that /dev/vdc exists and the block size is 16M. 5275 assert_eq!( 5276 guest 5277 .ssh_command("lsblk | grep vdc | grep -c 16M") 5278 .unwrap() 5279 .trim() 5280 .parse::<u32>() 5281 .unwrap_or_default(), 5282 1 5283 ); 5284 // And check the block device can be read. 5285 guest 5286 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5287 .unwrap(); 5288 5289 // Let's remove it the extra disk. 5290 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5291 thread::sleep(std::time::Duration::new(5, 0)); 5292 // And check /dev/vdc is not there 5293 assert_eq!( 5294 guest 5295 .ssh_command("lsblk | grep -c vdc.*16M || true") 5296 .unwrap() 5297 .trim() 5298 .parse::<u32>() 5299 .unwrap_or(1), 5300 0 5301 ); 5302 5303 // And add it back to validate unplug did work correctly. 5304 let (cmd_success, cmd_output) = remote_command_w_output( 5305 &api_socket, 5306 "add-disk", 5307 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5308 ); 5309 assert!(cmd_success); 5310 assert!(String::from_utf8_lossy(&cmd_output) 5311 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5312 5313 thread::sleep(std::time::Duration::new(10, 0)); 5314 5315 // Check that /dev/vdc exists and the block size is 16M. 
5316 assert_eq!( 5317 guest 5318 .ssh_command("lsblk | grep vdc | grep -c 16M") 5319 .unwrap() 5320 .trim() 5321 .parse::<u32>() 5322 .unwrap_or_default(), 5323 1 5324 ); 5325 // And check the block device can be read. 5326 guest 5327 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5328 .unwrap(); 5329 5330 // Reboot the VM. 5331 guest.reboot_linux(0, None); 5332 5333 // Check still there after reboot 5334 assert_eq!( 5335 guest 5336 .ssh_command("lsblk | grep vdc | grep -c 16M") 5337 .unwrap() 5338 .trim() 5339 .parse::<u32>() 5340 .unwrap_or_default(), 5341 1 5342 ); 5343 5344 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5345 5346 thread::sleep(std::time::Duration::new(20, 0)); 5347 5348 // Check device has gone away 5349 assert_eq!( 5350 guest 5351 .ssh_command("lsblk | grep -c vdc.*16M || true") 5352 .unwrap() 5353 .trim() 5354 .parse::<u32>() 5355 .unwrap_or(1), 5356 0 5357 ); 5358 5359 guest.reboot_linux(1, None); 5360 5361 // Check device still absent 5362 assert_eq!( 5363 guest 5364 .ssh_command("lsblk | grep -c vdc.*16M || true") 5365 .unwrap() 5366 .trim() 5367 .parse::<u32>() 5368 .unwrap_or(1), 5369 0 5370 ); 5371 }); 5372 5373 kill_child(&mut child); 5374 let output = child.wait_with_output().unwrap(); 5375 5376 handle_child_output(r, &output); 5377 } 5378 5379 #[test] 5380 fn test_disk_hotplug() { 5381 _test_disk_hotplug(false) 5382 } 5383 5384 #[test] 5385 #[cfg(target_arch = "x86_64")] 5386 fn test_disk_hotplug_with_landlock() { 5387 _test_disk_hotplug(true) 5388 } 5389 5390 fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String { 5391 const LOOP_CONFIGURE: u64 = 0x4c0a; 5392 const LOOP_CTL_GET_FREE: u64 = 0x4c82; 5393 const LOOP_CTL_PATH: &str = "/dev/loop-control"; 5394 const LOOP_DEVICE_PREFIX: &str = "/dev/loop"; 5395 5396 #[repr(C)] 5397 struct LoopInfo64 { 5398 lo_device: u64, 5399 lo_inode: u64, 5400 lo_rdevice: u64, 5401 lo_offset: u64, 5402 lo_sizelimit: u64, 5403 lo_number: u32, 5404 lo_encrypt_type: u32, 5405 lo_encrypt_key_size: u32, 5406 lo_flags: u32, 5407 lo_file_name: [u8; 64], 5408 lo_crypt_name: [u8; 64], 5409 lo_encrypt_key: [u8; 32], 5410 lo_init: [u64; 2], 5411 } 5412 5413 impl Default for LoopInfo64 { 5414 fn default() -> Self { 5415 LoopInfo64 { 5416 lo_device: 0, 5417 lo_inode: 0, 5418 lo_rdevice: 0, 5419 lo_offset: 0, 5420 lo_sizelimit: 0, 5421 lo_number: 0, 5422 lo_encrypt_type: 0, 5423 lo_encrypt_key_size: 0, 5424 lo_flags: 0, 5425 lo_file_name: [0; 64], 5426 lo_crypt_name: [0; 64], 5427 lo_encrypt_key: [0; 32], 5428 lo_init: [0; 2], 5429 } 5430 } 5431 } 5432 5433 #[derive(Default)] 5434 #[repr(C)] 5435 struct LoopConfig { 5436 fd: u32, 5437 block_size: u32, 5438 info: LoopInfo64, 5439 _reserved: [u64; 8], 5440 } 5441 5442 // Open loop-control device 5443 let loop_ctl_file = OpenOptions::new() 5444 .read(true) 5445 .write(true) 5446 .open(LOOP_CTL_PATH) 5447 .unwrap(); 5448 5449 // Request a free loop device 5450 let loop_device_number = 5451 unsafe { libc::ioctl(loop_ctl_file.as_raw_fd(), LOOP_CTL_GET_FREE as _) }; 5452 5453 if loop_device_number < 0 { 5454 panic!("Couldn't find a free loop device"); 5455 } 5456 5457 // Create loop device path 5458 let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}"); 5459 5460 // Open loop device 5461 let loop_device_file = OpenOptions::new() 5462 .read(true) 5463 .write(true) 5464 .open(&loop_device_path) 5465 .unwrap(); 5466 5467 // Open backing file 5468 let backing_file = OpenOptions::new() 5469 
.read(true) 5470 .write(true) 5471 .open(backing_file_path) 5472 .unwrap(); 5473 5474 let loop_config = LoopConfig { 5475 fd: backing_file.as_raw_fd() as u32, 5476 block_size, 5477 ..Default::default() 5478 }; 5479 5480 for i in 0..num_retries { 5481 let ret = unsafe { 5482 libc::ioctl( 5483 loop_device_file.as_raw_fd(), 5484 LOOP_CONFIGURE as _, 5485 &loop_config, 5486 ) 5487 }; 5488 if ret != 0 { 5489 if i < num_retries - 1 { 5490 println!( 5491 "Iteration {}: Failed to configure the loop device {}: {}", 5492 i, 5493 loop_device_path, 5494 std::io::Error::last_os_error() 5495 ); 5496 } else { 5497 panic!( 5498 "Failed {} times trying to configure the loop device {}: {}", 5499 num_retries, 5500 loop_device_path, 5501 std::io::Error::last_os_error() 5502 ); 5503 } 5504 } else { 5505 break; 5506 } 5507 5508 // Wait for a bit before retrying 5509 thread::sleep(std::time::Duration::new(5, 0)); 5510 } 5511 5512 loop_device_path 5513 } 5514 5515 #[test] 5516 fn test_virtio_block_topology() { 5517 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5518 let guest = Guest::new(Box::new(focal)); 5519 5520 let kernel_path = direct_kernel_boot_path(); 5521 let test_disk_path = guest.tmp_dir.as_path().join("test.img"); 5522 5523 let output = exec_host_command_output( 5524 format!( 5525 "qemu-img create -f raw {} 16M", 5526 test_disk_path.to_str().unwrap() 5527 ) 5528 .as_str(), 5529 ); 5530 if !output.status.success() { 5531 let stdout = String::from_utf8_lossy(&output.stdout); 5532 let stderr = String::from_utf8_lossy(&output.stderr); 5533 panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}"); 5534 } 5535 5536 let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5); 5537 5538 let mut child = GuestCommand::new(&guest) 5539 .args(["--cpus", "boot=1"]) 5540 .args(["--memory", "size=512M"]) 5541 .args(["--kernel", kernel_path.to_str().unwrap()]) 5542 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5543 .args([ 5544 "--disk", 5545 format!( 5546 "path={}", 5547 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5548 ) 5549 .as_str(), 5550 format!( 5551 "path={}", 5552 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5553 ) 5554 .as_str(), 5555 format!("path={}", &loop_dev).as_str(), 5556 ]) 5557 .default_net() 5558 .capture_output() 5559 .spawn() 5560 .unwrap(); 5561 5562 let r = std::panic::catch_unwind(|| { 5563 guest.wait_vm_boot(None).unwrap(); 5564 5565 // MIN-IO column 5566 assert_eq!( 5567 guest 5568 .ssh_command("lsblk -t| grep vdc | awk '{print $3}'") 5569 .unwrap() 5570 .trim() 5571 .parse::<u32>() 5572 .unwrap_or_default(), 5573 4096 5574 ); 5575 // PHY-SEC column 5576 assert_eq!( 5577 guest 5578 .ssh_command("lsblk -t| grep vdc | awk '{print $5}'") 5579 .unwrap() 5580 .trim() 5581 .parse::<u32>() 5582 .unwrap_or_default(), 5583 4096 5584 ); 5585 // LOG-SEC column 5586 assert_eq!( 5587 guest 5588 .ssh_command("lsblk -t| grep vdc | awk '{print $6}'") 5589 .unwrap() 5590 .trim() 5591 .parse::<u32>() 5592 .unwrap_or_default(), 5593 4096 5594 ); 5595 }); 5596 5597 kill_child(&mut child); 5598 let output = child.wait_with_output().unwrap(); 5599 5600 handle_child_output(r, &output); 5601 5602 Command::new("losetup") 5603 .args(["-d", &loop_dev]) 5604 .output() 5605 .expect("loop device not found"); 5606 } 5607 5608 #[test] 5609 fn test_virtio_balloon_deflate_on_oom() { 5610 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5611 let guest = Guest::new(Box::new(focal)); 5612 5613 let kernel_path = direct_kernel_boot_path(); 

    let api_socket = temp_api_path(&guest.tmp_dir);

    // Start a 4G guest with a 2G balloon that can deflate on OOM.
    let mut child = GuestCommand::new(&guest)
        .args(["--api-socket", &api_socket])
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=4G"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--balloon", "size=2G,deflate_on_oom=on"])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Wait for the balloon to be initialized and check its size.
        // The virtio-balloon driver might take a few seconds to report the
        // balloon's effective size back to the VMM.
        thread::sleep(std::time::Duration::new(20, 0));

        let orig_balloon = balloon_size(&api_socket);
        println!("The original balloon memory size is {orig_balloon} bytes");
        assert!(orig_balloon == 2147483648);

        // Two steps to verify that the 'deflate_on_oom' parameter works.
        // 1st: run a command to trigger an OOM in the guest.
        guest
            .ssh_command("echo f | sudo tee /proc/sysrq-trigger")
            .unwrap();

        // Give some time for the OOM to happen in the guest and be reported
        // back to the host.
        thread::sleep(std::time::Duration::new(20, 0));

        // 2nd: check the balloon size to verify the balloon has been
        // automatically deflated.
        let deflated_balloon = balloon_size(&api_socket);
        println!("After deflating, balloon memory size is {deflated_balloon} bytes");
        // Verify the balloon size has shrunk.
        assert!(deflated_balloon < 2147483648);
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
#[cfg(not(feature = "mshv"))]
fn test_virtio_balloon_free_page_reporting() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    // Start a 4G guest with an empty balloon that has free page reporting
    // enabled.
    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=4G"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--balloon", "size=0,free_page_reporting=on"])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let pid = child.id();
    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check the initial RSS is less than 1GiB
        let rss = process_rss_kib(pid);
        println!("RSS {rss} < 1048576");
        assert!(rss < 1048576);

        // Spawn a command inside the guest to consume 2GiB of RAM for 60
        // seconds
        let guest_ip = guest.network.guest_ip.clone();
        thread::spawn(move || {
            ssh_command_ip(
                "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60",
                &guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap();
        });

        // Wait for 50 seconds to make sure the stress command is consuming
        // the expected amount of memory.
5709 thread::sleep(std::time::Duration::new(50, 0)); 5710 let rss = process_rss_kib(pid); 5711 println!("RSS {rss} >= 2097152"); 5712 assert!(rss >= 2097152); 5713 5714 // Wait for an extra minute to make sure the stress command has 5715 // completed and that the guest reported the free pages to the VMM 5716 // through the virtio-balloon device. We expect the RSS to be under 5717 // 2GiB. 5718 thread::sleep(std::time::Duration::new(60, 0)); 5719 let rss = process_rss_kib(pid); 5720 println!("RSS {rss} < 2097152"); 5721 assert!(rss < 2097152); 5722 }); 5723 5724 kill_child(&mut child); 5725 let output = child.wait_with_output().unwrap(); 5726 5727 handle_child_output(r, &output); 5728 } 5729 5730 #[test] 5731 fn test_pmem_hotplug() { 5732 _test_pmem_hotplug(None) 5733 } 5734 5735 #[test] 5736 fn test_pmem_multi_segment_hotplug() { 5737 _test_pmem_hotplug(Some(15)) 5738 } 5739 5740 fn _test_pmem_hotplug(pci_segment: Option<u16>) { 5741 #[cfg(target_arch = "aarch64")] 5742 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 5743 #[cfg(target_arch = "x86_64")] 5744 let focal_image = FOCAL_IMAGE_NAME.to_string(); 5745 let focal = UbuntuDiskConfig::new(focal_image); 5746 let guest = Guest::new(Box::new(focal)); 5747 5748 #[cfg(target_arch = "x86_64")] 5749 let kernel_path = direct_kernel_boot_path(); 5750 #[cfg(target_arch = "aarch64")] 5751 let kernel_path = edk2_path(); 5752 5753 let api_socket = temp_api_path(&guest.tmp_dir); 5754 5755 let mut cmd = GuestCommand::new(&guest); 5756 5757 cmd.args(["--api-socket", &api_socket]) 5758 .args(["--cpus", "boot=1"]) 5759 .args(["--memory", "size=512M"]) 5760 .args(["--kernel", kernel_path.to_str().unwrap()]) 5761 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5762 .default_disks() 5763 .default_net() 5764 .capture_output(); 5765 5766 if pci_segment.is_some() { 5767 cmd.args([ 5768 "--platform", 5769 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5770 ]); 5771 } 5772 5773 let mut child = cmd.spawn().unwrap(); 5774 5775 let r = std::panic::catch_unwind(|| { 5776 guest.wait_vm_boot(None).unwrap(); 5777 5778 // Check /dev/pmem0 is not there 5779 assert_eq!( 5780 guest 5781 .ssh_command("lsblk | grep -c pmem0 || true") 5782 .unwrap() 5783 .trim() 5784 .parse::<u32>() 5785 .unwrap_or(1), 5786 0 5787 ); 5788 5789 let pmem_temp_file = TempFile::new().unwrap(); 5790 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 5791 let (cmd_success, cmd_output) = remote_command_w_output( 5792 &api_socket, 5793 "add-pmem", 5794 Some(&format!( 5795 "file={},id=test0{}", 5796 pmem_temp_file.as_path().to_str().unwrap(), 5797 if let Some(pci_segment) = pci_segment { 5798 format!(",pci_segment={pci_segment}") 5799 } else { 5800 "".to_owned() 5801 } 5802 )), 5803 ); 5804 assert!(cmd_success); 5805 if let Some(pci_segment) = pci_segment { 5806 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5807 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5808 ))); 5809 } else { 5810 assert!(String::from_utf8_lossy(&cmd_output) 5811 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5812 } 5813 5814 // Check that /dev/pmem0 exists and the block size is 128M 5815 assert_eq!( 5816 guest 5817 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5818 .unwrap() 5819 .trim() 5820 .parse::<u32>() 5821 .unwrap_or_default(), 5822 1 5823 ); 5824 5825 guest.reboot_linux(0, None); 5826 5827 // Check still there after reboot 5828 assert_eq!( 5829 guest 5830 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5831 .unwrap() 5832 .trim() 5833 
.parse::<u32>() 5834 .unwrap_or_default(), 5835 1 5836 ); 5837 5838 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5839 5840 thread::sleep(std::time::Duration::new(20, 0)); 5841 5842 // Check device has gone away 5843 assert_eq!( 5844 guest 5845 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5846 .unwrap() 5847 .trim() 5848 .parse::<u32>() 5849 .unwrap_or(1), 5850 0 5851 ); 5852 5853 guest.reboot_linux(1, None); 5854 5855 // Check still absent after reboot 5856 assert_eq!( 5857 guest 5858 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5859 .unwrap() 5860 .trim() 5861 .parse::<u32>() 5862 .unwrap_or(1), 5863 0 5864 ); 5865 }); 5866 5867 kill_child(&mut child); 5868 let output = child.wait_with_output().unwrap(); 5869 5870 handle_child_output(r, &output); 5871 } 5872 5873 #[test] 5874 fn test_net_hotplug() { 5875 _test_net_hotplug(None) 5876 } 5877 5878 #[test] 5879 fn test_net_multi_segment_hotplug() { 5880 _test_net_hotplug(Some(15)) 5881 } 5882 5883 fn _test_net_hotplug(pci_segment: Option<u16>) { 5884 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5885 let guest = Guest::new(Box::new(focal)); 5886 5887 #[cfg(target_arch = "x86_64")] 5888 let kernel_path = direct_kernel_boot_path(); 5889 #[cfg(target_arch = "aarch64")] 5890 let kernel_path = edk2_path(); 5891 5892 let api_socket = temp_api_path(&guest.tmp_dir); 5893 5894 // Boot without network 5895 let mut cmd = GuestCommand::new(&guest); 5896 5897 cmd.args(["--api-socket", &api_socket]) 5898 .args(["--cpus", "boot=1"]) 5899 .args(["--memory", "size=512M"]) 5900 .args(["--kernel", kernel_path.to_str().unwrap()]) 5901 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5902 .default_disks() 5903 .capture_output(); 5904 5905 if pci_segment.is_some() { 5906 cmd.args([ 5907 "--platform", 5908 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5909 ]); 5910 } 5911 5912 let mut child = cmd.spawn().unwrap(); 5913 5914 thread::sleep(std::time::Duration::new(20, 0)); 5915 5916 let r = std::panic::catch_unwind(|| { 5917 // Add network 5918 let (cmd_success, cmd_output) = remote_command_w_output( 5919 &api_socket, 5920 "add-net", 5921 Some( 5922 format!( 5923 "{}{},id=test0", 5924 guest.default_net_string(), 5925 if let Some(pci_segment) = pci_segment { 5926 format!(",pci_segment={pci_segment}") 5927 } else { 5928 "".to_owned() 5929 } 5930 ) 5931 .as_str(), 5932 ), 5933 ); 5934 assert!(cmd_success); 5935 5936 if let Some(pci_segment) = pci_segment { 5937 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5938 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5939 ))); 5940 } else { 5941 assert!(String::from_utf8_lossy(&cmd_output) 5942 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}")); 5943 } 5944 5945 thread::sleep(std::time::Duration::new(5, 0)); 5946 5947 // 1 network interfaces + default localhost ==> 2 interfaces 5948 assert_eq!( 5949 guest 5950 .ssh_command("ip -o link | wc -l") 5951 .unwrap() 5952 .trim() 5953 .parse::<u32>() 5954 .unwrap_or_default(), 5955 2 5956 ); 5957 5958 // Remove network 5959 assert!(remote_command(&api_socket, "remove-device", Some("test0"),)); 5960 thread::sleep(std::time::Duration::new(5, 0)); 5961 5962 let (cmd_success, cmd_output) = remote_command_w_output( 5963 &api_socket, 5964 "add-net", 5965 Some( 5966 format!( 5967 "{}{},id=test1", 5968 guest.default_net_string(), 5969 if let Some(pci_segment) = pci_segment { 5970 format!(",pci_segment={pci_segment}") 5971 } else { 5972 "".to_owned() 5973 } 5974 ) 5975 .as_str(), 5976 ), 5977 
); 5978 assert!(cmd_success); 5979 5980 if let Some(pci_segment) = pci_segment { 5981 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5982 "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5983 ))); 5984 } else { 5985 assert!(String::from_utf8_lossy(&cmd_output) 5986 .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}")); 5987 } 5988 5989 thread::sleep(std::time::Duration::new(5, 0)); 5990 5991 // 1 network interfaces + default localhost ==> 2 interfaces 5992 assert_eq!( 5993 guest 5994 .ssh_command("ip -o link | wc -l") 5995 .unwrap() 5996 .trim() 5997 .parse::<u32>() 5998 .unwrap_or_default(), 5999 2 6000 ); 6001 6002 guest.reboot_linux(0, None); 6003 6004 // Check still there after reboot 6005 // 1 network interfaces + default localhost ==> 2 interfaces 6006 assert_eq!( 6007 guest 6008 .ssh_command("ip -o link | wc -l") 6009 .unwrap() 6010 .trim() 6011 .parse::<u32>() 6012 .unwrap_or_default(), 6013 2 6014 ); 6015 }); 6016 6017 kill_child(&mut child); 6018 let output = child.wait_with_output().unwrap(); 6019 6020 handle_child_output(r, &output); 6021 } 6022 6023 #[test] 6024 fn test_initramfs() { 6025 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6026 let guest = Guest::new(Box::new(focal)); 6027 let mut workload_path = dirs::home_dir().unwrap(); 6028 workload_path.push("workloads"); 6029 6030 #[cfg(target_arch = "x86_64")] 6031 let mut kernels = vec![direct_kernel_boot_path()]; 6032 #[cfg(target_arch = "aarch64")] 6033 let kernels = [direct_kernel_boot_path()]; 6034 6035 #[cfg(target_arch = "x86_64")] 6036 { 6037 let mut pvh_kernel_path = workload_path.clone(); 6038 pvh_kernel_path.push("vmlinux"); 6039 kernels.push(pvh_kernel_path); 6040 } 6041 6042 let mut initramfs_path = workload_path; 6043 initramfs_path.push("alpine_initramfs.img"); 6044 6045 let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg"); 6046 let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}"); 6047 6048 kernels.iter().for_each(|k_path| { 6049 let mut child = GuestCommand::new(&guest) 6050 .args(["--kernel", k_path.to_str().unwrap()]) 6051 .args(["--initramfs", initramfs_path.to_str().unwrap()]) 6052 .args(["--cmdline", &cmdline]) 6053 .capture_output() 6054 .spawn() 6055 .unwrap(); 6056 6057 thread::sleep(std::time::Duration::new(20, 0)); 6058 6059 kill_child(&mut child); 6060 let output = child.wait_with_output().unwrap(); 6061 6062 let r = std::panic::catch_unwind(|| { 6063 let s = String::from_utf8_lossy(&output.stdout); 6064 6065 assert_ne!(s.lines().position(|line| line == test_string), None); 6066 }); 6067 6068 handle_child_output(r, &output); 6069 }); 6070 } 6071 6072 #[test] 6073 fn test_counters() { 6074 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6075 let guest = Guest::new(Box::new(focal)); 6076 let api_socket = temp_api_path(&guest.tmp_dir); 6077 6078 let mut cmd = GuestCommand::new(&guest); 6079 cmd.args(["--cpus", "boot=1"]) 6080 .args(["--memory", "size=512M"]) 6081 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 6082 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6083 .default_disks() 6084 .args(["--net", guest.default_net_string().as_str()]) 6085 .args(["--api-socket", &api_socket]) 6086 .capture_output(); 6087 6088 let mut child = cmd.spawn().unwrap(); 6089 6090 let r = std::panic::catch_unwind(|| { 6091 guest.wait_vm_boot(None).unwrap(); 6092 6093 let orig_counters = get_counters(&api_socket); 6094 guest 6095 .ssh_command("dd if=/dev/zero of=test count=8 bs=1M") 6096 .unwrap(); 
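        // The dd invocation generates virtio-block traffic and the SSH
        // session itself generates virtio-net traffic, so every counter
        // reported by the API should move between the two samples. The `>`
        // comparison below relies on the ordering implemented for the value
        // returned by get_counters().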
6097 6098 let new_counters = get_counters(&api_socket); 6099 6100 // Check that all the counters have increased 6101 assert!(new_counters > orig_counters); 6102 }); 6103 6104 kill_child(&mut child); 6105 let output = child.wait_with_output().unwrap(); 6106 6107 handle_child_output(r, &output); 6108 } 6109 6110 #[test] 6111 #[cfg(feature = "guest_debug")] 6112 fn test_coredump() { 6113 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6114 let guest = Guest::new(Box::new(focal)); 6115 let api_socket = temp_api_path(&guest.tmp_dir); 6116 6117 let mut cmd = GuestCommand::new(&guest); 6118 cmd.args(["--cpus", "boot=4"]) 6119 .args(["--memory", "size=4G"]) 6120 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6121 .default_disks() 6122 .args(["--net", guest.default_net_string().as_str()]) 6123 .args(["--api-socket", &api_socket]) 6124 .capture_output(); 6125 6126 let mut child = cmd.spawn().unwrap(); 6127 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6128 6129 let r = std::panic::catch_unwind(|| { 6130 guest.wait_vm_boot(None).unwrap(); 6131 6132 assert!(remote_command(&api_socket, "pause", None)); 6133 6134 assert!(remote_command( 6135 &api_socket, 6136 "coredump", 6137 Some(format!("file://{vmcore_file}").as_str()), 6138 )); 6139 6140 // the num of CORE notes should equals to vcpu 6141 let readelf_core_num_cmd = 6142 format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l"); 6143 let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd); 6144 assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4"); 6145 6146 // the num of QEMU notes should equals to vcpu 6147 let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l"); 6148 let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd); 6149 assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4"); 6150 }); 6151 6152 kill_child(&mut child); 6153 let output = child.wait_with_output().unwrap(); 6154 6155 handle_child_output(r, &output); 6156 } 6157 6158 #[test] 6159 #[cfg(feature = "guest_debug")] 6160 fn test_coredump_no_pause() { 6161 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6162 let guest = Guest::new(Box::new(focal)); 6163 let api_socket = temp_api_path(&guest.tmp_dir); 6164 6165 let mut cmd = GuestCommand::new(&guest); 6166 cmd.args(["--cpus", "boot=4"]) 6167 .args(["--memory", "size=4G"]) 6168 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6169 .default_disks() 6170 .args(["--net", guest.default_net_string().as_str()]) 6171 .args(["--api-socket", &api_socket]) 6172 .capture_output(); 6173 6174 let mut child = cmd.spawn().unwrap(); 6175 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6176 6177 let r = std::panic::catch_unwind(|| { 6178 guest.wait_vm_boot(None).unwrap(); 6179 6180 assert!(remote_command( 6181 &api_socket, 6182 "coredump", 6183 Some(format!("file://{vmcore_file}").as_str()), 6184 )); 6185 6186 assert_eq!(vm_state(&api_socket), "Running"); 6187 }); 6188 6189 kill_child(&mut child); 6190 let output = child.wait_with_output().unwrap(); 6191 6192 handle_child_output(r, &output); 6193 } 6194 6195 #[test] 6196 fn test_watchdog() { 6197 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6198 let guest = Guest::new(Box::new(focal)); 6199 let api_socket = temp_api_path(&guest.tmp_dir); 6200 6201 let kernel_path = direct_kernel_boot_path(); 6202 let event_path = temp_event_monitor_path(&guest.tmp_dir); 6203 6204 let mut cmd = 
GuestCommand::new(&guest); 6205 cmd.args(["--cpus", "boot=1"]) 6206 .args(["--memory", "size=512M"]) 6207 .args(["--kernel", kernel_path.to_str().unwrap()]) 6208 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6209 .default_disks() 6210 .args(["--net", guest.default_net_string().as_str()]) 6211 .args(["--watchdog"]) 6212 .args(["--api-socket", &api_socket]) 6213 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6214 .capture_output(); 6215 6216 let mut child = cmd.spawn().unwrap(); 6217 6218 let r = std::panic::catch_unwind(|| { 6219 guest.wait_vm_boot(None).unwrap(); 6220 6221 let mut expected_reboot_count = 1; 6222 6223 // Enable the watchdog with a 15s timeout 6224 enable_guest_watchdog(&guest, 15); 6225 6226 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6227 assert_eq!( 6228 guest 6229 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 6230 .unwrap() 6231 .trim() 6232 .parse::<u32>() 6233 .unwrap_or_default(), 6234 1 6235 ); 6236 6237 // Allow some normal time to elapse to check we don't get spurious reboots 6238 thread::sleep(std::time::Duration::new(40, 0)); 6239 // Check no reboot 6240 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6241 6242 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 6243 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 6244 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 6245 guest.wait_vm_boot(Some(50)).unwrap(); 6246 // Check a reboot is triggered by the watchdog 6247 expected_reboot_count += 1; 6248 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6249 6250 #[cfg(target_arch = "x86_64")] 6251 { 6252 // Now pause the VM and remain offline for 30s 6253 assert!(remote_command(&api_socket, "pause", None)); 6254 let latest_events = [ 6255 &MetaEvent { 6256 event: "pausing".to_string(), 6257 device_id: None, 6258 }, 6259 &MetaEvent { 6260 event: "paused".to_string(), 6261 device_id: None, 6262 }, 6263 ]; 6264 assert!(check_latest_events_exact(&latest_events, &event_path)); 6265 assert!(remote_command(&api_socket, "resume", None)); 6266 6267 // Check no reboot 6268 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6269 } 6270 }); 6271 6272 kill_child(&mut child); 6273 let output = child.wait_with_output().unwrap(); 6274 6275 handle_child_output(r, &output); 6276 } 6277 6278 #[test] 6279 fn test_pvpanic() { 6280 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 6281 let guest = Guest::new(Box::new(jammy)); 6282 let api_socket = temp_api_path(&guest.tmp_dir); 6283 let event_path = temp_event_monitor_path(&guest.tmp_dir); 6284 6285 let kernel_path = direct_kernel_boot_path(); 6286 6287 let mut cmd = GuestCommand::new(&guest); 6288 cmd.args(["--cpus", "boot=1"]) 6289 .args(["--memory", "size=512M"]) 6290 .args(["--kernel", kernel_path.to_str().unwrap()]) 6291 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6292 .default_disks() 6293 .args(["--net", guest.default_net_string().as_str()]) 6294 .args(["--pvpanic"]) 6295 .args(["--api-socket", &api_socket]) 6296 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6297 .capture_output(); 6298 6299 let mut child = cmd.spawn().unwrap(); 6300 6301 let r = std::panic::catch_unwind(|| { 6302 guest.wait_vm_boot(None).unwrap(); 6303 6304 // Trigger guest a panic 6305 make_guest_panic(&guest); 6306 6307 // Wait a while for guest 6308 
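        // On panic the guest kernel writes to the pvpanic device, which the
        // VMM translates into a "panic" entry in the event-monitor file.
        // The sleep below gives that notification time to be written out
        // before it is checked.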
thread::sleep(std::time::Duration::new(10, 0)); 6309 6310 let expected_sequential_events = [&MetaEvent { 6311 event: "panic".to_string(), 6312 device_id: None, 6313 }]; 6314 assert!(check_latest_events_exact( 6315 &expected_sequential_events, 6316 &event_path 6317 )); 6318 }); 6319 6320 kill_child(&mut child); 6321 let output = child.wait_with_output().unwrap(); 6322 6323 handle_child_output(r, &output); 6324 } 6325 6326 #[test] 6327 fn test_tap_from_fd() { 6328 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6329 let guest = Guest::new(Box::new(focal)); 6330 let kernel_path = direct_kernel_boot_path(); 6331 6332 // Create a TAP interface with multi-queue enabled 6333 let num_queue_pairs: usize = 2; 6334 6335 use std::str::FromStr; 6336 let taps = net_util::open_tap( 6337 Some("chtap0"), 6338 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 6339 None, 6340 &mut None, 6341 None, 6342 num_queue_pairs, 6343 Some(libc::O_RDWR | libc::O_NONBLOCK), 6344 ) 6345 .unwrap(); 6346 6347 let mut child = GuestCommand::new(&guest) 6348 .args(["--cpus", &format!("boot={num_queue_pairs}")]) 6349 .args(["--memory", "size=512M"]) 6350 .args(["--kernel", kernel_path.to_str().unwrap()]) 6351 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6352 .default_disks() 6353 .args([ 6354 "--net", 6355 &format!( 6356 "fd=[{},{}],mac={},num_queues={}", 6357 taps[0].as_raw_fd(), 6358 taps[1].as_raw_fd(), 6359 guest.network.guest_mac, 6360 num_queue_pairs * 2 6361 ), 6362 ]) 6363 .capture_output() 6364 .spawn() 6365 .unwrap(); 6366 6367 let r = std::panic::catch_unwind(|| { 6368 guest.wait_vm_boot(None).unwrap(); 6369 6370 assert_eq!( 6371 guest 6372 .ssh_command("ip -o link | wc -l") 6373 .unwrap() 6374 .trim() 6375 .parse::<u32>() 6376 .unwrap_or_default(), 6377 2 6378 ); 6379 6380 guest.reboot_linux(0, None); 6381 6382 assert_eq!( 6383 guest 6384 .ssh_command("ip -o link | wc -l") 6385 .unwrap() 6386 .trim() 6387 .parse::<u32>() 6388 .unwrap_or_default(), 6389 2 6390 ); 6391 }); 6392 6393 kill_child(&mut child); 6394 let output = child.wait_with_output().unwrap(); 6395 6396 handle_child_output(r, &output); 6397 } 6398 6399 // By design, a guest VM won't be able to connect to the host 6400 // machine when using a macvtap network interface (while it can 6401 // communicate externally). As a workaround, this integration 6402 // test creates two macvtap interfaces in 'bridge' mode on the 6403 // same physical net interface, one for the guest and one for 6404 // the host. With additional setup on the IP address and the 6405 // routing table, it enables the communications between the 6406 // guest VM and the host machine. 
6407 // Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail 6408 fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) { 6409 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6410 let guest = Guest::new(Box::new(focal)); 6411 let api_socket = temp_api_path(&guest.tmp_dir); 6412 6413 #[cfg(target_arch = "x86_64")] 6414 let kernel_path = direct_kernel_boot_path(); 6415 #[cfg(target_arch = "aarch64")] 6416 let kernel_path = edk2_path(); 6417 6418 let phy_net = "eth0"; 6419 6420 // Create a macvtap interface for the guest VM to use 6421 assert!(exec_host_command_status(&format!( 6422 "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mod bridge" 6423 )) 6424 .success()); 6425 assert!(exec_host_command_status(&format!( 6426 "sudo ip link set {} address {} up", 6427 guest_macvtap_name, guest.network.guest_mac 6428 )) 6429 .success()); 6430 assert!( 6431 exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success() 6432 ); 6433 6434 let tap_index = 6435 fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap(); 6436 let tap_device = format!("/dev/tap{}", tap_index.trim()); 6437 6438 assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success()); 6439 6440 let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap(); 6441 let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6442 assert!(tap_fd1 > 0); 6443 let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6444 assert!(tap_fd2 > 0); 6445 6446 // Create a macvtap on the same physical net interface for 6447 // the host machine to use 6448 assert!(exec_host_command_status(&format!( 6449 "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mod bridge" 6450 )) 6451 .success()); 6452 // Use default mask "255.255.255.0" 6453 assert!(exec_host_command_status(&format!( 6454 "sudo ip address add {}/24 dev {}", 6455 guest.network.host_ip, host_macvtap_name 6456 )) 6457 .success()); 6458 assert!( 6459 exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up")) 6460 .success() 6461 ); 6462 6463 let mut guest_command = GuestCommand::new(&guest); 6464 guest_command 6465 .args(["--cpus", "boot=2"]) 6466 .args(["--memory", "size=512M"]) 6467 .args(["--kernel", kernel_path.to_str().unwrap()]) 6468 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6469 .default_disks() 6470 .args(["--api-socket", &api_socket]); 6471 6472 let net_params = format!( 6473 "fd=[{},{}],mac={},num_queues=4", 6474 tap_fd1, tap_fd2, guest.network.guest_mac 6475 ); 6476 6477 if !hotplug { 6478 guest_command.args(["--net", &net_params]); 6479 } 6480 6481 let mut child = guest_command.capture_output().spawn().unwrap(); 6482 6483 if hotplug { 6484 // Give some time to the VMM process to listen to the API 6485 // socket. This is the only requirement to avoid the following 6486 // call to ch-remote from failing. 
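        // A more robust alternative would be to poll the API with the
        // ch-remote "ping" command instead of sleeping for a fixed amount
        // of time; the plain sleep is kept here for simplicity.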
6487 thread::sleep(std::time::Duration::new(10, 0)); 6488 // Hotplug the virtio-net device 6489 let (cmd_success, cmd_output) = 6490 remote_command_w_output(&api_socket, "add-net", Some(&net_params)); 6491 assert!(cmd_success); 6492 #[cfg(target_arch = "x86_64")] 6493 assert!(String::from_utf8_lossy(&cmd_output) 6494 .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}")); 6495 #[cfg(target_arch = "aarch64")] 6496 assert!(String::from_utf8_lossy(&cmd_output) 6497 .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}")); 6498 } 6499 6500 // The functional connectivity provided by the virtio-net device 6501 // gets tested through wait_vm_boot() as it expects to receive a 6502 // HTTP request, and through the SSH command as well. 6503 let r = std::panic::catch_unwind(|| { 6504 guest.wait_vm_boot(None).unwrap(); 6505 6506 assert_eq!( 6507 guest 6508 .ssh_command("ip -o link | wc -l") 6509 .unwrap() 6510 .trim() 6511 .parse::<u32>() 6512 .unwrap_or_default(), 6513 2 6514 ); 6515 6516 guest.reboot_linux(0, None); 6517 6518 assert_eq!( 6519 guest 6520 .ssh_command("ip -o link | wc -l") 6521 .unwrap() 6522 .trim() 6523 .parse::<u32>() 6524 .unwrap_or_default(), 6525 2 6526 ); 6527 }); 6528 6529 kill_child(&mut child); 6530 6531 exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}")); 6532 exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}")); 6533 6534 let output = child.wait_with_output().unwrap(); 6535 6536 handle_child_output(r, &output); 6537 } 6538 6539 #[test] 6540 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6541 fn test_macvtap() { 6542 _test_macvtap(false, "guestmacvtap0", "hostmacvtap0") 6543 } 6544 6545 #[test] 6546 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6547 fn test_macvtap_hotplug() { 6548 _test_macvtap(true, "guestmacvtap1", "hostmacvtap1") 6549 } 6550 6551 #[test] 6552 #[cfg(not(feature = "mshv"))] 6553 fn test_ovs_dpdk() { 6554 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6555 let guest1 = Guest::new(Box::new(focal1)); 6556 6557 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6558 let guest2 = Guest::new(Box::new(focal2)); 6559 let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir)); 6560 6561 let (mut child1, mut child2) = 6562 setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false); 6563 6564 // Create the snapshot directory 6565 let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir); 6566 6567 let r = std::panic::catch_unwind(|| { 6568 // Remove one of the two ports from the OVS bridge 6569 assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success()); 6570 6571 // Spawn a new netcat listener in the first VM 6572 let guest_ip = guest1.network.guest_ip.clone(); 6573 thread::spawn(move || { 6574 ssh_command_ip( 6575 "nc -l 12345", 6576 &guest_ip, 6577 DEFAULT_SSH_RETRIES, 6578 DEFAULT_SSH_TIMEOUT, 6579 ) 6580 .unwrap(); 6581 }); 6582 6583 // Wait for the server to be listening 6584 thread::sleep(std::time::Duration::new(5, 0)); 6585 6586 // Check the connection fails this time 6587 assert!(guest2.ssh_command("nc -vz 172.100.0.1 12345").is_err()); 6588 6589 // Add the OVS port back 6590 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 6591 6592 // And finally check the connection is functional again 6593 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6594 6595 // Pause the 
VM 6596 assert!(remote_command(&api_socket_source, "pause", None)); 6597 6598 // Take a snapshot from the VM 6599 assert!(remote_command( 6600 &api_socket_source, 6601 "snapshot", 6602 Some(format!("file://{snapshot_dir}").as_str()), 6603 )); 6604 6605 // Wait to make sure the snapshot is completed 6606 thread::sleep(std::time::Duration::new(10, 0)); 6607 }); 6608 6609 // Shutdown the source VM 6610 kill_child(&mut child2); 6611 let output = child2.wait_with_output().unwrap(); 6612 handle_child_output(r, &output); 6613 6614 // Remove the vhost-user socket file. 6615 Command::new("rm") 6616 .arg("-f") 6617 .arg("/tmp/dpdkvhostclient2") 6618 .output() 6619 .unwrap(); 6620 6621 let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir)); 6622 // Restore the VM from the snapshot 6623 let mut child2 = GuestCommand::new(&guest2) 6624 .args(["--api-socket", &api_socket_restored]) 6625 .args([ 6626 "--restore", 6627 format!("source_url=file://{snapshot_dir}").as_str(), 6628 ]) 6629 .capture_output() 6630 .spawn() 6631 .unwrap(); 6632 6633 // Wait for the VM to be restored 6634 thread::sleep(std::time::Duration::new(10, 0)); 6635 6636 let r = std::panic::catch_unwind(|| { 6637 // Resume the VM 6638 assert!(remote_command(&api_socket_restored, "resume", None)); 6639 6640 // Spawn a new netcat listener in the first VM 6641 let guest_ip = guest1.network.guest_ip.clone(); 6642 thread::spawn(move || { 6643 ssh_command_ip( 6644 "nc -l 12345", 6645 &guest_ip, 6646 DEFAULT_SSH_RETRIES, 6647 DEFAULT_SSH_TIMEOUT, 6648 ) 6649 .unwrap(); 6650 }); 6651 6652 // Wait for the server to be listening 6653 thread::sleep(std::time::Duration::new(5, 0)); 6654 6655 // And check the connection is still functional after restore 6656 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6657 }); 6658 6659 kill_child(&mut child1); 6660 kill_child(&mut child2); 6661 6662 let output = child1.wait_with_output().unwrap(); 6663 child2.wait().unwrap(); 6664 6665 cleanup_ovs_dpdk(); 6666 6667 handle_child_output(r, &output); 6668 } 6669 6670 fn setup_spdk_nvme(nvme_dir: &std::path::Path) { 6671 cleanup_spdk_nvme(); 6672 6673 assert!(exec_host_command_status(&format!( 6674 "mkdir -p {}", 6675 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6676 )) 6677 .success()); 6678 assert!(exec_host_command_status(&format!( 6679 "truncate {} -s 128M", 6680 nvme_dir.join("test-disk.raw").to_str().unwrap() 6681 )) 6682 .success()); 6683 assert!(exec_host_command_status(&format!( 6684 "mkfs.ext4 {}", 6685 nvme_dir.join("test-disk.raw").to_str().unwrap() 6686 )) 6687 .success()); 6688 6689 // Start the SPDK nvmf_tgt daemon to present NVMe device as a VFIO user device 6690 Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt") 6691 .args(["-i", "0", "-m", "0x1"]) 6692 .spawn() 6693 .unwrap(); 6694 thread::sleep(std::time::Duration::new(2, 0)); 6695 6696 assert!(exec_host_command_with_retries( 6697 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER", 6698 3, 6699 std::time::Duration::new(5, 0), 6700 )); 6701 assert!(exec_host_command_status(&format!( 6702 "/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512", 6703 nvme_dir.join("test-disk.raw").to_str().unwrap() 6704 )) 6705 .success()); 6706 assert!(exec_host_command_status( 6707 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test" 6708 ) 6709 .success()); 6710 assert!(exec_host_command_status( 6711 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode test" 6712 ) 6713 .success()); 6714 
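    // Finally, expose the subsystem over the VFIOUSER transport. The
    // listener directory below is where SPDK creates the vfio-user control
    // socket ("cntrl") that test_vfio_user() later hands to the VMM through
    // the add-user-device API.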
assert!(exec_host_command_status(&format!( 6715 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0", 6716 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6717 )) 6718 .success()); 6719 } 6720 6721 fn cleanup_spdk_nvme() { 6722 exec_host_command_status("pkill -f nvmf_tgt"); 6723 } 6724 6725 #[test] 6726 fn test_vfio_user() { 6727 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 6728 let jammy = UbuntuDiskConfig::new(jammy_image); 6729 let guest = Guest::new(Box::new(jammy)); 6730 6731 let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user"); 6732 setup_spdk_nvme(spdk_nvme_dir.as_path()); 6733 6734 let api_socket = temp_api_path(&guest.tmp_dir); 6735 let mut child = GuestCommand::new(&guest) 6736 .args(["--api-socket", &api_socket]) 6737 .args(["--cpus", "boot=1"]) 6738 .args(["--memory", "size=512M,shared=on,hugepages=on"]) 6739 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6740 .args(["--serial", "tty", "--console", "off"]) 6741 .default_disks() 6742 .default_net() 6743 .capture_output() 6744 .spawn() 6745 .unwrap(); 6746 6747 let r = std::panic::catch_unwind(|| { 6748 guest.wait_vm_boot(None).unwrap(); 6749 6750 // Hotplug the SPDK-NVMe device to the VM 6751 let (cmd_success, cmd_output) = remote_command_w_output( 6752 &api_socket, 6753 "add-user-device", 6754 Some(&format!( 6755 "socket={},id=vfio_user0", 6756 spdk_nvme_dir 6757 .as_path() 6758 .join("nvme-vfio-user/cntrl") 6759 .to_str() 6760 .unwrap(), 6761 )), 6762 ); 6763 assert!(cmd_success); 6764 assert!(String::from_utf8_lossy(&cmd_output) 6765 .contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}")); 6766 6767 thread::sleep(std::time::Duration::new(10, 0)); 6768 6769 // Check both if /dev/nvme exists and if the block size is 128M. 6770 assert_eq!( 6771 guest 6772 .ssh_command("lsblk | grep nvme0n1 | grep -c 128M") 6773 .unwrap() 6774 .trim() 6775 .parse::<u32>() 6776 .unwrap_or_default(), 6777 1 6778 ); 6779 6780 // Check changes persist after reboot 6781 assert_eq!( 6782 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6783 "" 6784 ); 6785 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 6786 guest 6787 .ssh_command("echo test123 | sudo tee /mnt/test") 6788 .unwrap(); 6789 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 6790 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 6791 6792 guest.reboot_linux(0, None); 6793 assert_eq!( 6794 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6795 "" 6796 ); 6797 assert_eq!( 6798 guest.ssh_command("sudo cat /mnt/test").unwrap().trim(), 6799 "test123" 6800 ); 6801 }); 6802 6803 cleanup_spdk_nvme(); 6804 6805 kill_child(&mut child); 6806 let output = child.wait_with_output().unwrap(); 6807 6808 handle_child_output(r, &output); 6809 } 6810 6811 #[test] 6812 #[cfg(target_arch = "x86_64")] 6813 fn test_vdpa_block() { 6814 // Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded. 
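    // On the test host, this module is expected to back the
    // /dev/vhost-vdpa-* simulator devices (128M block devices) that the
    // --vdpa parameters and the lsblk size checks below rely on.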
6815 assert!(exec_host_command_status("lsmod | grep vdpa_sim_blk").success()); 6816 6817 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6818 let guest = Guest::new(Box::new(focal)); 6819 let api_socket = temp_api_path(&guest.tmp_dir); 6820 6821 let kernel_path = direct_kernel_boot_path(); 6822 6823 let mut child = GuestCommand::new(&guest) 6824 .args(["--cpus", "boot=2"]) 6825 .args(["--memory", "size=512M,hugepages=on"]) 6826 .args(["--kernel", kernel_path.to_str().unwrap()]) 6827 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6828 .default_disks() 6829 .default_net() 6830 .args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"]) 6831 .args(["--platform", "num_pci_segments=2,iommu_segments=1"]) 6832 .args(["--api-socket", &api_socket]) 6833 .capture_output() 6834 .spawn() 6835 .unwrap(); 6836 6837 let r = std::panic::catch_unwind(|| { 6838 guest.wait_vm_boot(None).unwrap(); 6839 6840 // Check both if /dev/vdc exists and if the block size is 128M. 6841 assert_eq!( 6842 guest 6843 .ssh_command("lsblk | grep vdc | grep -c 128M") 6844 .unwrap() 6845 .trim() 6846 .parse::<u32>() 6847 .unwrap_or_default(), 6848 1 6849 ); 6850 6851 // Check the content of the block device after we wrote to it. 6852 // The vpda-sim-blk should let us read what we previously wrote. 6853 guest 6854 .ssh_command("sudo bash -c 'echo foobar > /dev/vdc'") 6855 .unwrap(); 6856 assert_eq!( 6857 guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(), 6858 "foobar" 6859 ); 6860 6861 // Hotplug an extra vDPA block device behind the vIOMMU 6862 // Add a new vDPA device to the VM 6863 let (cmd_success, cmd_output) = remote_command_w_output( 6864 &api_socket, 6865 "add-vdpa", 6866 Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"), 6867 ); 6868 assert!(cmd_success); 6869 assert!(String::from_utf8_lossy(&cmd_output) 6870 .contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}")); 6871 6872 thread::sleep(std::time::Duration::new(10, 0)); 6873 6874 // Check IOMMU setup 6875 assert!(guest 6876 .does_device_vendor_pair_match("0x1057", "0x1af4") 6877 .unwrap_or_default()); 6878 assert_eq!( 6879 guest 6880 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 6881 .unwrap() 6882 .trim(), 6883 "0001:00:01.0" 6884 ); 6885 6886 // Check both if /dev/vdd exists and if the block size is 128M. 6887 assert_eq!( 6888 guest 6889 .ssh_command("lsblk | grep vdd | grep -c 128M") 6890 .unwrap() 6891 .trim() 6892 .parse::<u32>() 6893 .unwrap_or_default(), 6894 1 6895 ); 6896 6897 // Write some content to the block device we've just plugged. 6898 guest 6899 .ssh_command("sudo bash -c 'echo foobar > /dev/vdd'") 6900 .unwrap(); 6901 6902 // Check we can read the content back. 
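        // Since the hotplugged device was added with iommu=on on PCI
        // segment 1, this write/read round trip also exercises the
        // vIOMMU-translated path for the device.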
6903 assert_eq!( 6904 guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(), 6905 "foobar" 6906 ); 6907 6908 // Unplug the device 6909 let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0")); 6910 assert!(cmd_success); 6911 thread::sleep(std::time::Duration::new(10, 0)); 6912 6913 // Check /dev/vdd doesn't exist anymore 6914 assert_eq!( 6915 guest 6916 .ssh_command("lsblk | grep -c vdd || true") 6917 .unwrap() 6918 .trim() 6919 .parse::<u32>() 6920 .unwrap_or(1), 6921 0 6922 ); 6923 }); 6924 6925 kill_child(&mut child); 6926 let output = child.wait_with_output().unwrap(); 6927 6928 handle_child_output(r, &output); 6929 } 6930 6931 #[test] 6932 #[cfg(target_arch = "x86_64")] 6933 #[ignore = "See #5756"] 6934 fn test_vdpa_net() { 6935 // Before trying to run the test, verify the vdpa_sim_net module is correctly loaded. 6936 if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() { 6937 return; 6938 } 6939 6940 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6941 let guest = Guest::new(Box::new(focal)); 6942 6943 let kernel_path = direct_kernel_boot_path(); 6944 6945 let mut child = GuestCommand::new(&guest) 6946 .args(["--cpus", "boot=2"]) 6947 .args(["--memory", "size=512M,hugepages=on"]) 6948 .args(["--kernel", kernel_path.to_str().unwrap()]) 6949 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6950 .default_disks() 6951 .default_net() 6952 .args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"]) 6953 .capture_output() 6954 .spawn() 6955 .unwrap(); 6956 6957 let r = std::panic::catch_unwind(|| { 6958 guest.wait_vm_boot(None).unwrap(); 6959 6960 // Check we can find network interface related to vDPA device 6961 assert_eq!( 6962 guest 6963 .ssh_command("ip -o link | grep -c ens6") 6964 .unwrap() 6965 .trim() 6966 .parse::<u32>() 6967 .unwrap_or(0), 6968 1 6969 ); 6970 6971 guest 6972 .ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6") 6973 .unwrap(); 6974 guest.ssh_command("sudo ip link set up dev ens6").unwrap(); 6975 6976 // Check there is no packet yet on both TX/RX of the network interface 6977 assert_eq!( 6978 guest 6979 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'") 6980 .unwrap() 6981 .trim() 6982 .parse::<u32>() 6983 .unwrap_or(0), 6984 2 6985 ); 6986 6987 // Send 6 packets with ping command 6988 guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap(); 6989 6990 // Check we can find 6 packets on both TX/RX of the network interface 6991 assert_eq!( 6992 guest 6993 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'") 6994 .unwrap() 6995 .trim() 6996 .parse::<u32>() 6997 .unwrap_or(0), 6998 2 6999 ); 7000 7001 // No need to check for hotplug as we already tested it through 7002 // test_vdpa_block() 7003 }); 7004 7005 kill_child(&mut child); 7006 let output = child.wait_with_output().unwrap(); 7007 7008 handle_child_output(r, &output); 7009 } 7010 7011 #[test] 7012 #[cfg(target_arch = "x86_64")] 7013 fn test_tpm() { 7014 let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 7015 let guest = Guest::new(Box::new(focal)); 7016 7017 let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir); 7018 7019 let mut guest_cmd = GuestCommand::new(&guest); 7020 guest_cmd 7021 .args(["--cpus", "boot=1"]) 7022 .args(["--memory", "size=512M"]) 7023 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 7024 .args(["--tpm", &format!("socket={swtpm_socket_path}")]) 7025 .capture_output() 7026 .default_disks() 7027 .default_net(); 7028 7029 // Start 
swtpm daemon 7030 let mut swtpm_child = swtpm_command.spawn().unwrap(); 7031 thread::sleep(std::time::Duration::new(10, 0)); 7032 let mut child = guest_cmd.spawn().unwrap(); 7033 let r = std::panic::catch_unwind(|| { 7034 guest.wait_vm_boot(None).unwrap(); 7035 assert_eq!( 7036 guest.ssh_command("ls /dev/tpm0").unwrap().trim(), 7037 "/dev/tpm0" 7038 ); 7039 guest.ssh_command("sudo tpm2_selftest -f").unwrap(); 7040 guest 7041 .ssh_command("echo 'hello' > /tmp/checksum_test; ") 7042 .unwrap(); 7043 guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap(); 7044 }); 7045 7046 let _ = swtpm_child.kill(); 7047 let _d_out = swtpm_child.wait_with_output().unwrap(); 7048 7049 kill_child(&mut child); 7050 let output = child.wait_with_output().unwrap(); 7051 7052 handle_child_output(r, &output); 7053 } 7054 7055 #[test] 7056 #[cfg(target_arch = "x86_64")] 7057 fn test_double_tty() { 7058 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7059 let guest = Guest::new(Box::new(focal)); 7060 let mut cmd = GuestCommand::new(&guest); 7061 let api_socket = temp_api_path(&guest.tmp_dir); 7062 let tty_str: &str = "console=hvc0 earlyprintk=ttyS0 "; 7063 // linux printk module enable console log. 7064 let con_dis_str: &str = "console [hvc0] enabled"; 7065 // linux printk module disable console log. 7066 let con_enb_str: &str = "bootconsole [earlyser0] disabled"; 7067 7068 let kernel_path = direct_kernel_boot_path(); 7069 7070 cmd.args(["--cpus", "boot=1"]) 7071 .args(["--memory", "size=512M"]) 7072 .args(["--kernel", kernel_path.to_str().unwrap()]) 7073 .args([ 7074 "--cmdline", 7075 DIRECT_KERNEL_BOOT_CMDLINE 7076 .replace("console=hvc0 ", tty_str) 7077 .as_str(), 7078 ]) 7079 .capture_output() 7080 .default_disks() 7081 .default_net() 7082 .args(["--serial", "tty"]) 7083 .args(["--console", "tty"]) 7084 .args(["--api-socket", &api_socket]); 7085 7086 let mut child = cmd.spawn().unwrap(); 7087 7088 let mut r = std::panic::catch_unwind(|| { 7089 guest.wait_vm_boot(None).unwrap(); 7090 }); 7091 7092 kill_child(&mut child); 7093 let output = child.wait_with_output().unwrap(); 7094 7095 if r.is_ok() { 7096 r = std::panic::catch_unwind(|| { 7097 let s = String::from_utf8_lossy(&output.stdout); 7098 assert!(s.contains(tty_str)); 7099 assert!(s.contains(con_dis_str)); 7100 assert!(s.contains(con_enb_str)); 7101 }); 7102 } 7103 7104 handle_child_output(r, &output); 7105 } 7106 7107 #[test] 7108 #[cfg(target_arch = "x86_64")] 7109 fn test_nmi() { 7110 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 7111 let guest = Guest::new(Box::new(jammy)); 7112 let api_socket = temp_api_path(&guest.tmp_dir); 7113 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7114 7115 let kernel_path = direct_kernel_boot_path(); 7116 let cmd_line = format!("{} {}", DIRECT_KERNEL_BOOT_CMDLINE, "unknown_nmi_panic=1"); 7117 7118 let mut cmd = GuestCommand::new(&guest); 7119 cmd.args(["--cpus", "boot=4"]) 7120 .args(["--memory", "size=512M"]) 7121 .args(["--kernel", kernel_path.to_str().unwrap()]) 7122 .args(["--cmdline", cmd_line.as_str()]) 7123 .default_disks() 7124 .args(["--net", guest.default_net_string().as_str()]) 7125 .args(["--pvpanic"]) 7126 .args(["--api-socket", &api_socket]) 7127 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7128 .capture_output(); 7129 7130 let mut child = cmd.spawn().unwrap(); 7131 7132 let r = std::panic::catch_unwind(|| { 7133 
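        // The "nmi" API request injects an NMI into the guest vCPUs. With
        // unknown_nmi_panic=1 on the kernel command line the guest panics
        // in response, and the pvpanic device turns the panic into a
        // "panic" event recorded by the event monitor.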
guest.wait_vm_boot(None).unwrap(); 7134 7135 assert!(remote_command(&api_socket, "nmi", None)); 7136 7137 // Wait a while for guest 7138 thread::sleep(std::time::Duration::new(3, 0)); 7139 7140 let expected_sequential_events = [&MetaEvent { 7141 event: "panic".to_string(), 7142 device_id: None, 7143 }]; 7144 assert!(check_latest_events_exact( 7145 &expected_sequential_events, 7146 &event_path 7147 )); 7148 }); 7149 7150 kill_child(&mut child); 7151 let output = child.wait_with_output().unwrap(); 7152 7153 handle_child_output(r, &output); 7154 } 7155 } 7156 7157 mod dbus_api { 7158 use crate::*; 7159 7160 // Start cloud-hypervisor with no VM parameters, running both the HTTP 7161 // and DBus APIs. Alternate calls to the external APIs (HTTP and DBus) 7162 // to create a VM, boot it, and verify that it can be shut down and then 7163 // booted again. 7164 #[test] 7165 fn test_api_dbus_and_http_interleaved() { 7166 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7167 let guest = Guest::new(Box::new(focal)); 7168 let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir); 7169 let http_api = TargetApi::new_http_api(&guest.tmp_dir); 7170 7171 let mut child = GuestCommand::new(&guest) 7172 .args(dbus_api.guest_args()) 7173 .args(http_api.guest_args()) 7174 .capture_output() 7175 .spawn() 7176 .unwrap(); 7177 7178 thread::sleep(std::time::Duration::new(1, 0)); 7179 7180 // Verify API servers are running 7181 assert!(dbus_api.remote_command("ping", None)); 7182 assert!(http_api.remote_command("ping", None)); 7183 7184 // Create the VM first 7185 let cpu_count: u8 = 4; 7186 let request_body = guest.api_create_body( 7187 cpu_count, 7188 direct_kernel_boot_path().to_str().unwrap(), 7189 DIRECT_KERNEL_BOOT_CMDLINE, 7190 ); 7191 7192 let temp_config_path = guest.tmp_dir.as_path().join("config"); 7193 std::fs::write(&temp_config_path, request_body).unwrap(); 7194 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 7195 7196 let r = std::panic::catch_unwind(|| { 7197 // Create the VM 7198 assert!(dbus_api.remote_command("create", Some(create_config),)); 7199 7200 // Then boot it 7201 assert!(http_api.remote_command("boot", None)); 7202 guest.wait_vm_boot(None).unwrap(); 7203 7204 // Check that the VM booted as expected 7205 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7206 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7207 7208 // Sync and shutdown without powering off to prevent filesystem 7209 // corruption. 
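            // "shutdown -H" halts the guest without powering it off, so the
            // VMM stays up and the same VM configuration can be shut down
            // and booted again purely through the external APIs.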
7210 guest.ssh_command("sync").unwrap(); 7211 guest.ssh_command("sudo shutdown -H now").unwrap(); 7212 7213 // Wait for the guest to be fully shutdown 7214 thread::sleep(std::time::Duration::new(20, 0)); 7215 7216 // Then shutdown the VM 7217 assert!(dbus_api.remote_command("shutdown", None)); 7218 7219 // Then boot it again 7220 assert!(http_api.remote_command("boot", None)); 7221 guest.wait_vm_boot(None).unwrap(); 7222 7223 // Check that the VM booted as expected 7224 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7225 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7226 }); 7227 7228 kill_child(&mut child); 7229 let output = child.wait_with_output().unwrap(); 7230 7231 handle_child_output(r, &output); 7232 } 7233 7234 #[test] 7235 fn test_api_dbus_create_boot() { 7236 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7237 let guest = Guest::new(Box::new(focal)); 7238 7239 _test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7240 } 7241 7242 #[test] 7243 fn test_api_dbus_shutdown() { 7244 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7245 let guest = Guest::new(Box::new(focal)); 7246 7247 _test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7248 } 7249 7250 #[test] 7251 fn test_api_dbus_delete() { 7252 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7253 let guest = Guest::new(Box::new(focal)); 7254 7255 _test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest); 7256 } 7257 7258 #[test] 7259 fn test_api_dbus_pause_resume() { 7260 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7261 let guest = Guest::new(Box::new(focal)); 7262 7263 _test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7264 } 7265 } 7266 7267 mod common_sequential { 7268 use std::fs::remove_dir_all; 7269 7270 use crate::*; 7271 7272 #[test] 7273 #[cfg(not(feature = "mshv"))] 7274 fn test_memory_mergeable_on() { 7275 test_memory_mergeable(true) 7276 } 7277 7278 fn snapshot_and_check_events(api_socket: &str, snapshot_dir: &str, event_path: &str) { 7279 // Pause the VM 7280 assert!(remote_command(api_socket, "pause", None)); 7281 let latest_events: [&MetaEvent; 2] = [ 7282 &MetaEvent { 7283 event: "pausing".to_string(), 7284 device_id: None, 7285 }, 7286 &MetaEvent { 7287 event: "paused".to_string(), 7288 device_id: None, 7289 }, 7290 ]; 7291 // See: #5938 7292 thread::sleep(std::time::Duration::new(1, 0)); 7293 assert!(check_latest_events_exact(&latest_events, event_path)); 7294 7295 // Take a snapshot from the VM 7296 assert!(remote_command( 7297 api_socket, 7298 "snapshot", 7299 Some(format!("file://{snapshot_dir}").as_str()), 7300 )); 7301 7302 // Wait to make sure the snapshot is completed 7303 thread::sleep(std::time::Duration::new(10, 0)); 7304 7305 let latest_events = [ 7306 &MetaEvent { 7307 event: "snapshotting".to_string(), 7308 device_id: None, 7309 }, 7310 &MetaEvent { 7311 event: "snapshotted".to_string(), 7312 device_id: None, 7313 }, 7314 ]; 7315 // See: #5938 7316 thread::sleep(std::time::Duration::new(1, 0)); 7317 assert!(check_latest_events_exact(&latest_events, event_path)); 7318 } 7319 7320 // One thing to note about this test. The virtio-net device is heavily used 7321 // through each ssh command. There's no need to perform a dedicated test to 7322 // verify the migration went well for virtio-net. 
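    // In particular, every ssh_command() issued after the restore travels
    // through the restored virtio-net queues, so the post-restore
    // check_devices_common() call implicitly validates that the network
    // device came back functional.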
7323 #[test] 7324 #[cfg(not(feature = "mshv"))] 7325 fn test_snapshot_restore_hotplug_virtiomem() { 7326 _test_snapshot_restore(true); 7327 } 7328 7329 #[test] 7330 fn test_snapshot_restore_basic() { 7331 _test_snapshot_restore(false); 7332 } 7333 7334 fn _test_snapshot_restore(use_hotplug: bool) { 7335 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7336 let guest = Guest::new(Box::new(focal)); 7337 let kernel_path = direct_kernel_boot_path(); 7338 7339 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7340 7341 let net_id = "net123"; 7342 let net_params = format!( 7343 "id={},tap=,mac={},ip={},mask=255.255.255.0", 7344 net_id, guest.network.guest_mac, guest.network.host_ip 7345 ); 7346 let mut mem_params = "size=2G"; 7347 7348 if use_hotplug { 7349 mem_params = "size=2G,hotplug_method=virtio-mem,hotplug_size=32G" 7350 } 7351 7352 let cloudinit_params = format!( 7353 "path={},iommu=on", 7354 guest.disk_config.disk(DiskType::CloudInit).unwrap() 7355 ); 7356 7357 let socket = temp_vsock_path(&guest.tmp_dir); 7358 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7359 7360 let mut child = GuestCommand::new(&guest) 7361 .args(["--api-socket", &api_socket_source]) 7362 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7363 .args(["--cpus", "boot=4"]) 7364 .args(["--memory", mem_params]) 7365 .args(["--balloon", "size=0"]) 7366 .args(["--kernel", kernel_path.to_str().unwrap()]) 7367 .args([ 7368 "--disk", 7369 format!( 7370 "path={}", 7371 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 7372 ) 7373 .as_str(), 7374 cloudinit_params.as_str(), 7375 ]) 7376 .args(["--net", net_params.as_str()]) 7377 .args(["--vsock", format!("cid=3,socket={socket}").as_str()]) 7378 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7379 .capture_output() 7380 .spawn() 7381 .unwrap(); 7382 7383 let console_text = String::from("On a branch floating down river a cricket, singing."); 7384 // Create the snapshot directory 7385 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7386 7387 let r = std::panic::catch_unwind(|| { 7388 guest.wait_vm_boot(None).unwrap(); 7389 7390 // Check the number of vCPUs 7391 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 7392 // Check the guest RAM 7393 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 7394 if use_hotplug { 7395 // Increase guest RAM with virtio-mem 7396 resize_command( 7397 &api_socket_source, 7398 None, 7399 Some(6 << 30), 7400 None, 7401 Some(&event_path), 7402 ); 7403 thread::sleep(std::time::Duration::new(5, 0)); 7404 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 7405 // Use balloon to remove RAM from the VM 7406 resize_command( 7407 &api_socket_source, 7408 None, 7409 None, 7410 Some(1 << 30), 7411 Some(&event_path), 7412 ); 7413 thread::sleep(std::time::Duration::new(5, 0)); 7414 let total_memory = guest.get_total_memory().unwrap_or_default(); 7415 assert!(total_memory > 4_800_000); 7416 assert!(total_memory < 5_760_000); 7417 } 7418 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 7419 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7420 7421 // x86_64: We check that removing and adding back the virtio-net device 7422 // does not break the snapshot/restore support for virtio-pci. 7423 // This is an important thing to test as the hotplug will 7424 // trigger a PCI BAR reprogramming, which is a good way of 7425 // checking if the stored resources are correctly restored. 
7426 // Unplug the virtio-net device 7427 // AArch64: Device hotplug is currently not supported, skipping here. 7428 #[cfg(target_arch = "x86_64")] 7429 { 7430 assert!(remote_command( 7431 &api_socket_source, 7432 "remove-device", 7433 Some(net_id), 7434 )); 7435 thread::sleep(std::time::Duration::new(10, 0)); 7436 let latest_events = [&MetaEvent { 7437 event: "device-removed".to_string(), 7438 device_id: Some(net_id.to_string()), 7439 }]; 7440 // See: #5938 7441 thread::sleep(std::time::Duration::new(1, 0)); 7442 assert!(check_latest_events_exact(&latest_events, &event_path)); 7443 7444 // Plug the virtio-net device again 7445 assert!(remote_command( 7446 &api_socket_source, 7447 "add-net", 7448 Some(net_params.as_str()), 7449 )); 7450 thread::sleep(std::time::Duration::new(10, 0)); 7451 } 7452 7453 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7454 }); 7455 7456 // Shutdown the source VM and check console output 7457 kill_child(&mut child); 7458 let output = child.wait_with_output().unwrap(); 7459 handle_child_output(r, &output); 7460 7461 let r = std::panic::catch_unwind(|| { 7462 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7463 }); 7464 7465 handle_child_output(r, &output); 7466 7467 // Remove the vsock socket file. 7468 Command::new("rm") 7469 .arg("-f") 7470 .arg(socket.as_str()) 7471 .output() 7472 .unwrap(); 7473 7474 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7475 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7476 7477 // Restore the VM from the snapshot 7478 let mut child = GuestCommand::new(&guest) 7479 .args(["--api-socket", &api_socket_restored]) 7480 .args([ 7481 "--event-monitor", 7482 format!("path={event_path_restored}").as_str(), 7483 ]) 7484 .args([ 7485 "--restore", 7486 format!("source_url=file://{snapshot_dir}").as_str(), 7487 ]) 7488 .capture_output() 7489 .spawn() 7490 .unwrap(); 7491 7492 // Wait for the VM to be restored 7493 thread::sleep(std::time::Duration::new(20, 0)); 7494 let expected_events = [ 7495 &MetaEvent { 7496 event: "starting".to_string(), 7497 device_id: None, 7498 }, 7499 &MetaEvent { 7500 event: "activated".to_string(), 7501 device_id: Some("__console".to_string()), 7502 }, 7503 &MetaEvent { 7504 event: "activated".to_string(), 7505 device_id: Some("__rng".to_string()), 7506 }, 7507 &MetaEvent { 7508 event: "restoring".to_string(), 7509 device_id: None, 7510 }, 7511 ]; 7512 assert!(check_sequential_events( 7513 &expected_events, 7514 &event_path_restored 7515 )); 7516 let latest_events = [&MetaEvent { 7517 event: "restored".to_string(), 7518 device_id: None, 7519 }]; 7520 assert!(check_latest_events_exact( 7521 &latest_events, 7522 &event_path_restored 7523 )); 7524 7525 // Remove the snapshot dir 7526 let _ = remove_dir_all(snapshot_dir.as_str()); 7527 7528 let r = std::panic::catch_unwind(|| { 7529 // Resume the VM 7530 assert!(remote_command(&api_socket_restored, "resume", None)); 7531 // There is no way that we can ensure the 'write()' to the 7532 // event file is completed when the 'resume' request is 7533 // returned successfully, because the 'write()' was done 7534 // asynchronously from a different thread of Cloud 7535 // Hypervisor (e.g. the event-monitor thread). 
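            // (This is the same race worked around with a short sleep in
            // snapshot_and_check_events(); see #5938.)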
7536 thread::sleep(std::time::Duration::new(1, 0)); 7537 let latest_events = [ 7538 &MetaEvent { 7539 event: "resuming".to_string(), 7540 device_id: None, 7541 }, 7542 &MetaEvent { 7543 event: "resumed".to_string(), 7544 device_id: None, 7545 }, 7546 ]; 7547 assert!(check_latest_events_exact( 7548 &latest_events, 7549 &event_path_restored 7550 )); 7551 7552 // Perform same checks to validate VM has been properly restored 7553 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 7554 let total_memory = guest.get_total_memory().unwrap_or_default(); 7555 if !use_hotplug { 7556 assert!(total_memory > 1_920_000); 7557 } else { 7558 assert!(total_memory > 4_800_000); 7559 assert!(total_memory < 5_760_000); 7560 // Deflate balloon to restore entire RAM to the VM 7561 resize_command(&api_socket_restored, None, None, Some(0), None); 7562 thread::sleep(std::time::Duration::new(5, 0)); 7563 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 7564 // Decrease guest RAM with virtio-mem 7565 resize_command(&api_socket_restored, None, Some(5 << 30), None, None); 7566 thread::sleep(std::time::Duration::new(5, 0)); 7567 let total_memory = guest.get_total_memory().unwrap_or_default(); 7568 assert!(total_memory > 4_800_000); 7569 assert!(total_memory < 5_760_000); 7570 } 7571 7572 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7573 }); 7574 // Shutdown the target VM and check console output 7575 kill_child(&mut child); 7576 let output = child.wait_with_output().unwrap(); 7577 handle_child_output(r, &output); 7578 7579 let r = std::panic::catch_unwind(|| { 7580 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7581 }); 7582 7583 handle_child_output(r, &output); 7584 } 7585 7586 #[test] 7587 fn test_snapshot_restore_with_fd() { 7588 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7589 let guest = Guest::new(Box::new(focal)); 7590 let kernel_path = direct_kernel_boot_path(); 7591 7592 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7593 7594 let net_id = "net123"; 7595 let num_queue_pairs: usize = 2; 7596 // use a name that does not conflict with tap dev created from other tests 7597 let tap_name = "chtap999"; 7598 use std::str::FromStr; 7599 let taps = net_util::open_tap( 7600 Some(tap_name), 7601 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 7602 None, 7603 &mut None, 7604 None, 7605 num_queue_pairs, 7606 Some(libc::O_RDWR | libc::O_NONBLOCK), 7607 ) 7608 .unwrap(); 7609 let net_params = format!( 7610 "id={},fd=[{},{}],mac={},ip={},mask=255.255.255.0,num_queues={}", 7611 net_id, 7612 taps[0].as_raw_fd(), 7613 taps[1].as_raw_fd(), 7614 guest.network.guest_mac, 7615 guest.network.host_ip, 7616 num_queue_pairs * 2 7617 ); 7618 7619 let cloudinit_params = format!( 7620 "path={},iommu=on", 7621 guest.disk_config.disk(DiskType::CloudInit).unwrap() 7622 ); 7623 7624 let n_cpu = 2; 7625 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7626 7627 let mut child = GuestCommand::new(&guest) 7628 .args(["--api-socket", &api_socket_source]) 7629 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7630 .args(["--cpus", format!("boot={}", n_cpu).as_str()]) 7631 .args(["--memory", "size=1G"]) 7632 .args(["--kernel", kernel_path.to_str().unwrap()]) 7633 .args([ 7634 "--disk", 7635 format!( 7636 "path={}", 7637 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 7638 ) 7639 .as_str(), 7640 cloudinit_params.as_str(), 7641 ]) 7642 .args(["--net", net_params.as_str()]) 7643 
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7644 .capture_output() 7645 .spawn() 7646 .unwrap(); 7647 7648 let console_text = String::from("On a branch floating down river a cricket, singing."); 7649 // Create the snapshot directory 7650 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7651 7652 let r = std::panic::catch_unwind(|| { 7653 guest.wait_vm_boot(None).unwrap(); 7654 7655 // close the fds after VM boots, as CH duplicates them before using 7656 for tap in taps.iter() { 7657 unsafe { libc::close(tap.as_raw_fd()) }; 7658 } 7659 7660 // Check the number of vCPUs 7661 assert_eq!(guest.get_cpu_count().unwrap_or_default(), n_cpu); 7662 // Check the guest RAM 7663 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 7664 7665 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 7666 guest.check_devices_common(None, Some(&console_text), None); 7667 7668 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7669 }); 7670 7671 // Shutdown the source VM and check console output 7672 kill_child(&mut child); 7673 let output = child.wait_with_output().unwrap(); 7674 handle_child_output(r, &output); 7675 7676 let r = std::panic::catch_unwind(|| { 7677 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7678 }); 7679 7680 handle_child_output(r, &output); 7681 7682 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7683 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7684 7685 // Restore the VM from the snapshot 7686 let mut child = GuestCommand::new(&guest) 7687 .args(["--api-socket", &api_socket_restored]) 7688 .args([ 7689 "--event-monitor", 7690 format!("path={event_path_restored}").as_str(), 7691 ]) 7692 .capture_output() 7693 .spawn() 7694 .unwrap(); 7695 thread::sleep(std::time::Duration::new(2, 0)); 7696 7697 let taps = net_util::open_tap( 7698 Some(tap_name), 7699 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 7700 None, 7701 &mut None, 7702 None, 7703 num_queue_pairs, 7704 Some(libc::O_RDWR | libc::O_NONBLOCK), 7705 ) 7706 .unwrap(); 7707 let restore_params = format!( 7708 "source_url=file://{},net_fds=[{}@[{},{}]]", 7709 snapshot_dir, 7710 net_id, 7711 taps[0].as_raw_fd(), 7712 taps[1].as_raw_fd() 7713 ); 7714 assert!(remote_command( 7715 &api_socket_restored, 7716 "restore", 7717 Some(restore_params.as_str()) 7718 )); 7719 7720 // Wait for the VM to be restored 7721 thread::sleep(std::time::Duration::new(20, 0)); 7722 7723 // close the fds as CH duplicates them before using 7724 for tap in taps.iter() { 7725 unsafe { libc::close(tap.as_raw_fd()) }; 7726 } 7727 7728 let expected_events = [ 7729 &MetaEvent { 7730 event: "starting".to_string(), 7731 device_id: None, 7732 }, 7733 &MetaEvent { 7734 event: "activated".to_string(), 7735 device_id: Some("__console".to_string()), 7736 }, 7737 &MetaEvent { 7738 event: "activated".to_string(), 7739 device_id: Some("__rng".to_string()), 7740 }, 7741 &MetaEvent { 7742 event: "restoring".to_string(), 7743 device_id: None, 7744 }, 7745 ]; 7746 assert!(check_sequential_events( 7747 &expected_events, 7748 &event_path_restored 7749 )); 7750 let latest_events = [&MetaEvent { 7751 event: "restored".to_string(), 7752 device_id: None, 7753 }]; 7754 assert!(check_latest_events_exact( 7755 &latest_events, 7756 &event_path_restored 7757 )); 7758 7759 // Remove the snapshot dir 7760 let _ = remove_dir_all(snapshot_dir.as_str()); 7761 7762 let r = std::panic::catch_unwind(|| { 7763 // Resume 
the VM 7764 assert!(remote_command(&api_socket_restored, "resume", None)); 7765 // There is no way that we can ensure the 'write()' to the 7766 // event file is completed when the 'resume' request is 7767 // returned successfully, because the 'write()' was done 7768 // asynchronously from a different thread of Cloud 7769 // Hypervisor (e.g. the event-monitor thread). 7770 thread::sleep(std::time::Duration::new(1, 0)); 7771 let latest_events = [ 7772 &MetaEvent { 7773 event: "resuming".to_string(), 7774 device_id: None, 7775 }, 7776 &MetaEvent { 7777 event: "resumed".to_string(), 7778 device_id: None, 7779 }, 7780 ]; 7781 assert!(check_latest_events_exact( 7782 &latest_events, 7783 &event_path_restored 7784 )); 7785 7786 // Perform same checks to validate VM has been properly restored 7787 assert_eq!(guest.get_cpu_count().unwrap_or_default(), n_cpu); 7788 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 7789 7790 guest.check_devices_common(None, Some(&console_text), None); 7791 }); 7792 // Shutdown the target VM and check console output 7793 kill_child(&mut child); 7794 let output = child.wait_with_output().unwrap(); 7795 handle_child_output(r, &output); 7796 7797 let r = std::panic::catch_unwind(|| { 7798 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7799 }); 7800 7801 handle_child_output(r, &output); 7802 } 7803 7804 #[test] 7805 #[cfg(target_arch = "x86_64")] 7806 fn test_snapshot_restore_pvpanic() { 7807 _test_snapshot_restore_devices(true); 7808 } 7809 7810 fn _test_snapshot_restore_devices(pvpanic: bool) { 7811 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7812 let guest = Guest::new(Box::new(focal)); 7813 let kernel_path = direct_kernel_boot_path(); 7814 7815 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7816 7817 let device_params = { 7818 let mut data = vec![]; 7819 if pvpanic { 7820 data.push("--pvpanic"); 7821 } 7822 data 7823 }; 7824 7825 let socket = temp_vsock_path(&guest.tmp_dir); 7826 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7827 7828 let mut child = GuestCommand::new(&guest) 7829 .args(["--api-socket", &api_socket_source]) 7830 .args(["--event-monitor", format!("path={}", event_path).as_str()]) 7831 .args(["--cpus", "boot=2"]) 7832 .args(["--memory", "size=1G"]) 7833 .args(["--kernel", kernel_path.to_str().unwrap()]) 7834 .default_disks() 7835 .default_net() 7836 .args(["--vsock", format!("cid=3,socket={}", socket).as_str()]) 7837 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7838 .args(device_params) 7839 .capture_output() 7840 .spawn() 7841 .unwrap(); 7842 7843 let console_text = String::from("On a branch floating down river a cricket, singing."); 7844 // Create the snapshot directory 7845 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7846 7847 let r = std::panic::catch_unwind(|| { 7848 guest.wait_vm_boot(None).unwrap(); 7849 7850 // Check the number of vCPUs 7851 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 7852 7853 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7854 }); 7855 7856 // Shutdown the source VM and check console output 7857 kill_child(&mut child); 7858 let output = child.wait_with_output().unwrap(); 7859 handle_child_output(r, &output); 7860 7861 // Remove the vsock socket file. 
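        // Presumably this is needed so the restored VM can re-create the vsock
        // backend at the same path without tripping over the stale socket file
        // left behind by the source VM.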
        Command::new("rm")
            .arg("-f")
            .arg(socket.as_str())
            .output()
            .unwrap();

        let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir));
        let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir));

        // Restore the VM from the snapshot
        let mut child = GuestCommand::new(&guest)
            .args(["--api-socket", &api_socket_restored])
            .args([
                "--event-monitor",
                format!("path={event_path_restored}").as_str(),
            ])
            .args([
                "--restore",
                format!("source_url=file://{snapshot_dir}").as_str(),
            ])
            .capture_output()
            .spawn()
            .unwrap();

        // Wait for the VM to be restored
        thread::sleep(std::time::Duration::new(20, 0));

        let latest_events = [&MetaEvent {
            event: "restored".to_string(),
            device_id: None,
        }];
        assert!(check_latest_events_exact(
            &latest_events,
            &event_path_restored
        ));

        // Remove the snapshot dir
        let _ = remove_dir_all(snapshot_dir.as_str());

        let r = std::panic::catch_unwind(|| {
            // Resume the VM
            assert!(remote_command(&api_socket_restored, "resume", None));
            // There is no way that we can ensure the 'write()' to the
            // event file is completed when the 'resume' request is
            // returned successfully, because the 'write()' was done
            // asynchronously from a different thread of Cloud
            // Hypervisor (e.g. the event-monitor thread).
            thread::sleep(std::time::Duration::new(1, 0));
            let latest_events = [
                &MetaEvent {
                    event: "resuming".to_string(),
                    device_id: None,
                },
                &MetaEvent {
                    event: "resumed".to_string(),
                    device_id: None,
                },
            ];
            assert!(check_latest_events_exact(
                &latest_events,
                &event_path_restored
            ));

            // Check the number of vCPUs
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2);
            guest.check_devices_common(Some(&socket), Some(&console_text), None);

            if pvpanic {
                // Trigger a guest panic
                make_guest_panic(&guest);
                // Wait a while for the guest to handle the panic
                thread::sleep(std::time::Duration::new(10, 0));

                let expected_sequential_events = [&MetaEvent {
                    event: "panic".to_string(),
                    device_id: None,
                }];
                assert!(check_latest_events_exact(
                    &expected_sequential_events,
                    &event_path_restored
                ));
            }
        });
        // Shutdown the target VM and check console output
        kill_child(&mut child);
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);

        let r = std::panic::catch_unwind(|| {
            assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text));
        });

        handle_child_output(r, &output);
    }
}

mod windows {
    use once_cell::sync::Lazy;

    use crate::*;

    static NEXT_DISK_ID: Lazy<Mutex<u8>> = Lazy::new(|| Mutex::new(1));

    struct WindowsGuest {
        guest: Guest,
        auth: PasswordAuth,
    }

    trait FsType {
        const FS_FAT: u8;
        const FS_NTFS: u8;
    }
    impl FsType for WindowsGuest {
        const FS_FAT: u8 = 0;
        const FS_NTFS: u8 = 1;
    }

    impl WindowsGuest {
        fn new() -> Self {
            let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string());
            let guest = Guest::new(Box::new(disk));
            let auth = PasswordAuth {
                username: String::from("administrator"),
                password: String::from("Admin123"),
            };

            WindowsGuest { guest, auth }
        }

        fn guest(&self) -> &Guest {
            &self.guest
        }

        fn ssh_cmd(&self, cmd: &str) -> String {
            ssh_command_ip_with_auth(
                cmd,
                &self.auth,
                &self.guest.network.guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap()
        }

        fn cpu_count(&self) -> u8 {
            self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"")
                .trim()
                .parse::<u8>()
                .unwrap_or(0)
        }

        fn ram_size(&self) -> usize {
            self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"")
                .trim()
                .parse::<usize>()
                .unwrap_or(0)
        }

        fn netdev_count(&self) -> u8 {
            self.ssh_cmd("powershell -Command \"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"")
                .trim()
                .parse::<u8>()
                .unwrap_or(0)
        }

        fn disk_count(&self) -> u8 {
            self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"")
                .trim()
                .parse::<u8>()
                .unwrap_or(0)
        }

        fn reboot(&self) {
            let _ = self.ssh_cmd("shutdown /r /t 0");
        }

        fn shutdown(&self) {
            let _ = self.ssh_cmd("shutdown /s /t 0");
        }

        fn run_dnsmasq(&self) -> std::process::Child {
            let listen_address = format!("--listen-address={}", self.guest.network.host_ip);
            let dhcp_host = format!(
                "--dhcp-host={},{}",
                self.guest.network.guest_mac, self.guest.network.guest_ip
            );
            let dhcp_range = format!(
                "--dhcp-range=eth,{},{}",
                self.guest.network.guest_ip, self.guest.network.guest_ip
            );

            Command::new("dnsmasq")
                .arg("--no-daemon")
                .arg("--log-queries")
                .arg(listen_address.as_str())
                .arg("--except-interface=lo")
                .arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet.
                .arg("--conf-file=/dev/null")
                .arg(dhcp_host.as_str())
                .arg(dhcp_range.as_str())
                .spawn()
                .unwrap()
        }

        // TODO Clean up the image file explicitly after the test if there are space issues.
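        // Rough flow of disk_new() below, expressed as the equivalent shell
        // commands (illustration only; the id/path are examples):
        //   qemu-img create -f raw /tmp/test-hotplug-1.raw 100m
        //   LOOP=$(losetup --show -f /tmp/test-hotplug-1.raw)
        //   echo 'type=7' | sfdisk "${LOOP}"
        //   losetup -d "${LOOP}"
        //   LOOP=$(losetup --show --offset $((512 * 2048)) -f /tmp/test-hotplug-1.raw)
        //   mkfs.msdos "${LOOP}"   # or mkfs.ntfs
        //   losetup -d "${LOOP}"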
        fn disk_new(&self, fs: u8, sz: usize) -> String {
            let mut guard = NEXT_DISK_ID.lock().unwrap();
            let id = *guard;
            *guard = id + 1;

            let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw"));
            let _ = fs::remove_file(&img);

            // Create an image file
            let out = Command::new("qemu-img")
                .args([
                    "create",
                    "-f",
                    "raw",
                    img.to_str().unwrap(),
                    format!("{sz}m").as_str(),
                ])
                .output()
                .expect("qemu-img command failed")
                .stdout;
            println!("{out:?}");

            // Associate image to a loop device
            let out = Command::new("losetup")
                .args(["--show", "-f", img.to_str().unwrap()])
                .output()
                .expect("failed to create loop device")
                .stdout;
            let _tmp = String::from_utf8_lossy(&out);
            let loop_dev = _tmp.trim();
            println!("{out:?}");

            // Create a partition table
            // echo 'type=7' | sudo sfdisk "${LOOP}"
            let mut child = Command::new("sfdisk")
                .args([loop_dev])
                .stdin(Stdio::piped())
                .spawn()
                .unwrap();
            let stdin = child.stdin.as_mut().expect("failed to open stdin");
            stdin
                .write_all("type=7".as_bytes())
                .expect("failed to write stdin");
            let out = child.wait_with_output().expect("sfdisk failed").stdout;
            println!("{out:?}");

            // Disengage the loop device
            let out = Command::new("losetup")
                .args(["-d", loop_dev])
                .output()
                .expect("loop device not found")
                .stdout;
            println!("{out:?}");

            // Re-associate loop device pointing to the partition only
            let out = Command::new("losetup")
                .args([
                    "--show",
                    "--offset",
                    (512 * 2048).to_string().as_str(),
                    "-f",
                    img.to_str().unwrap(),
                ])
                .output()
                .expect("failed to create loop device")
                .stdout;
            let _tmp = String::from_utf8_lossy(&out);
            let loop_dev = _tmp.trim();
            println!("{out:?}");

            // Create filesystem.
            let fs_cmd = match fs {
                WindowsGuest::FS_FAT => "mkfs.msdos",
                WindowsGuest::FS_NTFS => "mkfs.ntfs",
                _ => panic!("Unknown filesystem type '{fs}'"),
            };
            let out = Command::new(fs_cmd)
                .args([&loop_dev])
                .output()
                .unwrap_or_else(|_| panic!("{fs_cmd} failed"))
                .stdout;
            println!("{out:?}");

            // Disengage the loop device
            let out = Command::new("losetup")
                .args(["-d", loop_dev])
                .output()
                .unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found"))
                .stdout;
            println!("{out:?}");

            img.to_str().unwrap().to_string()
        }

        fn disks_set_rw(&self) {
            let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\"");
        }

        fn disks_online(&self) {
            let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\"");
        }

        fn disk_file_put(&self, fname: &str, data: &str) {
            let _ = self.ssh_cmd(&format!(
                "powershell -Command \"'{data}' | Set-Content -Path {fname}\""
            ));
        }

        fn disk_file_read(&self, fname: &str) -> String {
            self.ssh_cmd(&format!(
                "powershell -Command \"Get-Content -Path {fname}\""
            ))
        }

        fn wait_for_boot(&self) -> bool {
            let cmd = "dir /b c:\\ | find \"Windows\"";
            let tmo_max = 180;
            // The total timeout increases by n*1 + n*2 + n*3 + ..., therefore the
            // initial interval must be small.
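            // For example, with tmo_int = 2 and tmo_max = 180 the accumulated wait
            // is tmo_int * (1 + 2 + ... + ret) = ret * (ret + 1), so the block below
            // settles on ret = 13 retries (13 * 14 = 182 >= 180).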
8186 let tmo_int = 2; 8187 let out = ssh_command_ip_with_auth( 8188 cmd, 8189 &self.auth, 8190 &self.guest.network.guest_ip, 8191 { 8192 let mut ret = 1; 8193 let mut tmo_acc = 0; 8194 loop { 8195 tmo_acc += tmo_int * ret; 8196 if tmo_acc >= tmo_max { 8197 break; 8198 } 8199 ret += 1; 8200 } 8201 ret 8202 }, 8203 tmo_int, 8204 ) 8205 .unwrap(); 8206 8207 if "Windows" == out.trim() { 8208 return true; 8209 } 8210 8211 false 8212 } 8213 } 8214 8215 fn vcpu_threads_count(pid: u32) -> u8 { 8216 // ps -T -p 12345 | grep vcpu | wc -l 8217 let out = Command::new("ps") 8218 .args(["-T", "-p", format!("{pid}").as_str()]) 8219 .output() 8220 .expect("ps command failed") 8221 .stdout; 8222 return String::from_utf8_lossy(&out).matches("vcpu").count() as u8; 8223 } 8224 8225 fn netdev_ctrl_threads_count(pid: u32) -> u8 { 8226 // ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l 8227 let out = Command::new("ps") 8228 .args(["-T", "-p", format!("{pid}").as_str()]) 8229 .output() 8230 .expect("ps command failed") 8231 .stdout; 8232 let mut n = 0; 8233 String::from_utf8_lossy(&out) 8234 .split_whitespace() 8235 .for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl 8236 n 8237 } 8238 8239 fn disk_ctrl_threads_count(pid: u32) -> u8 { 8240 // ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l 8241 let out = Command::new("ps") 8242 .args(["-T", "-p", format!("{pid}").as_str()]) 8243 .output() 8244 .expect("ps command failed") 8245 .stdout; 8246 let mut n = 0; 8247 String::from_utf8_lossy(&out) 8248 .split_whitespace() 8249 .for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // _disk0_q0, don't care about multiple queues as they're related to the same hdd 8250 n 8251 } 8252 8253 #[test] 8254 fn test_windows_guest() { 8255 let windows_guest = WindowsGuest::new(); 8256 8257 let mut child = GuestCommand::new(windows_guest.guest()) 8258 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8259 .args(["--memory", "size=4G"]) 8260 .args(["--kernel", edk2_path().to_str().unwrap()]) 8261 .args(["--serial", "tty"]) 8262 .args(["--console", "off"]) 8263 .default_disks() 8264 .default_net() 8265 .capture_output() 8266 .spawn() 8267 .unwrap(); 8268 8269 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8270 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8271 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8272 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8273 8274 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8275 8276 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8277 8278 let r = std::panic::catch_unwind(|| { 8279 // Wait to make sure Windows boots up 8280 assert!(windows_guest.wait_for_boot()); 8281 8282 windows_guest.shutdown(); 8283 }); 8284 8285 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8286 let _ = child.kill(); 8287 let output = child.wait_with_output().unwrap(); 8288 8289 let _ = child_dnsmasq.kill(); 8290 let _ = child_dnsmasq.wait(); 8291 8292 handle_child_output(r, &output); 8293 } 8294 8295 #[test] 8296 fn test_windows_guest_multiple_queues() { 8297 let windows_guest = WindowsGuest::new(); 8298 8299 let mut ovmf_path = dirs::home_dir().unwrap(); 8300 ovmf_path.push("workloads"); 8301 ovmf_path.push(OVMF_NAME); 8302 8303 let mut child = GuestCommand::new(windows_guest.guest()) 8304 .args(["--cpus", "boot=4,kvm_hyperv=on"]) 8305 .args(["--memory", "size=4G"]) 8306 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8307 .args(["--serial", "tty"]) 8308 .args(["--console", 
"off"]) 8309 .args([ 8310 "--disk", 8311 format!( 8312 "path={},num_queues=4", 8313 windows_guest 8314 .guest() 8315 .disk_config 8316 .disk(DiskType::OperatingSystem) 8317 .unwrap() 8318 ) 8319 .as_str(), 8320 ]) 8321 .args([ 8322 "--net", 8323 format!( 8324 "tap=,mac={},ip={},mask=255.255.255.0,num_queues=8", 8325 windows_guest.guest().network.guest_mac, 8326 windows_guest.guest().network.host_ip 8327 ) 8328 .as_str(), 8329 ]) 8330 .capture_output() 8331 .spawn() 8332 .unwrap(); 8333 8334 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8335 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8336 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8337 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8338 8339 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8340 8341 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8342 8343 let r = std::panic::catch_unwind(|| { 8344 // Wait to make sure Windows boots up 8345 assert!(windows_guest.wait_for_boot()); 8346 8347 windows_guest.shutdown(); 8348 }); 8349 8350 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8351 let _ = child.kill(); 8352 let output = child.wait_with_output().unwrap(); 8353 8354 let _ = child_dnsmasq.kill(); 8355 let _ = child_dnsmasq.wait(); 8356 8357 handle_child_output(r, &output); 8358 } 8359 8360 #[test] 8361 #[cfg(not(feature = "mshv"))] 8362 #[ignore = "See #4327"] 8363 fn test_windows_guest_snapshot_restore() { 8364 let windows_guest = WindowsGuest::new(); 8365 8366 let mut ovmf_path = dirs::home_dir().unwrap(); 8367 ovmf_path.push("workloads"); 8368 ovmf_path.push(OVMF_NAME); 8369 8370 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8371 let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir)); 8372 8373 let mut child = GuestCommand::new(windows_guest.guest()) 8374 .args(["--api-socket", &api_socket_source]) 8375 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8376 .args(["--memory", "size=4G"]) 8377 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8378 .args(["--serial", "tty"]) 8379 .args(["--console", "off"]) 8380 .default_disks() 8381 .default_net() 8382 .capture_output() 8383 .spawn() 8384 .unwrap(); 8385 8386 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8387 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8388 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8389 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8390 8391 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8392 8393 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8394 8395 // Wait to make sure Windows boots up 8396 assert!(windows_guest.wait_for_boot()); 8397 8398 let snapshot_dir = temp_snapshot_dir_path(&tmp_dir); 8399 8400 // Pause the VM 8401 assert!(remote_command(&api_socket_source, "pause", None)); 8402 8403 // Take a snapshot from the VM 8404 assert!(remote_command( 8405 &api_socket_source, 8406 "snapshot", 8407 Some(format!("file://{snapshot_dir}").as_str()), 8408 )); 8409 8410 // Wait to make sure the snapshot is completed 8411 thread::sleep(std::time::Duration::new(30, 0)); 8412 8413 let _ = child.kill(); 8414 child.wait().unwrap(); 8415 8416 let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir)); 8417 8418 // Restore the VM from the snapshot 8419 let mut child = GuestCommand::new(windows_guest.guest()) 8420 .args(["--api-socket", &api_socket_restored]) 8421 .args([ 8422 "--restore", 8423 format!("source_url=file://{snapshot_dir}").as_str(), 8424 ]) 8425 
.capture_output() 8426 .spawn() 8427 .unwrap(); 8428 8429 // Wait for the VM to be restored 8430 thread::sleep(std::time::Duration::new(20, 0)); 8431 8432 let r = std::panic::catch_unwind(|| { 8433 // Resume the VM 8434 assert!(remote_command(&api_socket_restored, "resume", None)); 8435 8436 windows_guest.shutdown(); 8437 }); 8438 8439 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8440 let _ = child.kill(); 8441 let output = child.wait_with_output().unwrap(); 8442 8443 let _ = child_dnsmasq.kill(); 8444 let _ = child_dnsmasq.wait(); 8445 8446 handle_child_output(r, &output); 8447 } 8448 8449 #[test] 8450 #[cfg(not(feature = "mshv"))] 8451 #[cfg(not(target_arch = "aarch64"))] 8452 fn test_windows_guest_cpu_hotplug() { 8453 let windows_guest = WindowsGuest::new(); 8454 8455 let mut ovmf_path = dirs::home_dir().unwrap(); 8456 ovmf_path.push("workloads"); 8457 ovmf_path.push(OVMF_NAME); 8458 8459 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8460 let api_socket = temp_api_path(&tmp_dir); 8461 8462 let mut child = GuestCommand::new(windows_guest.guest()) 8463 .args(["--api-socket", &api_socket]) 8464 .args(["--cpus", "boot=2,max=8,kvm_hyperv=on"]) 8465 .args(["--memory", "size=4G"]) 8466 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8467 .args(["--serial", "tty"]) 8468 .args(["--console", "off"]) 8469 .default_disks() 8470 .default_net() 8471 .capture_output() 8472 .spawn() 8473 .unwrap(); 8474 8475 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8476 8477 let r = std::panic::catch_unwind(|| { 8478 // Wait to make sure Windows boots up 8479 assert!(windows_guest.wait_for_boot()); 8480 8481 let vcpu_num = 2; 8482 // Check the initial number of CPUs the guest sees 8483 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8484 // Check the initial number of vcpu threads in the CH process 8485 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8486 8487 let vcpu_num = 6; 8488 // Hotplug some CPUs 8489 resize_command(&api_socket, Some(vcpu_num), None, None, None); 8490 // Wait to make sure CPUs are added 8491 thread::sleep(std::time::Duration::new(10, 0)); 8492 // Check the guest sees the correct number 8493 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8494 // Check the CH process has the correct number of vcpu threads 8495 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8496 8497 let vcpu_num = 4; 8498 // Remove some CPUs. Note that Windows doesn't support hot-remove. 
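            // Since the hot-remove is not acted upon by Windows, the new (lower)
            // count is only checked after the reboot that follows.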
            resize_command(&api_socket, Some(vcpu_num), None, None, None);
            // Wait to make sure CPUs are removed
            thread::sleep(std::time::Duration::new(10, 0));
            // Reboot to let Windows catch up
            windows_guest.reboot();
            // Wait to make sure Windows has completely rebooted
            thread::sleep(std::time::Duration::new(60, 0));
            // Check the guest sees the correct number
            assert_eq!(windows_guest.cpu_count(), vcpu_num);
            // Check the CH process has the correct number of vcpu threads
            assert_eq!(vcpu_threads_count(child.id()), vcpu_num);

            windows_guest.shutdown();
        });

        let _ = child.wait_timeout(std::time::Duration::from_secs(60));
        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        let _ = child_dnsmasq.kill();
        let _ = child_dnsmasq.wait();

        handle_child_output(r, &output);
    }

    #[test]
    #[cfg(not(feature = "mshv"))]
    #[cfg(not(target_arch = "aarch64"))]
    fn test_windows_guest_ram_hotplug() {
        let windows_guest = WindowsGuest::new();

        let mut ovmf_path = dirs::home_dir().unwrap();
        ovmf_path.push("workloads");
        ovmf_path.push(OVMF_NAME);

        let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
        let api_socket = temp_api_path(&tmp_dir);

        let mut child = GuestCommand::new(windows_guest.guest())
            .args(["--api-socket", &api_socket])
            .args(["--cpus", "boot=2,kvm_hyperv=on"])
            .args(["--memory", "size=2G,hotplug_size=5G"])
            .args(["--kernel", ovmf_path.to_str().unwrap()])
            .args(["--serial", "tty"])
            .args(["--console", "off"])
            .default_disks()
            .default_net()
            .capture_output()
            .spawn()
            .unwrap();

        let mut child_dnsmasq = windows_guest.run_dnsmasq();

        let r = std::panic::catch_unwind(|| {
            // Wait to make sure Windows boots up
            assert!(windows_guest.wait_for_boot());

            let ram_size = 2 * 1024 * 1024 * 1024;
            // Check the initial amount of RAM the guest sees
            let current_ram_size = windows_guest.ram_size();
            // This size seems to be reserved by the system and thus the
            // reported amount differs by this constant value.
            let reserved_ram_size = ram_size - current_ram_size;
            // Verify that no more than a constant 4 MiB is lost to the
            // reserved RAM.
            assert!(reserved_ram_size < 4 * 1024 * 1024);

            let ram_size = 4 * 1024 * 1024 * 1024;
            // Hotplug some RAM
            resize_command(&api_socket, None, Some(ram_size), None, None);
            // Wait to make sure RAM has been added
            thread::sleep(std::time::Duration::new(10, 0));
            // Check the guest sees the correct amount
            assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size);

            let ram_size = 3 * 1024 * 1024 * 1024;
            // Unplug some RAM. Note that hot-remove most likely won't work.
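            // As with the vCPU case above, the resize target is an absolute size
            // (3 GiB here) and the guest is rebooted below before the reported
            // amount is checked again.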
            resize_command(&api_socket, None, Some(ram_size), None, None);
            // Wait to make sure the RAM resize has been applied
            thread::sleep(std::time::Duration::new(10, 0));
            // Reboot to let Windows catch up
            windows_guest.reboot();
            // Wait to make sure the guest has completely rebooted
            thread::sleep(std::time::Duration::new(60, 0));
            // Check the guest sees the correct amount
            assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size);

            windows_guest.shutdown();
        });

        let _ = child.wait_timeout(std::time::Duration::from_secs(60));
        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        let _ = child_dnsmasq.kill();
        let _ = child_dnsmasq.wait();

        handle_child_output(r, &output);
    }

    #[test]
    #[cfg(not(feature = "mshv"))]
    fn test_windows_guest_netdev_hotplug() {
        let windows_guest = WindowsGuest::new();

        let mut ovmf_path = dirs::home_dir().unwrap();
        ovmf_path.push("workloads");
        ovmf_path.push(OVMF_NAME);

        let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
        let api_socket = temp_api_path(&tmp_dir);

        let mut child = GuestCommand::new(windows_guest.guest())
            .args(["--api-socket", &api_socket])
            .args(["--cpus", "boot=2,kvm_hyperv=on"])
            .args(["--memory", "size=4G"])
            .args(["--kernel", ovmf_path.to_str().unwrap()])
            .args(["--serial", "tty"])
            .args(["--console", "off"])
            .default_disks()
            .default_net()
            .capture_output()
            .spawn()
            .unwrap();

        let mut child_dnsmasq = windows_guest.run_dnsmasq();

        let r = std::panic::catch_unwind(|| {
            // Wait to make sure Windows boots up
            assert!(windows_guest.wait_for_boot());

            // Initially present network device
            let netdev_num = 1;
            assert_eq!(windows_guest.netdev_count(), netdev_num);
            assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);

            // Hotplug network device
            let (cmd_success, cmd_output) = remote_command_w_output(
                &api_socket,
                "add-net",
                Some(windows_guest.guest().default_net_string().as_str()),
            );
            assert!(cmd_success);
            assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\""));
            thread::sleep(std::time::Duration::new(5, 0));
            // Verify the device is on the system
            let netdev_num = 2;
            assert_eq!(windows_guest.netdev_count(), netdev_num);
            assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);

            // Remove network device
            let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2"));
            assert!(cmd_success);
            thread::sleep(std::time::Duration::new(5, 0));
            // Verify the device has been removed
            let netdev_num = 1;
            assert_eq!(windows_guest.netdev_count(), netdev_num);
            assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);

            windows_guest.shutdown();
        });

        let _ = child.wait_timeout(std::time::Duration::from_secs(60));
        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        let _ = child_dnsmasq.kill();
        let _ = child_dnsmasq.wait();

        handle_child_output(r, &output);
    }

    #[test]
    #[ignore = "See #6037"]
    #[cfg(not(feature = "mshv"))]
    #[cfg(not(target_arch = "aarch64"))]
    fn test_windows_guest_disk_hotplug() {
        let windows_guest = WindowsGuest::new();

        let mut ovmf_path = dirs::home_dir().unwrap();
        ovmf_path.push("workloads");
8680 ovmf_path.push(OVMF_NAME); 8681 8682 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8683 let api_socket = temp_api_path(&tmp_dir); 8684 8685 let mut child = GuestCommand::new(windows_guest.guest()) 8686 .args(["--api-socket", &api_socket]) 8687 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8688 .args(["--memory", "size=4G"]) 8689 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8690 .args(["--serial", "tty"]) 8691 .args(["--console", "off"]) 8692 .default_disks() 8693 .default_net() 8694 .capture_output() 8695 .spawn() 8696 .unwrap(); 8697 8698 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8699 8700 let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100); 8701 8702 let r = std::panic::catch_unwind(|| { 8703 // Wait to make sure Windows boots up 8704 assert!(windows_guest.wait_for_boot()); 8705 8706 // Initially present disk device 8707 let disk_num = 1; 8708 assert_eq!(windows_guest.disk_count(), disk_num); 8709 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8710 8711 // Hotplug disk device 8712 let (cmd_success, cmd_output) = remote_command_w_output( 8713 &api_socket, 8714 "add-disk", 8715 Some(format!("path={disk},readonly=off").as_str()), 8716 ); 8717 assert!(cmd_success); 8718 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\"")); 8719 thread::sleep(std::time::Duration::new(5, 0)); 8720 // Online disk device 8721 windows_guest.disks_set_rw(); 8722 windows_guest.disks_online(); 8723 // Verify the device is on the system 8724 let disk_num = 2; 8725 assert_eq!(windows_guest.disk_count(), disk_num); 8726 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8727 8728 let data = "hello"; 8729 let fname = "d:\\world"; 8730 windows_guest.disk_file_put(fname, data); 8731 8732 // Unmount disk device 8733 let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2")); 8734 assert!(cmd_success); 8735 thread::sleep(std::time::Duration::new(5, 0)); 8736 // Verify the device has been removed 8737 let disk_num = 1; 8738 assert_eq!(windows_guest.disk_count(), disk_num); 8739 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8740 8741 // Remount and check the file exists with the expected contents 8742 let (cmd_success, _cmd_output) = remote_command_w_output( 8743 &api_socket, 8744 "add-disk", 8745 Some(format!("path={disk},readonly=off").as_str()), 8746 ); 8747 assert!(cmd_success); 8748 thread::sleep(std::time::Duration::new(5, 0)); 8749 let out = windows_guest.disk_file_read(fname); 8750 assert_eq!(data, out.trim()); 8751 8752 // Intentionally no unmount, it'll happen at shutdown. 
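            // The shutdown is issued from inside the guest over SSH
            // ("shutdown /s /t 0"); the host side below then waits for the
            // cloud-hypervisor process to exit.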
8753 8754 windows_guest.shutdown(); 8755 }); 8756 8757 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8758 let _ = child.kill(); 8759 let output = child.wait_with_output().unwrap(); 8760 8761 let _ = child_dnsmasq.kill(); 8762 let _ = child_dnsmasq.wait(); 8763 8764 handle_child_output(r, &output); 8765 } 8766 8767 #[test] 8768 #[ignore = "See #6037"] 8769 #[cfg(not(feature = "mshv"))] 8770 #[cfg(not(target_arch = "aarch64"))] 8771 fn test_windows_guest_disk_hotplug_multi() { 8772 let windows_guest = WindowsGuest::new(); 8773 8774 let mut ovmf_path = dirs::home_dir().unwrap(); 8775 ovmf_path.push("workloads"); 8776 ovmf_path.push(OVMF_NAME); 8777 8778 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8779 let api_socket = temp_api_path(&tmp_dir); 8780 8781 let mut child = GuestCommand::new(windows_guest.guest()) 8782 .args(["--api-socket", &api_socket]) 8783 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8784 .args(["--memory", "size=2G"]) 8785 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8786 .args(["--serial", "tty"]) 8787 .args(["--console", "off"]) 8788 .default_disks() 8789 .default_net() 8790 .capture_output() 8791 .spawn() 8792 .unwrap(); 8793 8794 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8795 8796 // Predefined data to used at various test stages 8797 let disk_test_data: [[String; 4]; 2] = [ 8798 [ 8799 "_disk2".to_string(), 8800 windows_guest.disk_new(WindowsGuest::FS_FAT, 123), 8801 "d:\\world".to_string(), 8802 "hello".to_string(), 8803 ], 8804 [ 8805 "_disk3".to_string(), 8806 windows_guest.disk_new(WindowsGuest::FS_NTFS, 333), 8807 "e:\\hello".to_string(), 8808 "world".to_string(), 8809 ], 8810 ]; 8811 8812 let r = std::panic::catch_unwind(|| { 8813 // Wait to make sure Windows boots up 8814 assert!(windows_guest.wait_for_boot()); 8815 8816 // Initially present disk device 8817 let disk_num = 1; 8818 assert_eq!(windows_guest.disk_count(), disk_num); 8819 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8820 8821 for it in &disk_test_data { 8822 let disk_id = it[0].as_str(); 8823 let disk = it[1].as_str(); 8824 // Hotplug disk device 8825 let (cmd_success, cmd_output) = remote_command_w_output( 8826 &api_socket, 8827 "add-disk", 8828 Some(format!("path={disk},readonly=off").as_str()), 8829 ); 8830 assert!(cmd_success); 8831 assert!(String::from_utf8_lossy(&cmd_output) 8832 .contains(format!("\"id\":\"{disk_id}\"").as_str())); 8833 thread::sleep(std::time::Duration::new(5, 0)); 8834 // Online disk devices 8835 windows_guest.disks_set_rw(); 8836 windows_guest.disks_online(); 8837 } 8838 // Verify the devices are on the system 8839 let disk_num = (disk_test_data.len() + 1) as u8; 8840 assert_eq!(windows_guest.disk_count(), disk_num); 8841 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8842 8843 // Put test data 8844 for it in &disk_test_data { 8845 let fname = it[2].as_str(); 8846 let data = it[3].as_str(); 8847 windows_guest.disk_file_put(fname, data); 8848 } 8849 8850 // Unmount disk devices 8851 for it in &disk_test_data { 8852 let disk_id = it[0].as_str(); 8853 let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id)); 8854 assert!(cmd_success); 8855 thread::sleep(std::time::Duration::new(5, 0)); 8856 } 8857 8858 // Verify the devices have been removed 8859 let disk_num = 1; 8860 assert_eq!(windows_guest.disk_count(), disk_num); 8861 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8862 8863 // Remount 8864 for it in &disk_test_data { 8865 let disk = it[1].as_str(); 8866 let (cmd_success, 
_cmd_output) = remote_command_w_output( 8867 &api_socket, 8868 "add-disk", 8869 Some(format!("path={disk},readonly=off").as_str()), 8870 ); 8871 assert!(cmd_success); 8872 thread::sleep(std::time::Duration::new(5, 0)); 8873 } 8874 8875 // Check the files exists with the expected contents 8876 for it in &disk_test_data { 8877 let fname = it[2].as_str(); 8878 let data = it[3].as_str(); 8879 let out = windows_guest.disk_file_read(fname); 8880 assert_eq!(data, out.trim()); 8881 } 8882 8883 // Intentionally no unmount, it'll happen at shutdown. 8884 8885 windows_guest.shutdown(); 8886 }); 8887 8888 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8889 let _ = child.kill(); 8890 let output = child.wait_with_output().unwrap(); 8891 8892 let _ = child_dnsmasq.kill(); 8893 let _ = child_dnsmasq.wait(); 8894 8895 handle_child_output(r, &output); 8896 } 8897 8898 #[test] 8899 #[cfg(not(feature = "mshv"))] 8900 #[cfg(not(target_arch = "aarch64"))] 8901 fn test_windows_guest_netdev_multi() { 8902 let windows_guest = WindowsGuest::new(); 8903 8904 let mut ovmf_path = dirs::home_dir().unwrap(); 8905 ovmf_path.push("workloads"); 8906 ovmf_path.push(OVMF_NAME); 8907 8908 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8909 let api_socket = temp_api_path(&tmp_dir); 8910 8911 let mut child = GuestCommand::new(windows_guest.guest()) 8912 .args(["--api-socket", &api_socket]) 8913 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8914 .args(["--memory", "size=4G"]) 8915 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8916 .args(["--serial", "tty"]) 8917 .args(["--console", "off"]) 8918 .default_disks() 8919 // The multi net dev config is borrowed from test_multiple_network_interfaces 8920 .args([ 8921 "--net", 8922 windows_guest.guest().default_net_string().as_str(), 8923 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 8924 "tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 8925 ]) 8926 .capture_output() 8927 .spawn() 8928 .unwrap(); 8929 8930 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8931 8932 let r = std::panic::catch_unwind(|| { 8933 // Wait to make sure Windows boots up 8934 assert!(windows_guest.wait_for_boot()); 8935 8936 let netdev_num = 3; 8937 assert_eq!(windows_guest.netdev_count(), netdev_num); 8938 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8939 8940 let tap_count = exec_host_command_output("ip link | grep -c mytap42"); 8941 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 8942 8943 windows_guest.shutdown(); 8944 }); 8945 8946 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8947 let _ = child.kill(); 8948 let output = child.wait_with_output().unwrap(); 8949 8950 let _ = child_dnsmasq.kill(); 8951 let _ = child_dnsmasq.wait(); 8952 8953 handle_child_output(r, &output); 8954 } 8955 } 8956 8957 #[cfg(target_arch = "x86_64")] 8958 mod sgx { 8959 use crate::*; 8960 8961 #[test] 8962 fn test_sgx() { 8963 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 8964 let jammy = UbuntuDiskConfig::new(jammy_image); 8965 let guest = Guest::new(Box::new(jammy)); 8966 8967 let mut child = GuestCommand::new(&guest) 8968 .args(["--cpus", "boot=1"]) 8969 .args(["--memory", "size=512M"]) 8970 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8971 .default_disks() 8972 .default_net() 8973 .args(["--sgx-epc", "id=epc0,size=64M"]) 8974 .capture_output() 8975 .spawn() 8976 .unwrap(); 8977 8978 let r = std::panic::catch_unwind(|| { 8979 guest.wait_vm_boot(None).unwrap(); 8980 8981 // Check 
if SGX is correctly detected in the guest. 8982 guest.check_sgx_support().unwrap(); 8983 8984 // Validate the SGX EPC section is 64MiB. 8985 assert_eq!( 8986 guest 8987 .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2") 8988 .unwrap() 8989 .trim(), 8990 "0x0000000004000000" 8991 ); 8992 }); 8993 8994 let _ = child.kill(); 8995 let output = child.wait_with_output().unwrap(); 8996 8997 handle_child_output(r, &output); 8998 } 8999 } 9000 9001 #[cfg(target_arch = "x86_64")] 9002 mod vfio { 9003 use crate::*; 9004 9005 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) { 9006 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 9007 let guest = Guest::new(Box::new(jammy)); 9008 let api_socket = temp_api_path(&guest.tmp_dir); 9009 9010 let mut child = GuestCommand::new(&guest) 9011 .args(["--cpus", "boot=4"]) 9012 .args([ 9013 "--memory", 9014 format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(), 9015 ]) 9016 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9017 .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"]) 9018 .args(["--api-socket", &api_socket]) 9019 .default_disks() 9020 .default_net() 9021 .capture_output() 9022 .spawn() 9023 .unwrap(); 9024 9025 let r = std::panic::catch_unwind(|| { 9026 guest.wait_vm_boot(None).unwrap(); 9027 9028 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9029 9030 guest.enable_memory_hotplug(); 9031 9032 // Add RAM to the VM 9033 let desired_ram = 6 << 30; 9034 resize_command(&api_socket, None, Some(desired_ram), None, None); 9035 thread::sleep(std::time::Duration::new(30, 0)); 9036 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9037 9038 // Check the VFIO device works when RAM is increased to 6GiB 9039 guest.check_nvidia_gpu(); 9040 }); 9041 9042 let _ = child.kill(); 9043 let output = child.wait_with_output().unwrap(); 9044 9045 handle_child_output(r, &output); 9046 } 9047 9048 #[test] 9049 fn test_nvidia_card_memory_hotplug_acpi() { 9050 test_nvidia_card_memory_hotplug("acpi") 9051 } 9052 9053 #[test] 9054 fn test_nvidia_card_memory_hotplug_virtio_mem() { 9055 test_nvidia_card_memory_hotplug("virtio-mem") 9056 } 9057 9058 #[test] 9059 fn test_nvidia_card_pci_hotplug() { 9060 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 9061 let guest = Guest::new(Box::new(jammy)); 9062 let api_socket = temp_api_path(&guest.tmp_dir); 9063 9064 let mut child = GuestCommand::new(&guest) 9065 .args(["--cpus", "boot=4"]) 9066 .args(["--memory", "size=4G"]) 9067 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9068 .args(["--api-socket", &api_socket]) 9069 .default_disks() 9070 .default_net() 9071 .capture_output() 9072 .spawn() 9073 .unwrap(); 9074 9075 let r = std::panic::catch_unwind(|| { 9076 guest.wait_vm_boot(None).unwrap(); 9077 9078 // Hotplug the card to the VM 9079 let (cmd_success, cmd_output) = remote_command_w_output( 9080 &api_socket, 9081 "add-device", 9082 Some("id=vfio0,path=/sys/bus/pci/devices/0000:31:00.0/"), 9083 ); 9084 assert!(cmd_success); 9085 assert!(String::from_utf8_lossy(&cmd_output) 9086 .contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}")); 9087 9088 thread::sleep(std::time::Duration::new(10, 0)); 9089 9090 // Check the VFIO device works after hotplug 9091 guest.check_nvidia_gpu(); 9092 }); 9093 9094 let _ = child.kill(); 9095 let output = child.wait_with_output().unwrap(); 9096 9097 handle_child_output(r, &output); 9098 } 9099 9100 #[test] 9101 fn 
test_nvidia_card_reboot() { 9102 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 9103 let guest = Guest::new(Box::new(jammy)); 9104 let api_socket = temp_api_path(&guest.tmp_dir); 9105 9106 let mut child = GuestCommand::new(&guest) 9107 .args(["--cpus", "boot=4"]) 9108 .args(["--memory", "size=4G"]) 9109 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9110 .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"]) 9111 .args(["--api-socket", &api_socket]) 9112 .default_disks() 9113 .default_net() 9114 .capture_output() 9115 .spawn() 9116 .unwrap(); 9117 9118 let r = std::panic::catch_unwind(|| { 9119 guest.wait_vm_boot(None).unwrap(); 9120 9121 // Check the VFIO device works after boot 9122 guest.check_nvidia_gpu(); 9123 9124 guest.reboot_linux(0, None); 9125 9126 // Check the VFIO device works after reboot 9127 guest.check_nvidia_gpu(); 9128 }); 9129 9130 let _ = child.kill(); 9131 let output = child.wait_with_output().unwrap(); 9132 9133 handle_child_output(r, &output); 9134 } 9135 } 9136 9137 mod live_migration { 9138 use crate::*; 9139 9140 fn start_live_migration( 9141 migration_socket: &str, 9142 src_api_socket: &str, 9143 dest_api_socket: &str, 9144 local: bool, 9145 ) -> bool { 9146 // Start to receive migration from the destination VM 9147 let mut receive_migration = Command::new(clh_command("ch-remote")) 9148 .args([ 9149 &format!("--api-socket={dest_api_socket}"), 9150 "receive-migration", 9151 &format! {"unix:{migration_socket}"}, 9152 ]) 9153 .stderr(Stdio::piped()) 9154 .stdout(Stdio::piped()) 9155 .spawn() 9156 .unwrap(); 9157 // Give it '1s' to make sure the 'migration_socket' file is properly created 9158 thread::sleep(std::time::Duration::new(1, 0)); 9159 // Start to send migration from the source VM 9160 9161 let mut args = [ 9162 format!("--api-socket={}", &src_api_socket), 9163 "send-migration".to_string(), 9164 format! 
{"unix:{migration_socket}"}, 9165 ] 9166 .to_vec(); 9167 9168 if local { 9169 args.insert(2, "--local".to_string()); 9170 } 9171 9172 let mut send_migration = Command::new(clh_command("ch-remote")) 9173 .args(&args) 9174 .stderr(Stdio::piped()) 9175 .stdout(Stdio::piped()) 9176 .spawn() 9177 .unwrap(); 9178 9179 // The 'send-migration' command should be executed successfully within the given timeout 9180 let send_success = if let Some(status) = send_migration 9181 .wait_timeout(std::time::Duration::from_secs(30)) 9182 .unwrap() 9183 { 9184 status.success() 9185 } else { 9186 false 9187 }; 9188 9189 if !send_success { 9190 let _ = send_migration.kill(); 9191 let output = send_migration.wait_with_output().unwrap(); 9192 eprintln!( 9193 "\n\n==== Start 'send_migration' output ==== \ 9194 \n\n---stdout---\n{}\n\n---stderr---\n{} \ 9195 \n\n==== End 'send_migration' output ====\n\n", 9196 String::from_utf8_lossy(&output.stdout), 9197 String::from_utf8_lossy(&output.stderr) 9198 ); 9199 } 9200 9201 // The 'receive-migration' command should be executed successfully within the given timeout 9202 let receive_success = if let Some(status) = receive_migration 9203 .wait_timeout(std::time::Duration::from_secs(30)) 9204 .unwrap() 9205 { 9206 status.success() 9207 } else { 9208 false 9209 }; 9210 9211 if !receive_success { 9212 let _ = receive_migration.kill(); 9213 let output = receive_migration.wait_with_output().unwrap(); 9214 eprintln!( 9215 "\n\n==== Start 'receive_migration' output ==== \ 9216 \n\n---stdout---\n{}\n\n---stderr---\n{} \ 9217 \n\n==== End 'receive_migration' output ====\n\n", 9218 String::from_utf8_lossy(&output.stdout), 9219 String::from_utf8_lossy(&output.stderr) 9220 ); 9221 } 9222 9223 send_success && receive_success 9224 } 9225 9226 fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! { 9227 let mut src_vm = src_vm; 9228 let mut dest_vm = dest_vm; 9229 9230 let _ = src_vm.kill(); 9231 let src_output = src_vm.wait_with_output().unwrap(); 9232 eprintln!( 9233 "\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====", 9234 String::from_utf8_lossy(&src_output.stdout) 9235 ); 9236 eprintln!( 9237 "\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====", 9238 String::from_utf8_lossy(&src_output.stderr) 9239 ); 9240 let _ = dest_vm.kill(); 9241 let dest_output = dest_vm.wait_with_output().unwrap(); 9242 eprintln!( 9243 "\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====", 9244 String::from_utf8_lossy(&dest_output.stdout) 9245 ); 9246 eprintln!( 9247 "\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====", 9248 String::from_utf8_lossy(&dest_output.stderr) 9249 ); 9250 9251 if let Some(ovs_vm) = ovs_vm { 9252 let mut ovs_vm = ovs_vm; 9253 let _ = ovs_vm.kill(); 9254 let ovs_output = ovs_vm.wait_with_output().unwrap(); 9255 eprintln!( 9256 "\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====", 9257 String::from_utf8_lossy(&ovs_output.stdout) 9258 ); 9259 eprintln!( 9260 "\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====", 9261 String::from_utf8_lossy(&ovs_output.stderr) 9262 ); 9263 9264 cleanup_ovs_dpdk(); 9265 } 9266 9267 panic!("Test failed: {message}") 9268 } 9269 9270 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 9271 // same host. It ensures the following behaviors: 9272 // 1. 
The source VM is up and functional (including various virtio-devices are working properly); 9273 // 2. The 'send-migration' and 'receive-migration' command finished successfully; 9274 // 3. The source VM terminated gracefully after live migration; 9275 // 4. The destination VM is functional (including various virtio-devices are working properly) after 9276 // live migration; 9277 // Note: This test does not use vsock as we can't create two identical vsock on the same host. 9278 fn _test_live_migration(upgrade_test: bool, local: bool) { 9279 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9280 let guest = Guest::new(Box::new(focal)); 9281 let kernel_path = direct_kernel_boot_path(); 9282 let console_text = String::from("On a branch floating down river a cricket, singing."); 9283 let net_id = "net123"; 9284 let net_params = format!( 9285 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9286 net_id, guest.network.guest_mac, guest.network.host_ip 9287 ); 9288 9289 let memory_param: &[&str] = if local { 9290 &["--memory", "size=4G,shared=on"] 9291 } else { 9292 &["--memory", "size=4G"] 9293 }; 9294 9295 let boot_vcpus = 2; 9296 let max_vcpus = 4; 9297 9298 let pmem_temp_file = TempFile::new().unwrap(); 9299 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9300 std::process::Command::new("mkfs.ext4") 9301 .arg(pmem_temp_file.as_path()) 9302 .output() 9303 .expect("Expect creating disk image to succeed"); 9304 let pmem_path = String::from("/dev/pmem0"); 9305 9306 // Start the source VM 9307 let src_vm_path = if !upgrade_test { 9308 clh_command("cloud-hypervisor") 9309 } else { 9310 cloud_hypervisor_release_path() 9311 }; 9312 let src_api_socket = temp_api_path(&guest.tmp_dir); 9313 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9314 src_vm_cmd 9315 .args([ 9316 "--cpus", 9317 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9318 ]) 9319 .args(memory_param) 9320 .args(["--kernel", kernel_path.to_str().unwrap()]) 9321 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9322 .default_disks() 9323 .args(["--net", net_params.as_str()]) 9324 .args(["--api-socket", &src_api_socket]) 9325 .args([ 9326 "--pmem", 9327 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9328 ]); 9329 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9330 9331 // Start the destination VM 9332 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9333 dest_api_socket.push_str(".dest"); 9334 let mut dest_child = GuestCommand::new(&guest) 9335 .args(["--api-socket", &dest_api_socket]) 9336 .capture_output() 9337 .spawn() 9338 .unwrap(); 9339 9340 let r = std::panic::catch_unwind(|| { 9341 guest.wait_vm_boot(None).unwrap(); 9342 9343 // Make sure the source VM is functional 9344 // Check the number of vCPUs 9345 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9346 9347 // Check the guest RAM 9348 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9349 9350 // Check the guest virtio-devices, e.g. block, rng, console, and net 9351 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9352 9353 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9354 // to make sure that removing and adding back the virtio-net device does 9355 // not break the live-migration support for virtio-pci. 
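            // Roughly equivalent to running:
            //   ch-remote --api-socket=<src> remove-device net123
            //   ch-remote --api-socket=<src> add-net id=net123,tap=,mac=...,ip=...,mask=255.255.255.0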
            #[cfg(target_arch = "x86_64")]
            {
                assert!(remote_command(
                    &src_api_socket,
                    "remove-device",
                    Some(net_id),
                ));
                thread::sleep(std::time::Duration::new(10, 0));

                // Plug the virtio-net device again
                assert!(remote_command(
                    &src_api_socket,
                    "add-net",
                    Some(net_params.as_str()),
                ));
                thread::sleep(std::time::Duration::new(10, 0));
            }

            // Start the live-migration
            let migration_socket = String::from(
                guest
                    .tmp_dir
                    .as_path()
                    .join("live-migration.sock")
                    .to_str()
                    .unwrap(),
            );

            assert!(
                start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
                "Unsuccessful command: 'send-migration' or 'receive-migration'."
            );
        });

        // Check and report any errors that occurred during the live-migration
        if r.is_err() {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "Error occurred during live-migration",
            );
        }

        // Check the source VM has terminated successfully (give it '3s' to settle)
        thread::sleep(std::time::Duration::new(3, 0));
        if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "source VM was not terminated successfully.",
            );
        };

        // Post live-migration check to make sure the destination VM is functional
        let r = std::panic::catch_unwind(|| {
            // Perform the same checks to validate the VM has been properly migrated
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
        });

        // Clean-up the destination VM and make sure it terminated correctly
        let _ = dest_child.kill();
        let dest_output = dest_child.wait_with_output().unwrap();
        handle_child_output(r, &dest_output);

        // Check the destination VM has the expected 'console_text' from its output
        let r = std::panic::catch_unwind(|| {
            assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
        });
        handle_child_output(r, &dest_output);
    }

    fn _test_live_migration_balloon(upgrade_test: bool, local: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let kernel_path = direct_kernel_boot_path();
        let console_text = String::from("On a branch floating down river a cricket, singing.");
        let net_id = "net123";
        let net_params = format!(
            "id={},tap=,mac={},ip={},mask=255.255.255.0",
            net_id, guest.network.guest_mac, guest.network.host_ip
        );

        let memory_param: &[&str] = if local {
            &[
                "--memory",
                "size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on",
                "--balloon",
                "size=0",
            ]
        } else {
            &[
                "--memory",
                "size=4G,hotplug_method=virtio-mem,hotplug_size=8G",
                "--balloon",
                "size=0",
            ]
        };

        let boot_vcpus = 2;
        let max_vcpus = 4;

        let pmem_temp_file = TempFile::new().unwrap();
        pmem_temp_file.as_file().set_len(128 << 20).unwrap();
        std::process::Command::new("mkfs.ext4")
            .arg(pmem_temp_file.as_path())
            .output()
            .expect("Expect creating disk image to succeed");
        let pmem_path = String::from("/dev/pmem0");

        // Start the source VM
        let
src_vm_path = if !upgrade_test { 9472 clh_command("cloud-hypervisor") 9473 } else { 9474 cloud_hypervisor_release_path() 9475 }; 9476 let src_api_socket = temp_api_path(&guest.tmp_dir); 9477 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9478 src_vm_cmd 9479 .args([ 9480 "--cpus", 9481 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9482 ]) 9483 .args(memory_param) 9484 .args(["--kernel", kernel_path.to_str().unwrap()]) 9485 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9486 .default_disks() 9487 .args(["--net", net_params.as_str()]) 9488 .args(["--api-socket", &src_api_socket]) 9489 .args([ 9490 "--pmem", 9491 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9492 ]); 9493 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9494 9495 // Start the destination VM 9496 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9497 dest_api_socket.push_str(".dest"); 9498 let mut dest_child = GuestCommand::new(&guest) 9499 .args(["--api-socket", &dest_api_socket]) 9500 .capture_output() 9501 .spawn() 9502 .unwrap(); 9503 9504 let r = std::panic::catch_unwind(|| { 9505 guest.wait_vm_boot(None).unwrap(); 9506 9507 // Make sure the source VM is functional 9508 // Check the number of vCPUs 9509 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9510 9511 // Check the guest RAM 9512 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9513 // Increase the guest RAM 9514 resize_command(&src_api_socket, None, Some(6 << 30), None, None); 9515 thread::sleep(std::time::Duration::new(5, 0)); 9516 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9517 // Use balloon to remove RAM from the VM 9518 resize_command(&src_api_socket, None, None, Some(1 << 30), None); 9519 thread::sleep(std::time::Duration::new(5, 0)); 9520 let total_memory = guest.get_total_memory().unwrap_or_default(); 9521 assert!(total_memory > 4_800_000); 9522 assert!(total_memory < 5_760_000); 9523 9524 // Check the guest virtio-devices, e.g. block, rng, console, and net 9525 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9526 9527 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9528 // to make sure that removing and adding back the virtio-net device does 9529 // not break the live-migration support for virtio-pci. 9530 #[cfg(target_arch = "x86_64")] 9531 { 9532 assert!(remote_command( 9533 &src_api_socket, 9534 "remove-device", 9535 Some(net_id), 9536 )); 9537 thread::sleep(std::time::Duration::new(10, 0)); 9538 9539 // Plug the virtio-net device again 9540 assert!(remote_command( 9541 &src_api_socket, 9542 "add-net", 9543 Some(net_params.as_str()), 9544 )); 9545 thread::sleep(std::time::Duration::new(10, 0)); 9546 } 9547 9548 // Start the live-migration 9549 let migration_socket = String::from( 9550 guest 9551 .tmp_dir 9552 .as_path() 9553 .join("live-migration.sock") 9554 .to_str() 9555 .unwrap(), 9556 ); 9557 9558 assert!( 9559 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9560 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
9561 ); 9562 }); 9563 9564 // Check and report any errors occurred during the live-migration 9565 if r.is_err() { 9566 print_and_panic( 9567 src_child, 9568 dest_child, 9569 None, 9570 "Error occurred during live-migration", 9571 ); 9572 } 9573 9574 // Check the source vm has been terminated successful (give it '3s' to settle) 9575 thread::sleep(std::time::Duration::new(3, 0)); 9576 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9577 print_and_panic( 9578 src_child, 9579 dest_child, 9580 None, 9581 "source VM was not terminated successfully.", 9582 ); 9583 }; 9584 9585 // Post live-migration check to make sure the destination VM is functional 9586 let r = std::panic::catch_unwind(|| { 9587 // Perform same checks to validate VM has been properly migrated 9588 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9589 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9590 9591 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9592 9593 // Perform checks on guest RAM using balloon 9594 let total_memory = guest.get_total_memory().unwrap_or_default(); 9595 assert!(total_memory > 4_800_000); 9596 assert!(total_memory < 5_760_000); 9597 // Deflate balloon to restore entire RAM to the VM 9598 resize_command(&dest_api_socket, None, None, Some(0), None); 9599 thread::sleep(std::time::Duration::new(5, 0)); 9600 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9601 // Decrease guest RAM with virtio-mem 9602 resize_command(&dest_api_socket, None, Some(5 << 30), None, None); 9603 thread::sleep(std::time::Duration::new(5, 0)); 9604 let total_memory = guest.get_total_memory().unwrap_or_default(); 9605 assert!(total_memory > 4_800_000); 9606 assert!(total_memory < 5_760_000); 9607 }); 9608 9609 // Clean-up the destination VM and make sure it terminated correctly 9610 let _ = dest_child.kill(); 9611 let dest_output = dest_child.wait_with_output().unwrap(); 9612 handle_child_output(r, &dest_output); 9613 9614 // Check the destination VM has the expected 'console_text' from its output 9615 let r = std::panic::catch_unwind(|| { 9616 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9617 }); 9618 handle_child_output(r, &dest_output); 9619 } 9620 9621 fn _test_live_migration_numa(upgrade_test: bool, local: bool) { 9622 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9623 let guest = Guest::new(Box::new(focal)); 9624 let kernel_path = direct_kernel_boot_path(); 9625 let console_text = String::from("On a branch floating down river a cricket, singing."); 9626 let net_id = "net123"; 9627 let net_params = format!( 9628 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9629 net_id, guest.network.guest_mac, guest.network.host_ip 9630 ); 9631 9632 let memory_param: &[&str] = if local { 9633 &[ 9634 "--memory", 9635 "size=0,hotplug_method=virtio-mem,shared=on", 9636 "--memory-zone", 9637 "id=mem0,size=1G,hotplug_size=4G,shared=on", 9638 "id=mem1,size=1G,hotplug_size=4G,shared=on", 9639 "id=mem2,size=2G,hotplug_size=4G,shared=on", 9640 "--numa", 9641 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9642 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9643 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9644 ] 9645 } else { 9646 &[ 9647 "--memory", 9648 "size=0,hotplug_method=virtio-mem", 9649 "--memory-zone", 9650 "id=mem0,size=1G,hotplug_size=4G", 9651 "id=mem1,size=1G,hotplug_size=4G", 9652 
"id=mem2,size=2G,hotplug_size=4G", 9653 "--numa", 9654 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9655 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9656 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9657 ] 9658 }; 9659 9660 let boot_vcpus = 6; 9661 let max_vcpus = 12; 9662 9663 let pmem_temp_file = TempFile::new().unwrap(); 9664 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9665 std::process::Command::new("mkfs.ext4") 9666 .arg(pmem_temp_file.as_path()) 9667 .output() 9668 .expect("Expect creating disk image to succeed"); 9669 let pmem_path = String::from("/dev/pmem0"); 9670 9671 // Start the source VM 9672 let src_vm_path = if !upgrade_test { 9673 clh_command("cloud-hypervisor") 9674 } else { 9675 cloud_hypervisor_release_path() 9676 }; 9677 let src_api_socket = temp_api_path(&guest.tmp_dir); 9678 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9679 src_vm_cmd 9680 .args([ 9681 "--cpus", 9682 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9683 ]) 9684 .args(memory_param) 9685 .args(["--kernel", kernel_path.to_str().unwrap()]) 9686 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9687 .default_disks() 9688 .args(["--net", net_params.as_str()]) 9689 .args(["--api-socket", &src_api_socket]) 9690 .args([ 9691 "--pmem", 9692 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9693 ]); 9694 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9695 9696 // Start the destination VM 9697 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9698 dest_api_socket.push_str(".dest"); 9699 let mut dest_child = GuestCommand::new(&guest) 9700 .args(["--api-socket", &dest_api_socket]) 9701 .capture_output() 9702 .spawn() 9703 .unwrap(); 9704 9705 let r = std::panic::catch_unwind(|| { 9706 guest.wait_vm_boot(None).unwrap(); 9707 9708 // Make sure the source VM is functional 9709 // Check the number of vCPUs 9710 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9711 9712 // Check the guest RAM 9713 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 9714 9715 // Check the guest virtio-devices, e.g. block, rng, console, and net 9716 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9717 9718 // Check the NUMA parameters are applied correctly and resize 9719 // each zone to test the case where we migrate a VM with the 9720 // virtio-mem regions being used. 9721 { 9722 guest.check_numa_common( 9723 Some(&[960_000, 960_000, 1_920_000]), 9724 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9725 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9726 ); 9727 9728 // AArch64 currently does not support hotplug, and therefore we only 9729 // test hotplug-related function on x86_64 here. 9730 #[cfg(target_arch = "x86_64")] 9731 { 9732 guest.enable_memory_hotplug(); 9733 9734 // Resize every memory zone and check each associated NUMA node 9735 // has been assigned the right amount of memory. 
9736 resize_zone_command(&src_api_socket, "mem0", "2G"); 9737 resize_zone_command(&src_api_socket, "mem1", "2G"); 9738 resize_zone_command(&src_api_socket, "mem2", "3G"); 9739 thread::sleep(std::time::Duration::new(5, 0)); 9740 9741 guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None); 9742 } 9743 } 9744 9745 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9746 // to make sure that removing and adding back the virtio-net device does 9747 // not break the live-migration support for virtio-pci. 9748 #[cfg(target_arch = "x86_64")] 9749 { 9750 assert!(remote_command( 9751 &src_api_socket, 9752 "remove-device", 9753 Some(net_id), 9754 )); 9755 thread::sleep(std::time::Duration::new(10, 0)); 9756 9757 // Plug the virtio-net device again 9758 assert!(remote_command( 9759 &src_api_socket, 9760 "add-net", 9761 Some(net_params.as_str()), 9762 )); 9763 thread::sleep(std::time::Duration::new(10, 0)); 9764 } 9765 9766 // Start the live-migration 9767 let migration_socket = String::from( 9768 guest 9769 .tmp_dir 9770 .as_path() 9771 .join("live-migration.sock") 9772 .to_str() 9773 .unwrap(), 9774 ); 9775 9776 assert!( 9777 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9778 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9779 ); 9780 }); 9781 9782 // Check and report any errors occurred during the live-migration 9783 if r.is_err() { 9784 print_and_panic( 9785 src_child, 9786 dest_child, 9787 None, 9788 "Error occurred during live-migration", 9789 ); 9790 } 9791 9792 // Check the source vm has been terminated successful (give it '3s' to settle) 9793 thread::sleep(std::time::Duration::new(3, 0)); 9794 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9795 print_and_panic( 9796 src_child, 9797 dest_child, 9798 None, 9799 "source VM was not terminated successfully.", 9800 ); 9801 }; 9802 9803 // Post live-migration check to make sure the destination VM is functional 9804 let r = std::panic::catch_unwind(|| { 9805 // Perform same checks to validate VM has been properly migrated 9806 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9807 #[cfg(target_arch = "x86_64")] 9808 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 9809 #[cfg(target_arch = "aarch64")] 9810 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9811 9812 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9813 9814 // Perform NUMA related checks 9815 { 9816 #[cfg(target_arch = "aarch64")] 9817 { 9818 guest.check_numa_common( 9819 Some(&[960_000, 960_000, 1_920_000]), 9820 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9821 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9822 ); 9823 } 9824 9825 // AArch64 currently does not support hotplug, and therefore we only 9826 // test hotplug-related function on x86_64 here. 9827 #[cfg(target_arch = "x86_64")] 9828 { 9829 guest.check_numa_common( 9830 Some(&[1_920_000, 1_920_000, 2_880_000]), 9831 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9832 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9833 ); 9834 9835 guest.enable_memory_hotplug(); 9836 9837 // Resize every memory zone and check each associated NUMA node 9838 // has been assigned the right amount of memory. 
9839 resize_zone_command(&dest_api_socket, "mem0", "4G"); 9840 resize_zone_command(&dest_api_socket, "mem1", "4G"); 9841 resize_zone_command(&dest_api_socket, "mem2", "4G"); 9842 // Resize to the maximum amount of CPUs and check each NUMA 9843 // node has been assigned the right CPUs set. 9844 resize_command(&dest_api_socket, Some(max_vcpus), None, None, None); 9845 thread::sleep(std::time::Duration::new(5, 0)); 9846 9847 guest.check_numa_common( 9848 Some(&[3_840_000, 3_840_000, 3_840_000]), 9849 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 9850 None, 9851 ); 9852 } 9853 } 9854 }); 9855 9856 // Clean-up the destination VM and make sure it terminated correctly 9857 let _ = dest_child.kill(); 9858 let dest_output = dest_child.wait_with_output().unwrap(); 9859 handle_child_output(r, &dest_output); 9860 9861 // Check the destination VM has the expected 'console_text' from its output 9862 let r = std::panic::catch_unwind(|| { 9863 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9864 }); 9865 handle_child_output(r, &dest_output); 9866 } 9867 9868 fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) { 9869 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9870 let guest = Guest::new(Box::new(focal)); 9871 let kernel_path = direct_kernel_boot_path(); 9872 let console_text = String::from("On a branch floating down river a cricket, singing."); 9873 let net_id = "net123"; 9874 let net_params = format!( 9875 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9876 net_id, guest.network.guest_mac, guest.network.host_ip 9877 ); 9878 9879 let memory_param: &[&str] = if local { 9880 &["--memory", "size=4G,shared=on"] 9881 } else { 9882 &["--memory", "size=4G"] 9883 }; 9884 9885 let boot_vcpus = 2; 9886 let max_vcpus = 4; 9887 9888 let pmem_temp_file = TempFile::new().unwrap(); 9889 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9890 std::process::Command::new("mkfs.ext4") 9891 .arg(pmem_temp_file.as_path()) 9892 .output() 9893 .expect("Expect creating disk image to succeed"); 9894 let pmem_path = String::from("/dev/pmem0"); 9895 9896 // Start the source VM 9897 let src_vm_path = if !upgrade_test { 9898 clh_command("cloud-hypervisor") 9899 } else { 9900 cloud_hypervisor_release_path() 9901 }; 9902 let src_api_socket = temp_api_path(&guest.tmp_dir); 9903 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9904 src_vm_cmd 9905 .args([ 9906 "--cpus", 9907 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9908 ]) 9909 .args(memory_param) 9910 .args(["--kernel", kernel_path.to_str().unwrap()]) 9911 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9912 .default_disks() 9913 .args(["--net", net_params.as_str()]) 9914 .args(["--api-socket", &src_api_socket]) 9915 .args([ 9916 "--pmem", 9917 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9918 ]) 9919 .args(["--watchdog"]); 9920 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9921 9922 // Start the destination VM 9923 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9924 dest_api_socket.push_str(".dest"); 9925 let mut dest_child = GuestCommand::new(&guest) 9926 .args(["--api-socket", &dest_api_socket]) 9927 .capture_output() 9928 .spawn() 9929 .unwrap(); 9930 9931 let r = std::panic::catch_unwind(|| { 9932 guest.wait_vm_boot(None).unwrap(); 9933 9934 // Make sure the source VM is functional 9935 // Check the number of vCPUs 9936 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9937 // 
Check the guest RAM 9938 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9939 // Check the guest virtio-devices, e.g. block, rng, console, and net 9940 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9941 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9942 // to make sure that removing and adding back the virtio-net device does 9943 // not break the live-migration support for virtio-pci. 9944 #[cfg(target_arch = "x86_64")] 9945 { 9946 assert!(remote_command( 9947 &src_api_socket, 9948 "remove-device", 9949 Some(net_id), 9950 )); 9951 thread::sleep(std::time::Duration::new(10, 0)); 9952 9953 // Plug the virtio-net device again 9954 assert!(remote_command( 9955 &src_api_socket, 9956 "add-net", 9957 Some(net_params.as_str()), 9958 )); 9959 thread::sleep(std::time::Duration::new(10, 0)); 9960 } 9961 9962 // Enable the watchdog and ensure it's functional 9963 let expected_reboot_count = 1; 9964 // Enable the watchdog with a 15s timeout 9965 enable_guest_watchdog(&guest, 15); 9966 9967 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9968 assert_eq!( 9969 guest 9970 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 9971 .unwrap() 9972 .trim() 9973 .parse::<u32>() 9974 .unwrap_or_default(), 9975 1 9976 ); 9977 // Allow some normal time to elapse to check we don't get spurious reboots 9978 thread::sleep(std::time::Duration::new(40, 0)); 9979 // Check no reboot 9980 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9981 9982 // Start the live-migration 9983 let migration_socket = String::from( 9984 guest 9985 .tmp_dir 9986 .as_path() 9987 .join("live-migration.sock") 9988 .to_str() 9989 .unwrap(), 9990 ); 9991 9992 assert!( 9993 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9994 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9995 ); 9996 }); 9997 9998 // Check and report any errors that occurred during the live-migration 9999 if r.is_err() { 10000 print_and_panic( 10001 src_child, 10002 dest_child, 10003 None, 10004 "Error occurred during live-migration", 10005 ); 10006 } 10007 10008 // Check the source VM has been terminated successfully (give it '3s' to settle) 10009 thread::sleep(std::time::Duration::new(3, 0)); 10010 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 10011 print_and_panic( 10012 src_child, 10013 dest_child, 10014 None, 10015 "source VM was not terminated successfully.", 10016 ); 10017 }; 10018 10019 // Post live-migration check to make sure the destination VM is functional 10020 let r = std::panic::catch_unwind(|| { 10021 // Perform same checks to validate VM has been properly migrated 10022 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 10023 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 10024 10025 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 10026 10027 // Perform checks on watchdog 10028 let mut expected_reboot_count = 1; 10029 10030 // Allow some normal time to elapse to check we don't get spurious reboots 10031 thread::sleep(std::time::Duration::new(40, 0)); 10032 // Check no reboot 10033 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 10034 10035 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns.
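// ('s' asks the kernel to sync filesystems and 'c' forces a crash; the crashed guest then
// stays down until the watchdog expires, which is what produces the extra reboot counted
// below.)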
10036 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 10037 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 10038 guest.wait_vm_boot(Some(50)).unwrap(); 10039 // Check a reboot is triggered by the watchdog 10040 expected_reboot_count += 1; 10041 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 10042 10043 #[cfg(target_arch = "x86_64")] 10044 { 10045 // Now pause the VM and remain offline for 30s 10046 assert!(remote_command(&dest_api_socket, "pause", None)); 10047 thread::sleep(std::time::Duration::new(30, 0)); 10048 assert!(remote_command(&dest_api_socket, "resume", None)); 10049 10050 // Check no reboot 10051 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 10052 } 10053 }); 10054 10055 // Clean-up the destination VM and make sure it terminated correctly 10056 let _ = dest_child.kill(); 10057 let dest_output = dest_child.wait_with_output().unwrap(); 10058 handle_child_output(r, &dest_output); 10059 10060 // Check the destination VM has the expected 'console_text' from its output 10061 let r = std::panic::catch_unwind(|| { 10062 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 10063 }); 10064 handle_child_output(r, &dest_output); 10065 } 10066 10067 fn _test_live_migration_ovs_dpdk(upgrade_test: bool, local: bool) { 10068 let ovs_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10069 let ovs_guest = Guest::new(Box::new(ovs_focal)); 10070 10071 let migration_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10072 let migration_guest = Guest::new(Box::new(migration_focal)); 10073 let src_api_socket = temp_api_path(&migration_guest.tmp_dir); 10074 10075 // Start two VMs that are connected through ovs-dpdk and one of the VMs is the source VM for live-migration 10076 let (mut ovs_child, mut src_child) = 10077 setup_ovs_dpdk_guests(&ovs_guest, &migration_guest, &src_api_socket, upgrade_test); 10078 10079 // Start the destination VM 10080 let mut dest_api_socket = temp_api_path(&migration_guest.tmp_dir); 10081 dest_api_socket.push_str(".dest"); 10082 let mut dest_child = GuestCommand::new(&migration_guest) 10083 .args(["--api-socket", &dest_api_socket]) 10084 .capture_output() 10085 .spawn() 10086 .unwrap(); 10087 10088 let r = std::panic::catch_unwind(|| { 10089 // Give it '1s' to make sure the 'dest_api_socket' file is properly created 10090 thread::sleep(std::time::Duration::new(1, 0)); 10091 10092 // Start the live-migration 10093 let migration_socket = String::from( 10094 migration_guest 10095 .tmp_dir 10096 .as_path() 10097 .join("live-migration.sock") 10098 .to_str() 10099 .unwrap(), 10100 ); 10101 10102 assert!( 10103 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 10104 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
10105 ); 10106 }); 10107 10108 // Check and report any errors occurred during the live-migration 10109 if r.is_err() { 10110 print_and_panic( 10111 src_child, 10112 dest_child, 10113 Some(ovs_child), 10114 "Error occurred during live-migration", 10115 ); 10116 } 10117 10118 // Check the source vm has been terminated successful (give it '3s' to settle) 10119 thread::sleep(std::time::Duration::new(3, 0)); 10120 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 10121 print_and_panic( 10122 src_child, 10123 dest_child, 10124 Some(ovs_child), 10125 "source VM was not terminated successfully.", 10126 ); 10127 }; 10128 10129 // Post live-migration check to make sure the destination VM is functional 10130 let r = std::panic::catch_unwind(|| { 10131 // Perform same checks to validate VM has been properly migrated 10132 // Spawn a new netcat listener in the OVS VM 10133 let guest_ip = ovs_guest.network.guest_ip.clone(); 10134 thread::spawn(move || { 10135 ssh_command_ip( 10136 "nc -l 12345", 10137 &guest_ip, 10138 DEFAULT_SSH_RETRIES, 10139 DEFAULT_SSH_TIMEOUT, 10140 ) 10141 .unwrap(); 10142 }); 10143 10144 // Wait for the server to be listening 10145 thread::sleep(std::time::Duration::new(5, 0)); 10146 10147 // And check the connection is still functional after live-migration 10148 migration_guest 10149 .ssh_command("nc -vz 172.100.0.1 12345") 10150 .unwrap(); 10151 }); 10152 10153 // Clean-up the destination VM and OVS VM, and make sure they terminated correctly 10154 let _ = dest_child.kill(); 10155 let _ = ovs_child.kill(); 10156 let dest_output = dest_child.wait_with_output().unwrap(); 10157 let ovs_output = ovs_child.wait_with_output().unwrap(); 10158 10159 cleanup_ovs_dpdk(); 10160 10161 handle_child_output(r, &dest_output); 10162 handle_child_output(Ok(()), &ovs_output); 10163 } 10164 10165 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 10166 // same host with Landlock enabled on both VMs. The test validates the following: 10167 // 1. The source VM is up and functional 10168 // 2. Ensure Landlock is enabled on source VM by hotplugging a disk. As the path for this 10169 // disk is not known to the source VM this step will fail. 10170 // 3. The 'send-migration' and 'receive-migration' command finished successfully; 10171 // 4. The source VM terminated gracefully after live migration; 10172 // 5. The destination VM is functional after live migration; 10173 // 6. Ensure Landlock is enabled on destination VM by hotplugging a disk. As the path for 10174 // this disk is not known to the destination VM this step will fail. 
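// Note: steps 2 and 6 are expected to fail because the hot-plugged disk path
// (~/workloads/blk.img) is not covered by the `--landlock-rules`, which only grant rw
// access to the guest's temporary directory, so Landlock denies the new path on both VMs.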
10175 fn _test_live_migration_with_landlock() { 10176 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10177 let guest = Guest::new(Box::new(focal)); 10178 let kernel_path = direct_kernel_boot_path(); 10179 let net_id = "net123"; 10180 let net_params = format!( 10181 "id={},tap=,mac={},ip={},mask=255.255.255.0", 10182 net_id, guest.network.guest_mac, guest.network.host_ip 10183 ); 10184 10185 let boot_vcpus = 2; 10186 let max_vcpus = 4; 10187 10188 let mut blk_file_path = dirs::home_dir().unwrap(); 10189 blk_file_path.push("workloads"); 10190 blk_file_path.push("blk.img"); 10191 10192 let src_api_socket = temp_api_path(&guest.tmp_dir); 10193 let mut src_child = GuestCommand::new(&guest) 10194 .args([ 10195 "--cpus", 10196 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 10197 ]) 10198 .args(["--memory", "size=4G,shared=on"]) 10199 .args(["--kernel", kernel_path.to_str().unwrap()]) 10200 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10201 .default_disks() 10202 .args(["--api-socket", &src_api_socket]) 10203 .args(["--landlock"]) 10204 .args(["--net", net_params.as_str()]) 10205 .args([ 10206 "--landlock-rules", 10207 format!("path={:?},access=rw", guest.tmp_dir.as_path()).as_str(), 10208 ]) 10209 .capture_output() 10210 .spawn() 10211 .unwrap(); 10212 10213 // Start the destination VM 10214 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 10215 dest_api_socket.push_str(".dest"); 10216 let mut dest_child = GuestCommand::new(&guest) 10217 .args(["--api-socket", &dest_api_socket]) 10218 .capture_output() 10219 .spawn() 10220 .unwrap(); 10221 10222 let r = std::panic::catch_unwind(|| { 10223 guest.wait_vm_boot(None).unwrap(); 10224 10225 // Make sure the source VM is functional 10226 // Check the number of vCPUs 10227 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 10228 10229 // Check the guest RAM 10230 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 10231 10232 // Check Landlock is enabled by hot-plugging a disk. 10233 assert!(!remote_command( 10234 &src_api_socket, 10235 "add-disk", 10236 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 10237 )); 10238 10239 // Start the live-migration 10240 let migration_socket = String::from( 10241 guest 10242 .tmp_dir 10243 .as_path() 10244 .join("live-migration.sock") 10245 .to_str() 10246 .unwrap(), 10247 ); 10248 10249 assert!( 10250 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, true), 10251 "Unsuccessful command: 'send-migration' or 'receive-migration'."
10252 ); 10253 }); 10254 10255 // Check and report any errors that occurred during the live-migration 10256 if r.is_err() { 10257 print_and_panic( 10258 src_child, 10259 dest_child, 10260 None, 10261 "Error occurred during live-migration", 10262 ); 10263 } 10264 10265 // Check the source VM has been terminated successfully (give it '3s' to settle) 10266 thread::sleep(std::time::Duration::new(3, 0)); 10267 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 10268 print_and_panic( 10269 src_child, 10270 dest_child, 10271 None, 10272 "source VM was not terminated successfully.", 10273 ); 10274 }; 10275 10276 // Post live-migration check to make sure the destination VM is functional 10277 let r = std::panic::catch_unwind(|| { 10278 // Perform same checks to validate VM has been properly migrated 10279 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 10280 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 10281 }); 10282 10283 // Check Landlock is enabled on destination VM by hot-plugging a disk. 10284 assert!(!remote_command( 10285 &dest_api_socket, 10286 "add-disk", 10287 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 10288 )); 10289 10290 // Clean-up the destination VM and make sure it terminated correctly 10291 let _ = dest_child.kill(); 10292 let dest_output = dest_child.wait_with_output().unwrap(); 10293 handle_child_output(r, &dest_output); 10294 } 10295 10296 mod live_migration_parallel { 10297 use super::*; 10298 #[test] 10299 fn test_live_migration_basic() { 10300 _test_live_migration(false, false) 10301 } 10302 10303 #[test] 10304 fn test_live_migration_local() { 10305 _test_live_migration(false, true) 10306 } 10307 10308 #[test] 10309 fn test_live_migration_watchdog() { 10310 _test_live_migration_watchdog(false, false) 10311 } 10312 10313 #[test] 10314 fn test_live_migration_watchdog_local() { 10315 _test_live_migration_watchdog(false, true) 10316 } 10317 10318 #[test] 10319 fn test_live_upgrade_basic() { 10320 _test_live_migration(true, false) 10321 } 10322 10323 #[test] 10324 fn test_live_upgrade_local() { 10325 _test_live_migration(true, true) 10326 } 10327 10328 #[test] 10329 fn test_live_upgrade_watchdog() { 10330 _test_live_migration_watchdog(true, false) 10331 } 10332 10333 #[test] 10334 fn test_live_upgrade_watchdog_local() { 10335 _test_live_migration_watchdog(true, true) 10336 } 10337 #[test] 10338 #[cfg(target_arch = "x86_64")] 10339 fn test_live_migration_with_landlock() { 10340 _test_live_migration_with_landlock() 10341 } 10342 } 10343 10344 mod live_migration_sequential { 10345 use super::*; 10346 10347 // NUMA & balloon live migration tests are large so run sequentially 10348 10349 #[test] 10350 fn test_live_migration_balloon() { 10351 _test_live_migration_balloon(false, false) 10352 } 10353 10354 #[test] 10355 fn test_live_migration_balloon_local() { 10356 _test_live_migration_balloon(false, true) 10357 } 10358 10359 #[test] 10360 fn test_live_upgrade_balloon() { 10361 _test_live_migration_balloon(true, false) 10362 } 10363 10364 #[test] 10365 fn test_live_upgrade_balloon_local() { 10366 _test_live_migration_balloon(true, true) 10367 } 10368 10369 #[test] 10370 #[cfg(not(feature = "mshv"))] 10371 fn test_live_migration_numa() { 10372 _test_live_migration_numa(false, false) 10373 } 10374 10375 #[test] 10376 #[cfg(not(feature = "mshv"))] 10377 fn test_live_migration_numa_local() { 10378 _test_live_migration_numa(false, true) 10379 } 10380 10381 #[test] 10382 #[cfg(not(feature = "mshv"))] 10383 fn
test_live_upgrade_numa() { 10384 _test_live_migration_numa(true, false) 10385 } 10386 10387 #[test] 10388 #[cfg(not(feature = "mshv"))] 10389 fn test_live_upgrade_numa_local() { 10390 _test_live_migration_numa(true, true) 10391 } 10392 10393 // Require to run ovs-dpdk tests sequentially because they rely on the same ovs-dpdk setup 10394 #[test] 10395 #[ignore = "See #5532"] 10396 #[cfg(target_arch = "x86_64")] 10397 #[cfg(not(feature = "mshv"))] 10398 fn test_live_migration_ovs_dpdk() { 10399 _test_live_migration_ovs_dpdk(false, false); 10400 } 10401 10402 #[test] 10403 #[cfg(target_arch = "x86_64")] 10404 #[cfg(not(feature = "mshv"))] 10405 fn test_live_migration_ovs_dpdk_local() { 10406 _test_live_migration_ovs_dpdk(false, true); 10407 } 10408 10409 #[test] 10410 #[ignore = "See #5532"] 10411 #[cfg(target_arch = "x86_64")] 10412 #[cfg(not(feature = "mshv"))] 10413 fn test_live_upgrade_ovs_dpdk() { 10414 _test_live_migration_ovs_dpdk(true, false); 10415 } 10416 10417 #[test] 10418 #[ignore = "See #5532"] 10419 #[cfg(target_arch = "x86_64")] 10420 #[cfg(not(feature = "mshv"))] 10421 fn test_live_upgrade_ovs_dpdk_local() { 10422 _test_live_migration_ovs_dpdk(true, true); 10423 } 10424 } 10425 } 10426 10427 #[cfg(target_arch = "aarch64")] 10428 mod aarch64_acpi { 10429 use crate::*; 10430 10431 #[test] 10432 fn test_simple_launch_acpi() { 10433 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10434 10435 vec![Box::new(focal)].drain(..).for_each(|disk_config| { 10436 let guest = Guest::new(disk_config); 10437 10438 let mut child = GuestCommand::new(&guest) 10439 .args(["--cpus", "boot=1"]) 10440 .args(["--memory", "size=512M"]) 10441 .args(["--kernel", edk2_path().to_str().unwrap()]) 10442 .default_disks() 10443 .default_net() 10444 .args(["--serial", "tty", "--console", "off"]) 10445 .capture_output() 10446 .spawn() 10447 .unwrap(); 10448 10449 let r = std::panic::catch_unwind(|| { 10450 guest.wait_vm_boot(Some(120)).unwrap(); 10451 10452 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 10453 assert!(guest.get_total_memory().unwrap_or_default() > 400_000); 10454 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 10455 }); 10456 10457 let _ = child.kill(); 10458 let output = child.wait_with_output().unwrap(); 10459 10460 handle_child_output(r, &output); 10461 }); 10462 } 10463 10464 #[test] 10465 fn test_guest_numa_nodes_acpi() { 10466 _test_guest_numa_nodes(true); 10467 } 10468 10469 #[test] 10470 fn test_cpu_topology_421_acpi() { 10471 test_cpu_topology(4, 2, 1, true); 10472 } 10473 10474 #[test] 10475 fn test_cpu_topology_142_acpi() { 10476 test_cpu_topology(1, 4, 2, true); 10477 } 10478 10479 #[test] 10480 fn test_cpu_topology_262_acpi() { 10481 test_cpu_topology(2, 6, 2, true); 10482 } 10483 10484 #[test] 10485 fn test_power_button_acpi() { 10486 _test_power_button(true); 10487 } 10488 10489 #[test] 10490 fn test_virtio_iommu() { 10491 _test_virtio_iommu(true) 10492 } 10493 } 10494 10495 mod rate_limiter { 10496 use super::*; 10497 10498 // Check if the 'measured' rate is within the expected 'difference' (in percentage) 10499 // compared to given 'limit' rate. 
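// For example, with limit = 100.0 and difference = 0.1, any measured value strictly
// between 90.0 and 110.0 is accepted.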
10500 fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool { 10501 let upper_limit = limit * (1_f64 + difference); 10502 let lower_limit = limit * (1_f64 - difference); 10503 10504 if measured > lower_limit && measured < upper_limit { 10505 return true; 10506 } 10507 10508 eprintln!( 10509 "\n\n==== Start 'check_rate_limit' failed ==== \ 10510 \n\nmeasured={measured}, lower_limit={lower_limit}, upper_limit={upper_limit} \ 10511 \n\n==== End 'check_rate_limit' failed ====\n\n" 10512 ); 10513 10514 false 10515 } 10516 10517 fn _test_rate_limiter_net(rx: bool) { 10518 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10519 let guest = Guest::new(Box::new(focal)); 10520 10521 let test_timeout = 10; 10522 let num_queues = 2; 10523 let queue_size = 256; 10524 let bw_size = 10485760_u64; // bytes 10525 let bw_refill_time = 100; // ms 10526 let limit_bps = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64; 10527 10528 let net_params = format!( 10529 "tap=,mac={},ip={},mask=255.255.255.0,num_queues={},queue_size={},bw_size={},bw_refill_time={}", 10530 guest.network.guest_mac, 10531 guest.network.host_ip, 10532 num_queues, 10533 queue_size, 10534 bw_size, 10535 bw_refill_time, 10536 ); 10537 10538 let mut child = GuestCommand::new(&guest) 10539 .args(["--cpus", &format!("boot={}", num_queues / 2)]) 10540 .args(["--memory", "size=4G"]) 10541 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 10542 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10543 .default_disks() 10544 .args(["--net", net_params.as_str()]) 10545 .capture_output() 10546 .spawn() 10547 .unwrap(); 10548 10549 let r = std::panic::catch_unwind(|| { 10550 guest.wait_vm_boot(None).unwrap(); 10551 let measured_bps = 10552 measure_virtio_net_throughput(test_timeout, num_queues / 2, &guest, rx, true) 10553 .unwrap(); 10554 assert!(check_rate_limit(measured_bps, limit_bps, 0.1)); 10555 }); 10556 10557 let _ = child.kill(); 10558 let output = child.wait_with_output().unwrap(); 10559 handle_child_output(r, &output); 10560 } 10561 10562 #[test] 10563 fn test_rate_limiter_net_rx() { 10564 _test_rate_limiter_net(true); 10565 } 10566 10567 #[test] 10568 fn test_rate_limiter_net_tx() { 10569 _test_rate_limiter_net(false); 10570 } 10571 10572 fn _test_rate_limiter_block(bandwidth: bool, num_queues: u32) { 10573 let test_timeout = 10; 10574 let fio_ops = FioOps::RandRW; 10575 10576 let bw_size = if bandwidth { 10577 10485760_u64 // bytes 10578 } else { 10579 100_u64 // I/O 10580 }; 10581 let bw_refill_time = 100; // ms 10582 let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64; 10583 10584 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10585 let guest = Guest::new(Box::new(focal)); 10586 let api_socket = temp_api_path(&guest.tmp_dir); 10587 let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap(); 10588 let blk_rate_limiter_test_img = 10589 String::from(test_img_dir.as_path().join("blk.img").to_str().unwrap()); 10590 10591 // Create the test block image 10592 assert!(exec_host_command_output(&format!( 10593 "dd if=/dev/zero of={blk_rate_limiter_test_img} bs=1M count=1024" 10594 )) 10595 .status 10596 .success()); 10597 10598 let test_blk_params = if bandwidth { 10599 format!( 10600 "path={blk_rate_limiter_test_img},num_queues={num_queues},bw_size={bw_size},bw_refill_time={bw_refill_time}" 10601 ) 10602 } else { 10603 format!( 10604 "path={blk_rate_limiter_test_img},num_queues={num_queues},ops_size={bw_size},ops_refill_time={bw_refill_time}" 10605 ) 10606 };
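// The OS and cloud-init images passed via `--disk` below occupy /dev/vda and /dev/vdb in
// the guest, so the rate-limited test disk shows up as /dev/vdc, which is the device the
// fio command further down targets.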
10607 10608 let mut child = GuestCommand::new(&guest) 10609 .args(["--cpus", &format!("boot={num_queues}")]) 10610 .args(["--memory", "size=4G"]) 10611 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 10612 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10613 .args([ 10614 "--disk", 10615 format!( 10616 "path={}", 10617 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 10618 ) 10619 .as_str(), 10620 format!( 10621 "path={}", 10622 guest.disk_config.disk(DiskType::CloudInit).unwrap() 10623 ) 10624 .as_str(), 10625 test_blk_params.as_str(), 10626 ]) 10627 .default_net() 10628 .args(["--api-socket", &api_socket]) 10629 .capture_output() 10630 .spawn() 10631 .unwrap(); 10632 10633 let r = std::panic::catch_unwind(|| { 10634 guest.wait_vm_boot(None).unwrap(); 10635 10636 let fio_command = format!( 10637 "sudo fio --filename=/dev/vdc --name=test --output-format=json \ 10638 --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \ 10639 --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}" 10640 ); 10641 let output = guest.ssh_command(&fio_command).unwrap(); 10642 10643 // Parse fio output 10644 let measured_rate = if bandwidth { 10645 parse_fio_output(&output, &fio_ops, num_queues).unwrap() 10646 } else { 10647 parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap() 10648 }; 10649 assert!(check_rate_limit(measured_rate, limit_rate, 0.1)); 10650 }); 10651 10652 let _ = child.kill(); 10653 let output = child.wait_with_output().unwrap(); 10654 handle_child_output(r, &output); 10655 } 10656 10657 fn _test_rate_limiter_group_block(bandwidth: bool, num_queues: u32, num_disks: u32) { 10658 let test_timeout = 10; 10659 let fio_ops = FioOps::RandRW; 10660 10661 let bw_size = if bandwidth { 10662 10485760_u64 // bytes 10663 } else { 10664 100_u64 // I/O 10665 }; 10666 let bw_refill_time = 100; // ms 10667 let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64; 10668 10669 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10670 let guest = Guest::new(Box::new(focal)); 10671 let api_socket = temp_api_path(&guest.tmp_dir); 10672 let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap(); 10673 10674 let rate_limit_group_arg = if bandwidth { 10675 format!("id=group0,bw_size={bw_size},bw_refill_time={bw_refill_time}") 10676 } else { 10677 format!("id=group0,ops_size={bw_size},ops_refill_time={bw_refill_time}") 10678 }; 10679 10680 let mut disk_args = vec![ 10681 "--disk".to_string(), 10682 format!( 10683 "path={}", 10684 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 10685 ), 10686 format!( 10687 "path={}", 10688 guest.disk_config.disk(DiskType::CloudInit).unwrap() 10689 ), 10690 ]; 10691 10692 for i in 0..num_disks { 10693 let test_img_path = String::from( 10694 test_img_dir 10695 .as_path() 10696 .join(format!("blk{}.img", i)) 10697 .to_str() 10698 .unwrap(), 10699 ); 10700 10701 assert!(exec_host_command_output(&format!( 10702 "dd if=/dev/zero of={test_img_path} bs=1M count=1024" 10703 )) 10704 .status 10705 .success()); 10706 10707 disk_args.push(format!( 10708 "path={test_img_path},num_queues={num_queues},rate_limit_group=group0" 10709 )); 10710 } 10711 10712 let mut child = GuestCommand::new(&guest) 10713 .args(["--cpus", &format!("boot={}", num_queues * num_disks)]) 10714 .args(["--memory", "size=4G"]) 10715 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 10716 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10717 .args(["--rate-limit-group", &rate_limit_group_arg]) 10718 .args(disk_args) 10719 
.default_net() 10720 .args(["--api-socket", &api_socket]) 10721 .capture_output() 10722 .spawn() 10723 .unwrap(); 10724 10725 let r = std::panic::catch_unwind(|| { 10726 guest.wait_vm_boot(None).unwrap(); 10727 10728 let mut fio_command = format!( 10729 "sudo fio --name=global --output-format=json \ 10730 --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \ 10731 --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}" 10732 ); 10733 10734 // Generate additional argument for each disk: 10735 // --name=job0 --filename=/dev/vdc \ 10736 // --name=job1 --filename=/dev/vdd \ 10737 // --name=job2 --filename=/dev/vde \ 10738 // ... 10739 for i in 0..num_disks { 10740 let c: char = 'c'; 10741 let arg = format!( 10742 " --name=job{i} --filename=/dev/vd{}", 10743 char::from_u32((c as u32) + i).unwrap() 10744 ); 10745 fio_command += &arg; 10746 } 10747 let output = guest.ssh_command(&fio_command).unwrap(); 10748 10749 // Parse fio output 10750 let measured_rate = if bandwidth { 10751 parse_fio_output(&output, &fio_ops, num_queues * num_disks).unwrap() 10752 } else { 10753 parse_fio_output_iops(&output, &fio_ops, num_queues * num_disks).unwrap() 10754 }; 10755 assert!(check_rate_limit(measured_rate, limit_rate, 0.2)); 10756 }); 10757 10758 let _ = child.kill(); 10759 let output = child.wait_with_output().unwrap(); 10760 handle_child_output(r, &output); 10761 } 10762 10763 #[test] 10764 fn test_rate_limiter_block_bandwidth() { 10765 _test_rate_limiter_block(true, 1); 10766 _test_rate_limiter_block(true, 2) 10767 } 10768 10769 #[test] 10770 fn test_rate_limiter_group_block_bandwidth() { 10771 _test_rate_limiter_group_block(true, 1, 1); 10772 _test_rate_limiter_group_block(true, 2, 1); 10773 _test_rate_limiter_group_block(true, 1, 2); 10774 _test_rate_limiter_group_block(true, 2, 2); 10775 } 10776 10777 #[test] 10778 fn test_rate_limiter_block_iops() { 10779 _test_rate_limiter_block(false, 1); 10780 _test_rate_limiter_block(false, 2); 10781 } 10782 10783 #[test] 10784 fn test_rate_limiter_group_block_iops() { 10785 _test_rate_limiter_group_block(false, 1, 1); 10786 _test_rate_limiter_group_block(false, 2, 1); 10787 _test_rate_limiter_group_block(false, 1, 2); 10788 _test_rate_limiter_group_block(false, 2, 2); 10789 } 10790 } 10791
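// Illustrative sketch (not part of the original suite): a minimal, self-contained check of
// the token-bucket arithmetic the rate-limiter tests above rely on. The limiter refills
// `bw_size` bytes (or `ops_size` operations) every `bw_refill_time` milliseconds, so the
// sustained limit is size * 1000 / refill_time per second, with an extra factor of 8 when
// the net tests compare against a bit rate. The module and test names below are
// hypothetical and exist only to document the calculation.
mod rate_limiter_math_sketch {
    #[test]
    fn bucket_refill_arithmetic() {
        let bw_size = 10_485_760_u64; // bytes added to the bucket per refill (10 MiB)
        let bw_refill_time = 100_u64; // refill period in milliseconds

        // Sustained byte rate: 10 MiB every 100 ms, i.e. ~104.86 MB/s
        let limit_bytes_per_sec = (bw_size * 1000) as f64 / bw_refill_time as f64;
        assert_eq!(limit_bytes_per_sec, 104_857_600.0);

        // The net tests compare against a bit rate, hence the extra factor of 8
        let limit_bits_per_sec = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64;
        assert_eq!(limit_bits_per_sec, 838_860_800.0);

        // A 10% tolerance band around the configured limit, mirroring what
        // `check_rate_limit(measured, limit, 0.1)` accepts
        let lower = limit_bytes_per_sec * (1.0 - 0.1);
        let upper = limit_bytes_per_sec * (1.0 + 0.1);
        assert!(lower < 100_000_000.0 && 100_000_000.0 < upper);
    }
}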