// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::undocumented_unsafe_blocks)]
// When enabling the `mshv` feature, we skip quite a few tests and
// hence have known dead code. This annotation silences dead-code
// related warnings for our quality workflow to pass.
#![allow(dead_code)]

extern crate test_infra;

use net_util::MacAddr;
use std::collections::HashMap;
use std::fs;
use std::io;
use std::io::BufRead;
use std::io::Read;
use std::io::Seek;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::Receiver;
use std::sync::Mutex;
use std::thread;
use test_infra::*;
use vmm_sys_util::{tempdir::TempDir, tempfile::TempFile};
use wait_timeout::ChildExt;

// Constant taken from the VMM crate.
const MAX_NUM_PCI_SEGMENTS: u16 = 96;

#[cfg(target_arch = "x86_64")]
mod x86_64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
    pub const JAMMY_NVIDIA_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-nvidia.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20230119-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw";
    pub const OVMF_NAME: &str = "CLOUDHV.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
}

#[cfg(target_arch = "x86_64")]
use x86_64::*;

#[cfg(target_arch = "aarch64")]
mod aarch64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
    pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw";
    pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
    pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true";
}

#[cfg(target_arch = "aarch64")]
use aarch64::*;

const DIRECT_KERNEL_BOOT_CMDLINE: &str =
    "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1";

const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server";

// This enum exists to make it more convenient to
// implement tests for both the D-Bus and REST APIs.
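// For example, an `HttpApi` built from a temp dir translates into
// `--api-socket=/tmp/chXXXXXX/cloud-hypervisor.sock`, while a `DBusApi`
// translates into `--dbus-service-name=org.cloudhypervisor.chXXXXXX` and
// `--dbus-object-path=/org/cloudhypervisor/chXXXXXX` (see `guest_args` below).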
81 enum TargetApi { 82 // API socket 83 HttpApi(String), 84 // well known service name, object path 85 DBusApi(String, String), 86 } 87 88 impl TargetApi { 89 fn new_http_api(tmp_dir: &TempDir) -> Self { 90 Self::HttpApi(temp_api_path(tmp_dir)) 91 } 92 93 fn new_dbus_api(tmp_dir: &TempDir) -> Self { 94 // `tmp_dir` is in the form of "/tmp/chXXXXXX" 95 // and we take the `chXXXXXX` part as a unique identifier for the guest 96 let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap(); 97 98 Self::DBusApi( 99 format!("org.cloudhypervisor.{id}"), 100 format!("/org/cloudhypervisor/{id}"), 101 ) 102 } 103 104 fn guest_args(&self) -> Vec<String> { 105 match self { 106 TargetApi::HttpApi(api_socket) => { 107 vec![format!("--api-socket={}", api_socket.as_str())] 108 } 109 TargetApi::DBusApi(service_name, object_path) => { 110 vec![ 111 format!("--dbus-service-name={}", service_name.as_str()), 112 format!("--dbus-object-path={}", object_path.as_str()), 113 ] 114 } 115 } 116 } 117 118 fn remote_args(&self) -> Vec<String> { 119 // `guest_args` and `remote_args` are consistent with each other 120 self.guest_args() 121 } 122 123 fn remote_command(&self, command: &str, arg: Option<&str>) -> bool { 124 let mut cmd = Command::new(clh_command("ch-remote")); 125 cmd.args(self.remote_args()); 126 cmd.arg(command); 127 128 if let Some(arg) = arg { 129 cmd.arg(arg); 130 } 131 132 let output = cmd.output().unwrap(); 133 if output.status.success() { 134 true 135 } else { 136 eprintln!("Error running ch-remote command: {:?}", &cmd); 137 let stderr = String::from_utf8_lossy(&output.stderr); 138 eprintln!("stderr: {stderr}"); 139 false 140 } 141 } 142 } 143 144 // Start cloud-hypervisor with no VM parameters, only the API server running. 145 // From the API: Create a VM, boot it and check that it looks as expected. 146 fn _test_api_create_boot(target_api: TargetApi, guest: Guest) { 147 let mut child = GuestCommand::new(&guest) 148 .args(target_api.guest_args()) 149 .capture_output() 150 .spawn() 151 .unwrap(); 152 153 thread::sleep(std::time::Duration::new(1, 0)); 154 155 // Verify API server is running 156 assert!(target_api.remote_command("ping", None)); 157 158 // Create the VM first 159 let cpu_count: u8 = 4; 160 let request_body = guest.api_create_body( 161 cpu_count, 162 direct_kernel_boot_path().to_str().unwrap(), 163 DIRECT_KERNEL_BOOT_CMDLINE, 164 ); 165 166 let temp_config_path = guest.tmp_dir.as_path().join("config"); 167 std::fs::write(&temp_config_path, request_body).unwrap(); 168 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 169 170 assert!(target_api.remote_command("create", Some(create_config),)); 171 172 // Then boot it 173 assert!(target_api.remote_command("boot", None)); 174 thread::sleep(std::time::Duration::new(20, 0)); 175 176 let r = std::panic::catch_unwind(|| { 177 // Check that the VM booted as expected 178 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 179 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 180 }); 181 182 kill_child(&mut child); 183 let output = child.wait_with_output().unwrap(); 184 185 handle_child_output(r, &output); 186 } 187 188 // Start cloud-hypervisor with no VM parameters, only the API server running. 
189 // From the API: Create a VM, boot it and check it can be shutdown and then 190 // booted again 191 fn _test_api_shutdown(target_api: TargetApi, guest: Guest) { 192 let mut child = GuestCommand::new(&guest) 193 .args(target_api.guest_args()) 194 .capture_output() 195 .spawn() 196 .unwrap(); 197 198 thread::sleep(std::time::Duration::new(1, 0)); 199 200 // Verify API server is running 201 assert!(target_api.remote_command("ping", None)); 202 203 // Create the VM first 204 let cpu_count: u8 = 4; 205 let request_body = guest.api_create_body( 206 cpu_count, 207 direct_kernel_boot_path().to_str().unwrap(), 208 DIRECT_KERNEL_BOOT_CMDLINE, 209 ); 210 211 let temp_config_path = guest.tmp_dir.as_path().join("config"); 212 std::fs::write(&temp_config_path, request_body).unwrap(); 213 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 214 215 let r = std::panic::catch_unwind(|| { 216 assert!(target_api.remote_command("create", Some(create_config))); 217 218 // Then boot it 219 assert!(target_api.remote_command("boot", None)); 220 221 guest.wait_vm_boot(None).unwrap(); 222 223 // Check that the VM booted as expected 224 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 225 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 226 227 // Sync and shutdown without powering off to prevent filesystem 228 // corruption. 229 guest.ssh_command("sync").unwrap(); 230 guest.ssh_command("sudo shutdown -H now").unwrap(); 231 232 // Wait for the guest to be fully shutdown 233 thread::sleep(std::time::Duration::new(20, 0)); 234 235 // Then shut it down 236 assert!(target_api.remote_command("shutdown", None)); 237 238 // Then boot it again 239 assert!(target_api.remote_command("boot", None)); 240 241 guest.wait_vm_boot(None).unwrap(); 242 243 // Check that the VM booted as expected 244 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 245 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 246 }); 247 248 kill_child(&mut child); 249 let output = child.wait_with_output().unwrap(); 250 251 handle_child_output(r, &output); 252 } 253 254 // Start cloud-hypervisor with no VM parameters, only the API server running. 255 // From the API: Create a VM, boot it and check it can be deleted and then recreated 256 // booted again. 
257 fn _test_api_delete(target_api: TargetApi, guest: Guest) { 258 let mut child = GuestCommand::new(&guest) 259 .args(target_api.guest_args()) 260 .capture_output() 261 .spawn() 262 .unwrap(); 263 264 thread::sleep(std::time::Duration::new(1, 0)); 265 266 // Verify API server is running 267 assert!(target_api.remote_command("ping", None)); 268 269 // Create the VM first 270 let cpu_count: u8 = 4; 271 let request_body = guest.api_create_body( 272 cpu_count, 273 direct_kernel_boot_path().to_str().unwrap(), 274 DIRECT_KERNEL_BOOT_CMDLINE, 275 ); 276 let temp_config_path = guest.tmp_dir.as_path().join("config"); 277 std::fs::write(&temp_config_path, request_body).unwrap(); 278 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 279 280 let r = std::panic::catch_unwind(|| { 281 assert!(target_api.remote_command("create", Some(create_config))); 282 283 // Then boot it 284 assert!(target_api.remote_command("boot", None)); 285 286 guest.wait_vm_boot(None).unwrap(); 287 288 // Check that the VM booted as expected 289 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 290 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 291 292 // Sync and shutdown without powering off to prevent filesystem 293 // corruption. 294 guest.ssh_command("sync").unwrap(); 295 guest.ssh_command("sudo shutdown -H now").unwrap(); 296 297 // Wait for the guest to be fully shutdown 298 thread::sleep(std::time::Duration::new(20, 0)); 299 300 // Then delete it 301 assert!(target_api.remote_command("delete", None)); 302 303 assert!(target_api.remote_command("create", Some(create_config))); 304 305 // Then boot it again 306 assert!(target_api.remote_command("boot", None)); 307 308 guest.wait_vm_boot(None).unwrap(); 309 310 // Check that the VM booted as expected 311 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 312 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 313 }); 314 315 kill_child(&mut child); 316 let output = child.wait_with_output().unwrap(); 317 318 handle_child_output(r, &output); 319 } 320 321 // Start cloud-hypervisor with no VM parameters, only the API server running. 322 // From the API: Create a VM, boot it and check that it looks as expected. 323 // Then we pause the VM, check that it's no longer available. 324 // Finally we resume the VM and check that it's available. 
325 fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) { 326 let mut child = GuestCommand::new(&guest) 327 .args(target_api.guest_args()) 328 .capture_output() 329 .spawn() 330 .unwrap(); 331 332 thread::sleep(std::time::Duration::new(1, 0)); 333 334 // Verify API server is running 335 assert!(target_api.remote_command("ping", None)); 336 337 // Create the VM first 338 let cpu_count: u8 = 4; 339 let request_body = guest.api_create_body( 340 cpu_count, 341 direct_kernel_boot_path().to_str().unwrap(), 342 DIRECT_KERNEL_BOOT_CMDLINE, 343 ); 344 345 let temp_config_path = guest.tmp_dir.as_path().join("config"); 346 std::fs::write(&temp_config_path, request_body).unwrap(); 347 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 348 349 assert!(target_api.remote_command("create", Some(create_config))); 350 351 // Then boot it 352 assert!(target_api.remote_command("boot", None)); 353 thread::sleep(std::time::Duration::new(20, 0)); 354 355 let r = std::panic::catch_unwind(|| { 356 // Check that the VM booted as expected 357 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 358 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 359 360 // We now pause the VM 361 assert!(target_api.remote_command("pause", None)); 362 363 // Check pausing again fails 364 assert!(!target_api.remote_command("pause", None)); 365 366 thread::sleep(std::time::Duration::new(2, 0)); 367 368 // SSH into the VM should fail 369 assert!(ssh_command_ip( 370 "grep -c processor /proc/cpuinfo", 371 &guest.network.guest_ip, 372 2, 373 5 374 ) 375 .is_err()); 376 377 // Resume the VM 378 assert!(target_api.remote_command("resume", None)); 379 380 // Check resuming again fails 381 assert!(!target_api.remote_command("resume", None)); 382 383 thread::sleep(std::time::Duration::new(2, 0)); 384 385 // Now we should be able to SSH back in and get the right number of CPUs 386 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 387 }); 388 389 kill_child(&mut child); 390 let output = child.wait_with_output().unwrap(); 391 392 handle_child_output(r, &output); 393 } 394 395 fn _test_pty_interaction(pty_path: PathBuf) { 396 let mut cf = std::fs::OpenOptions::new() 397 .write(true) 398 .read(true) 399 .open(pty_path) 400 .unwrap(); 401 402 // Some dumb sleeps but we don't want to write 403 // before the console is up and we don't want 404 // to try and write the next line before the 405 // login process is ready. 
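    // The asserted lengths below simply match the number of bytes written:
    // "cloud\n" is 6 bytes, "cloud123\n" is 9 bytes and
    // "echo test_pty_console\n" is 22 bytes.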
406 thread::sleep(std::time::Duration::new(5, 0)); 407 assert_eq!(cf.write(b"cloud\n").unwrap(), 6); 408 thread::sleep(std::time::Duration::new(2, 0)); 409 assert_eq!(cf.write(b"cloud123\n").unwrap(), 9); 410 thread::sleep(std::time::Duration::new(2, 0)); 411 assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22); 412 thread::sleep(std::time::Duration::new(2, 0)); 413 414 // read pty and ensure they have a login shell 415 // some fairly hacky workarounds to avoid looping 416 // forever in case the channel is blocked getting output 417 let ptyc = pty_read(cf); 418 let mut empty = 0; 419 let mut prev = String::new(); 420 loop { 421 thread::sleep(std::time::Duration::new(2, 0)); 422 match ptyc.try_recv() { 423 Ok(line) => { 424 empty = 0; 425 prev = prev + &line; 426 if prev.contains("test_pty_console") { 427 break; 428 } 429 } 430 Err(mpsc::TryRecvError::Empty) => { 431 empty += 1; 432 assert!(empty <= 5, "No login on pty"); 433 } 434 _ => { 435 panic!("No login on pty") 436 } 437 } 438 } 439 } 440 441 fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) { 442 let mut workload_path = dirs::home_dir().unwrap(); 443 workload_path.push("workloads"); 444 445 let mut virtiofsd_path = workload_path; 446 virtiofsd_path.push("virtiofsd"); 447 let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap()); 448 449 let virtiofsd_socket_path = 450 String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap()); 451 452 // Start the daemon 453 let child = Command::new(virtiofsd_path.as_str()) 454 .args(["--shared-dir", shared_dir]) 455 .args(["--socket-path", virtiofsd_socket_path.as_str()]) 456 .args(["--cache", "never"]) 457 .spawn() 458 .unwrap(); 459 460 thread::sleep(std::time::Duration::new(10, 0)); 461 462 (child, virtiofsd_socket_path) 463 } 464 465 fn prepare_vubd( 466 tmp_dir: &TempDir, 467 blk_img: &str, 468 num_queues: usize, 469 rdonly: bool, 470 direct: bool, 471 ) -> (std::process::Child, String) { 472 let mut workload_path = dirs::home_dir().unwrap(); 473 workload_path.push("workloads"); 474 475 let mut blk_file_path = workload_path; 476 blk_file_path.push(blk_img); 477 let blk_file_path = String::from(blk_file_path.to_str().unwrap()); 478 479 let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap()); 480 481 // Start the daemon 482 let child = Command::new(clh_command("vhost_user_block")) 483 .args([ 484 "--block-backend", 485 format!( 486 "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}" 487 ) 488 .as_str(), 489 ]) 490 .spawn() 491 .unwrap(); 492 493 thread::sleep(std::time::Duration::new(10, 0)); 494 495 (child, vubd_socket_path) 496 } 497 498 fn temp_vsock_path(tmp_dir: &TempDir) -> String { 499 String::from(tmp_dir.as_path().join("vsock").to_str().unwrap()) 500 } 501 502 fn temp_api_path(tmp_dir: &TempDir) -> String { 503 String::from( 504 tmp_dir 505 .as_path() 506 .join("cloud-hypervisor.sock") 507 .to_str() 508 .unwrap(), 509 ) 510 } 511 512 fn temp_event_monitor_path(tmp_dir: &TempDir) -> String { 513 String::from(tmp_dir.as_path().join("event.json").to_str().unwrap()) 514 } 515 516 // Creates the directory and returns the path. 
517 fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String { 518 let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap()); 519 std::fs::create_dir(&snapshot_dir).unwrap(); 520 snapshot_dir 521 } 522 523 fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String { 524 let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap()); 525 vmcore_file 526 } 527 528 // Creates the path for direct kernel boot and return the path. 529 // For x86_64, this function returns the vmlinux kernel path. 530 // For AArch64, this function returns the PE kernel path. 531 fn direct_kernel_boot_path() -> PathBuf { 532 let mut workload_path = dirs::home_dir().unwrap(); 533 workload_path.push("workloads"); 534 535 let mut kernel_path = workload_path; 536 #[cfg(target_arch = "x86_64")] 537 kernel_path.push("vmlinux"); 538 #[cfg(target_arch = "aarch64")] 539 kernel_path.push("Image"); 540 541 kernel_path 542 } 543 544 fn edk2_path() -> PathBuf { 545 let mut workload_path = dirs::home_dir().unwrap(); 546 workload_path.push("workloads"); 547 let mut edk2_path = workload_path; 548 edk2_path.push(OVMF_NAME); 549 550 edk2_path 551 } 552 553 fn cloud_hypervisor_release_path() -> String { 554 let mut workload_path = dirs::home_dir().unwrap(); 555 workload_path.push("workloads"); 556 557 let mut ch_release_path = workload_path; 558 #[cfg(target_arch = "x86_64")] 559 ch_release_path.push("cloud-hypervisor-static"); 560 #[cfg(target_arch = "aarch64")] 561 ch_release_path.push("cloud-hypervisor-static-aarch64"); 562 563 ch_release_path.into_os_string().into_string().unwrap() 564 } 565 566 fn prepare_vhost_user_net_daemon( 567 tmp_dir: &TempDir, 568 ip: &str, 569 tap: Option<&str>, 570 mtu: Option<u16>, 571 num_queues: usize, 572 client_mode: bool, 573 ) -> (std::process::Command, String) { 574 let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap()); 575 576 // Start the daemon 577 let mut net_params = format!( 578 "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}" 579 ); 580 581 if let Some(tap) = tap { 582 net_params.push_str(format!(",tap={tap}").as_str()); 583 } 584 585 if let Some(mtu) = mtu { 586 net_params.push_str(format!(",mtu={mtu}").as_str()); 587 } 588 589 let mut command = Command::new(clh_command("vhost_user_net")); 590 command.args(["--net-backend", net_params.as_str()]); 591 592 (command, vunet_socket_path) 593 } 594 595 fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) { 596 let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap()); 597 let swtpm_socket_path = String::from( 598 tmp_dir 599 .as_path() 600 .join("swtpm") 601 .join("swtpm.sock") 602 .to_str() 603 .unwrap(), 604 ); 605 std::fs::create_dir(&swtpm_tpm_dir).unwrap(); 606 607 let mut swtpm_command = Command::new("swtpm"); 608 let swtpm_args = [ 609 "socket", 610 "--tpmstate", 611 &format!("dir={swtpm_tpm_dir}"), 612 "--ctrl", 613 &format!("type=unixio,path={swtpm_socket_path}"), 614 "--flags", 615 "startup-clear", 616 "--tpm2", 617 ]; 618 swtpm_command.args(swtpm_args); 619 620 (swtpm_command, swtpm_socket_path) 621 } 622 623 fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool { 624 let mut cmd = Command::new(clh_command("ch-remote")); 625 cmd.args([&format!("--api-socket={api_socket}"), command]); 626 627 if let Some(arg) = arg { 628 cmd.arg(arg); 629 } 630 let output = cmd.output().unwrap(); 631 if output.status.success() { 
632 true 633 } else { 634 eprintln!("Error running ch-remote command: {:?}", &cmd); 635 let stderr = String::from_utf8_lossy(&output.stderr); 636 eprintln!("stderr: {stderr}"); 637 false 638 } 639 } 640 641 fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) { 642 let mut cmd = Command::new(clh_command("ch-remote")); 643 cmd.args([&format!("--api-socket={api_socket}"), command]); 644 645 if let Some(arg) = arg { 646 cmd.arg(arg); 647 } 648 649 let output = cmd.output().expect("Failed to launch ch-remote"); 650 651 (output.status.success(), output.stdout) 652 } 653 654 fn resize_command( 655 api_socket: &str, 656 desired_vcpus: Option<u8>, 657 desired_ram: Option<usize>, 658 desired_balloon: Option<usize>, 659 event_file: Option<&str>, 660 ) -> bool { 661 let mut cmd = Command::new(clh_command("ch-remote")); 662 cmd.args([&format!("--api-socket={api_socket}"), "resize"]); 663 664 if let Some(desired_vcpus) = desired_vcpus { 665 cmd.arg(format!("--cpus={desired_vcpus}")); 666 } 667 668 if let Some(desired_ram) = desired_ram { 669 cmd.arg(format!("--memory={desired_ram}")); 670 } 671 672 if let Some(desired_balloon) = desired_balloon { 673 cmd.arg(format!("--balloon={desired_balloon}")); 674 } 675 676 let ret = cmd.status().expect("Failed to launch ch-remote").success(); 677 678 if let Some(event_path) = event_file { 679 let latest_events = [ 680 &MetaEvent { 681 event: "resizing".to_string(), 682 device_id: None, 683 }, 684 &MetaEvent { 685 event: "resized".to_string(), 686 device_id: None, 687 }, 688 ]; 689 // See: #5938 690 thread::sleep(std::time::Duration::new(1, 0)); 691 assert!(check_latest_events_exact(&latest_events, event_path)); 692 } 693 694 ret 695 } 696 697 fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool { 698 let mut cmd = Command::new(clh_command("ch-remote")); 699 cmd.args([ 700 &format!("--api-socket={api_socket}"), 701 "resize-zone", 702 &format!("--id={id}"), 703 &format!("--size={desired_size}"), 704 ]); 705 706 cmd.status().expect("Failed to launch ch-remote").success() 707 } 708 709 // setup OVS-DPDK bridge and ports 710 fn setup_ovs_dpdk() { 711 // setup OVS-DPDK 712 assert!(exec_host_command_status("service openvswitch-switch start").success()); 713 assert!(exec_host_command_status("ovs-vsctl init").success()); 714 assert!( 715 exec_host_command_status("ovs-vsctl set Open_vSwitch . 
other_config:dpdk-init=true") 716 .success() 717 ); 718 assert!(exec_host_command_status("service openvswitch-switch restart").success()); 719 720 // Create OVS-DPDK bridge and ports 721 assert!(exec_host_command_status( 722 "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev", 723 ) 724 .success()); 725 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 726 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success()); 727 assert!(exec_host_command_status("ip link set up dev ovsbr0").success()); 728 assert!(exec_host_command_status("service openvswitch-switch restart").success()); 729 } 730 fn cleanup_ovs_dpdk() { 731 assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success()); 732 exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2"); 733 } 734 // Setup two guests and ensure they are connected through ovs-dpdk 735 fn setup_ovs_dpdk_guests( 736 guest1: &Guest, 737 guest2: &Guest, 738 api_socket: &str, 739 release_binary: bool, 740 ) -> (Child, Child) { 741 setup_ovs_dpdk(); 742 743 let clh_path = if !release_binary { 744 clh_command("cloud-hypervisor") 745 } else { 746 cloud_hypervisor_release_path() 747 }; 748 749 let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path) 750 .args(["--cpus", "boot=2"]) 751 .args(["--memory", "size=0,shared=on"]) 752 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"]) 753 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 754 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 755 .default_disks() 756 .args(["--net", guest1.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"]) 757 .capture_output() 758 .spawn() 759 .unwrap(); 760 761 #[cfg(target_arch = "x86_64")] 762 let guest_net_iface = "ens5"; 763 #[cfg(target_arch = "aarch64")] 764 let guest_net_iface = "enp0s5"; 765 766 let r = std::panic::catch_unwind(|| { 767 guest1.wait_vm_boot(None).unwrap(); 768 769 guest1 770 .ssh_command(&format!( 771 "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}" 772 )) 773 .unwrap(); 774 guest1 775 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 776 .unwrap(); 777 778 let guest_ip = guest1.network.guest_ip.clone(); 779 thread::spawn(move || { 780 ssh_command_ip( 781 "nc -l 12345", 782 &guest_ip, 783 DEFAULT_SSH_RETRIES, 784 DEFAULT_SSH_TIMEOUT, 785 ) 786 .unwrap(); 787 }); 788 }); 789 if r.is_err() { 790 cleanup_ovs_dpdk(); 791 792 let _ = child1.kill(); 793 let output = child1.wait_with_output().unwrap(); 794 handle_child_output(r, &output); 795 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 796 } 797 798 let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path) 799 .args(["--api-socket", api_socket]) 800 .args(["--cpus", "boot=2"]) 801 .args(["--memory", "size=0,shared=on"]) 802 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"]) 803 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 804 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 805 .default_disks() 806 .args(["--net", guest2.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"]) 807 
.capture_output() 808 .spawn() 809 .unwrap(); 810 811 let r = std::panic::catch_unwind(|| { 812 guest2.wait_vm_boot(None).unwrap(); 813 814 guest2 815 .ssh_command(&format!( 816 "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}" 817 )) 818 .unwrap(); 819 guest2 820 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 821 .unwrap(); 822 823 // Check the connection works properly between the two VMs 824 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 825 }); 826 if r.is_err() { 827 cleanup_ovs_dpdk(); 828 829 let _ = child1.kill(); 830 let _ = child2.kill(); 831 let output = child2.wait_with_output().unwrap(); 832 handle_child_output(r, &output); 833 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 834 } 835 836 (child1, child2) 837 } 838 839 enum FwType { 840 Ovmf, 841 RustHypervisorFirmware, 842 } 843 844 fn fw_path(_fw_type: FwType) -> String { 845 let mut workload_path = dirs::home_dir().unwrap(); 846 workload_path.push("workloads"); 847 848 let mut fw_path = workload_path; 849 #[cfg(target_arch = "aarch64")] 850 fw_path.push("CLOUDHV_EFI.fd"); 851 #[cfg(target_arch = "x86_64")] 852 { 853 match _fw_type { 854 FwType::Ovmf => fw_path.push(OVMF_NAME), 855 FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"), 856 } 857 } 858 859 fw_path.to_str().unwrap().to_string() 860 } 861 862 #[derive(Debug)] 863 struct MetaEvent { 864 event: String, 865 device_id: Option<String>, 866 } 867 868 impl MetaEvent { 869 pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool { 870 let mut matched = false; 871 if v["event"].as_str().unwrap() == self.event { 872 if let Some(device_id) = &self.device_id { 873 if v["properties"]["id"].as_str().unwrap() == device_id { 874 matched = true 875 } 876 } else { 877 matched = true; 878 } 879 } 880 matched 881 } 882 } 883 884 // Parse the event_monitor file based on the format that each event 885 // is followed by a double newline 886 fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> { 887 let content = fs::read(event_file).unwrap(); 888 let mut ret = Vec::new(); 889 for entry in String::from_utf8_lossy(&content) 890 .trim() 891 .split("\n\n") 892 .collect::<Vec<&str>>() 893 { 894 ret.push(serde_json::from_str(entry).unwrap()); 895 } 896 897 ret 898 } 899 900 // Return true if all events from the input 'expected_events' are matched sequentially 901 // with events from the 'event_file' 902 fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool { 903 let json_events = parse_event_file(event_file); 904 let len = expected_events.len(); 905 let mut idx = 0; 906 for e in &json_events { 907 if idx == len { 908 break; 909 } 910 if expected_events[idx].match_with_json_event(e) { 911 idx += 1; 912 } 913 } 914 915 let ret = idx == len; 916 917 if !ret { 918 eprintln!( 919 "\n\n==== Start 'check_sequential_events' failed ==== \ 920 \n\nexpected_events={:?}\nactual_events={:?} \ 921 \n\n==== End 'check_sequential_events' failed ====", 922 expected_events, json_events, 923 ); 924 } 925 926 ret 927 } 928 929 // Return true if all events from the input 'expected_events' are matched exactly 930 // with events from the 'event_file' 931 fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool { 932 let json_events = parse_event_file(event_file); 933 assert!(expected_events.len() <= json_events.len()); 934 let json_events = &json_events[..expected_events.len()]; 935 936 for (idx, e) in json_events.iter().enumerate() { 
937 if !expected_events[idx].match_with_json_event(e) { 938 eprintln!( 939 "\n\n==== Start 'check_sequential_events_exact' failed ==== \ 940 \n\nexpected_events={:?}\nactual_events={:?} \ 941 \n\n==== End 'check_sequential_events_exact' failed ====", 942 expected_events, json_events, 943 ); 944 945 return false; 946 } 947 } 948 949 true 950 } 951 952 // Return true if events from the input 'latest_events' are matched exactly 953 // with the most recent events from the 'event_file' 954 fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool { 955 let json_events = parse_event_file(event_file); 956 assert!(latest_events.len() <= json_events.len()); 957 let json_events = &json_events[(json_events.len() - latest_events.len())..]; 958 959 for (idx, e) in json_events.iter().enumerate() { 960 if !latest_events[idx].match_with_json_event(e) { 961 eprintln!( 962 "\n\n==== Start 'check_latest_events_exact' failed ==== \ 963 \n\nexpected_events={:?}\nactual_events={:?} \ 964 \n\n==== End 'check_latest_events_exact' failed ====", 965 latest_events, json_events, 966 ); 967 968 return false; 969 } 970 } 971 972 true 973 } 974 975 fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) { 976 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 977 let guest = Guest::new(Box::new(focal)); 978 let total_vcpus = threads_per_core * cores_per_package * packages; 979 let direct_kernel_boot_path = direct_kernel_boot_path(); 980 let mut kernel_path = direct_kernel_boot_path.to_str().unwrap(); 981 let fw_path = fw_path(FwType::RustHypervisorFirmware); 982 if use_fw { 983 kernel_path = fw_path.as_str(); 984 } 985 986 let mut child = GuestCommand::new(&guest) 987 .args([ 988 "--cpus", 989 &format!( 990 "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}" 991 ), 992 ]) 993 .args(["--memory", "size=512M"]) 994 .args(["--kernel", kernel_path]) 995 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 996 .default_disks() 997 .default_net() 998 .capture_output() 999 .spawn() 1000 .unwrap(); 1001 1002 let r = std::panic::catch_unwind(|| { 1003 guest.wait_vm_boot(None).unwrap(); 1004 assert_eq!( 1005 guest.get_cpu_count().unwrap_or_default(), 1006 u32::from(total_vcpus) 1007 ); 1008 assert_eq!( 1009 guest 1010 .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1011 .unwrap() 1012 .trim() 1013 .parse::<u8>() 1014 .unwrap_or(0), 1015 threads_per_core 1016 ); 1017 1018 assert_eq!( 1019 guest 1020 .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1021 .unwrap() 1022 .trim() 1023 .parse::<u8>() 1024 .unwrap_or(0), 1025 cores_per_package 1026 ); 1027 1028 assert_eq!( 1029 guest 1030 .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1031 .unwrap() 1032 .trim() 1033 .parse::<u8>() 1034 .unwrap_or(0), 1035 packages 1036 ); 1037 1038 #[cfg(target_arch = "x86_64")] 1039 { 1040 let mut cpu_id = 0; 1041 for package_id in 0..packages { 1042 for core_id in 0..cores_per_package { 1043 for _ in 0..threads_per_core { 1044 assert_eq!( 1045 guest 1046 .ssh_command(&format!("cat /sys/devices/system/cpu/cpu{cpu_id}/topology/physical_package_id")) 1047 .unwrap() 1048 .trim() 1049 .parse::<u8>() 1050 .unwrap_or(0), 1051 package_id 1052 ); 1053 1054 assert_eq!( 1055 guest 1056 .ssh_command(&format!( 1057 "cat /sys/devices/system/cpu/cpu{cpu_id}/topology/core_id" 1058 )) 1059 .unwrap() 1060 .trim() 1061 .parse::<u8>() 1062 .unwrap_or(0), 1063 core_id 1064 ); 
1065 1066 cpu_id += 1; 1067 } 1068 } 1069 } 1070 } 1071 }); 1072 1073 kill_child(&mut child); 1074 let output = child.wait_with_output().unwrap(); 1075 1076 handle_child_output(r, &output); 1077 } 1078 1079 #[allow(unused_variables)] 1080 fn _test_guest_numa_nodes(acpi: bool) { 1081 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1082 let guest = Guest::new(Box::new(focal)); 1083 let api_socket = temp_api_path(&guest.tmp_dir); 1084 #[cfg(target_arch = "x86_64")] 1085 let kernel_path = direct_kernel_boot_path(); 1086 #[cfg(target_arch = "aarch64")] 1087 let kernel_path = if acpi { 1088 edk2_path() 1089 } else { 1090 direct_kernel_boot_path() 1091 }; 1092 1093 let mut child = GuestCommand::new(&guest) 1094 .args(["--cpus", "boot=6,max=12"]) 1095 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 1096 .args([ 1097 "--memory-zone", 1098 "id=mem0,size=1G,hotplug_size=3G", 1099 "id=mem1,size=2G,hotplug_size=3G", 1100 "id=mem2,size=3G,hotplug_size=3G", 1101 ]) 1102 .args([ 1103 "--numa", 1104 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 1105 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 1106 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 1107 ]) 1108 .args(["--kernel", kernel_path.to_str().unwrap()]) 1109 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1110 .args(["--api-socket", &api_socket]) 1111 .capture_output() 1112 .default_disks() 1113 .default_net() 1114 .spawn() 1115 .unwrap(); 1116 1117 let r = std::panic::catch_unwind(|| { 1118 guest.wait_vm_boot(None).unwrap(); 1119 1120 guest.check_numa_common( 1121 Some(&[960_000, 1_920_000, 2_880_000]), 1122 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 1123 Some(&["10 15 20", "20 10 25", "25 30 10"]), 1124 ); 1125 1126 // AArch64 currently does not support hotplug, and therefore we only 1127 // test hotplug-related function on x86_64 here. 1128 #[cfg(target_arch = "x86_64")] 1129 { 1130 guest.enable_memory_hotplug(); 1131 1132 // Resize every memory zone and check each associated NUMA node 1133 // has been assigned the right amount of memory. 1134 resize_zone_command(&api_socket, "mem0", "4G"); 1135 resize_zone_command(&api_socket, "mem1", "4G"); 1136 resize_zone_command(&api_socket, "mem2", "4G"); 1137 // Resize to the maximum amount of CPUs and check each NUMA 1138 // node has been assigned the right CPUs set. 
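            // 12 matches the `max=12` value passed to `--cpus` above.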
1139 resize_command(&api_socket, Some(12), None, None, None); 1140 thread::sleep(std::time::Duration::new(5, 0)); 1141 1142 guest.check_numa_common( 1143 Some(&[3_840_000, 3_840_000, 3_840_000]), 1144 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 1145 None, 1146 ); 1147 } 1148 }); 1149 1150 kill_child(&mut child); 1151 let output = child.wait_with_output().unwrap(); 1152 1153 handle_child_output(r, &output); 1154 } 1155 1156 #[allow(unused_variables)] 1157 fn _test_power_button(acpi: bool) { 1158 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1159 let guest = Guest::new(Box::new(focal)); 1160 let mut cmd = GuestCommand::new(&guest); 1161 let api_socket = temp_api_path(&guest.tmp_dir); 1162 1163 #[cfg(target_arch = "x86_64")] 1164 let kernel_path = direct_kernel_boot_path(); 1165 #[cfg(target_arch = "aarch64")] 1166 let kernel_path = if acpi { 1167 edk2_path() 1168 } else { 1169 direct_kernel_boot_path() 1170 }; 1171 1172 cmd.args(["--cpus", "boot=1"]) 1173 .args(["--memory", "size=512M"]) 1174 .args(["--kernel", kernel_path.to_str().unwrap()]) 1175 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1176 .capture_output() 1177 .default_disks() 1178 .default_net() 1179 .args(["--api-socket", &api_socket]); 1180 1181 let child = cmd.spawn().unwrap(); 1182 1183 let r = std::panic::catch_unwind(|| { 1184 guest.wait_vm_boot(None).unwrap(); 1185 assert!(remote_command(&api_socket, "power-button", None)); 1186 }); 1187 1188 let output = child.wait_with_output().unwrap(); 1189 assert!(output.status.success()); 1190 handle_child_output(r, &output); 1191 } 1192 1193 type PrepareNetDaemon = dyn Fn( 1194 &TempDir, 1195 &str, 1196 Option<&str>, 1197 Option<u16>, 1198 usize, 1199 bool, 1200 ) -> (std::process::Command, String); 1201 1202 fn test_vhost_user_net( 1203 tap: Option<&str>, 1204 num_queues: usize, 1205 prepare_daemon: &PrepareNetDaemon, 1206 generate_host_mac: bool, 1207 client_mode_daemon: bool, 1208 ) { 1209 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1210 let guest = Guest::new(Box::new(focal)); 1211 let api_socket = temp_api_path(&guest.tmp_dir); 1212 1213 let kernel_path = direct_kernel_boot_path(); 1214 1215 let host_mac = if generate_host_mac { 1216 Some(MacAddr::local_random()) 1217 } else { 1218 None 1219 }; 1220 1221 let mtu = Some(3000); 1222 1223 let (mut daemon_command, vunet_socket_path) = prepare_daemon( 1224 &guest.tmp_dir, 1225 &guest.network.host_ip, 1226 tap, 1227 mtu, 1228 num_queues, 1229 client_mode_daemon, 1230 ); 1231 1232 let net_params = format!( 1233 "vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000", 1234 guest.network.guest_mac, 1235 vunet_socket_path, 1236 num_queues, 1237 if let Some(host_mac) = host_mac { 1238 format!(",host_mac={host_mac}") 1239 } else { 1240 "".to_owned() 1241 }, 1242 if client_mode_daemon { 1243 "server" 1244 } else { 1245 "client" 1246 }, 1247 ); 1248 1249 let mut ch_command = GuestCommand::new(&guest); 1250 ch_command 1251 .args(["--cpus", format!("boot={}", num_queues / 2).as_str()]) 1252 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1253 .args(["--kernel", kernel_path.to_str().unwrap()]) 1254 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1255 .default_disks() 1256 .args(["--net", net_params.as_str()]) 1257 .args(["--api-socket", &api_socket]) 1258 .capture_output(); 1259 1260 let mut daemon_child: std::process::Child; 1261 let mut child: std::process::Child; 1262 1263 if client_mode_daemon { 1264 child = 
ch_command.spawn().unwrap();
        // Make sure the VMM is waiting for the backend to connect
        thread::sleep(std::time::Duration::new(10, 0));
        daemon_child = daemon_command.spawn().unwrap();
    } else {
        daemon_child = daemon_command.spawn().unwrap();
        // Make sure the backend is waiting for the VMM to connect
        thread::sleep(std::time::Duration::new(10, 0));
        child = ch_command.spawn().unwrap();
    }

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if let Some(tap_name) = tap {
            let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}"));
            assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
        }

        // Check the generated host MAC (not the tap name) shows up on the host.
        if let Some(host_mac) = host_mac {
            let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}"));
            assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
        }

        #[cfg(target_arch = "aarch64")]
        let iface = "enp0s4";
        #[cfg(target_arch = "x86_64")]
        let iface = "ens4";

        assert_eq!(
            guest
                .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
                .unwrap()
                .trim(),
            "3000"
        );

        // 1 network interface + default localhost ==> 2 interfaces
        // It's important to note that this test is fully exercising the
        // vhost-user-net implementation and the associated backend since
        // it does not define any --net network interface. That means all
        // the ssh communication in that test happens through the network
        // interface backed by vhost-user-net.
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // The following PCI devices will appear on the guest with PCI-MSI
        // interrupt vectors assigned.
        // 1 virtio-console with 3 vectors: config, Rx, Tx
        // 1 virtio-blk     with 2 vectors: config, Request
        // 1 virtio-blk     with 2 vectors: config, Request
        // 1 virtio-rng     with 2 vectors: config, Request
        // Since virtio-net has `num_queues / 2` queue pairs, it uses
        // `num_queues + 1` vectors: config, plus Rx and Tx for each pair.
        // Based on the above, the total number of vectors should be
        // 10 + num_queues (e.g. 14 with 2 queue pairs).
        #[cfg(target_arch = "x86_64")]
        let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
        #[cfg(target_arch = "aarch64")]
        let grep_cmd = "grep -c ITS-MSI /proc/interrupts";
        assert_eq!(
            guest
                .ssh_command(grep_cmd)
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            10 + (num_queues as u32)
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            // Here, by simply checking the size (through ssh), we validate that
            // the connection is still working, which means vhost-user-net
            // keeps working after the resize.
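            // 960_000 kB is a conservative lower bound for the 1 GiB of guest
            // RAM after the resize, since the guest reports a bit less than the
            // full amount once kernel reservations are accounted for.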
1354 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1355 } 1356 }); 1357 1358 kill_child(&mut child); 1359 let output = child.wait_with_output().unwrap(); 1360 1361 thread::sleep(std::time::Duration::new(5, 0)); 1362 let _ = daemon_child.kill(); 1363 let _ = daemon_child.wait(); 1364 1365 handle_child_output(r, &output); 1366 } 1367 1368 type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String); 1369 1370 fn test_vhost_user_blk( 1371 num_queues: usize, 1372 readonly: bool, 1373 direct: bool, 1374 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>, 1375 ) { 1376 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1377 let guest = Guest::new(Box::new(focal)); 1378 let api_socket = temp_api_path(&guest.tmp_dir); 1379 1380 let kernel_path = direct_kernel_boot_path(); 1381 1382 let (blk_params, daemon_child) = { 1383 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap(); 1384 // Start the daemon 1385 let (daemon_child, vubd_socket_path) = 1386 prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct); 1387 1388 ( 1389 format!( 1390 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128", 1391 ), 1392 Some(daemon_child), 1393 ) 1394 }; 1395 1396 let mut child = GuestCommand::new(&guest) 1397 .args(["--cpus", format!("boot={num_queues}").as_str()]) 1398 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1399 .args(["--kernel", kernel_path.to_str().unwrap()]) 1400 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1401 .args([ 1402 "--disk", 1403 format!( 1404 "path={}", 1405 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 1406 ) 1407 .as_str(), 1408 format!( 1409 "path={}", 1410 guest.disk_config.disk(DiskType::CloudInit).unwrap() 1411 ) 1412 .as_str(), 1413 blk_params.as_str(), 1414 ]) 1415 .default_net() 1416 .args(["--api-socket", &api_socket]) 1417 .capture_output() 1418 .spawn() 1419 .unwrap(); 1420 1421 let r = std::panic::catch_unwind(|| { 1422 guest.wait_vm_boot(None).unwrap(); 1423 1424 // Check both if /dev/vdc exists and if the block size is 16M. 1425 assert_eq!( 1426 guest 1427 .ssh_command("lsblk | grep vdc | grep -c 16M") 1428 .unwrap() 1429 .trim() 1430 .parse::<u32>() 1431 .unwrap_or_default(), 1432 1 1433 ); 1434 1435 // Check if this block is RO or RW. 1436 assert_eq!( 1437 guest 1438 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 1439 .unwrap() 1440 .trim() 1441 .parse::<u32>() 1442 .unwrap_or_default(), 1443 readonly as u32 1444 ); 1445 1446 // Check if the number of queues in /sys/block/vdc/mq matches the 1447 // expected num_queues. 1448 assert_eq!( 1449 guest 1450 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 1451 .unwrap() 1452 .trim() 1453 .parse::<u32>() 1454 .unwrap_or_default(), 1455 num_queues as u32 1456 ); 1457 1458 // Mount the device 1459 let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" }; 1460 guest.ssh_command("mkdir mount_image").unwrap(); 1461 guest 1462 .ssh_command( 1463 format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(), 1464 ) 1465 .unwrap(); 1466 1467 // Check the content of the block device. The file "foo" should 1468 // contain "bar". 1469 assert_eq!( 1470 guest.ssh_command("cat mount_image/foo").unwrap().trim(), 1471 "bar" 1472 ); 1473 1474 // ACPI feature is needed. 
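        // Memory hotplug relies on ACPI, hence this block is x86_64 only.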
1475 #[cfg(target_arch = "x86_64")] 1476 { 1477 guest.enable_memory_hotplug(); 1478 1479 // Add RAM to the VM 1480 let desired_ram = 1024 << 20; 1481 resize_command(&api_socket, None, Some(desired_ram), None, None); 1482 1483 thread::sleep(std::time::Duration::new(10, 0)); 1484 1485 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1486 1487 // Check again the content of the block device after the resize 1488 // has been performed. 1489 assert_eq!( 1490 guest.ssh_command("cat mount_image/foo").unwrap().trim(), 1491 "bar" 1492 ); 1493 } 1494 1495 // Unmount the device 1496 guest.ssh_command("sudo umount /dev/vdc").unwrap(); 1497 guest.ssh_command("rm -r mount_image").unwrap(); 1498 }); 1499 1500 kill_child(&mut child); 1501 let output = child.wait_with_output().unwrap(); 1502 1503 if let Some(mut daemon_child) = daemon_child { 1504 thread::sleep(std::time::Duration::new(5, 0)); 1505 let _ = daemon_child.kill(); 1506 let _ = daemon_child.wait(); 1507 } 1508 1509 handle_child_output(r, &output); 1510 } 1511 1512 fn test_boot_from_vhost_user_blk( 1513 num_queues: usize, 1514 readonly: bool, 1515 direct: bool, 1516 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>, 1517 ) { 1518 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1519 let guest = Guest::new(Box::new(focal)); 1520 1521 let kernel_path = direct_kernel_boot_path(); 1522 1523 let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap(); 1524 1525 let (blk_boot_params, daemon_child) = { 1526 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap(); 1527 // Start the daemon 1528 let (daemon_child, vubd_socket_path) = prepare_daemon( 1529 &guest.tmp_dir, 1530 disk_path.as_str(), 1531 num_queues, 1532 readonly, 1533 direct, 1534 ); 1535 1536 ( 1537 format!( 1538 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128", 1539 ), 1540 Some(daemon_child), 1541 ) 1542 }; 1543 1544 let mut child = GuestCommand::new(&guest) 1545 .args(["--cpus", format!("boot={num_queues}").as_str()]) 1546 .args(["--memory", "size=512M,shared=on"]) 1547 .args(["--kernel", kernel_path.to_str().unwrap()]) 1548 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1549 .args([ 1550 "--disk", 1551 blk_boot_params.as_str(), 1552 format!( 1553 "path={}", 1554 guest.disk_config.disk(DiskType::CloudInit).unwrap() 1555 ) 1556 .as_str(), 1557 ]) 1558 .default_net() 1559 .capture_output() 1560 .spawn() 1561 .unwrap(); 1562 1563 let r = std::panic::catch_unwind(|| { 1564 guest.wait_vm_boot(None).unwrap(); 1565 1566 // Just check the VM booted correctly. 
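        // 480_000 kB is a conservative lower bound for the 512 MiB of guest RAM.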
1567 assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32); 1568 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 1569 }); 1570 kill_child(&mut child); 1571 let output = child.wait_with_output().unwrap(); 1572 1573 if let Some(mut daemon_child) = daemon_child { 1574 thread::sleep(std::time::Duration::new(5, 0)); 1575 let _ = daemon_child.kill(); 1576 let _ = daemon_child.wait(); 1577 } 1578 1579 handle_child_output(r, &output); 1580 } 1581 1582 fn _test_virtio_fs( 1583 prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String), 1584 hotplug: bool, 1585 pci_segment: Option<u16>, 1586 ) { 1587 #[cfg(target_arch = "aarch64")] 1588 let focal_image = if hotplug { 1589 FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string() 1590 } else { 1591 FOCAL_IMAGE_NAME.to_string() 1592 }; 1593 #[cfg(target_arch = "x86_64")] 1594 let focal_image = FOCAL_IMAGE_NAME.to_string(); 1595 let focal = UbuntuDiskConfig::new(focal_image); 1596 let guest = Guest::new(Box::new(focal)); 1597 let api_socket = temp_api_path(&guest.tmp_dir); 1598 1599 let mut workload_path = dirs::home_dir().unwrap(); 1600 workload_path.push("workloads"); 1601 1602 let mut shared_dir = workload_path; 1603 shared_dir.push("shared_dir"); 1604 1605 #[cfg(target_arch = "x86_64")] 1606 let kernel_path = direct_kernel_boot_path(); 1607 #[cfg(target_arch = "aarch64")] 1608 let kernel_path = if hotplug { 1609 edk2_path() 1610 } else { 1611 direct_kernel_boot_path() 1612 }; 1613 1614 let (mut daemon_child, virtiofsd_socket_path) = 1615 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap()); 1616 1617 let mut guest_command = GuestCommand::new(&guest); 1618 guest_command 1619 .args(["--cpus", "boot=1"]) 1620 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1621 .args(["--kernel", kernel_path.to_str().unwrap()]) 1622 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1623 .default_disks() 1624 .default_net() 1625 .args(["--api-socket", &api_socket]); 1626 if pci_segment.is_some() { 1627 guest_command.args([ 1628 "--platform", 1629 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 1630 ]); 1631 } 1632 1633 let fs_params = format!( 1634 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}", 1635 virtiofsd_socket_path, 1636 if let Some(pci_segment) = pci_segment { 1637 format!(",pci_segment={pci_segment}") 1638 } else { 1639 "".to_owned() 1640 } 1641 ); 1642 1643 if !hotplug { 1644 guest_command.args(["--fs", fs_params.as_str()]); 1645 } 1646 1647 let mut child = guest_command.capture_output().spawn().unwrap(); 1648 1649 let r = std::panic::catch_unwind(|| { 1650 guest.wait_vm_boot(None).unwrap(); 1651 1652 if hotplug { 1653 // Add fs to the VM 1654 let (cmd_success, cmd_output) = 1655 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params)); 1656 assert!(cmd_success); 1657 1658 if let Some(pci_segment) = pci_segment { 1659 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 1660 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 1661 ))); 1662 } else { 1663 assert!(String::from_utf8_lossy(&cmd_output) 1664 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}")); 1665 } 1666 1667 thread::sleep(std::time::Duration::new(10, 0)); 1668 } 1669 1670 // Mount shared directory through virtio_fs filesystem 1671 guest 1672 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/") 1673 .unwrap(); 1674 1675 // Check file1 exists and its content is "foo" 1676 assert_eq!( 1677 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1678 "foo" 1679 ); 
1680 // Check file2 does not exist 1681 guest 1682 .ssh_command("[ ! -f 'mount_dir/file2' ] || true") 1683 .unwrap(); 1684 1685 // Check file3 exists and its content is "bar" 1686 assert_eq!( 1687 guest.ssh_command("cat mount_dir/file3").unwrap().trim(), 1688 "bar" 1689 ); 1690 1691 // ACPI feature is needed. 1692 #[cfg(target_arch = "x86_64")] 1693 { 1694 guest.enable_memory_hotplug(); 1695 1696 // Add RAM to the VM 1697 let desired_ram = 1024 << 20; 1698 resize_command(&api_socket, None, Some(desired_ram), None, None); 1699 1700 thread::sleep(std::time::Duration::new(30, 0)); 1701 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1702 1703 // After the resize, check again that file1 exists and its 1704 // content is "foo". 1705 assert_eq!( 1706 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1707 "foo" 1708 ); 1709 } 1710 1711 if hotplug { 1712 // Remove from VM 1713 guest.ssh_command("sudo umount mount_dir").unwrap(); 1714 assert!(remote_command(&api_socket, "remove-device", Some("myfs0"))); 1715 } 1716 }); 1717 1718 let (r, hotplug_daemon_child) = if r.is_ok() && hotplug { 1719 thread::sleep(std::time::Duration::new(10, 0)); 1720 let (daemon_child, virtiofsd_socket_path) = 1721 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap()); 1722 1723 let r = std::panic::catch_unwind(|| { 1724 thread::sleep(std::time::Duration::new(10, 0)); 1725 let fs_params = format!( 1726 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}", 1727 virtiofsd_socket_path, 1728 if let Some(pci_segment) = pci_segment { 1729 format!(",pci_segment={pci_segment}") 1730 } else { 1731 "".to_owned() 1732 } 1733 ); 1734 1735 // Add back and check it works 1736 let (cmd_success, cmd_output) = 1737 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params)); 1738 assert!(cmd_success); 1739 if let Some(pci_segment) = pci_segment { 1740 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 1741 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 1742 ))); 1743 } else { 1744 assert!(String::from_utf8_lossy(&cmd_output) 1745 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}")); 1746 } 1747 1748 thread::sleep(std::time::Duration::new(10, 0)); 1749 // Mount shared directory through virtio_fs filesystem 1750 guest 1751 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/") 1752 .unwrap(); 1753 1754 // Check file1 exists and its content is "foo" 1755 assert_eq!( 1756 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1757 "foo" 1758 ); 1759 }); 1760 1761 (r, Some(daemon_child)) 1762 } else { 1763 (r, None) 1764 }; 1765 1766 kill_child(&mut child); 1767 let output = child.wait_with_output().unwrap(); 1768 1769 let _ = daemon_child.kill(); 1770 let _ = daemon_child.wait(); 1771 1772 if let Some(mut daemon_child) = hotplug_daemon_child { 1773 let _ = daemon_child.kill(); 1774 let _ = daemon_child.wait(); 1775 } 1776 1777 handle_child_output(r, &output); 1778 } 1779 1780 fn test_virtio_pmem(discard_writes: bool, specify_size: bool) { 1781 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1782 let guest = Guest::new(Box::new(focal)); 1783 1784 let kernel_path = direct_kernel_boot_path(); 1785 1786 let pmem_temp_file = TempFile::new().unwrap(); 1787 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 1788 1789 std::process::Command::new("mkfs.ext4") 1790 .arg(pmem_temp_file.as_path()) 1791 .output() 1792 .expect("Expect creating disk image to succeed"); 1793 1794 let mut child = GuestCommand::new(&guest) 1795 
.args(["--cpus", "boot=1"]) 1796 .args(["--memory", "size=512M"]) 1797 .args(["--kernel", kernel_path.to_str().unwrap()]) 1798 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1799 .default_disks() 1800 .default_net() 1801 .args([ 1802 "--pmem", 1803 format!( 1804 "file={}{}{}", 1805 pmem_temp_file.as_path().to_str().unwrap(), 1806 if specify_size { ",size=128M" } else { "" }, 1807 if discard_writes { 1808 ",discard_writes=on" 1809 } else { 1810 "" 1811 } 1812 ) 1813 .as_str(), 1814 ]) 1815 .capture_output() 1816 .spawn() 1817 .unwrap(); 1818 1819 let r = std::panic::catch_unwind(|| { 1820 guest.wait_vm_boot(None).unwrap(); 1821 1822 // Check for the presence of /dev/pmem0 1823 assert_eq!( 1824 guest.ssh_command("ls /dev/pmem0").unwrap().trim(), 1825 "/dev/pmem0" 1826 ); 1827 1828 // Check changes persist after reboot 1829 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1830 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 1831 guest 1832 .ssh_command("echo test123 | sudo tee /mnt/test") 1833 .unwrap(); 1834 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 1835 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 1836 1837 guest.reboot_linux(0, None); 1838 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1839 assert_eq!( 1840 guest 1841 .ssh_command("sudo cat /mnt/test || true") 1842 .unwrap() 1843 .trim(), 1844 if discard_writes { "" } else { "test123" } 1845 ); 1846 }); 1847 1848 kill_child(&mut child); 1849 let output = child.wait_with_output().unwrap(); 1850 1851 handle_child_output(r, &output); 1852 } 1853 1854 fn get_fd_count(pid: u32) -> usize { 1855 fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count() 1856 } 1857 1858 fn _test_virtio_vsock(hotplug: bool) { 1859 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1860 let guest = Guest::new(Box::new(focal)); 1861 1862 #[cfg(target_arch = "x86_64")] 1863 let kernel_path = direct_kernel_boot_path(); 1864 #[cfg(target_arch = "aarch64")] 1865 let kernel_path = if hotplug { 1866 edk2_path() 1867 } else { 1868 direct_kernel_boot_path() 1869 }; 1870 1871 let socket = temp_vsock_path(&guest.tmp_dir); 1872 let api_socket = temp_api_path(&guest.tmp_dir); 1873 1874 let mut cmd = GuestCommand::new(&guest); 1875 cmd.args(["--api-socket", &api_socket]); 1876 cmd.args(["--cpus", "boot=1"]); 1877 cmd.args(["--memory", "size=512M"]); 1878 cmd.args(["--kernel", kernel_path.to_str().unwrap()]); 1879 cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]); 1880 cmd.default_disks(); 1881 cmd.default_net(); 1882 1883 if !hotplug { 1884 cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]); 1885 } 1886 1887 let mut child = cmd.capture_output().spawn().unwrap(); 1888 1889 let r = std::panic::catch_unwind(|| { 1890 guest.wait_vm_boot(None).unwrap(); 1891 1892 if hotplug { 1893 let (cmd_success, cmd_output) = remote_command_w_output( 1894 &api_socket, 1895 "add-vsock", 1896 Some(format!("cid=3,socket={socket},id=test0").as_str()), 1897 ); 1898 assert!(cmd_success); 1899 assert!(String::from_utf8_lossy(&cmd_output) 1900 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 1901 thread::sleep(std::time::Duration::new(10, 0)); 1902 // Check adding a second one fails 1903 assert!(!remote_command( 1904 &api_socket, 1905 "add-vsock", 1906 Some("cid=1234,socket=/tmp/fail") 1907 )); 1908 } 1909 1910 // Validate vsock works as expected. 
1911 guest.check_vsock(socket.as_str()); 1912 guest.reboot_linux(0, None); 1913 // Validate vsock still works after a reboot. 1914 guest.check_vsock(socket.as_str()); 1915 1916 if hotplug { 1917 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 1918 } 1919 }); 1920 1921 kill_child(&mut child); 1922 let output = child.wait_with_output().unwrap(); 1923 1924 handle_child_output(r, &output); 1925 } 1926 1927 fn get_ksm_pages_shared() -> u32 { 1928 fs::read_to_string("/sys/kernel/mm/ksm/pages_shared") 1929 .unwrap() 1930 .trim() 1931 .parse::<u32>() 1932 .unwrap() 1933 } 1934 1935 fn test_memory_mergeable(mergeable: bool) { 1936 let memory_param = if mergeable { 1937 "mergeable=on" 1938 } else { 1939 "mergeable=off" 1940 }; 1941 1942 // We are assuming the rest of the system in our CI is not using mergeable memory 1943 let ksm_ps_init = get_ksm_pages_shared(); 1944 assert!(ksm_ps_init == 0); 1945 1946 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1947 let guest1 = Guest::new(Box::new(focal1)); 1948 let mut child1 = GuestCommand::new(&guest1) 1949 .args(["--cpus", "boot=1"]) 1950 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1951 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1952 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1953 .default_disks() 1954 .args(["--net", guest1.default_net_string().as_str()]) 1955 .args(["--serial", "tty", "--console", "off"]) 1956 .capture_output() 1957 .spawn() 1958 .unwrap(); 1959 1960 let r = std::panic::catch_unwind(|| { 1961 guest1.wait_vm_boot(None).unwrap(); 1962 }); 1963 if r.is_err() { 1964 kill_child(&mut child1); 1965 let output = child1.wait_with_output().unwrap(); 1966 handle_child_output(r, &output); 1967 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 1968 } 1969 1970 let ksm_ps_guest1 = get_ksm_pages_shared(); 1971 1972 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1973 let guest2 = Guest::new(Box::new(focal2)); 1974 let mut child2 = GuestCommand::new(&guest2) 1975 .args(["--cpus", "boot=1"]) 1976 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1977 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1978 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1979 .default_disks() 1980 .args(["--net", guest2.default_net_string().as_str()]) 1981 .args(["--serial", "tty", "--console", "off"]) 1982 .capture_output() 1983 .spawn() 1984 .unwrap(); 1985 1986 let r = std::panic::catch_unwind(|| { 1987 guest2.wait_vm_boot(None).unwrap(); 1988 let ksm_ps_guest2 = get_ksm_pages_shared(); 1989 1990 if mergeable { 1991 println!( 1992 "ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'" 1993 ); 1994 // We are expecting the number of shared pages to increase as the number of VM increases 1995 assert!(ksm_ps_guest1 < ksm_ps_guest2); 1996 } else { 1997 assert!(ksm_ps_guest1 == 0); 1998 assert!(ksm_ps_guest2 == 0); 1999 } 2000 }); 2001 2002 kill_child(&mut child1); 2003 kill_child(&mut child2); 2004 2005 let output = child1.wait_with_output().unwrap(); 2006 child2.wait().unwrap(); 2007 2008 handle_child_output(r, &output); 2009 } 2010 2011 fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> { 2012 let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap(); 2013 let reader = io::BufReader::new(smaps); 2014 2015 let mut skip_map: bool = false; 2016 let mut region_name: String = "".to_string(); 2017 let mut 
region_maps = HashMap::new(); 2018 for line in reader.lines() { 2019 let l = line.unwrap(); 2020 2021 if l.contains('-') { 2022 let values: Vec<&str> = l.split_whitespace().collect(); 2023 region_name = values.last().unwrap().trim().to_string(); 2024 if region_name == "0" { 2025 region_name = "anonymous".to_string() 2026 } 2027 } 2028 2029 // Each section begins with something that looks like: 2030 // Size: 2184 kB 2031 if l.starts_with("Size:") { 2032 let values: Vec<&str> = l.split_whitespace().collect(); 2033 let map_size = values[1].parse::<u32>().unwrap(); 2034 // We skip the assigned guest RAM map, its RSS is only 2035 // dependent on the guest actual memory usage. 2036 // Everything else can be added to the VMM overhead. 2037 skip_map = map_size >= guest_memory_size; 2038 continue; 2039 } 2040 2041 // If this is a map we're taking into account, then we only 2042 // count the RSS. The sum of all counted RSS is the VMM overhead. 2043 if !skip_map && l.starts_with("Rss:") { 2044 let values: Vec<&str> = l.split_whitespace().collect(); 2045 let value = values[1].trim().parse::<u32>().unwrap(); 2046 *region_maps.entry(region_name.clone()).or_insert(0) += value; 2047 } 2048 } 2049 2050 region_maps 2051 } 2052 2053 fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 { 2054 let mut total = 0; 2055 2056 for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) { 2057 eprintln!("{region_name}: {value}"); 2058 total += value; 2059 } 2060 2061 total 2062 } 2063 2064 fn process_rss_kib(pid: u32) -> usize { 2065 let command = format!("ps -q {pid} -o rss="); 2066 let rss = exec_host_command_output(&command); 2067 String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap() 2068 } 2069 2070 // 10MB is our maximum accepted overhead. 2071 const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024; 2072 2073 #[derive(PartialEq, Eq, PartialOrd)] 2074 struct Counters { 2075 rx_bytes: u64, 2076 rx_frames: u64, 2077 tx_bytes: u64, 2078 tx_frames: u64, 2079 read_bytes: u64, 2080 write_bytes: u64, 2081 read_ops: u64, 2082 write_ops: u64, 2083 } 2084 2085 fn get_counters(api_socket: &str) -> Counters { 2086 // Get counters 2087 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None); 2088 assert!(cmd_success); 2089 2090 let counters: HashMap<&str, HashMap<&str, u64>> = 2091 serde_json::from_slice(&cmd_output).unwrap_or_default(); 2092 2093 let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap(); 2094 let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap(); 2095 let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap(); 2096 let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap(); 2097 2098 let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap(); 2099 let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap(); 2100 let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap(); 2101 let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap(); 2102 2103 Counters { 2104 rx_bytes, 2105 rx_frames, 2106 tx_bytes, 2107 tx_frames, 2108 read_bytes, 2109 write_bytes, 2110 read_ops, 2111 write_ops, 2112 } 2113 } 2114 2115 fn pty_read(mut pty: std::fs::File) -> Receiver<String> { 2116 let (tx, rx) = mpsc::channel::<String>(); 2117 thread::spawn(move || loop { 2118 thread::sleep(std::time::Duration::new(1, 0)); 2119 let mut buf = [0; 512]; 2120 match pty.read(&mut buf) { 2121 Ok(_bytes) => { 2122 let output = 
std::str::from_utf8(&buf).unwrap().to_string(); 2123 match tx.send(output) { 2124 Ok(_) => (), 2125 Err(_) => break, 2126 } 2127 } 2128 Err(_) => break, 2129 } 2130 }); 2131 rx 2132 } 2133 2134 fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf { 2135 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2136 assert!(cmd_success); 2137 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2138 assert_eq!("Pty", info["config"][pty_type]["mode"]); 2139 PathBuf::from( 2140 info["config"][pty_type]["file"] 2141 .as_str() 2142 .expect("Missing pty path"), 2143 ) 2144 } 2145 2146 // VFIO test network setup. 2147 // We reserve a different IP class for it: 172.18.0.0/24. 2148 #[cfg(target_arch = "x86_64")] 2149 fn setup_vfio_network_interfaces() { 2150 // 'vfio-br0' 2151 assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success()); 2152 assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success()); 2153 assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success()); 2154 // 'vfio-tap0' 2155 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success()); 2156 assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success()); 2157 assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success()); 2158 // 'vfio-tap1' 2159 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success()); 2160 assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success()); 2161 assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success()); 2162 // 'vfio-tap2' 2163 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success()); 2164 assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success()); 2165 assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success()); 2166 // 'vfio-tap3' 2167 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success()); 2168 assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success()); 2169 assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success()); 2170 } 2171 2172 // Tear VFIO test network down 2173 #[cfg(target_arch = "x86_64")] 2174 fn cleanup_vfio_network_interfaces() { 2175 assert!(exec_host_command_status("sudo ip link del vfio-br0").success()); 2176 assert!(exec_host_command_status("sudo ip link del vfio-tap0").success()); 2177 assert!(exec_host_command_status("sudo ip link del vfio-tap1").success()); 2178 assert!(exec_host_command_status("sudo ip link del vfio-tap2").success()); 2179 assert!(exec_host_command_status("sudo ip link del vfio-tap3").success()); 2180 } 2181 2182 fn balloon_size(api_socket: &str) -> u64 { 2183 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2184 assert!(cmd_success); 2185 2186 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2187 let total_mem = &info["config"]["memory"]["size"] 2188 .to_string() 2189 .parse::<u64>() 2190 .unwrap(); 2191 let actual_mem = &info["memory_actual_size"] 2192 .to_string() 2193 .parse::<u64>() 2194 .unwrap(); 2195 total_mem - actual_mem 2196 } 2197 2198 fn vm_state(api_socket: &str) -> String { 2199 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2200 assert!(cmd_success); 2201 2202 let info: serde_json::Value = 
serde_json::from_slice(&cmd_output).unwrap_or_default(); 2203 let state = &info["state"].as_str().unwrap(); 2204 2205 state.to_string() 2206 } 2207 2208 // This test validates that it can find the virtio-iommu device at first. 2209 // It also verifies that both disks and the network card are attached to 2210 // the virtual IOMMU by looking at /sys/kernel/iommu_groups directory. 2211 // The last interesting part of this test is that it exercises the network 2212 // interface attached to the virtual IOMMU since this is the one used to 2213 // send all commands through SSH. 2214 fn _test_virtio_iommu(acpi: bool) { 2215 // Virtio-iommu support is ready in recent kernel (v5.14). But the kernel in 2216 // Focal image is still old. 2217 // So if ACPI is enabled on AArch64, we use a modified Focal image in which 2218 // the kernel binary has been updated. 2219 #[cfg(target_arch = "aarch64")] 2220 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 2221 #[cfg(target_arch = "x86_64")] 2222 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2223 let focal = UbuntuDiskConfig::new(focal_image); 2224 let guest = Guest::new(Box::new(focal)); 2225 2226 #[cfg(target_arch = "x86_64")] 2227 let kernel_path = direct_kernel_boot_path(); 2228 #[cfg(target_arch = "aarch64")] 2229 let kernel_path = if acpi { 2230 edk2_path() 2231 } else { 2232 direct_kernel_boot_path() 2233 }; 2234 2235 let mut child = GuestCommand::new(&guest) 2236 .args(["--cpus", "boot=1"]) 2237 .args(["--memory", "size=512M"]) 2238 .args(["--kernel", kernel_path.to_str().unwrap()]) 2239 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2240 .args([ 2241 "--disk", 2242 format!( 2243 "path={},iommu=on", 2244 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2245 ) 2246 .as_str(), 2247 format!( 2248 "path={},iommu=on", 2249 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2250 ) 2251 .as_str(), 2252 ]) 2253 .args(["--net", guest.default_net_string_w_iommu().as_str()]) 2254 .capture_output() 2255 .spawn() 2256 .unwrap(); 2257 2258 let r = std::panic::catch_unwind(|| { 2259 guest.wait_vm_boot(None).unwrap(); 2260 2261 // Verify the virtio-iommu device is present. 2262 assert!(guest 2263 .does_device_vendor_pair_match("0x1057", "0x1af4") 2264 .unwrap_or_default()); 2265 2266 // On AArch64, if the guest system boots from FDT, the behavior of IOMMU is a bit 2267 // different with ACPI. 2268 // All devices on the PCI bus will be attached to the virtual IOMMU, except the 2269 // virtio-iommu device itself. So these devices will all be added to IOMMU groups, 2270 // and appear under folder '/sys/kernel/iommu_groups/'. 2271 // The result is, in the case of FDT, IOMMU group '0' contains "0000:00:01.0" 2272 // which is the console. The first disk "0000:00:02.0" is in group '1'. 2273 // While on ACPI, console device is not attached to IOMMU. So the IOMMU group '0' 2274 // contains "0000:00:02.0" which is the first disk. 2275 // 2276 // Verify the iommu group of the first disk. 2277 let iommu_group = !acpi as i32; 2278 assert_eq!( 2279 guest 2280 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2281 .unwrap() 2282 .trim(), 2283 "0000:00:02.0" 2284 ); 2285 2286 // Verify the iommu group of the second disk. 2287 let iommu_group = if acpi { 1 } else { 2 }; 2288 assert_eq!( 2289 guest 2290 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2291 .unwrap() 2292 .trim(), 2293 "0000:00:03.0" 2294 ); 2295 2296 // Verify the iommu group of the network card. 
2297 let iommu_group = if acpi { 2 } else { 3 }; 2298 assert_eq!( 2299 guest 2300 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2301 .unwrap() 2302 .trim(), 2303 "0000:00:04.0" 2304 ); 2305 }); 2306 2307 kill_child(&mut child); 2308 let output = child.wait_with_output().unwrap(); 2309 2310 handle_child_output(r, &output); 2311 } 2312 2313 fn get_reboot_count(guest: &Guest) -> u32 { 2314 guest 2315 .ssh_command("sudo last | grep -c reboot") 2316 .unwrap() 2317 .trim() 2318 .parse::<u32>() 2319 .unwrap_or_default() 2320 } 2321 2322 fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) { 2323 // Check for PCI device 2324 assert!(guest 2325 .does_device_vendor_pair_match("0x1063", "0x1af4") 2326 .unwrap_or_default()); 2327 2328 // Enable systemd watchdog 2329 guest 2330 .ssh_command(&format!( 2331 "echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf" 2332 )) 2333 .unwrap(); 2334 2335 guest.ssh_command("sudo systemctl daemon-reexec").unwrap(); 2336 } 2337 2338 fn make_guest_panic(guest: &Guest) { 2339 // Check for pvpanic device 2340 assert!(guest 2341 .does_device_vendor_pair_match("0x0011", "0x1b36") 2342 .unwrap_or_default()); 2343 2344 // Trigger guest a panic 2345 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 2346 } 2347 2348 mod common_parallel { 2349 use std::{fs::OpenOptions, io::SeekFrom}; 2350 2351 use crate::*; 2352 2353 #[test] 2354 #[cfg(target_arch = "x86_64")] 2355 fn test_focal_hypervisor_fw() { 2356 test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME) 2357 } 2358 2359 #[test] 2360 #[cfg(target_arch = "x86_64")] 2361 fn test_focal_ovmf() { 2362 test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME) 2363 } 2364 2365 #[cfg(target_arch = "x86_64")] 2366 fn test_simple_launch(fw_path: String, disk_path: &str) { 2367 let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string())); 2368 let guest = Guest::new(disk_config); 2369 let event_path = temp_event_monitor_path(&guest.tmp_dir); 2370 2371 let mut child = GuestCommand::new(&guest) 2372 .args(["--cpus", "boot=1"]) 2373 .args(["--memory", "size=512M"]) 2374 .args(["--kernel", fw_path.as_str()]) 2375 .default_disks() 2376 .default_net() 2377 .args(["--serial", "tty", "--console", "off"]) 2378 .args(["--event-monitor", format!("path={event_path}").as_str()]) 2379 .capture_output() 2380 .spawn() 2381 .unwrap(); 2382 2383 let r = std::panic::catch_unwind(|| { 2384 guest.wait_vm_boot(Some(120)).unwrap(); 2385 2386 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2387 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2388 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 2389 2390 let expected_sequential_events = [ 2391 &MetaEvent { 2392 event: "starting".to_string(), 2393 device_id: None, 2394 }, 2395 &MetaEvent { 2396 event: "booting".to_string(), 2397 device_id: None, 2398 }, 2399 &MetaEvent { 2400 event: "booted".to_string(), 2401 device_id: None, 2402 }, 2403 &MetaEvent { 2404 event: "activated".to_string(), 2405 device_id: Some("_disk0".to_string()), 2406 }, 2407 &MetaEvent { 2408 event: "reset".to_string(), 2409 device_id: Some("_disk0".to_string()), 2410 }, 2411 ]; 2412 assert!(check_sequential_events( 2413 &expected_sequential_events, 2414 &event_path 2415 )); 2416 2417 // It's been observed on the Bionic image that udev and snapd 2418 // services can cause some delay in the VM's 
shutdown. Disabling 2419 // them improves the reliability of this test. 2420 let _ = guest.ssh_command("sudo systemctl disable udev"); 2421 let _ = guest.ssh_command("sudo systemctl stop udev"); 2422 let _ = guest.ssh_command("sudo systemctl disable snapd"); 2423 let _ = guest.ssh_command("sudo systemctl stop snapd"); 2424 2425 guest.ssh_command("sudo poweroff").unwrap(); 2426 thread::sleep(std::time::Duration::new(20, 0)); 2427 let latest_events = [ 2428 &MetaEvent { 2429 event: "shutdown".to_string(), 2430 device_id: None, 2431 }, 2432 &MetaEvent { 2433 event: "deleted".to_string(), 2434 device_id: None, 2435 }, 2436 &MetaEvent { 2437 event: "shutdown".to_string(), 2438 device_id: None, 2439 }, 2440 ]; 2441 assert!(check_latest_events_exact(&latest_events, &event_path)); 2442 }); 2443 2444 kill_child(&mut child); 2445 let output = child.wait_with_output().unwrap(); 2446 2447 handle_child_output(r, &output); 2448 } 2449 2450 #[test] 2451 fn test_multi_cpu() { 2452 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 2453 let jammy = UbuntuDiskConfig::new(jammy_image); 2454 let guest = Guest::new(Box::new(jammy)); 2455 2456 let mut cmd = GuestCommand::new(&guest); 2457 cmd.args(["--cpus", "boot=2,max=4"]) 2458 .args(["--memory", "size=512M"]) 2459 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2460 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2461 .capture_output() 2462 .default_disks() 2463 .default_net(); 2464 2465 let mut child = cmd.spawn().unwrap(); 2466 2467 let r = std::panic::catch_unwind(|| { 2468 guest.wait_vm_boot(Some(120)).unwrap(); 2469 2470 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 2471 2472 assert_eq!( 2473 guest 2474 .ssh_command( 2475 r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""# 2476 ) 2477 .unwrap() 2478 .trim(), 2479 "smp: Brought up 1 node, 2 CPUs" 2480 ); 2481 }); 2482 2483 kill_child(&mut child); 2484 let output = child.wait_with_output().unwrap(); 2485 2486 handle_child_output(r, &output); 2487 } 2488 2489 #[test] 2490 fn test_cpu_topology_421() { 2491 test_cpu_topology(4, 2, 1, false); 2492 } 2493 2494 #[test] 2495 fn test_cpu_topology_142() { 2496 test_cpu_topology(1, 4, 2, false); 2497 } 2498 2499 #[test] 2500 fn test_cpu_topology_262() { 2501 test_cpu_topology(2, 6, 2, false); 2502 } 2503 2504 #[test] 2505 #[cfg(target_arch = "x86_64")] 2506 #[cfg(not(feature = "mshv"))] 2507 fn test_cpu_physical_bits() { 2508 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2509 let guest = Guest::new(Box::new(focal)); 2510 let max_phys_bits: u8 = 36; 2511 let mut child = GuestCommand::new(&guest) 2512 .args(["--cpus", &format!("max_phys_bits={max_phys_bits}")]) 2513 .args(["--memory", "size=512M"]) 2514 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2515 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2516 .default_disks() 2517 .default_net() 2518 .capture_output() 2519 .spawn() 2520 .unwrap(); 2521 2522 let r = std::panic::catch_unwind(|| { 2523 guest.wait_vm_boot(None).unwrap(); 2524 2525 assert!( 2526 guest 2527 .ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"") 2528 .unwrap() 2529 .trim() 2530 .parse::<u8>() 2531 .unwrap_or(max_phys_bits + 1) <= max_phys_bits, 2532 ); 2533 }); 2534 2535 kill_child(&mut child); 2536 let output = child.wait_with_output().unwrap(); 2537 2538 handle_child_output(r, &output); 2539 } 2540 2541 #[test] 2542 fn test_cpu_affinity() { 2543 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 
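        // The affinity syntax used below pins vCPU0 to host CPUs {0,2} and
        // vCPU1 to host CPUs {1,3}; the pinning is verified later by reading
        // each vcpu thread's affinity mask with taskset.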
2544 let guest = Guest::new(Box::new(focal)); 2545 2546 // We need the host to have at least 4 CPUs if we want to be able 2547 // to run this test. 2548 let host_cpus_count = exec_host_command_output("nproc"); 2549 assert!( 2550 String::from_utf8_lossy(&host_cpus_count.stdout) 2551 .trim() 2552 .parse::<u16>() 2553 .unwrap_or(0) 2554 >= 4 2555 ); 2556 2557 let mut child = GuestCommand::new(&guest) 2558 .args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"]) 2559 .args(["--memory", "size=512M"]) 2560 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2561 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2562 .default_disks() 2563 .default_net() 2564 .capture_output() 2565 .spawn() 2566 .unwrap(); 2567 2568 let r = std::panic::catch_unwind(|| { 2569 guest.wait_vm_boot(None).unwrap(); 2570 let pid = child.id(); 2571 let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2572 assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2"); 2573 let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2574 assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3"); 2575 }); 2576 2577 kill_child(&mut child); 2578 let output = child.wait_with_output().unwrap(); 2579 handle_child_output(r, &output); 2580 } 2581 2582 #[test] 2583 fn test_virtio_queue_affinity() { 2584 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2585 let guest = Guest::new(Box::new(focal)); 2586 2587 // We need the host to have at least 4 CPUs if we want to be able 2588 // to run this test. 2589 let host_cpus_count = exec_host_command_output("nproc"); 2590 assert!( 2591 String::from_utf8_lossy(&host_cpus_count.stdout) 2592 .trim() 2593 .parse::<u16>() 2594 .unwrap_or(0) 2595 >= 4 2596 ); 2597 2598 let mut child = GuestCommand::new(&guest) 2599 .args(["--cpus", "boot=4"]) 2600 .args(["--memory", "size=512M"]) 2601 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2602 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2603 .args([ 2604 "--disk", 2605 format!( 2606 "path={}", 2607 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2608 ) 2609 .as_str(), 2610 format!( 2611 "path={},num_queues=4,queue_affinity=[0@[0,2],1@[1,3],2@[1],3@[3]]", 2612 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2613 ) 2614 .as_str(), 2615 ]) 2616 .default_net() 2617 .capture_output() 2618 .spawn() 2619 .unwrap(); 2620 2621 let r = std::panic::catch_unwind(|| { 2622 guest.wait_vm_boot(None).unwrap(); 2623 let pid = child.id(); 2624 let taskset_q0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2625 assert_eq!(String::from_utf8_lossy(&taskset_q0.stdout).trim(), "0,2"); 2626 let taskset_q1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2627 assert_eq!(String::from_utf8_lossy(&taskset_q1.stdout).trim(), "1,3"); 2628 let taskset_q2 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q2 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2629 assert_eq!(String::from_utf8_lossy(&taskset_q2.stdout).trim(), "1"); 2630 let taskset_q3 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q3 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" 
\"").as_str()); 2631 assert_eq!(String::from_utf8_lossy(&taskset_q3.stdout).trim(), "3"); 2632 }); 2633 2634 kill_child(&mut child); 2635 let output = child.wait_with_output().unwrap(); 2636 handle_child_output(r, &output); 2637 } 2638 2639 #[test] 2640 #[cfg(not(feature = "mshv"))] 2641 fn test_large_vm() { 2642 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2643 let guest = Guest::new(Box::new(focal)); 2644 let mut cmd = GuestCommand::new(&guest); 2645 cmd.args(["--cpus", "boot=48"]) 2646 .args(["--memory", "size=5120M"]) 2647 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2648 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2649 .args(["--serial", "tty"]) 2650 .args(["--console", "off"]) 2651 .capture_output() 2652 .default_disks() 2653 .default_net(); 2654 2655 let mut child = cmd.spawn().unwrap(); 2656 2657 guest.wait_vm_boot(None).unwrap(); 2658 2659 let r = std::panic::catch_unwind(|| { 2660 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48); 2661 assert_eq!( 2662 guest 2663 .ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"") 2664 .unwrap() 2665 .trim(), 2666 "0-47" 2667 ); 2668 2669 assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000); 2670 }); 2671 2672 kill_child(&mut child); 2673 let output = child.wait_with_output().unwrap(); 2674 2675 handle_child_output(r, &output); 2676 } 2677 2678 #[test] 2679 #[cfg(not(feature = "mshv"))] 2680 fn test_huge_memory() { 2681 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2682 let guest = Guest::new(Box::new(focal)); 2683 let mut cmd = GuestCommand::new(&guest); 2684 cmd.args(["--cpus", "boot=1"]) 2685 .args(["--memory", "size=128G"]) 2686 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2687 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2688 .capture_output() 2689 .default_disks() 2690 .default_net(); 2691 2692 let mut child = cmd.spawn().unwrap(); 2693 2694 guest.wait_vm_boot(Some(120)).unwrap(); 2695 2696 let r = std::panic::catch_unwind(|| { 2697 assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000); 2698 }); 2699 2700 kill_child(&mut child); 2701 let output = child.wait_with_output().unwrap(); 2702 2703 handle_child_output(r, &output); 2704 } 2705 2706 #[test] 2707 fn test_power_button() { 2708 _test_power_button(false); 2709 } 2710 2711 #[test] 2712 #[cfg(not(feature = "mshv"))] 2713 fn test_user_defined_memory_regions() { 2714 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2715 let guest = Guest::new(Box::new(focal)); 2716 let api_socket = temp_api_path(&guest.tmp_dir); 2717 2718 let kernel_path = direct_kernel_boot_path(); 2719 2720 let mut child = GuestCommand::new(&guest) 2721 .args(["--cpus", "boot=1"]) 2722 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 2723 .args([ 2724 "--memory-zone", 2725 "id=mem0,size=1G,hotplug_size=2G", 2726 "id=mem1,size=1G,shared=on", 2727 "id=mem2,size=1G,host_numa_node=0,hotplug_size=2G", 2728 ]) 2729 .args(["--kernel", kernel_path.to_str().unwrap()]) 2730 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2731 .args(["--api-socket", &api_socket]) 2732 .capture_output() 2733 .default_disks() 2734 .default_net() 2735 .spawn() 2736 .unwrap(); 2737 2738 let r = std::panic::catch_unwind(|| { 2739 guest.wait_vm_boot(None).unwrap(); 2740 2741 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 2742 2743 guest.enable_memory_hotplug(); 2744 2745 resize_zone_command(&api_socket, "mem0", "3G"); 2746 thread::sleep(std::time::Duration::new(5, 
0)); 2747 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2748 resize_zone_command(&api_socket, "mem2", "3G"); 2749 thread::sleep(std::time::Duration::new(5, 0)); 2750 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 2751 resize_zone_command(&api_socket, "mem0", "2G"); 2752 thread::sleep(std::time::Duration::new(5, 0)); 2753 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 2754 resize_zone_command(&api_socket, "mem2", "2G"); 2755 thread::sleep(std::time::Duration::new(5, 0)); 2756 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2757 2758 guest.reboot_linux(0, None); 2759 2760 // Check the amount of RAM after reboot 2761 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2762 assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000); 2763 2764 // Check if we can still resize down to the initial 'boot'size 2765 resize_zone_command(&api_socket, "mem0", "1G"); 2766 thread::sleep(std::time::Duration::new(5, 0)); 2767 assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000); 2768 resize_zone_command(&api_socket, "mem2", "1G"); 2769 thread::sleep(std::time::Duration::new(5, 0)); 2770 assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000); 2771 }); 2772 2773 kill_child(&mut child); 2774 let output = child.wait_with_output().unwrap(); 2775 2776 handle_child_output(r, &output); 2777 } 2778 2779 #[test] 2780 #[cfg(not(feature = "mshv"))] 2781 fn test_guest_numa_nodes() { 2782 _test_guest_numa_nodes(false); 2783 } 2784 2785 #[test] 2786 #[cfg(target_arch = "x86_64")] 2787 fn test_iommu_segments() { 2788 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2789 let focal = UbuntuDiskConfig::new(focal_image); 2790 let guest = Guest::new(Box::new(focal)); 2791 2792 // Prepare another disk file for the virtio-disk device 2793 let test_disk_path = String::from( 2794 guest 2795 .tmp_dir 2796 .as_path() 2797 .join("test-disk.raw") 2798 .to_str() 2799 .unwrap(), 2800 ); 2801 assert!( 2802 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2803 ); 2804 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2805 2806 let api_socket = temp_api_path(&guest.tmp_dir); 2807 let mut cmd = GuestCommand::new(&guest); 2808 2809 cmd.args(["--cpus", "boot=1"]) 2810 .args(["--api-socket", &api_socket]) 2811 .args(["--memory", "size=512M"]) 2812 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2813 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2814 .args([ 2815 "--platform", 2816 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"), 2817 ]) 2818 .default_disks() 2819 .capture_output() 2820 .default_net(); 2821 2822 let mut child = cmd.spawn().unwrap(); 2823 2824 guest.wait_vm_boot(None).unwrap(); 2825 2826 let r = std::panic::catch_unwind(|| { 2827 let (cmd_success, cmd_output) = remote_command_w_output( 2828 &api_socket, 2829 "add-disk", 2830 Some( 2831 format!( 2832 "path={},id=test0,pci_segment=1,iommu=on", 2833 test_disk_path.as_str() 2834 ) 2835 .as_str(), 2836 ), 2837 ); 2838 assert!(cmd_success); 2839 assert!(String::from_utf8_lossy(&cmd_output) 2840 .contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}")); 2841 2842 // Check IOMMU setup 2843 assert!(guest 2844 .does_device_vendor_pair_match("0x1057", "0x1af4") 2845 .unwrap_or_default()); 2846 assert_eq!( 2847 guest 2848 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 2849 .unwrap() 2850 .trim(), 2851 "0001:00:01.0" 2852 ); 2853 }); 2854 
2855 kill_child(&mut child); 2856 let output = child.wait_with_output().unwrap(); 2857 2858 handle_child_output(r, &output); 2859 } 2860 2861 #[test] 2862 fn test_pci_msi() { 2863 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2864 let guest = Guest::new(Box::new(focal)); 2865 let mut cmd = GuestCommand::new(&guest); 2866 cmd.args(["--cpus", "boot=1"]) 2867 .args(["--memory", "size=512M"]) 2868 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2869 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2870 .capture_output() 2871 .default_disks() 2872 .default_net(); 2873 2874 let mut child = cmd.spawn().unwrap(); 2875 2876 guest.wait_vm_boot(None).unwrap(); 2877 2878 #[cfg(target_arch = "x86_64")] 2879 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 2880 #[cfg(target_arch = "aarch64")] 2881 let grep_cmd = "grep -c ITS-MSI /proc/interrupts"; 2882 2883 let r = std::panic::catch_unwind(|| { 2884 assert_eq!( 2885 guest 2886 .ssh_command(grep_cmd) 2887 .unwrap() 2888 .trim() 2889 .parse::<u32>() 2890 .unwrap_or_default(), 2891 12 2892 ); 2893 }); 2894 2895 kill_child(&mut child); 2896 let output = child.wait_with_output().unwrap(); 2897 2898 handle_child_output(r, &output); 2899 } 2900 2901 #[test] 2902 fn test_virtio_net_ctrl_queue() { 2903 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2904 let guest = Guest::new(Box::new(focal)); 2905 let mut cmd = GuestCommand::new(&guest); 2906 cmd.args(["--cpus", "boot=1"]) 2907 .args(["--memory", "size=512M"]) 2908 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2909 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2910 .args(["--net", guest.default_net_string_w_mtu(3000).as_str()]) 2911 .capture_output() 2912 .default_disks(); 2913 2914 let mut child = cmd.spawn().unwrap(); 2915 2916 guest.wait_vm_boot(None).unwrap(); 2917 2918 #[cfg(target_arch = "aarch64")] 2919 let iface = "enp0s4"; 2920 #[cfg(target_arch = "x86_64")] 2921 let iface = "ens4"; 2922 2923 let r = std::panic::catch_unwind(|| { 2924 assert_eq!( 2925 guest 2926 .ssh_command( 2927 format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str() 2928 ) 2929 .unwrap() 2930 .trim(), 2931 "success" 2932 ); 2933 assert_eq!( 2934 guest 2935 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str()) 2936 .unwrap() 2937 .trim(), 2938 "3000" 2939 ); 2940 }); 2941 2942 kill_child(&mut child); 2943 let output = child.wait_with_output().unwrap(); 2944 2945 handle_child_output(r, &output); 2946 } 2947 2948 #[test] 2949 fn test_pci_multiple_segments() { 2950 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2951 let guest = Guest::new(Box::new(focal)); 2952 2953 // Prepare another disk file for the virtio-disk device 2954 let test_disk_path = String::from( 2955 guest 2956 .tmp_dir 2957 .as_path() 2958 .join("test-disk.raw") 2959 .to_str() 2960 .unwrap(), 2961 ); 2962 assert!( 2963 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2964 ); 2965 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2966 2967 let mut cmd = GuestCommand::new(&guest); 2968 cmd.args(["--cpus", "boot=1"]) 2969 .args(["--memory", "size=512M"]) 2970 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2971 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2972 .args([ 2973 "--platform", 2974 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 2975 ]) 2976 .args([ 2977 "--disk", 2978 format!( 2979 "path={}", 2980 
guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2981 ) 2982 .as_str(), 2983 format!( 2984 "path={}", 2985 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2986 ) 2987 .as_str(), 2988 format!("path={test_disk_path},pci_segment=15").as_str(), 2989 ]) 2990 .capture_output() 2991 .default_net(); 2992 2993 let mut child = cmd.spawn().unwrap(); 2994 2995 guest.wait_vm_boot(None).unwrap(); 2996 2997 let grep_cmd = "lspci | grep \"Host bridge\" | wc -l"; 2998 2999 let r = std::panic::catch_unwind(|| { 3000 // There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest. 3001 assert_eq!( 3002 guest 3003 .ssh_command(grep_cmd) 3004 .unwrap() 3005 .trim() 3006 .parse::<u16>() 3007 .unwrap_or_default(), 3008 MAX_NUM_PCI_SEGMENTS 3009 ); 3010 3011 // Check both if /dev/vdc exists and if the block size is 4M. 3012 assert_eq!( 3013 guest 3014 .ssh_command("lsblk | grep vdc | grep -c 4M") 3015 .unwrap() 3016 .trim() 3017 .parse::<u32>() 3018 .unwrap_or_default(), 3019 1 3020 ); 3021 3022 // Mount the device. 3023 guest.ssh_command("mkdir mount_image").unwrap(); 3024 guest 3025 .ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/") 3026 .unwrap(); 3027 // Grant all users with write permission. 3028 guest.ssh_command("sudo chmod a+w mount_image/").unwrap(); 3029 3030 // Write something to the device. 3031 guest 3032 .ssh_command("sudo echo \"bar\" >> mount_image/foo") 3033 .unwrap(); 3034 3035 // Check the content of the block device. The file "foo" should 3036 // contain "bar". 3037 assert_eq!( 3038 guest 3039 .ssh_command("sudo cat mount_image/foo") 3040 .unwrap() 3041 .trim(), 3042 "bar" 3043 ); 3044 }); 3045 3046 kill_child(&mut child); 3047 let output = child.wait_with_output().unwrap(); 3048 3049 handle_child_output(r, &output); 3050 } 3051 3052 #[test] 3053 fn test_pci_multiple_segments_numa_node() { 3054 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3055 let guest = Guest::new(Box::new(focal)); 3056 let api_socket = temp_api_path(&guest.tmp_dir); 3057 #[cfg(target_arch = "x86_64")] 3058 let kernel_path = direct_kernel_boot_path(); 3059 #[cfg(target_arch = "aarch64")] 3060 let kernel_path = edk2_path(); 3061 3062 // Prepare another disk file for the virtio-disk device 3063 let test_disk_path = String::from( 3064 guest 3065 .tmp_dir 3066 .as_path() 3067 .join("test-disk.raw") 3068 .to_str() 3069 .unwrap(), 3070 ); 3071 assert!( 3072 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 3073 ); 3074 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 3075 const TEST_DISK_NODE: u16 = 1; 3076 3077 let mut child = GuestCommand::new(&guest) 3078 .args(["--platform", "num_pci_segments=2"]) 3079 .args(["--cpus", "boot=2"]) 3080 .args(["--memory", "size=0"]) 3081 .args(["--memory-zone", "id=mem0,size=256M", "id=mem1,size=256M"]) 3082 .args([ 3083 "--numa", 3084 "guest_numa_id=0,cpus=[0],distances=[1@20],memory_zones=mem0,pci_segments=[0]", 3085 "guest_numa_id=1,cpus=[1],distances=[0@20],memory_zones=mem1,pci_segments=[1]", 3086 ]) 3087 .args(["--kernel", kernel_path.to_str().unwrap()]) 3088 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3089 .args(["--api-socket", &api_socket]) 3090 .capture_output() 3091 .args([ 3092 "--disk", 3093 format!( 3094 "path={}", 3095 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3096 ) 3097 .as_str(), 3098 format!( 3099 "path={}", 3100 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3101 ) 3102 .as_str(), 3103 
format!("path={test_disk_path},pci_segment={TEST_DISK_NODE}").as_str(), 3104 ]) 3105 .default_net() 3106 .spawn() 3107 .unwrap(); 3108 3109 let cmd = "cat /sys/block/vdc/device/../numa_node"; 3110 3111 let r = std::panic::catch_unwind(|| { 3112 guest.wait_vm_boot(None).unwrap(); 3113 3114 assert_eq!( 3115 guest 3116 .ssh_command(cmd) 3117 .unwrap() 3118 .trim() 3119 .parse::<u16>() 3120 .unwrap_or_default(), 3121 TEST_DISK_NODE 3122 ); 3123 }); 3124 3125 kill_child(&mut child); 3126 let output = child.wait_with_output().unwrap(); 3127 3128 handle_child_output(r, &output); 3129 } 3130 3131 #[test] 3132 fn test_direct_kernel_boot() { 3133 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3134 let guest = Guest::new(Box::new(focal)); 3135 3136 let kernel_path = direct_kernel_boot_path(); 3137 3138 let mut child = GuestCommand::new(&guest) 3139 .args(["--cpus", "boot=1"]) 3140 .args(["--memory", "size=512M"]) 3141 .args(["--kernel", kernel_path.to_str().unwrap()]) 3142 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3143 .default_disks() 3144 .default_net() 3145 .capture_output() 3146 .spawn() 3147 .unwrap(); 3148 3149 let r = std::panic::catch_unwind(|| { 3150 guest.wait_vm_boot(None).unwrap(); 3151 3152 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3153 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3154 3155 let grep_cmd = if cfg!(target_arch = "x86_64") { 3156 "grep -c PCI-MSI /proc/interrupts" 3157 } else { 3158 "grep -c ITS-MSI /proc/interrupts" 3159 }; 3160 assert_eq!( 3161 guest 3162 .ssh_command(grep_cmd) 3163 .unwrap() 3164 .trim() 3165 .parse::<u32>() 3166 .unwrap_or_default(), 3167 12 3168 ); 3169 }); 3170 3171 kill_child(&mut child); 3172 let output = child.wait_with_output().unwrap(); 3173 3174 handle_child_output(r, &output); 3175 } 3176 3177 #[test] 3178 #[cfg(target_arch = "x86_64")] 3179 fn test_direct_kernel_boot_bzimage() { 3180 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3181 let guest = Guest::new(Box::new(focal)); 3182 3183 let mut kernel_path = direct_kernel_boot_path(); 3184 // Replace the default kernel with the bzImage. 
3185 kernel_path.pop(); 3186 kernel_path.push("bzImage"); 3187 3188 let mut child = GuestCommand::new(&guest) 3189 .args(["--cpus", "boot=1"]) 3190 .args(["--memory", "size=512M"]) 3191 .args(["--kernel", kernel_path.to_str().unwrap()]) 3192 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3193 .default_disks() 3194 .default_net() 3195 .capture_output() 3196 .spawn() 3197 .unwrap(); 3198 3199 let r = std::panic::catch_unwind(|| { 3200 guest.wait_vm_boot(None).unwrap(); 3201 3202 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3203 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3204 3205 let grep_cmd = if cfg!(target_arch = "x86_64") { 3206 "grep -c PCI-MSI /proc/interrupts" 3207 } else { 3208 "grep -c ITS-MSI /proc/interrupts" 3209 }; 3210 assert_eq!( 3211 guest 3212 .ssh_command(grep_cmd) 3213 .unwrap() 3214 .trim() 3215 .parse::<u32>() 3216 .unwrap_or_default(), 3217 12 3218 ); 3219 }); 3220 3221 kill_child(&mut child); 3222 let output = child.wait_with_output().unwrap(); 3223 3224 handle_child_output(r, &output); 3225 } 3226 3227 fn _test_virtio_block(image_name: &str, disable_io_uring: bool, disable_aio: bool) { 3228 let focal = UbuntuDiskConfig::new(image_name.to_string()); 3229 let guest = Guest::new(Box::new(focal)); 3230 3231 let mut workload_path = dirs::home_dir().unwrap(); 3232 workload_path.push("workloads"); 3233 3234 let mut blk_file_path = workload_path; 3235 blk_file_path.push("blk.img"); 3236 3237 let kernel_path = direct_kernel_boot_path(); 3238 3239 let mut cloud_child = GuestCommand::new(&guest) 3240 .args(["--cpus", "boot=4"]) 3241 .args(["--memory", "size=512M,shared=on"]) 3242 .args(["--kernel", kernel_path.to_str().unwrap()]) 3243 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3244 .args([ 3245 "--disk", 3246 format!( 3247 "path={}", 3248 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3249 ) 3250 .as_str(), 3251 format!( 3252 "path={}", 3253 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3254 ) 3255 .as_str(), 3256 format!( 3257 "path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={},_disable_aio={}", 3258 blk_file_path.to_str().unwrap(), 3259 disable_io_uring, 3260 disable_aio, 3261 ) 3262 .as_str(), 3263 ]) 3264 .default_net() 3265 .capture_output() 3266 .spawn() 3267 .unwrap(); 3268 3269 let r = std::panic::catch_unwind(|| { 3270 guest.wait_vm_boot(None).unwrap(); 3271 3272 // Check both if /dev/vdc exists and if the block size is 16M. 3273 assert_eq!( 3274 guest 3275 .ssh_command("lsblk | grep vdc | grep -c 16M") 3276 .unwrap() 3277 .trim() 3278 .parse::<u32>() 3279 .unwrap_or_default(), 3280 1 3281 ); 3282 3283 // Check both if /dev/vdc exists and if this block is RO. 3284 assert_eq!( 3285 guest 3286 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 3287 .unwrap() 3288 .trim() 3289 .parse::<u32>() 3290 .unwrap_or_default(), 3291 1 3292 ); 3293 3294 // Check if the number of queues is 4. 
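            // Each virtqueue shows up as a directory under /sys/block/vdc/mq,
            // so counting the directory entries gives the queue count.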
3295 assert_eq!( 3296 guest 3297 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 3298 .unwrap() 3299 .trim() 3300 .parse::<u32>() 3301 .unwrap_or_default(), 3302 4 3303 ); 3304 }); 3305 3306 let _ = cloud_child.kill(); 3307 let output = cloud_child.wait_with_output().unwrap(); 3308 3309 handle_child_output(r, &output); 3310 } 3311 3312 #[test] 3313 fn test_virtio_block_io_uring() { 3314 _test_virtio_block(FOCAL_IMAGE_NAME, false, true) 3315 } 3316 3317 #[test] 3318 fn test_virtio_block_aio() { 3319 _test_virtio_block(FOCAL_IMAGE_NAME, true, false) 3320 } 3321 3322 #[test] 3323 fn test_virtio_block_sync() { 3324 _test_virtio_block(FOCAL_IMAGE_NAME, true, true) 3325 } 3326 3327 #[test] 3328 fn test_virtio_block_qcow2() { 3329 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false, false) 3330 } 3331 3332 #[test] 3333 fn test_virtio_block_qcow2_backing_file() { 3334 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE, false, false) 3335 } 3336 3337 #[test] 3338 fn test_virtio_block_vhd() { 3339 let mut workload_path = dirs::home_dir().unwrap(); 3340 workload_path.push("workloads"); 3341 3342 let mut raw_file_path = workload_path.clone(); 3343 let mut vhd_file_path = workload_path; 3344 raw_file_path.push(FOCAL_IMAGE_NAME); 3345 vhd_file_path.push(FOCAL_IMAGE_NAME_VHD); 3346 3347 // Generate VHD file from RAW file 3348 std::process::Command::new("qemu-img") 3349 .arg("convert") 3350 .arg("-p") 3351 .args(["-f", "raw"]) 3352 .args(["-O", "vpc"]) 3353 .args(["-o", "subformat=fixed"]) 3354 .arg(raw_file_path.to_str().unwrap()) 3355 .arg(vhd_file_path.to_str().unwrap()) 3356 .output() 3357 .expect("Expect generating VHD image from RAW image"); 3358 3359 _test_virtio_block(FOCAL_IMAGE_NAME_VHD, false, false) 3360 } 3361 3362 #[test] 3363 fn test_virtio_block_vhdx() { 3364 let mut workload_path = dirs::home_dir().unwrap(); 3365 workload_path.push("workloads"); 3366 3367 let mut raw_file_path = workload_path.clone(); 3368 let mut vhdx_file_path = workload_path; 3369 raw_file_path.push(FOCAL_IMAGE_NAME); 3370 vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX); 3371 3372 // Generate dynamic VHDX file from RAW file 3373 std::process::Command::new("qemu-img") 3374 .arg("convert") 3375 .arg("-p") 3376 .args(["-f", "raw"]) 3377 .args(["-O", "vhdx"]) 3378 .arg(raw_file_path.to_str().unwrap()) 3379 .arg(vhdx_file_path.to_str().unwrap()) 3380 .output() 3381 .expect("Expect generating dynamic VHDx image from RAW image"); 3382 3383 _test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false, false) 3384 } 3385 3386 #[test] 3387 fn test_virtio_block_dynamic_vhdx_expand() { 3388 const VIRTUAL_DISK_SIZE: u64 = 100 << 20; 3389 const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20; 3390 const FULL_VHDX_FILE_SIZE: u64 = 112 << 20; 3391 const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx"; 3392 3393 let mut workload_path = dirs::home_dir().unwrap(); 3394 workload_path.push("workloads"); 3395 3396 let mut vhdx_file_path = workload_path; 3397 vhdx_file_path.push(DYNAMIC_VHDX_NAME); 3398 let vhdx_path = vhdx_file_path.to_str().unwrap(); 3399 3400 // Generate a 100 MiB dynamic VHDX file 3401 std::process::Command::new("qemu-img") 3402 .arg("create") 3403 .args(["-f", "vhdx"]) 3404 .arg(vhdx_path) 3405 .arg(VIRTUAL_DISK_SIZE.to_string()) 3406 .output() 3407 .expect("Expect generating dynamic VHDx image from RAW image"); 3408 3409 // Check if the size matches with empty VHDx file size 3410 assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE); 3411 3412 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3413 let guest = 
Guest::new(Box::new(focal)); 3414 let kernel_path = direct_kernel_boot_path(); 3415 3416 let mut cloud_child = GuestCommand::new(&guest) 3417 .args(["--cpus", "boot=1"]) 3418 .args(["--memory", "size=512M"]) 3419 .args(["--kernel", kernel_path.to_str().unwrap()]) 3420 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3421 .args([ 3422 "--disk", 3423 format!( 3424 "path={}", 3425 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3426 ) 3427 .as_str(), 3428 format!( 3429 "path={}", 3430 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3431 ) 3432 .as_str(), 3433 format!("path={vhdx_path}").as_str(), 3434 ]) 3435 .default_net() 3436 .capture_output() 3437 .spawn() 3438 .unwrap(); 3439 3440 let r = std::panic::catch_unwind(|| { 3441 guest.wait_vm_boot(None).unwrap(); 3442 3443 // Check both if /dev/vdc exists and if the block size is 100 MiB. 3444 assert_eq!( 3445 guest 3446 .ssh_command("lsblk | grep vdc | grep -c 100M") 3447 .unwrap() 3448 .trim() 3449 .parse::<u32>() 3450 .unwrap_or_default(), 3451 1 3452 ); 3453 3454 // Write 100 MB of data to the VHDx disk 3455 guest 3456 .ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100") 3457 .unwrap(); 3458 }); 3459 3460 // Check if the size matches with expected expanded VHDx file size 3461 assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE); 3462 3463 kill_child(&mut cloud_child); 3464 let output = cloud_child.wait_with_output().unwrap(); 3465 3466 handle_child_output(r, &output); 3467 } 3468 3469 fn vhdx_image_size(disk_name: &str) -> u64 { 3470 std::fs::File::open(disk_name) 3471 .unwrap() 3472 .seek(SeekFrom::End(0)) 3473 .unwrap() 3474 } 3475 3476 #[test] 3477 fn test_virtio_block_direct_and_firmware() { 3478 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3479 let guest = Guest::new(Box::new(focal)); 3480 3481 // The OS disk must be copied to a location that is not backed by 3482 // tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails 3483 // with EINVAL because tmpfs doesn't support this flag. 
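        // The copy is placed in a temporary directory under ~/workloads, which
        // is assumed to be backed by a regular (non-tmpfs) filesystem.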
3484 let mut workloads_path = dirs::home_dir().unwrap(); 3485 workloads_path.push("workloads"); 3486 let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap(); 3487 let mut os_path = os_dir.as_path().to_path_buf(); 3488 os_path.push("osdisk.img"); 3489 rate_limited_copy( 3490 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3491 os_path.as_path(), 3492 ) 3493 .expect("copying of OS disk failed"); 3494 3495 let mut child = GuestCommand::new(&guest) 3496 .args(["--cpus", "boot=1"]) 3497 .args(["--memory", "size=512M"]) 3498 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 3499 .args([ 3500 "--disk", 3501 format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(), 3502 format!( 3503 "path={}", 3504 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3505 ) 3506 .as_str(), 3507 ]) 3508 .default_net() 3509 .capture_output() 3510 .spawn() 3511 .unwrap(); 3512 3513 let r = std::panic::catch_unwind(|| { 3514 guest.wait_vm_boot(Some(120)).unwrap(); 3515 }); 3516 3517 kill_child(&mut child); 3518 let output = child.wait_with_output().unwrap(); 3519 3520 handle_child_output(r, &output); 3521 } 3522 3523 #[test] 3524 fn test_vhost_user_net_default() { 3525 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false) 3526 } 3527 3528 #[test] 3529 fn test_vhost_user_net_named_tap() { 3530 test_vhost_user_net( 3531 Some("mytap0"), 3532 2, 3533 &prepare_vhost_user_net_daemon, 3534 false, 3535 false, 3536 ) 3537 } 3538 3539 #[test] 3540 fn test_vhost_user_net_existing_tap() { 3541 test_vhost_user_net( 3542 Some("vunet-tap0"), 3543 2, 3544 &prepare_vhost_user_net_daemon, 3545 false, 3546 false, 3547 ) 3548 } 3549 3550 #[test] 3551 fn test_vhost_user_net_multiple_queues() { 3552 test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false) 3553 } 3554 3555 #[test] 3556 fn test_vhost_user_net_tap_multiple_queues() { 3557 test_vhost_user_net( 3558 Some("vunet-tap1"), 3559 4, 3560 &prepare_vhost_user_net_daemon, 3561 false, 3562 false, 3563 ) 3564 } 3565 3566 #[test] 3567 fn test_vhost_user_net_host_mac() { 3568 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false) 3569 } 3570 3571 #[test] 3572 fn test_vhost_user_net_client_mode() { 3573 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true) 3574 } 3575 3576 #[test] 3577 #[cfg(not(target_arch = "aarch64"))] 3578 fn test_vhost_user_blk_default() { 3579 test_vhost_user_blk(2, false, false, Some(&prepare_vubd)) 3580 } 3581 3582 #[test] 3583 #[cfg(not(target_arch = "aarch64"))] 3584 fn test_vhost_user_blk_readonly() { 3585 test_vhost_user_blk(1, true, false, Some(&prepare_vubd)) 3586 } 3587 3588 #[test] 3589 #[cfg(not(target_arch = "aarch64"))] 3590 fn test_vhost_user_blk_direct() { 3591 test_vhost_user_blk(1, false, true, Some(&prepare_vubd)) 3592 } 3593 3594 #[test] 3595 fn test_boot_from_vhost_user_blk_default() { 3596 test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd)) 3597 } 3598 3599 #[test] 3600 #[cfg(target_arch = "x86_64")] 3601 fn test_split_irqchip() { 3602 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3603 let guest = Guest::new(Box::new(focal)); 3604 3605 let mut child = GuestCommand::new(&guest) 3606 .args(["--cpus", "boot=1"]) 3607 .args(["--memory", "size=512M"]) 3608 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3609 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3610 .default_disks() 3611 .default_net() 3612 .capture_output() 3613 .spawn() 3614 .unwrap(); 3615 3616 let r = 
std::panic::catch_unwind(|| { 3617 guest.wait_vm_boot(None).unwrap(); 3618 3619 assert_eq!( 3620 guest 3621 .ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true") 3622 .unwrap() 3623 .trim() 3624 .parse::<u32>() 3625 .unwrap_or(1), 3626 0 3627 ); 3628 assert_eq!( 3629 guest 3630 .ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true") 3631 .unwrap() 3632 .trim() 3633 .parse::<u32>() 3634 .unwrap_or(1), 3635 0 3636 ); 3637 }); 3638 3639 kill_child(&mut child); 3640 let output = child.wait_with_output().unwrap(); 3641 3642 handle_child_output(r, &output); 3643 } 3644 3645 #[test] 3646 #[cfg(target_arch = "x86_64")] 3647 fn test_dmi_serial_number() { 3648 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3649 let guest = Guest::new(Box::new(focal)); 3650 3651 let mut child = GuestCommand::new(&guest) 3652 .args(["--cpus", "boot=1"]) 3653 .args(["--memory", "size=512M"]) 3654 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3655 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3656 .args(["--platform", "serial_number=a=b;c=d"]) 3657 .default_disks() 3658 .default_net() 3659 .capture_output() 3660 .spawn() 3661 .unwrap(); 3662 3663 let r = std::panic::catch_unwind(|| { 3664 guest.wait_vm_boot(None).unwrap(); 3665 3666 assert_eq!( 3667 guest 3668 .ssh_command("sudo cat /sys/class/dmi/id/product_serial") 3669 .unwrap() 3670 .trim(), 3671 "a=b;c=d" 3672 ); 3673 }); 3674 3675 kill_child(&mut child); 3676 let output = child.wait_with_output().unwrap(); 3677 3678 handle_child_output(r, &output); 3679 } 3680 3681 #[test] 3682 #[cfg(target_arch = "x86_64")] 3683 fn test_dmi_uuid() { 3684 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3685 let guest = Guest::new(Box::new(focal)); 3686 3687 let mut child = GuestCommand::new(&guest) 3688 .args(["--cpus", "boot=1"]) 3689 .args(["--memory", "size=512M"]) 3690 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3691 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3692 .args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"]) 3693 .default_disks() 3694 .default_net() 3695 .capture_output() 3696 .spawn() 3697 .unwrap(); 3698 3699 let r = std::panic::catch_unwind(|| { 3700 guest.wait_vm_boot(None).unwrap(); 3701 3702 assert_eq!( 3703 guest 3704 .ssh_command("sudo cat /sys/class/dmi/id/product_uuid") 3705 .unwrap() 3706 .trim(), 3707 "1e8aa28a-435d-4027-87f4-40dceff1fa0a" 3708 ); 3709 }); 3710 3711 kill_child(&mut child); 3712 let output = child.wait_with_output().unwrap(); 3713 3714 handle_child_output(r, &output); 3715 } 3716 3717 #[test] 3718 #[cfg(target_arch = "x86_64")] 3719 fn test_dmi_oem_strings() { 3720 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3721 let guest = Guest::new(Box::new(focal)); 3722 3723 let s1 = "io.systemd.credential:xx=yy"; 3724 let s2 = "This is a test string"; 3725 3726 let oem_strings = format!("oem_strings=[{s1},{s2}]"); 3727 3728 let mut child = GuestCommand::new(&guest) 3729 .args(["--cpus", "boot=1"]) 3730 .args(["--memory", "size=512M"]) 3731 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3732 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3733 .args(["--platform", &oem_strings]) 3734 .default_disks() 3735 .default_net() 3736 .capture_output() 3737 .spawn() 3738 .unwrap(); 3739 3740 let r = std::panic::catch_unwind(|| { 3741 guest.wait_vm_boot(None).unwrap(); 3742 3743 assert_eq!( 3744 guest 3745 .ssh_command("sudo dmidecode --oem-string count") 3746 .unwrap() 3747 .trim(), 3748 "2" 3749 ); 3750 
3751 assert_eq!( 3752 guest 3753 .ssh_command("sudo dmidecode --oem-string 1") 3754 .unwrap() 3755 .trim(), 3756 s1 3757 ); 3758 3759 assert_eq!( 3760 guest 3761 .ssh_command("sudo dmidecode --oem-string 2") 3762 .unwrap() 3763 .trim(), 3764 s2 3765 ); 3766 }); 3767 3768 kill_child(&mut child); 3769 let output = child.wait_with_output().unwrap(); 3770 3771 handle_child_output(r, &output); 3772 } 3773 3774 #[test] 3775 fn test_virtio_fs() { 3776 _test_virtio_fs(&prepare_virtiofsd, false, None) 3777 } 3778 3779 #[test] 3780 fn test_virtio_fs_hotplug() { 3781 _test_virtio_fs(&prepare_virtiofsd, true, None) 3782 } 3783 3784 #[test] 3785 #[cfg(not(feature = "mshv"))] 3786 fn test_virtio_fs_multi_segment_hotplug() { 3787 _test_virtio_fs(&prepare_virtiofsd, true, Some(15)) 3788 } 3789 3790 #[test] 3791 #[cfg(not(feature = "mshv"))] 3792 fn test_virtio_fs_multi_segment() { 3793 _test_virtio_fs(&prepare_virtiofsd, false, Some(15)) 3794 } 3795 3796 #[test] 3797 fn test_virtio_pmem_persist_writes() { 3798 test_virtio_pmem(false, false) 3799 } 3800 3801 #[test] 3802 fn test_virtio_pmem_discard_writes() { 3803 test_virtio_pmem(true, false) 3804 } 3805 3806 #[test] 3807 fn test_virtio_pmem_with_size() { 3808 test_virtio_pmem(true, true) 3809 } 3810 3811 #[test] 3812 fn test_boot_from_virtio_pmem() { 3813 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3814 let guest = Guest::new(Box::new(focal)); 3815 3816 let kernel_path = direct_kernel_boot_path(); 3817 3818 let mut child = GuestCommand::new(&guest) 3819 .args(["--cpus", "boot=1"]) 3820 .args(["--memory", "size=512M"]) 3821 .args(["--kernel", kernel_path.to_str().unwrap()]) 3822 .args([ 3823 "--disk", 3824 format!( 3825 "path={}", 3826 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3827 ) 3828 .as_str(), 3829 ]) 3830 .default_net() 3831 .args([ 3832 "--pmem", 3833 format!( 3834 "file={},size={}", 3835 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3836 fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) 3837 .unwrap() 3838 .len() 3839 ) 3840 .as_str(), 3841 ]) 3842 .args([ 3843 "--cmdline", 3844 DIRECT_KERNEL_BOOT_CMDLINE 3845 .replace("vda1", "pmem0p1") 3846 .as_str(), 3847 ]) 3848 .capture_output() 3849 .spawn() 3850 .unwrap(); 3851 3852 let r = std::panic::catch_unwind(|| { 3853 guest.wait_vm_boot(None).unwrap(); 3854 3855 // Simple checks to validate the VM booted properly 3856 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3857 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3858 }); 3859 3860 kill_child(&mut child); 3861 let output = child.wait_with_output().unwrap(); 3862 3863 handle_child_output(r, &output); 3864 } 3865 3866 #[test] 3867 fn test_multiple_network_interfaces() { 3868 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3869 let guest = Guest::new(Box::new(focal)); 3870 3871 let kernel_path = direct_kernel_boot_path(); 3872 3873 let mut child = GuestCommand::new(&guest) 3874 .args(["--cpus", "boot=1"]) 3875 .args(["--memory", "size=512M"]) 3876 .args(["--kernel", kernel_path.to_str().unwrap()]) 3877 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3878 .default_disks() 3879 .args([ 3880 "--net", 3881 guest.default_net_string().as_str(), 3882 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 3883 "tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 3884 ]) 3885 .capture_output() 3886 .spawn() 3887 .unwrap(); 3888 3889 let r = std::panic::catch_unwind(|| { 3890 guest.wait_vm_boot(None).unwrap(); 3891 3892 
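            // The third --net entry requests a named tap ("mytap1"), so an
            // interface with that name should now exist on the host.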
let tap_count = exec_host_command_output("ip link | grep -c mytap1"); 3893 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 3894 3895 // 3 network interfaces + default localhost ==> 4 interfaces 3896 assert_eq!( 3897 guest 3898 .ssh_command("ip -o link | wc -l") 3899 .unwrap() 3900 .trim() 3901 .parse::<u32>() 3902 .unwrap_or_default(), 3903 4 3904 ); 3905 }); 3906 3907 kill_child(&mut child); 3908 let output = child.wait_with_output().unwrap(); 3909 3910 handle_child_output(r, &output); 3911 } 3912 3913 #[test] 3914 #[cfg(target_arch = "aarch64")] 3915 fn test_pmu_on() { 3916 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3917 let guest = Guest::new(Box::new(focal)); 3918 let mut child = GuestCommand::new(&guest) 3919 .args(["--cpus", "boot=1"]) 3920 .args(["--memory", "size=512M"]) 3921 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3922 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3923 .default_disks() 3924 .default_net() 3925 .capture_output() 3926 .spawn() 3927 .unwrap(); 3928 3929 let r = std::panic::catch_unwind(|| { 3930 guest.wait_vm_boot(None).unwrap(); 3931 3932 // Test that PMU exists. 3933 assert_eq!( 3934 guest 3935 .ssh_command(GREP_PMU_IRQ_CMD) 3936 .unwrap() 3937 .trim() 3938 .parse::<u32>() 3939 .unwrap_or_default(), 3940 1 3941 ); 3942 }); 3943 3944 kill_child(&mut child); 3945 let output = child.wait_with_output().unwrap(); 3946 3947 handle_child_output(r, &output); 3948 } 3949 3950 #[test] 3951 fn test_serial_off() { 3952 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3953 let guest = Guest::new(Box::new(focal)); 3954 let mut child = GuestCommand::new(&guest) 3955 .args(["--cpus", "boot=1"]) 3956 .args(["--memory", "size=512M"]) 3957 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3958 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3959 .default_disks() 3960 .default_net() 3961 .args(["--serial", "off"]) 3962 .capture_output() 3963 .spawn() 3964 .unwrap(); 3965 3966 let r = std::panic::catch_unwind(|| { 3967 guest.wait_vm_boot(None).unwrap(); 3968 3969 // Test that there is no ttyS0 3970 assert_eq!( 3971 guest 3972 .ssh_command(GREP_SERIAL_IRQ_CMD) 3973 .unwrap() 3974 .trim() 3975 .parse::<u32>() 3976 .unwrap_or(1), 3977 0 3978 ); 3979 }); 3980 3981 kill_child(&mut child); 3982 let output = child.wait_with_output().unwrap(); 3983 3984 handle_child_output(r, &output); 3985 } 3986 3987 #[test] 3988 fn test_serial_null() { 3989 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3990 let guest = Guest::new(Box::new(focal)); 3991 let mut cmd = GuestCommand::new(&guest); 3992 #[cfg(target_arch = "x86_64")] 3993 let console_str: &str = "console=ttyS0"; 3994 #[cfg(target_arch = "aarch64")] 3995 let console_str: &str = "console=ttyAMA0"; 3996 3997 cmd.args(["--cpus", "boot=1"]) 3998 .args(["--memory", "size=512M"]) 3999 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4000 .args([ 4001 "--cmdline", 4002 DIRECT_KERNEL_BOOT_CMDLINE 4003 .replace("console=hvc0 ", console_str) 4004 .as_str(), 4005 ]) 4006 .default_disks() 4007 .default_net() 4008 .args(["--serial", "null"]) 4009 .args(["--console", "off"]) 4010 .capture_output(); 4011 4012 let mut child = cmd.spawn().unwrap(); 4013 4014 let r = std::panic::catch_unwind(|| { 4015 guest.wait_vm_boot(None).unwrap(); 4016 4017 // Test that there is a ttyS0 4018 assert_eq!( 4019 guest 4020 .ssh_command(GREP_SERIAL_IRQ_CMD) 4021 .unwrap() 4022 .trim() 4023 .parse::<u32>() 4024 .unwrap_or_default(), 4025 1 4026 ); 
4027 }); 4028 4029 kill_child(&mut child); 4030 let output = child.wait_with_output().unwrap(); 4031 handle_child_output(r, &output); 4032 4033 let r = std::panic::catch_unwind(|| { 4034 assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 4035 }); 4036 4037 handle_child_output(r, &output); 4038 } 4039 4040 #[test] 4041 fn test_serial_tty() { 4042 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4043 let guest = Guest::new(Box::new(focal)); 4044 4045 let kernel_path = direct_kernel_boot_path(); 4046 4047 #[cfg(target_arch = "x86_64")] 4048 let console_str: &str = "console=ttyS0"; 4049 #[cfg(target_arch = "aarch64")] 4050 let console_str: &str = "console=ttyAMA0"; 4051 4052 let mut child = GuestCommand::new(&guest) 4053 .args(["--cpus", "boot=1"]) 4054 .args(["--memory", "size=512M"]) 4055 .args(["--kernel", kernel_path.to_str().unwrap()]) 4056 .args([ 4057 "--cmdline", 4058 DIRECT_KERNEL_BOOT_CMDLINE 4059 .replace("console=hvc0 ", console_str) 4060 .as_str(), 4061 ]) 4062 .default_disks() 4063 .default_net() 4064 .args(["--serial", "tty"]) 4065 .args(["--console", "off"]) 4066 .capture_output() 4067 .spawn() 4068 .unwrap(); 4069 4070 let r = std::panic::catch_unwind(|| { 4071 guest.wait_vm_boot(None).unwrap(); 4072 4073 // Test that there is a ttyS0 4074 assert_eq!( 4075 guest 4076 .ssh_command(GREP_SERIAL_IRQ_CMD) 4077 .unwrap() 4078 .trim() 4079 .parse::<u32>() 4080 .unwrap_or_default(), 4081 1 4082 ); 4083 }); 4084 4085 // This sleep is needed to wait for the login prompt 4086 thread::sleep(std::time::Duration::new(2, 0)); 4087 4088 kill_child(&mut child); 4089 let output = child.wait_with_output().unwrap(); 4090 handle_child_output(r, &output); 4091 4092 let r = std::panic::catch_unwind(|| { 4093 assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 4094 }); 4095 4096 handle_child_output(r, &output); 4097 } 4098 4099 #[test] 4100 fn test_serial_file() { 4101 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4102 let guest = Guest::new(Box::new(focal)); 4103 4104 let serial_path = guest.tmp_dir.as_path().join("serial-output"); 4105 #[cfg(target_arch = "x86_64")] 4106 let console_str: &str = "console=ttyS0"; 4107 #[cfg(target_arch = "aarch64")] 4108 let console_str: &str = "console=ttyAMA0"; 4109 4110 let mut child = GuestCommand::new(&guest) 4111 .args(["--cpus", "boot=1"]) 4112 .args(["--memory", "size=512M"]) 4113 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4114 .args([ 4115 "--cmdline", 4116 DIRECT_KERNEL_BOOT_CMDLINE 4117 .replace("console=hvc0 ", console_str) 4118 .as_str(), 4119 ]) 4120 .default_disks() 4121 .default_net() 4122 .args([ 4123 "--serial", 4124 format!("file={}", serial_path.to_str().unwrap()).as_str(), 4125 ]) 4126 .capture_output() 4127 .spawn() 4128 .unwrap(); 4129 4130 let r = std::panic::catch_unwind(|| { 4131 guest.wait_vm_boot(None).unwrap(); 4132 4133 // Test that there is a ttyS0 4134 assert_eq!( 4135 guest 4136 .ssh_command(GREP_SERIAL_IRQ_CMD) 4137 .unwrap() 4138 .trim() 4139 .parse::<u32>() 4140 .unwrap_or_default(), 4141 1 4142 ); 4143 4144 guest.ssh_command("sudo shutdown -h now").unwrap(); 4145 }); 4146 4147 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4148 kill_child(&mut child); 4149 let output = child.wait_with_output().unwrap(); 4150 handle_child_output(r, &output); 4151 4152 let r = std::panic::catch_unwind(|| { 4153 // Check that the cloud-hypervisor binary actually terminated 4154 assert!(output.status.success()); 4155 4156 
// Do this check after shutdown of the VM as an easy way to ensure 4157 // all writes are flushed to disk 4158 let mut f = std::fs::File::open(serial_path).unwrap(); 4159 let mut buf = String::new(); 4160 f.read_to_string(&mut buf).unwrap(); 4161 assert!(buf.contains(CONSOLE_TEST_STRING)); 4162 }); 4163 4164 handle_child_output(r, &output); 4165 } 4166 4167 #[test] 4168 fn test_pty_interaction() { 4169 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4170 let guest = Guest::new(Box::new(focal)); 4171 let api_socket = temp_api_path(&guest.tmp_dir); 4172 let serial_option = if cfg!(target_arch = "x86_64") { 4173 " console=ttyS0" 4174 } else { 4175 " console=ttyAMA0" 4176 }; 4177 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4178 4179 let mut child = GuestCommand::new(&guest) 4180 .args(["--cpus", "boot=1"]) 4181 .args(["--memory", "size=512M"]) 4182 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4183 .args(["--cmdline", &cmdline]) 4184 .default_disks() 4185 .default_net() 4186 .args(["--serial", "null"]) 4187 .args(["--console", "pty"]) 4188 .args(["--api-socket", &api_socket]) 4189 .spawn() 4190 .unwrap(); 4191 4192 let r = std::panic::catch_unwind(|| { 4193 guest.wait_vm_boot(None).unwrap(); 4194 // Get pty fd for console 4195 let console_path = get_pty_path(&api_socket, "console"); 4196 _test_pty_interaction(console_path); 4197 4198 guest.ssh_command("sudo shutdown -h now").unwrap(); 4199 }); 4200 4201 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4202 let _ = child.kill(); 4203 let output = child.wait_with_output().unwrap(); 4204 handle_child_output(r, &output); 4205 4206 let r = std::panic::catch_unwind(|| { 4207 // Check that the cloud-hypervisor binary actually terminated 4208 assert!(output.status.success()) 4209 }); 4210 handle_child_output(r, &output); 4211 } 4212 4213 #[test] 4214 fn test_serial_socket_interaction() { 4215 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4216 let guest = Guest::new(Box::new(focal)); 4217 let serial_socket = guest.tmp_dir.as_path().join("serial.socket"); 4218 let serial_socket_pty = guest.tmp_dir.as_path().join("serial.pty"); 4219 let serial_option = if cfg!(target_arch = "x86_64") { 4220 " console=ttyS0" 4221 } else { 4222 " console=ttyAMA0" 4223 }; 4224 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4225 4226 let mut child = GuestCommand::new(&guest) 4227 .args(["--cpus", "boot=1"]) 4228 .args(["--memory", "size=512M"]) 4229 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4230 .args(["--cmdline", &cmdline]) 4231 .default_disks() 4232 .default_net() 4233 .args(["--console", "null"]) 4234 .args([ 4235 "--serial", 4236 format!("socket={}", serial_socket.to_str().unwrap()).as_str(), 4237 ]) 4238 .spawn() 4239 .unwrap(); 4240 4241 let _ = std::panic::catch_unwind(|| { 4242 guest.wait_vm_boot(None).unwrap(); 4243 }); 4244 4245 let mut socat_command = Command::new("socat"); 4246 let socat_args = [ 4247 &format!("pty,link={},raw", serial_socket_pty.display()), 4248 &format!("UNIX-CONNECT:{}", serial_socket.display()), 4249 ]; 4250 socat_command.args(socat_args); 4251 4252 let mut socat_child = socat_command.spawn().unwrap(); 4253 thread::sleep(std::time::Duration::new(1, 0)); 4254 4255 let _ = std::panic::catch_unwind(|| { 4256 _test_pty_interaction(serial_socket_pty); 4257 }); 4258 4259 let _ = socat_child.kill(); 4260 4261 let r = std::panic::catch_unwind(|| { 4262 guest.ssh_command("sudo shutdown -h now").unwrap(); 4263 }); 
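// Give the guest up to 20 seconds to shut down cleanly before killing the
// cloud-hypervisor process and collecting its output.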
4264 4265 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4266 kill_child(&mut child); 4267 let output = child.wait_with_output().unwrap(); 4268 handle_child_output(r, &output); 4269 4270 let r = std::panic::catch_unwind(|| { 4271 // Check that the cloud-hypervisor binary actually terminated 4272 if !output.status.success() { 4273 panic!( 4274 "Cloud Hypervisor process failed to terminate gracefully: {:?}", 4275 output.status 4276 ); 4277 } 4278 }); 4279 handle_child_output(r, &output); 4280 } 4281 4282 #[test] 4283 fn test_virtio_console() { 4284 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4285 let guest = Guest::new(Box::new(focal)); 4286 4287 let kernel_path = direct_kernel_boot_path(); 4288 4289 let mut child = GuestCommand::new(&guest) 4290 .args(["--cpus", "boot=1"]) 4291 .args(["--memory", "size=512M"]) 4292 .args(["--kernel", kernel_path.to_str().unwrap()]) 4293 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4294 .default_disks() 4295 .default_net() 4296 .args(["--console", "tty"]) 4297 .args(["--serial", "null"]) 4298 .capture_output() 4299 .spawn() 4300 .unwrap(); 4301 4302 let text = String::from("On a branch floating down river a cricket, singing."); 4303 let cmd = format!("echo {text} | sudo tee /dev/hvc0"); 4304 4305 let r = std::panic::catch_unwind(|| { 4306 guest.wait_vm_boot(None).unwrap(); 4307 4308 assert!(guest 4309 .does_device_vendor_pair_match("0x1043", "0x1af4") 4310 .unwrap_or_default()); 4311 4312 guest.ssh_command(&cmd).unwrap(); 4313 }); 4314 4315 kill_child(&mut child); 4316 let output = child.wait_with_output().unwrap(); 4317 handle_child_output(r, &output); 4318 4319 let r = std::panic::catch_unwind(|| { 4320 assert!(String::from_utf8_lossy(&output.stdout).contains(&text)); 4321 }); 4322 4323 handle_child_output(r, &output); 4324 } 4325 4326 #[test] 4327 fn test_console_file() { 4328 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4329 let guest = Guest::new(Box::new(focal)); 4330 4331 let console_path = guest.tmp_dir.as_path().join("console-output"); 4332 let mut child = GuestCommand::new(&guest) 4333 .args(["--cpus", "boot=1"]) 4334 .args(["--memory", "size=512M"]) 4335 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4336 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4337 .default_disks() 4338 .default_net() 4339 .args([ 4340 "--console", 4341 format!("file={}", console_path.to_str().unwrap()).as_str(), 4342 ]) 4343 .capture_output() 4344 .spawn() 4345 .unwrap(); 4346 4347 guest.wait_vm_boot(None).unwrap(); 4348 4349 guest.ssh_command("sudo shutdown -h now").unwrap(); 4350 4351 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4352 kill_child(&mut child); 4353 let output = child.wait_with_output().unwrap(); 4354 4355 let r = std::panic::catch_unwind(|| { 4356 // Check that the cloud-hypervisor binary actually terminated 4357 assert!(output.status.success()); 4358 4359 // Do this check after shutdown of the VM as an easy way to ensure 4360 // all writes are flushed to disk 4361 let mut f = std::fs::File::open(console_path).unwrap(); 4362 let mut buf = String::new(); 4363 f.read_to_string(&mut buf).unwrap(); 4364 4365 if !buf.contains(CONSOLE_TEST_STRING) { 4366 eprintln!( 4367 "\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ====" 4368 ); 4369 } 4370 assert!(buf.contains(CONSOLE_TEST_STRING)); 4371 }); 4372 4373 handle_child_output(r, &output); 4374 } 4375 4376 #[test] 4377 #[cfg(target_arch = "x86_64")] 4378 #[cfg(not(feature = "mshv"))] 4379 
// The VFIO integration test starts cloud-hypervisor guest with 3 TAP 4380 // backed networking interfaces, bound through a simple bridge on the host. 4381 // So if the nested cloud-hypervisor succeeds in getting a directly 4382 // assigned interface from its cloud-hypervisor host, we should be able to 4383 // ssh into it, and verify that it's running with the right kernel command 4384 // line (We tag the command line from cloud-hypervisor for that purpose). 4385 // The third device is added to validate that hotplug works correctly since 4386 // it is being added to the L2 VM through hotplugging mechanism. 4387 // Also, we pass-through a virtio-blk device to the L2 VM to test the 32-bit 4388 // vfio device support 4389 fn test_vfio() { 4390 setup_vfio_network_interfaces(); 4391 4392 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 4393 let guest = Guest::new_from_ip_range(Box::new(jammy), "172.18", 0); 4394 4395 let mut workload_path = dirs::home_dir().unwrap(); 4396 workload_path.push("workloads"); 4397 4398 let kernel_path = direct_kernel_boot_path(); 4399 4400 let mut vfio_path = workload_path.clone(); 4401 vfio_path.push("vfio"); 4402 4403 let mut cloud_init_vfio_base_path = vfio_path.clone(); 4404 cloud_init_vfio_base_path.push("cloudinit.img"); 4405 4406 // We copy our cloudinit into the vfio mount point, for the nested 4407 // cloud-hypervisor guest to use. 4408 rate_limited_copy( 4409 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 4410 &cloud_init_vfio_base_path, 4411 ) 4412 .expect("copying of cloud-init disk failed"); 4413 4414 let mut vfio_disk_path = workload_path.clone(); 4415 vfio_disk_path.push("vfio.img"); 4416 4417 // Create the vfio disk image 4418 let output = Command::new("mkfs.ext4") 4419 .arg("-d") 4420 .arg(vfio_path.to_str().unwrap()) 4421 .arg(vfio_disk_path.to_str().unwrap()) 4422 .arg("2g") 4423 .output() 4424 .unwrap(); 4425 if !output.status.success() { 4426 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 4427 panic!("mkfs.ext4 command generated an error"); 4428 } 4429 4430 let mut blk_file_path = workload_path; 4431 blk_file_path.push("blk.img"); 4432 4433 let vfio_tap0 = "vfio-tap0"; 4434 let vfio_tap1 = "vfio-tap1"; 4435 let vfio_tap2 = "vfio-tap2"; 4436 let vfio_tap3 = "vfio-tap3"; 4437 4438 let mut child = GuestCommand::new(&guest) 4439 .args(["--cpus", "boot=4"]) 4440 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 4441 .args(["--kernel", kernel_path.to_str().unwrap()]) 4442 .args([ 4443 "--disk", 4444 format!( 4445 "path={}", 4446 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 4447 ) 4448 .as_str(), 4449 format!( 4450 "path={}", 4451 guest.disk_config.disk(DiskType::CloudInit).unwrap() 4452 ) 4453 .as_str(), 4454 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 4455 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 4456 ]) 4457 .args([ 4458 "--cmdline", 4459 format!( 4460 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 4461 ) 4462 .as_str(), 4463 ]) 4464 .args([ 4465 "--net", 4466 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 4467 format!( 4468 "tap={},mac={},iommu=on", 4469 vfio_tap1, guest.network.l2_guest_mac1 4470 ) 4471 .as_str(), 4472 format!( 4473 "tap={},mac={},iommu=on", 4474 vfio_tap2, guest.network.l2_guest_mac2 4475 ) 4476 .as_str(), 4477 format!( 4478 "tap={},mac={},iommu=on", 4479 vfio_tap3, guest.network.l2_guest_mac3 4480 ) 4481 .as_str(), 4482 ]) 4483 .capture_output() 4484 .spawn() 
4485 .unwrap(); 4486 4487 thread::sleep(std::time::Duration::new(30, 0)); 4488 4489 let r = std::panic::catch_unwind(|| { 4490 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 4491 thread::sleep(std::time::Duration::new(120, 0)); 4492 4493 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 4494 // added to its kernel command line. 4495 // Let's ssh into it and verify that it's there. If it is it means 4496 // we're in the right guest (The L2 one) because the QEMU L1 guest 4497 // does not have this command line tag. 4498 assert!(check_matched_lines_count( 4499 guest.ssh_command_l2_1("cat /proc/cmdline").unwrap().trim(), 4500 vec!["VFIOTAG"], 4501 1 4502 )); 4503 4504 // Let's also verify from the second virtio-net device passed to 4505 // the L2 VM. 4506 assert!(check_matched_lines_count( 4507 guest.ssh_command_l2_2("cat /proc/cmdline").unwrap().trim(), 4508 vec!["VFIOTAG"], 4509 1 4510 )); 4511 4512 // Check the amount of PCI devices appearing in L2 VM. 4513 assert!(check_lines_count( 4514 guest 4515 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4516 .unwrap() 4517 .trim(), 4518 8 4519 )); 4520 4521 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 4522 assert!(check_matched_lines_count( 4523 guest.ssh_command_l2_1("lsblk").unwrap().trim(), 4524 vec!["vdc", "16M"], 4525 1 4526 )); 4527 4528 // Hotplug an extra virtio-net device through L2 VM. 4529 guest 4530 .ssh_command_l1( 4531 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 4532 ) 4533 .unwrap(); 4534 guest 4535 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 4536 .unwrap(); 4537 let vfio_hotplug_output = guest 4538 .ssh_command_l1( 4539 "sudo /mnt/ch-remote \ 4540 --api-socket=/tmp/ch_api.sock \ 4541 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 4542 ) 4543 .unwrap(); 4544 assert!(check_matched_lines_count( 4545 vfio_hotplug_output.trim(), 4546 vec!["{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}"], 4547 1 4548 )); 4549 4550 thread::sleep(std::time::Duration::new(10, 0)); 4551 4552 // Let's also verify from the third virtio-net device passed to 4553 // the L2 VM. This third device has been hotplugged through the L2 4554 // VM, so this is our way to validate hotplug works for VFIO PCI. 4555 assert!(check_matched_lines_count( 4556 guest.ssh_command_l2_3("cat /proc/cmdline").unwrap().trim(), 4557 vec!["VFIOTAG"], 4558 1 4559 )); 4560 4561 // Check the amount of PCI devices appearing in L2 VM. 4562 // There should be one more device than before, raising the count 4563 // up to 9 PCI devices. 4564 assert!(check_lines_count( 4565 guest 4566 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4567 .unwrap() 4568 .trim(), 4569 9 4570 )); 4571 4572 // Let's now verify that we can correctly remove the virtio-net 4573 // device through the "remove-device" command responsible for 4574 // unplugging VFIO devices. 4575 guest 4576 .ssh_command_l1( 4577 "sudo /mnt/ch-remote \ 4578 --api-socket=/tmp/ch_api.sock \ 4579 remove-device vfio123", 4580 ) 4581 .unwrap(); 4582 thread::sleep(std::time::Duration::new(10, 0)); 4583 4584 // Check the amount of PCI devices appearing in L2 VM is back down 4585 // to 8 devices. 4586 assert!(check_lines_count( 4587 guest 4588 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4589 .unwrap() 4590 .trim(), 4591 8 4592 )); 4593 4594 // Perform memory hotplug in L2 and validate the memory is showing 4595 // up as expected. 
In order to check, we will use the virtio-net 4596 // device already passed through L2 as a VFIO device, this will 4597 // verify that VFIO devices are functional with memory hotplug. 4598 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 4599 guest 4600 .ssh_command_l2_1( 4601 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 4602 ) 4603 .unwrap(); 4604 guest 4605 .ssh_command_l1( 4606 "sudo /mnt/ch-remote \ 4607 --api-socket=/tmp/ch_api.sock \ 4608 resize --memory=1073741824", 4609 ) 4610 .unwrap(); 4611 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 4612 }); 4613 4614 kill_child(&mut child); 4615 let output = child.wait_with_output().unwrap(); 4616 4617 cleanup_vfio_network_interfaces(); 4618 4619 handle_child_output(r, &output); 4620 } 4621 4622 #[test] 4623 fn test_direct_kernel_boot_noacpi() { 4624 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4625 let guest = Guest::new(Box::new(focal)); 4626 4627 let kernel_path = direct_kernel_boot_path(); 4628 4629 let mut child = GuestCommand::new(&guest) 4630 .args(["--cpus", "boot=1"]) 4631 .args(["--memory", "size=512M"]) 4632 .args(["--kernel", kernel_path.to_str().unwrap()]) 4633 .args([ 4634 "--cmdline", 4635 format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(), 4636 ]) 4637 .default_disks() 4638 .default_net() 4639 .capture_output() 4640 .spawn() 4641 .unwrap(); 4642 4643 let r = std::panic::catch_unwind(|| { 4644 guest.wait_vm_boot(None).unwrap(); 4645 4646 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 4647 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4648 }); 4649 4650 kill_child(&mut child); 4651 let output = child.wait_with_output().unwrap(); 4652 4653 handle_child_output(r, &output); 4654 } 4655 4656 #[test] 4657 fn test_virtio_vsock() { 4658 _test_virtio_vsock(false) 4659 } 4660 4661 #[test] 4662 fn test_virtio_vsock_hotplug() { 4663 _test_virtio_vsock(true); 4664 } 4665 4666 #[test] 4667 fn test_api_http_shutdown() { 4668 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4669 let guest = Guest::new(Box::new(focal)); 4670 4671 _test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest) 4672 } 4673 4674 #[test] 4675 fn test_api_http_delete() { 4676 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4677 let guest = Guest::new(Box::new(focal)); 4678 4679 _test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest); 4680 } 4681 4682 #[test] 4683 fn test_api_http_pause_resume() { 4684 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4685 let guest = Guest::new(Box::new(focal)); 4686 4687 _test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest) 4688 } 4689 4690 #[test] 4691 fn test_api_http_create_boot() { 4692 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4693 let guest = Guest::new(Box::new(focal)); 4694 4695 _test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest) 4696 } 4697 4698 #[test] 4699 fn test_virtio_iommu() { 4700 _test_virtio_iommu(cfg!(target_arch = "x86_64")) 4701 } 4702 4703 #[test] 4704 // We cannot force the software running in the guest to reprogram the BAR 4705 // with some different addresses, but we have a reliable way of testing it 4706 // with a standard Linux kernel. 4707 // By removing a device from the PCI tree, and then rescanning the tree, 4708 // Linux consistently chooses to reorganize the PCI device BARs to other 4709 // locations in the guest address space. 
4710 // This test creates a dedicated PCI network device, checks that it is
4711 // properly probed first, then removes it and adds it back again by
4712 // triggering a PCI bus rescan.
4713 fn test_pci_bar_reprogramming() {
4714 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4715 let guest = Guest::new(Box::new(focal));
4716
4717 #[cfg(target_arch = "x86_64")]
4718 let kernel_path = direct_kernel_boot_path();
4719 #[cfg(target_arch = "aarch64")]
4720 let kernel_path = edk2_path();
4721
4722 let mut child = GuestCommand::new(&guest)
4723 .args(["--cpus", "boot=1"])
4724 .args(["--memory", "size=512M"])
4725 .args(["--kernel", kernel_path.to_str().unwrap()])
4726 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
4727 .default_disks()
4728 .args([
4729 "--net",
4730 guest.default_net_string().as_str(),
4731 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
4732 ])
4733 .capture_output()
4734 .spawn()
4735 .unwrap();
4736
4737 let r = std::panic::catch_unwind(|| {
4738 guest.wait_vm_boot(None).unwrap();
4739
4740 // 2 network interfaces + default localhost ==> 3 interfaces
4741 assert_eq!(
4742 guest
4743 .ssh_command("ip -o link | wc -l")
4744 .unwrap()
4745 .trim()
4746 .parse::<u32>()
4747 .unwrap_or_default(),
4748 3
4749 );
4750
4751 let init_bar_addr = guest
4752 .ssh_command(
4753 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
4754 )
4755 .unwrap();
4756
4757 // Remove the PCI device
4758 guest
4759 .ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove")
4760 .unwrap();
4761
4762 // Only 1 network interface left + default localhost ==> 2 interfaces
4763 assert_eq!(
4764 guest
4765 .ssh_command("ip -o link | wc -l")
4766 .unwrap()
4767 .trim()
4768 .parse::<u32>()
4769 .unwrap_or_default(),
4770 2
4771 );
4772
4773 // Rescan the PCI bus to rediscover the device
4774 guest
4775 .ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan")
4776 .unwrap();
4777
4778 // Back to 2 network interfaces + default localhost ==> 3 interfaces
4779 assert_eq!(
4780 guest
4781 .ssh_command("ip -o link | wc -l")
4782 .unwrap()
4783 .trim()
4784 .parse::<u32>()
4785 .unwrap_or_default(),
4786 3
4787 );
4788
4789 let new_bar_addr = guest
4790 .ssh_command(
4791 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
4792 )
4793 .unwrap();
4794
4795 // Let's compare the BAR addresses for our virtio-net device.
4796 // They should be different as we expect the BAR reprogramming
4797 // to have happened.
4798 assert_ne!(init_bar_addr, new_bar_addr); 4799 }); 4800 4801 kill_child(&mut child); 4802 let output = child.wait_with_output().unwrap(); 4803 4804 handle_child_output(r, &output); 4805 } 4806 4807 #[test] 4808 fn test_memory_mergeable_off() { 4809 test_memory_mergeable(false) 4810 } 4811 4812 #[test] 4813 #[cfg(target_arch = "x86_64")] 4814 fn test_cpu_hotplug() { 4815 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4816 let guest = Guest::new(Box::new(focal)); 4817 let api_socket = temp_api_path(&guest.tmp_dir); 4818 4819 let kernel_path = direct_kernel_boot_path(); 4820 4821 let mut child = GuestCommand::new(&guest) 4822 .args(["--cpus", "boot=2,max=4"]) 4823 .args(["--memory", "size=512M"]) 4824 .args(["--kernel", kernel_path.to_str().unwrap()]) 4825 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4826 .default_disks() 4827 .default_net() 4828 .args(["--api-socket", &api_socket]) 4829 .capture_output() 4830 .spawn() 4831 .unwrap(); 4832 4833 let r = std::panic::catch_unwind(|| { 4834 guest.wait_vm_boot(None).unwrap(); 4835 4836 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4837 4838 // Resize the VM 4839 let desired_vcpus = 4; 4840 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4841 4842 guest 4843 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4844 .unwrap(); 4845 guest 4846 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4847 .unwrap(); 4848 thread::sleep(std::time::Duration::new(10, 0)); 4849 assert_eq!( 4850 guest.get_cpu_count().unwrap_or_default(), 4851 u32::from(desired_vcpus) 4852 ); 4853 4854 guest.reboot_linux(0, None); 4855 4856 assert_eq!( 4857 guest.get_cpu_count().unwrap_or_default(), 4858 u32::from(desired_vcpus) 4859 ); 4860 4861 // Resize the VM 4862 let desired_vcpus = 2; 4863 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4864 4865 thread::sleep(std::time::Duration::new(10, 0)); 4866 assert_eq!( 4867 guest.get_cpu_count().unwrap_or_default(), 4868 u32::from(desired_vcpus) 4869 ); 4870 4871 // Resize the VM back up to 4 4872 let desired_vcpus = 4; 4873 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4874 4875 guest 4876 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4877 .unwrap(); 4878 guest 4879 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4880 .unwrap(); 4881 thread::sleep(std::time::Duration::new(10, 0)); 4882 assert_eq!( 4883 guest.get_cpu_count().unwrap_or_default(), 4884 u32::from(desired_vcpus) 4885 ); 4886 }); 4887 4888 kill_child(&mut child); 4889 let output = child.wait_with_output().unwrap(); 4890 4891 handle_child_output(r, &output); 4892 } 4893 4894 #[test] 4895 fn test_memory_hotplug() { 4896 #[cfg(target_arch = "aarch64")] 4897 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 4898 #[cfg(target_arch = "x86_64")] 4899 let focal_image = FOCAL_IMAGE_NAME.to_string(); 4900 let focal = UbuntuDiskConfig::new(focal_image); 4901 let guest = Guest::new(Box::new(focal)); 4902 let api_socket = temp_api_path(&guest.tmp_dir); 4903 4904 #[cfg(target_arch = "aarch64")] 4905 let kernel_path = edk2_path(); 4906 #[cfg(target_arch = "x86_64")] 4907 let kernel_path = direct_kernel_boot_path(); 4908 4909 let mut child = GuestCommand::new(&guest) 4910 .args(["--cpus", "boot=2,max=4"]) 4911 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4912 .args(["--kernel", kernel_path.to_str().unwrap()]) 4913 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4914 .default_disks() 4915 
.default_net() 4916 .args(["--balloon", "size=0"]) 4917 .args(["--api-socket", &api_socket]) 4918 .capture_output() 4919 .spawn() 4920 .unwrap(); 4921 4922 let r = std::panic::catch_unwind(|| { 4923 guest.wait_vm_boot(None).unwrap(); 4924 4925 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4926 4927 guest.enable_memory_hotplug(); 4928 4929 // Add RAM to the VM 4930 let desired_ram = 1024 << 20; 4931 resize_command(&api_socket, None, Some(desired_ram), None, None); 4932 4933 thread::sleep(std::time::Duration::new(10, 0)); 4934 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4935 4936 // Use balloon to remove RAM from the VM 4937 let desired_balloon = 512 << 20; 4938 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4939 4940 thread::sleep(std::time::Duration::new(10, 0)); 4941 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4942 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4943 4944 guest.reboot_linux(0, None); 4945 4946 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4947 4948 // Use balloon add RAM to the VM 4949 let desired_balloon = 0; 4950 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4951 4952 thread::sleep(std::time::Duration::new(10, 0)); 4953 4954 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4955 4956 guest.enable_memory_hotplug(); 4957 4958 // Add RAM to the VM 4959 let desired_ram = 2048 << 20; 4960 resize_command(&api_socket, None, Some(desired_ram), None, None); 4961 4962 thread::sleep(std::time::Duration::new(10, 0)); 4963 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4964 4965 // Remove RAM to the VM (only applies after reboot) 4966 let desired_ram = 1024 << 20; 4967 resize_command(&api_socket, None, Some(desired_ram), None, None); 4968 4969 guest.reboot_linux(1, None); 4970 4971 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4972 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4973 }); 4974 4975 kill_child(&mut child); 4976 let output = child.wait_with_output().unwrap(); 4977 4978 handle_child_output(r, &output); 4979 } 4980 4981 #[test] 4982 #[cfg(not(feature = "mshv"))] 4983 fn test_virtio_mem() { 4984 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4985 let guest = Guest::new(Box::new(focal)); 4986 let api_socket = temp_api_path(&guest.tmp_dir); 4987 4988 let kernel_path = direct_kernel_boot_path(); 4989 4990 let mut child = GuestCommand::new(&guest) 4991 .args(["--cpus", "boot=2,max=4"]) 4992 .args([ 4993 "--memory", 4994 "size=512M,hotplug_method=virtio-mem,hotplug_size=8192M", 4995 ]) 4996 .args(["--kernel", kernel_path.to_str().unwrap()]) 4997 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4998 .default_disks() 4999 .default_net() 5000 .args(["--api-socket", &api_socket]) 5001 .capture_output() 5002 .spawn() 5003 .unwrap(); 5004 5005 let r = std::panic::catch_unwind(|| { 5006 guest.wait_vm_boot(None).unwrap(); 5007 5008 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5009 5010 guest.enable_memory_hotplug(); 5011 5012 // Add RAM to the VM 5013 let desired_ram = 1024 << 20; 5014 resize_command(&api_socket, None, Some(desired_ram), None, None); 5015 5016 thread::sleep(std::time::Duration::new(10, 0)); 5017 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5018 5019 // Add RAM to the VM 5020 let desired_ram = 2048 << 20; 5021 resize_command(&api_socket, None, Some(desired_ram), None, None); 5022 5023 
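// Give the guest a few seconds to online the newly added virtio-mem blocks
// before checking the new memory size.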
thread::sleep(std::time::Duration::new(10, 0)); 5024 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 5025 5026 // Remove RAM from the VM 5027 let desired_ram = 1024 << 20; 5028 resize_command(&api_socket, None, Some(desired_ram), None, None); 5029 5030 thread::sleep(std::time::Duration::new(10, 0)); 5031 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5032 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 5033 5034 guest.reboot_linux(0, None); 5035 5036 // Check the amount of memory after reboot is 1GiB 5037 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5038 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 5039 5040 // Check we can still resize to 512MiB 5041 let desired_ram = 512 << 20; 5042 resize_command(&api_socket, None, Some(desired_ram), None, None); 5043 thread::sleep(std::time::Duration::new(10, 0)); 5044 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5045 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 5046 }); 5047 5048 kill_child(&mut child); 5049 let output = child.wait_with_output().unwrap(); 5050 5051 handle_child_output(r, &output); 5052 } 5053 5054 #[test] 5055 #[cfg(target_arch = "x86_64")] 5056 #[cfg(not(feature = "mshv"))] 5057 // Test both vCPU and memory resizing together 5058 fn test_resize() { 5059 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5060 let guest = Guest::new(Box::new(focal)); 5061 let api_socket = temp_api_path(&guest.tmp_dir); 5062 5063 let kernel_path = direct_kernel_boot_path(); 5064 5065 let mut child = GuestCommand::new(&guest) 5066 .args(["--cpus", "boot=2,max=4"]) 5067 .args(["--memory", "size=512M,hotplug_size=8192M"]) 5068 .args(["--kernel", kernel_path.to_str().unwrap()]) 5069 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5070 .default_disks() 5071 .default_net() 5072 .args(["--api-socket", &api_socket]) 5073 .capture_output() 5074 .spawn() 5075 .unwrap(); 5076 5077 let r = std::panic::catch_unwind(|| { 5078 guest.wait_vm_boot(None).unwrap(); 5079 5080 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 5081 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5082 5083 guest.enable_memory_hotplug(); 5084 5085 // Resize the VM 5086 let desired_vcpus = 4; 5087 let desired_ram = 1024 << 20; 5088 resize_command( 5089 &api_socket, 5090 Some(desired_vcpus), 5091 Some(desired_ram), 5092 None, 5093 None, 5094 ); 5095 5096 guest 5097 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 5098 .unwrap(); 5099 guest 5100 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 5101 .unwrap(); 5102 thread::sleep(std::time::Duration::new(10, 0)); 5103 assert_eq!( 5104 guest.get_cpu_count().unwrap_or_default(), 5105 u32::from(desired_vcpus) 5106 ); 5107 5108 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5109 }); 5110 5111 kill_child(&mut child); 5112 let output = child.wait_with_output().unwrap(); 5113 5114 handle_child_output(r, &output); 5115 } 5116 5117 #[test] 5118 fn test_memory_overhead() { 5119 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5120 let guest = Guest::new(Box::new(focal)); 5121 5122 let kernel_path = direct_kernel_boot_path(); 5123 5124 let guest_memory_size_kb = 512 * 1024; 5125 5126 let mut child = GuestCommand::new(&guest) 5127 .args(["--cpus", "boot=1"]) 5128 .args(["--memory", format!("size={guest_memory_size_kb}K").as_str()]) 5129 .args(["--kernel", kernel_path.to_str().unwrap()]) 5130 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE])
5131 .default_net()
5132 .default_disks()
5133 .capture_output()
5134 .spawn()
5135 .unwrap();
5136
5137 guest.wait_vm_boot(None).unwrap();
5138
5139 let r = std::panic::catch_unwind(|| {
5140 let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb);
5141 eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}");
5142 assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB);
5143 });
5144
5145 kill_child(&mut child);
5146 let output = child.wait_with_output().unwrap();
5147
5148 handle_child_output(r, &output);
5149 }
5150
5151 #[test]
5152 #[cfg(target_arch = "x86_64")]
5153 // This test runs a guest with Landlock enabled and hotplugs a new disk.
5154 // As the path of the hotplug disk is not pre-added to the Landlock rules,
5155 // the hotplug request is expected to fail.
5156 fn test_landlock() {
5157 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
5158 let guest = Guest::new(Box::new(focal));
5159
5160 #[cfg(target_arch = "x86_64")]
5161 let kernel_path = direct_kernel_boot_path();
5162 #[cfg(target_arch = "aarch64")]
5163 let kernel_path = edk2_path();
5164
5165 let api_socket = temp_api_path(&guest.tmp_dir);
5166
5167 let mut child = GuestCommand::new(&guest)
5168 .args(["--api-socket", &api_socket])
5169 .args(["--cpus", "boot=1"])
5170 .args(["--memory", "size=512M"])
5171 .args(["--kernel", kernel_path.to_str().unwrap()])
5172 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5173 .args(["--landlock"])
5174 .default_disks()
5175 .default_net()
5176 .capture_output()
5177 .spawn()
5178 .unwrap();
5179
5180 let r = std::panic::catch_unwind(|| {
5181 guest.wait_vm_boot(None).unwrap();
5182
5183 // Check /dev/vdc is not there
5184 assert_eq!(
5185 guest
5186 .ssh_command("lsblk | grep -c vdc.*16M || true")
5187 .unwrap()
5188 .trim()
5189 .parse::<u32>()
5190 .unwrap_or(1),
5191 0
5192 );
5193
5194 // Now let's add the extra disk.
5195 let mut blk_file_path = dirs::home_dir().unwrap();
5196 blk_file_path.push("workloads");
5197 blk_file_path.push("blk.img");
5198 // As the path to the hotplug disk is not pre-added, this remote
5199 // command will fail.
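// (Contrast with _test_disk_hotplug(true) below, which grants access to this
// path through --landlock-rules before hotplugging and therefore succeeds.)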
5200 assert!(!remote_command( 5201 &api_socket, 5202 "add-disk", 5203 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5204 )); 5205 }); 5206 5207 let _ = child.kill(); 5208 let output = child.wait_with_output().unwrap(); 5209 5210 handle_child_output(r, &output); 5211 } 5212 5213 fn _test_disk_hotplug(landlock_enabled: bool) { 5214 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5215 let guest = Guest::new(Box::new(focal)); 5216 5217 #[cfg(target_arch = "x86_64")] 5218 let kernel_path = direct_kernel_boot_path(); 5219 #[cfg(target_arch = "aarch64")] 5220 let kernel_path = edk2_path(); 5221 5222 let api_socket = temp_api_path(&guest.tmp_dir); 5223 5224 let mut blk_file_path = dirs::home_dir().unwrap(); 5225 blk_file_path.push("workloads"); 5226 blk_file_path.push("blk.img"); 5227 5228 let mut cmd = GuestCommand::new(&guest); 5229 if landlock_enabled { 5230 cmd.args(["--landlock"]).args([ 5231 "--landlock-rules", 5232 format!("path={:?},access=rw", blk_file_path).as_str(), 5233 ]); 5234 } 5235 5236 cmd.args(["--api-socket", &api_socket]) 5237 .args(["--cpus", "boot=1"]) 5238 .args(["--memory", "size=512M"]) 5239 .args(["--kernel", kernel_path.to_str().unwrap()]) 5240 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5241 .default_disks() 5242 .default_net() 5243 .capture_output(); 5244 5245 let mut child = cmd.spawn().unwrap(); 5246 5247 let r = std::panic::catch_unwind(|| { 5248 guest.wait_vm_boot(None).unwrap(); 5249 5250 // Check /dev/vdc is not there 5251 assert_eq!( 5252 guest 5253 .ssh_command("lsblk | grep -c vdc.*16M || true") 5254 .unwrap() 5255 .trim() 5256 .parse::<u32>() 5257 .unwrap_or(1), 5258 0 5259 ); 5260 5261 // Now let's add the extra disk. 5262 let (cmd_success, cmd_output) = remote_command_w_output( 5263 &api_socket, 5264 "add-disk", 5265 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5266 ); 5267 assert!(cmd_success); 5268 assert!(String::from_utf8_lossy(&cmd_output) 5269 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5270 5271 thread::sleep(std::time::Duration::new(10, 0)); 5272 5273 // Check that /dev/vdc exists and the block size is 16M. 5274 assert_eq!( 5275 guest 5276 .ssh_command("lsblk | grep vdc | grep -c 16M") 5277 .unwrap() 5278 .trim() 5279 .parse::<u32>() 5280 .unwrap_or_default(), 5281 1 5282 ); 5283 // And check the block device can be read. 5284 guest 5285 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5286 .unwrap(); 5287 5288 // Let's remove it the extra disk. 5289 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5290 thread::sleep(std::time::Duration::new(5, 0)); 5291 // And check /dev/vdc is not there 5292 assert_eq!( 5293 guest 5294 .ssh_command("lsblk | grep -c vdc.*16M || true") 5295 .unwrap() 5296 .trim() 5297 .parse::<u32>() 5298 .unwrap_or(1), 5299 0 5300 ); 5301 5302 // And add it back to validate unplug did work correctly. 5303 let (cmd_success, cmd_output) = remote_command_w_output( 5304 &api_socket, 5305 "add-disk", 5306 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5307 ); 5308 assert!(cmd_success); 5309 assert!(String::from_utf8_lossy(&cmd_output) 5310 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5311 5312 thread::sleep(std::time::Duration::new(10, 0)); 5313 5314 // Check that /dev/vdc exists and the block size is 16M. 
5315 assert_eq!( 5316 guest 5317 .ssh_command("lsblk | grep vdc | grep -c 16M") 5318 .unwrap() 5319 .trim() 5320 .parse::<u32>() 5321 .unwrap_or_default(), 5322 1 5323 ); 5324 // And check the block device can be read. 5325 guest 5326 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5327 .unwrap(); 5328 5329 // Reboot the VM. 5330 guest.reboot_linux(0, None); 5331 5332 // Check still there after reboot 5333 assert_eq!( 5334 guest 5335 .ssh_command("lsblk | grep vdc | grep -c 16M") 5336 .unwrap() 5337 .trim() 5338 .parse::<u32>() 5339 .unwrap_or_default(), 5340 1 5341 ); 5342 5343 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5344 5345 thread::sleep(std::time::Duration::new(20, 0)); 5346 5347 // Check device has gone away 5348 assert_eq!( 5349 guest 5350 .ssh_command("lsblk | grep -c vdc.*16M || true") 5351 .unwrap() 5352 .trim() 5353 .parse::<u32>() 5354 .unwrap_or(1), 5355 0 5356 ); 5357 5358 guest.reboot_linux(1, None); 5359 5360 // Check device still absent 5361 assert_eq!( 5362 guest 5363 .ssh_command("lsblk | grep -c vdc.*16M || true") 5364 .unwrap() 5365 .trim() 5366 .parse::<u32>() 5367 .unwrap_or(1), 5368 0 5369 ); 5370 }); 5371 5372 kill_child(&mut child); 5373 let output = child.wait_with_output().unwrap(); 5374 5375 handle_child_output(r, &output); 5376 } 5377 5378 #[test] 5379 fn test_disk_hotplug() { 5380 _test_disk_hotplug(false) 5381 } 5382 5383 #[test] 5384 #[cfg(target_arch = "x86_64")] 5385 fn test_disk_hotplug_with_landlock() { 5386 _test_disk_hotplug(true) 5387 } 5388 5389 fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String { 5390 const LOOP_CONFIGURE: u64 = 0x4c0a; 5391 const LOOP_CTL_GET_FREE: u64 = 0x4c82; 5392 const LOOP_CTL_PATH: &str = "/dev/loop-control"; 5393 const LOOP_DEVICE_PREFIX: &str = "/dev/loop"; 5394 5395 #[repr(C)] 5396 struct LoopInfo64 { 5397 lo_device: u64, 5398 lo_inode: u64, 5399 lo_rdevice: u64, 5400 lo_offset: u64, 5401 lo_sizelimit: u64, 5402 lo_number: u32, 5403 lo_encrypt_type: u32, 5404 lo_encrypt_key_size: u32, 5405 lo_flags: u32, 5406 lo_file_name: [u8; 64], 5407 lo_crypt_name: [u8; 64], 5408 lo_encrypt_key: [u8; 32], 5409 lo_init: [u64; 2], 5410 } 5411 5412 impl Default for LoopInfo64 { 5413 fn default() -> Self { 5414 LoopInfo64 { 5415 lo_device: 0, 5416 lo_inode: 0, 5417 lo_rdevice: 0, 5418 lo_offset: 0, 5419 lo_sizelimit: 0, 5420 lo_number: 0, 5421 lo_encrypt_type: 0, 5422 lo_encrypt_key_size: 0, 5423 lo_flags: 0, 5424 lo_file_name: [0; 64], 5425 lo_crypt_name: [0; 64], 5426 lo_encrypt_key: [0; 32], 5427 lo_init: [0; 2], 5428 } 5429 } 5430 } 5431 5432 #[derive(Default)] 5433 #[repr(C)] 5434 struct LoopConfig { 5435 fd: u32, 5436 block_size: u32, 5437 info: LoopInfo64, 5438 _reserved: [u64; 8], 5439 } 5440 5441 // Open loop-control device 5442 let loop_ctl_file = OpenOptions::new() 5443 .read(true) 5444 .write(true) 5445 .open(LOOP_CTL_PATH) 5446 .unwrap(); 5447 5448 // Request a free loop device 5449 let loop_device_number = 5450 unsafe { libc::ioctl(loop_ctl_file.as_raw_fd(), LOOP_CTL_GET_FREE as _) }; 5451 5452 if loop_device_number < 0 { 5453 panic!("Couldn't find a free loop device"); 5454 } 5455 5456 // Create loop device path 5457 let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}"); 5458 5459 // Open loop device 5460 let loop_device_file = OpenOptions::new() 5461 .read(true) 5462 .write(true) 5463 .open(&loop_device_path) 5464 .unwrap(); 5465 5466 // Open backing file 5467 let backing_file = OpenOptions::new() 5468 
.read(true) 5469 .write(true) 5470 .open(backing_file_path) 5471 .unwrap(); 5472 5473 let loop_config = LoopConfig { 5474 fd: backing_file.as_raw_fd() as u32, 5475 block_size, 5476 ..Default::default() 5477 }; 5478 5479 for i in 0..num_retries { 5480 let ret = unsafe { 5481 libc::ioctl( 5482 loop_device_file.as_raw_fd(), 5483 LOOP_CONFIGURE as _, 5484 &loop_config, 5485 ) 5486 }; 5487 if ret != 0 { 5488 if i < num_retries - 1 { 5489 println!( 5490 "Iteration {}: Failed to configure the loop device {}: {}", 5491 i, 5492 loop_device_path, 5493 std::io::Error::last_os_error() 5494 ); 5495 } else { 5496 panic!( 5497 "Failed {} times trying to configure the loop device {}: {}", 5498 num_retries, 5499 loop_device_path, 5500 std::io::Error::last_os_error() 5501 ); 5502 } 5503 } else { 5504 break; 5505 } 5506 5507 // Wait for a bit before retrying 5508 thread::sleep(std::time::Duration::new(5, 0)); 5509 } 5510 5511 loop_device_path 5512 } 5513 5514 #[test] 5515 fn test_virtio_block_topology() { 5516 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5517 let guest = Guest::new(Box::new(focal)); 5518 5519 let kernel_path = direct_kernel_boot_path(); 5520 let test_disk_path = guest.tmp_dir.as_path().join("test.img"); 5521 5522 let output = exec_host_command_output( 5523 format!( 5524 "qemu-img create -f raw {} 16M", 5525 test_disk_path.to_str().unwrap() 5526 ) 5527 .as_str(), 5528 ); 5529 if !output.status.success() { 5530 let stdout = String::from_utf8_lossy(&output.stdout); 5531 let stderr = String::from_utf8_lossy(&output.stderr); 5532 panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}"); 5533 } 5534 5535 let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5); 5536 5537 let mut child = GuestCommand::new(&guest) 5538 .args(["--cpus", "boot=1"]) 5539 .args(["--memory", "size=512M"]) 5540 .args(["--kernel", kernel_path.to_str().unwrap()]) 5541 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5542 .args([ 5543 "--disk", 5544 format!( 5545 "path={}", 5546 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5547 ) 5548 .as_str(), 5549 format!( 5550 "path={}", 5551 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5552 ) 5553 .as_str(), 5554 format!("path={}", &loop_dev).as_str(), 5555 ]) 5556 .default_net() 5557 .capture_output() 5558 .spawn() 5559 .unwrap(); 5560 5561 let r = std::panic::catch_unwind(|| { 5562 guest.wait_vm_boot(None).unwrap(); 5563 5564 // MIN-IO column 5565 assert_eq!( 5566 guest 5567 .ssh_command("lsblk -t| grep vdc | awk '{print $3}'") 5568 .unwrap() 5569 .trim() 5570 .parse::<u32>() 5571 .unwrap_or_default(), 5572 4096 5573 ); 5574 // PHY-SEC column 5575 assert_eq!( 5576 guest 5577 .ssh_command("lsblk -t| grep vdc | awk '{print $5}'") 5578 .unwrap() 5579 .trim() 5580 .parse::<u32>() 5581 .unwrap_or_default(), 5582 4096 5583 ); 5584 // LOG-SEC column 5585 assert_eq!( 5586 guest 5587 .ssh_command("lsblk -t| grep vdc | awk '{print $6}'") 5588 .unwrap() 5589 .trim() 5590 .parse::<u32>() 5591 .unwrap_or_default(), 5592 4096 5593 ); 5594 }); 5595 5596 kill_child(&mut child); 5597 let output = child.wait_with_output().unwrap(); 5598 5599 handle_child_output(r, &output); 5600 5601 Command::new("losetup") 5602 .args(["-d", &loop_dev]) 5603 .output() 5604 .expect("loop device not found"); 5605 } 5606 5607 #[test] 5608 fn test_virtio_balloon_deflate_on_oom() { 5609 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5610 let guest = Guest::new(Box::new(focal)); 5611 5612 let kernel_path = direct_kernel_boot_path(); 
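// With deflate_on_oom=on, the guest is allowed to take pages back from the
// balloon when it hits an out-of-memory condition, which is what this test
// verifies below.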
5613
5614 let api_socket = temp_api_path(&guest.tmp_dir);
5615
5616 // Let's start a 4G guest with a balloon occupying 2G of memory
5617 let mut child = GuestCommand::new(&guest)
5618 .args(["--api-socket", &api_socket])
5619 .args(["--cpus", "boot=1"])
5620 .args(["--memory", "size=4G"])
5621 .args(["--kernel", kernel_path.to_str().unwrap()])
5622 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5623 .args(["--balloon", "size=2G,deflate_on_oom=on"])
5624 .default_disks()
5625 .default_net()
5626 .capture_output()
5627 .spawn()
5628 .unwrap();
5629
5630 let r = std::panic::catch_unwind(|| {
5631 guest.wait_vm_boot(None).unwrap();
5632
5633 // Wait for balloon memory's initialization and check its size.
5634 // The virtio-balloon driver might take a few seconds to report the
5635 // balloon effective size back to the VMM.
5636 thread::sleep(std::time::Duration::new(20, 0));
5637
5638 let orig_balloon = balloon_size(&api_socket);
5639 println!("The original balloon memory size is {orig_balloon} bytes");
5640 assert!(orig_balloon == 2147483648);
5641
5642 // Two steps to verify that the 'deflate_on_oom' parameter works.
5643 // 1st: run a command to trigger an OOM in the guest.
5644 guest
5645 .ssh_command("echo f | sudo tee /proc/sysrq-trigger")
5646 .unwrap();
5647
5648 // Give some time for the OOM to happen in the guest and be reported
5649 // back to the host.
5650 thread::sleep(std::time::Duration::new(20, 0));
5651
5652 // 2nd: check the balloon size to verify it has been automatically deflated
5653 let deflated_balloon = balloon_size(&api_socket);
5654 println!("After deflating, balloon memory size is {deflated_balloon} bytes");
5655 // Verify the balloon has deflated
5656 assert!(deflated_balloon < 2147483648);
5657 });
5658
5659 kill_child(&mut child);
5660 let output = child.wait_with_output().unwrap();
5661
5662 handle_child_output(r, &output);
5663 }
5664
5665 #[test]
5666 #[cfg(not(feature = "mshv"))]
5667 fn test_virtio_balloon_free_page_reporting() {
5668 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
5669 let guest = Guest::new(Box::new(focal));
5670
5671 // Let's start a 4G guest with free page reporting enabled on the balloon
5672 let mut child = GuestCommand::new(&guest)
5673 .args(["--cpus", "boot=1"])
5674 .args(["--memory", "size=4G"])
5675 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
5676 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5677 .args(["--balloon", "size=0,free_page_reporting=on"])
5678 .default_disks()
5679 .default_net()
5680 .capture_output()
5681 .spawn()
5682 .unwrap();
5683
5684 let pid = child.id();
5685 let r = std::panic::catch_unwind(|| {
5686 guest.wait_vm_boot(None).unwrap();
5687
5688 // Check the initial RSS is less than 1GiB
5689 let rss = process_rss_kib(pid);
5690 println!("RSS {rss} < 1048576");
5691 assert!(rss < 1048576);
5692
5693 // Spawn a command inside the guest to consume 2GiB of RAM for 60
5694 // seconds
5695 let guest_ip = guest.network.guest_ip.clone();
5696 thread::spawn(move || {
5697 ssh_command_ip(
5698 "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60",
5699 &guest_ip,
5700 DEFAULT_SSH_RETRIES,
5701 DEFAULT_SSH_TIMEOUT,
5702 )
5703 .unwrap();
5704 });
5705
5706 // Wait for 50 seconds to make sure the stress command is consuming
5707 // the expected amount of memory.
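// (The stress run lasts 60 seconds in total, so it should still be active at
// the 50 second mark.)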
5708 thread::sleep(std::time::Duration::new(50, 0)); 5709 let rss = process_rss_kib(pid); 5710 println!("RSS {rss} >= 2097152"); 5711 assert!(rss >= 2097152); 5712 5713 // Wait for an extra minute to make sure the stress command has 5714 // completed and that the guest reported the free pages to the VMM 5715 // through the virtio-balloon device. We expect the RSS to be under 5716 // 2GiB. 5717 thread::sleep(std::time::Duration::new(60, 0)); 5718 let rss = process_rss_kib(pid); 5719 println!("RSS {rss} < 2097152"); 5720 assert!(rss < 2097152); 5721 }); 5722 5723 kill_child(&mut child); 5724 let output = child.wait_with_output().unwrap(); 5725 5726 handle_child_output(r, &output); 5727 } 5728 5729 #[test] 5730 fn test_pmem_hotplug() { 5731 _test_pmem_hotplug(None) 5732 } 5733 5734 #[test] 5735 fn test_pmem_multi_segment_hotplug() { 5736 _test_pmem_hotplug(Some(15)) 5737 } 5738 5739 fn _test_pmem_hotplug(pci_segment: Option<u16>) { 5740 #[cfg(target_arch = "aarch64")] 5741 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 5742 #[cfg(target_arch = "x86_64")] 5743 let focal_image = FOCAL_IMAGE_NAME.to_string(); 5744 let focal = UbuntuDiskConfig::new(focal_image); 5745 let guest = Guest::new(Box::new(focal)); 5746 5747 #[cfg(target_arch = "x86_64")] 5748 let kernel_path = direct_kernel_boot_path(); 5749 #[cfg(target_arch = "aarch64")] 5750 let kernel_path = edk2_path(); 5751 5752 let api_socket = temp_api_path(&guest.tmp_dir); 5753 5754 let mut cmd = GuestCommand::new(&guest); 5755 5756 cmd.args(["--api-socket", &api_socket]) 5757 .args(["--cpus", "boot=1"]) 5758 .args(["--memory", "size=512M"]) 5759 .args(["--kernel", kernel_path.to_str().unwrap()]) 5760 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5761 .default_disks() 5762 .default_net() 5763 .capture_output(); 5764 5765 if pci_segment.is_some() { 5766 cmd.args([ 5767 "--platform", 5768 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5769 ]); 5770 } 5771 5772 let mut child = cmd.spawn().unwrap(); 5773 5774 let r = std::panic::catch_unwind(|| { 5775 guest.wait_vm_boot(None).unwrap(); 5776 5777 // Check /dev/pmem0 is not there 5778 assert_eq!( 5779 guest 5780 .ssh_command("lsblk | grep -c pmem0 || true") 5781 .unwrap() 5782 .trim() 5783 .parse::<u32>() 5784 .unwrap_or(1), 5785 0 5786 ); 5787 5788 let pmem_temp_file = TempFile::new().unwrap(); 5789 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 5790 let (cmd_success, cmd_output) = remote_command_w_output( 5791 &api_socket, 5792 "add-pmem", 5793 Some(&format!( 5794 "file={},id=test0{}", 5795 pmem_temp_file.as_path().to_str().unwrap(), 5796 if let Some(pci_segment) = pci_segment { 5797 format!(",pci_segment={pci_segment}") 5798 } else { 5799 "".to_owned() 5800 } 5801 )), 5802 ); 5803 assert!(cmd_success); 5804 if let Some(pci_segment) = pci_segment { 5805 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5806 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5807 ))); 5808 } else { 5809 assert!(String::from_utf8_lossy(&cmd_output) 5810 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5811 } 5812 5813 // Check that /dev/pmem0 exists and the block size is 128M 5814 assert_eq!( 5815 guest 5816 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5817 .unwrap() 5818 .trim() 5819 .parse::<u32>() 5820 .unwrap_or_default(), 5821 1 5822 ); 5823 5824 guest.reboot_linux(0, None); 5825 5826 // Check still there after reboot 5827 assert_eq!( 5828 guest 5829 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5830 .unwrap() 5831 .trim() 5832 
.parse::<u32>() 5833 .unwrap_or_default(), 5834 1 5835 ); 5836 5837 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5838 5839 thread::sleep(std::time::Duration::new(20, 0)); 5840 5841 // Check device has gone away 5842 assert_eq!( 5843 guest 5844 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5845 .unwrap() 5846 .trim() 5847 .parse::<u32>() 5848 .unwrap_or(1), 5849 0 5850 ); 5851 5852 guest.reboot_linux(1, None); 5853 5854 // Check still absent after reboot 5855 assert_eq!( 5856 guest 5857 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5858 .unwrap() 5859 .trim() 5860 .parse::<u32>() 5861 .unwrap_or(1), 5862 0 5863 ); 5864 }); 5865 5866 kill_child(&mut child); 5867 let output = child.wait_with_output().unwrap(); 5868 5869 handle_child_output(r, &output); 5870 } 5871 5872 #[test] 5873 fn test_net_hotplug() { 5874 _test_net_hotplug(None) 5875 } 5876 5877 #[test] 5878 fn test_net_multi_segment_hotplug() { 5879 _test_net_hotplug(Some(15)) 5880 } 5881 5882 fn _test_net_hotplug(pci_segment: Option<u16>) { 5883 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5884 let guest = Guest::new(Box::new(focal)); 5885 5886 #[cfg(target_arch = "x86_64")] 5887 let kernel_path = direct_kernel_boot_path(); 5888 #[cfg(target_arch = "aarch64")] 5889 let kernel_path = edk2_path(); 5890 5891 let api_socket = temp_api_path(&guest.tmp_dir); 5892 5893 // Boot without network 5894 let mut cmd = GuestCommand::new(&guest); 5895 5896 cmd.args(["--api-socket", &api_socket]) 5897 .args(["--cpus", "boot=1"]) 5898 .args(["--memory", "size=512M"]) 5899 .args(["--kernel", kernel_path.to_str().unwrap()]) 5900 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5901 .default_disks() 5902 .capture_output(); 5903 5904 if pci_segment.is_some() { 5905 cmd.args([ 5906 "--platform", 5907 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5908 ]); 5909 } 5910 5911 let mut child = cmd.spawn().unwrap(); 5912 5913 thread::sleep(std::time::Duration::new(20, 0)); 5914 5915 let r = std::panic::catch_unwind(|| { 5916 // Add network 5917 let (cmd_success, cmd_output) = remote_command_w_output( 5918 &api_socket, 5919 "add-net", 5920 Some( 5921 format!( 5922 "{}{},id=test0", 5923 guest.default_net_string(), 5924 if let Some(pci_segment) = pci_segment { 5925 format!(",pci_segment={pci_segment}") 5926 } else { 5927 "".to_owned() 5928 } 5929 ) 5930 .as_str(), 5931 ), 5932 ); 5933 assert!(cmd_success); 5934 5935 if let Some(pci_segment) = pci_segment { 5936 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5937 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5938 ))); 5939 } else { 5940 assert!(String::from_utf8_lossy(&cmd_output) 5941 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}")); 5942 } 5943 5944 thread::sleep(std::time::Duration::new(5, 0)); 5945 5946 // 1 network interfaces + default localhost ==> 2 interfaces 5947 assert_eq!( 5948 guest 5949 .ssh_command("ip -o link | wc -l") 5950 .unwrap() 5951 .trim() 5952 .parse::<u32>() 5953 .unwrap_or_default(), 5954 2 5955 ); 5956 5957 // Remove network 5958 assert!(remote_command(&api_socket, "remove-device", Some("test0"),)); 5959 thread::sleep(std::time::Duration::new(5, 0)); 5960 5961 let (cmd_success, cmd_output) = remote_command_w_output( 5962 &api_socket, 5963 "add-net", 5964 Some( 5965 format!( 5966 "{}{},id=test1", 5967 guest.default_net_string(), 5968 if let Some(pci_segment) = pci_segment { 5969 format!(",pci_segment={pci_segment}") 5970 } else { 5971 "".to_owned() 5972 } 5973 ) 5974 .as_str(), 5975 ), 5976 
); 5977 assert!(cmd_success); 5978 5979 if let Some(pci_segment) = pci_segment { 5980 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5981 "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5982 ))); 5983 } else { 5984 assert!(String::from_utf8_lossy(&cmd_output) 5985 .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}")); 5986 } 5987 5988 thread::sleep(std::time::Duration::new(5, 0)); 5989 5990 // 1 network interfaces + default localhost ==> 2 interfaces 5991 assert_eq!( 5992 guest 5993 .ssh_command("ip -o link | wc -l") 5994 .unwrap() 5995 .trim() 5996 .parse::<u32>() 5997 .unwrap_or_default(), 5998 2 5999 ); 6000 6001 guest.reboot_linux(0, None); 6002 6003 // Check still there after reboot 6004 // 1 network interfaces + default localhost ==> 2 interfaces 6005 assert_eq!( 6006 guest 6007 .ssh_command("ip -o link | wc -l") 6008 .unwrap() 6009 .trim() 6010 .parse::<u32>() 6011 .unwrap_or_default(), 6012 2 6013 ); 6014 }); 6015 6016 kill_child(&mut child); 6017 let output = child.wait_with_output().unwrap(); 6018 6019 handle_child_output(r, &output); 6020 } 6021 6022 #[test] 6023 fn test_initramfs() { 6024 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6025 let guest = Guest::new(Box::new(focal)); 6026 let mut workload_path = dirs::home_dir().unwrap(); 6027 workload_path.push("workloads"); 6028 6029 #[cfg(target_arch = "x86_64")] 6030 let mut kernels = vec![direct_kernel_boot_path()]; 6031 #[cfg(target_arch = "aarch64")] 6032 let kernels = [direct_kernel_boot_path()]; 6033 6034 #[cfg(target_arch = "x86_64")] 6035 { 6036 let mut pvh_kernel_path = workload_path.clone(); 6037 pvh_kernel_path.push("vmlinux"); 6038 kernels.push(pvh_kernel_path); 6039 } 6040 6041 let mut initramfs_path = workload_path; 6042 initramfs_path.push("alpine_initramfs.img"); 6043 6044 let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg"); 6045 let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}"); 6046 6047 kernels.iter().for_each(|k_path| { 6048 let mut child = GuestCommand::new(&guest) 6049 .args(["--kernel", k_path.to_str().unwrap()]) 6050 .args(["--initramfs", initramfs_path.to_str().unwrap()]) 6051 .args(["--cmdline", &cmdline]) 6052 .capture_output() 6053 .spawn() 6054 .unwrap(); 6055 6056 thread::sleep(std::time::Duration::new(20, 0)); 6057 6058 kill_child(&mut child); 6059 let output = child.wait_with_output().unwrap(); 6060 6061 let r = std::panic::catch_unwind(|| { 6062 let s = String::from_utf8_lossy(&output.stdout); 6063 6064 assert_ne!(s.lines().position(|line| line == test_string), None); 6065 }); 6066 6067 handle_child_output(r, &output); 6068 }); 6069 } 6070 6071 #[test] 6072 fn test_counters() { 6073 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6074 let guest = Guest::new(Box::new(focal)); 6075 let api_socket = temp_api_path(&guest.tmp_dir); 6076 6077 let mut cmd = GuestCommand::new(&guest); 6078 cmd.args(["--cpus", "boot=1"]) 6079 .args(["--memory", "size=512M"]) 6080 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 6081 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6082 .default_disks() 6083 .args(["--net", guest.default_net_string().as_str()]) 6084 .args(["--api-socket", &api_socket]) 6085 .capture_output(); 6086 6087 let mut child = cmd.spawn().unwrap(); 6088 6089 let r = std::panic::catch_unwind(|| { 6090 guest.wait_vm_boot(None).unwrap(); 6091 6092 let orig_counters = get_counters(&api_socket); 6093 guest 6094 .ssh_command("dd if=/dev/zero of=test count=8 bs=1M") 6095 .unwrap(); 
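            // The `dd` command above generates both block and network I/O: the
            // command itself travels over the virtio-net SSH session while its
            // writes land on the virtio-block disk, so every counter reported
            // by the VMM is expected to move forward.
            //
            // For manual debugging, the same data can be dumped with the CLI,
            // e.g. (illustrative invocation):
            //   ch-remote --api-socket <path> counters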
6096 6097 let new_counters = get_counters(&api_socket); 6098 6099 // Check that all the counters have increased 6100 assert!(new_counters > orig_counters); 6101 }); 6102 6103 kill_child(&mut child); 6104 let output = child.wait_with_output().unwrap(); 6105 6106 handle_child_output(r, &output); 6107 } 6108 6109 #[test] 6110 #[cfg(feature = "guest_debug")] 6111 fn test_coredump() { 6112 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6113 let guest = Guest::new(Box::new(focal)); 6114 let api_socket = temp_api_path(&guest.tmp_dir); 6115 6116 let mut cmd = GuestCommand::new(&guest); 6117 cmd.args(["--cpus", "boot=4"]) 6118 .args(["--memory", "size=4G"]) 6119 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6120 .default_disks() 6121 .args(["--net", guest.default_net_string().as_str()]) 6122 .args(["--api-socket", &api_socket]) 6123 .capture_output(); 6124 6125 let mut child = cmd.spawn().unwrap(); 6126 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6127 6128 let r = std::panic::catch_unwind(|| { 6129 guest.wait_vm_boot(None).unwrap(); 6130 6131 assert!(remote_command(&api_socket, "pause", None)); 6132 6133 assert!(remote_command( 6134 &api_socket, 6135 "coredump", 6136 Some(format!("file://{vmcore_file}").as_str()), 6137 )); 6138 6139 // the num of CORE notes should equals to vcpu 6140 let readelf_core_num_cmd = 6141 format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l"); 6142 let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd); 6143 assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4"); 6144 6145 // the num of QEMU notes should equals to vcpu 6146 let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l"); 6147 let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd); 6148 assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4"); 6149 }); 6150 6151 kill_child(&mut child); 6152 let output = child.wait_with_output().unwrap(); 6153 6154 handle_child_output(r, &output); 6155 } 6156 6157 #[test] 6158 #[cfg(feature = "guest_debug")] 6159 fn test_coredump_no_pause() { 6160 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6161 let guest = Guest::new(Box::new(focal)); 6162 let api_socket = temp_api_path(&guest.tmp_dir); 6163 6164 let mut cmd = GuestCommand::new(&guest); 6165 cmd.args(["--cpus", "boot=4"]) 6166 .args(["--memory", "size=4G"]) 6167 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6168 .default_disks() 6169 .args(["--net", guest.default_net_string().as_str()]) 6170 .args(["--api-socket", &api_socket]) 6171 .capture_output(); 6172 6173 let mut child = cmd.spawn().unwrap(); 6174 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6175 6176 let r = std::panic::catch_unwind(|| { 6177 guest.wait_vm_boot(None).unwrap(); 6178 6179 assert!(remote_command( 6180 &api_socket, 6181 "coredump", 6182 Some(format!("file://{vmcore_file}").as_str()), 6183 )); 6184 6185 assert_eq!(vm_state(&api_socket), "Running"); 6186 }); 6187 6188 kill_child(&mut child); 6189 let output = child.wait_with_output().unwrap(); 6190 6191 handle_child_output(r, &output); 6192 } 6193 6194 #[test] 6195 fn test_watchdog() { 6196 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6197 let guest = Guest::new(Box::new(focal)); 6198 let api_socket = temp_api_path(&guest.tmp_dir); 6199 6200 let kernel_path = direct_kernel_boot_path(); 6201 let event_path = temp_event_monitor_path(&guest.tmp_dir); 6202 6203 let mut cmd = 
GuestCommand::new(&guest); 6204 cmd.args(["--cpus", "boot=1"]) 6205 .args(["--memory", "size=512M"]) 6206 .args(["--kernel", kernel_path.to_str().unwrap()]) 6207 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6208 .default_disks() 6209 .args(["--net", guest.default_net_string().as_str()]) 6210 .args(["--watchdog"]) 6211 .args(["--api-socket", &api_socket]) 6212 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6213 .capture_output(); 6214 6215 let mut child = cmd.spawn().unwrap(); 6216 6217 let r = std::panic::catch_unwind(|| { 6218 guest.wait_vm_boot(None).unwrap(); 6219 6220 let mut expected_reboot_count = 1; 6221 6222 // Enable the watchdog with a 15s timeout 6223 enable_guest_watchdog(&guest, 15); 6224 6225 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6226 assert_eq!( 6227 guest 6228 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 6229 .unwrap() 6230 .trim() 6231 .parse::<u32>() 6232 .unwrap_or_default(), 6233 1 6234 ); 6235 6236 // Allow some normal time to elapse to check we don't get spurious reboots 6237 thread::sleep(std::time::Duration::new(40, 0)); 6238 // Check no reboot 6239 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6240 6241 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 6242 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 6243 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 6244 guest.wait_vm_boot(Some(50)).unwrap(); 6245 // Check a reboot is triggered by the watchdog 6246 expected_reboot_count += 1; 6247 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6248 6249 #[cfg(target_arch = "x86_64")] 6250 { 6251 // Now pause the VM and remain offline for 30s 6252 assert!(remote_command(&api_socket, "pause", None)); 6253 let latest_events = [ 6254 &MetaEvent { 6255 event: "pausing".to_string(), 6256 device_id: None, 6257 }, 6258 &MetaEvent { 6259 event: "paused".to_string(), 6260 device_id: None, 6261 }, 6262 ]; 6263 assert!(check_latest_events_exact(&latest_events, &event_path)); 6264 assert!(remote_command(&api_socket, "resume", None)); 6265 6266 // Check no reboot 6267 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6268 } 6269 }); 6270 6271 kill_child(&mut child); 6272 let output = child.wait_with_output().unwrap(); 6273 6274 handle_child_output(r, &output); 6275 } 6276 6277 #[test] 6278 fn test_pvpanic() { 6279 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 6280 let guest = Guest::new(Box::new(jammy)); 6281 let api_socket = temp_api_path(&guest.tmp_dir); 6282 let event_path = temp_event_monitor_path(&guest.tmp_dir); 6283 6284 let kernel_path = direct_kernel_boot_path(); 6285 6286 let mut cmd = GuestCommand::new(&guest); 6287 cmd.args(["--cpus", "boot=1"]) 6288 .args(["--memory", "size=512M"]) 6289 .args(["--kernel", kernel_path.to_str().unwrap()]) 6290 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6291 .default_disks() 6292 .args(["--net", guest.default_net_string().as_str()]) 6293 .args(["--pvpanic"]) 6294 .args(["--api-socket", &api_socket]) 6295 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6296 .capture_output(); 6297 6298 let mut child = cmd.spawn().unwrap(); 6299 6300 let r = std::panic::catch_unwind(|| { 6301 guest.wait_vm_boot(None).unwrap(); 6302 6303 // Trigger guest a panic 6304 make_guest_panic(&guest); 6305 6306 // Wait a while for guest 6307 
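            // to run into the panic path: the guest's pvpanic driver signals
            // the panic to the device, and the VMM turns that into a "panic"
            // entry in the event-monitor file checked below.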
thread::sleep(std::time::Duration::new(10, 0)); 6308 6309 let expected_sequential_events = [&MetaEvent { 6310 event: "panic".to_string(), 6311 device_id: None, 6312 }]; 6313 assert!(check_latest_events_exact( 6314 &expected_sequential_events, 6315 &event_path 6316 )); 6317 }); 6318 6319 kill_child(&mut child); 6320 let output = child.wait_with_output().unwrap(); 6321 6322 handle_child_output(r, &output); 6323 } 6324 6325 #[test] 6326 fn test_tap_from_fd() { 6327 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6328 let guest = Guest::new(Box::new(focal)); 6329 let kernel_path = direct_kernel_boot_path(); 6330 6331 // Create a TAP interface with multi-queue enabled 6332 let num_queue_pairs: usize = 2; 6333 6334 use std::str::FromStr; 6335 let taps = net_util::open_tap( 6336 Some("chtap0"), 6337 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 6338 None, 6339 &mut None, 6340 None, 6341 num_queue_pairs, 6342 Some(libc::O_RDWR | libc::O_NONBLOCK), 6343 ) 6344 .unwrap(); 6345 6346 let mut child = GuestCommand::new(&guest) 6347 .args(["--cpus", &format!("boot={num_queue_pairs}")]) 6348 .args(["--memory", "size=512M"]) 6349 .args(["--kernel", kernel_path.to_str().unwrap()]) 6350 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6351 .default_disks() 6352 .args([ 6353 "--net", 6354 &format!( 6355 "fd=[{},{}],mac={},num_queues={}", 6356 taps[0].as_raw_fd(), 6357 taps[1].as_raw_fd(), 6358 guest.network.guest_mac, 6359 num_queue_pairs * 2 6360 ), 6361 ]) 6362 .capture_output() 6363 .spawn() 6364 .unwrap(); 6365 6366 let r = std::panic::catch_unwind(|| { 6367 guest.wait_vm_boot(None).unwrap(); 6368 6369 assert_eq!( 6370 guest 6371 .ssh_command("ip -o link | wc -l") 6372 .unwrap() 6373 .trim() 6374 .parse::<u32>() 6375 .unwrap_or_default(), 6376 2 6377 ); 6378 6379 guest.reboot_linux(0, None); 6380 6381 assert_eq!( 6382 guest 6383 .ssh_command("ip -o link | wc -l") 6384 .unwrap() 6385 .trim() 6386 .parse::<u32>() 6387 .unwrap_or_default(), 6388 2 6389 ); 6390 }); 6391 6392 kill_child(&mut child); 6393 let output = child.wait_with_output().unwrap(); 6394 6395 handle_child_output(r, &output); 6396 } 6397 6398 // By design, a guest VM won't be able to connect to the host 6399 // machine when using a macvtap network interface (while it can 6400 // communicate externally). As a workaround, this integration 6401 // test creates two macvtap interfaces in 'bridge' mode on the 6402 // same physical net interface, one for the guest and one for 6403 // the host. With additional setup on the IP address and the 6404 // routing table, it enables the communications between the 6405 // guest VM and the host machine. 
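    // In practice, the host-side IP address for the test subnet is configured
    // on the second ("host") macvtap interface rather than on the physical
    // NIC, so guest/host traffic is switched between the two macvtap
    // endpoints sharing the same lower device.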
6406 // Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail 6407 fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) { 6408 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6409 let guest = Guest::new(Box::new(focal)); 6410 let api_socket = temp_api_path(&guest.tmp_dir); 6411 6412 #[cfg(target_arch = "x86_64")] 6413 let kernel_path = direct_kernel_boot_path(); 6414 #[cfg(target_arch = "aarch64")] 6415 let kernel_path = edk2_path(); 6416 6417 let phy_net = "eth0"; 6418 6419 // Create a macvtap interface for the guest VM to use 6420 assert!(exec_host_command_status(&format!( 6421 "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mod bridge" 6422 )) 6423 .success()); 6424 assert!(exec_host_command_status(&format!( 6425 "sudo ip link set {} address {} up", 6426 guest_macvtap_name, guest.network.guest_mac 6427 )) 6428 .success()); 6429 assert!( 6430 exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success() 6431 ); 6432 6433 let tap_index = 6434 fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap(); 6435 let tap_device = format!("/dev/tap{}", tap_index.trim()); 6436 6437 assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success()); 6438 6439 let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap(); 6440 let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6441 assert!(tap_fd1 > 0); 6442 let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6443 assert!(tap_fd2 > 0); 6444 6445 // Create a macvtap on the same physical net interface for 6446 // the host machine to use 6447 assert!(exec_host_command_status(&format!( 6448 "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mod bridge" 6449 )) 6450 .success()); 6451 // Use default mask "255.255.255.0" 6452 assert!(exec_host_command_status(&format!( 6453 "sudo ip address add {}/24 dev {}", 6454 guest.network.host_ip, host_macvtap_name 6455 )) 6456 .success()); 6457 assert!( 6458 exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up")) 6459 .success() 6460 ); 6461 6462 let mut guest_command = GuestCommand::new(&guest); 6463 guest_command 6464 .args(["--cpus", "boot=2"]) 6465 .args(["--memory", "size=512M"]) 6466 .args(["--kernel", kernel_path.to_str().unwrap()]) 6467 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6468 .default_disks() 6469 .args(["--api-socket", &api_socket]); 6470 6471 let net_params = format!( 6472 "fd=[{},{}],mac={},num_queues=4", 6473 tap_fd1, tap_fd2, guest.network.guest_mac 6474 ); 6475 6476 if !hotplug { 6477 guest_command.args(["--net", &net_params]); 6478 } 6479 6480 let mut child = guest_command.capture_output().spawn().unwrap(); 6481 6482 if hotplug { 6483 // Give some time to the VMM process to listen to the API 6484 // socket. This is the only requirement to avoid the following 6485 // call to ch-remote from failing. 
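            // The hotplug below is roughly equivalent to running (illustrative
            // invocation, values taken from `net_params`):
            //   ch-remote --api-socket <api_socket> add-net fd=[<fd1>,<fd2>],mac=<mac>,num_queues=4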
6486 thread::sleep(std::time::Duration::new(10, 0)); 6487 // Hotplug the virtio-net device 6488 let (cmd_success, cmd_output) = 6489 remote_command_w_output(&api_socket, "add-net", Some(&net_params)); 6490 assert!(cmd_success); 6491 #[cfg(target_arch = "x86_64")] 6492 assert!(String::from_utf8_lossy(&cmd_output) 6493 .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}")); 6494 #[cfg(target_arch = "aarch64")] 6495 assert!(String::from_utf8_lossy(&cmd_output) 6496 .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}")); 6497 } 6498 6499 // The functional connectivity provided by the virtio-net device 6500 // gets tested through wait_vm_boot() as it expects to receive a 6501 // HTTP request, and through the SSH command as well. 6502 let r = std::panic::catch_unwind(|| { 6503 guest.wait_vm_boot(None).unwrap(); 6504 6505 assert_eq!( 6506 guest 6507 .ssh_command("ip -o link | wc -l") 6508 .unwrap() 6509 .trim() 6510 .parse::<u32>() 6511 .unwrap_or_default(), 6512 2 6513 ); 6514 6515 guest.reboot_linux(0, None); 6516 6517 assert_eq!( 6518 guest 6519 .ssh_command("ip -o link | wc -l") 6520 .unwrap() 6521 .trim() 6522 .parse::<u32>() 6523 .unwrap_or_default(), 6524 2 6525 ); 6526 }); 6527 6528 kill_child(&mut child); 6529 6530 exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}")); 6531 exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}")); 6532 6533 let output = child.wait_with_output().unwrap(); 6534 6535 handle_child_output(r, &output); 6536 } 6537 6538 #[test] 6539 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6540 fn test_macvtap() { 6541 _test_macvtap(false, "guestmacvtap0", "hostmacvtap0") 6542 } 6543 6544 #[test] 6545 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6546 fn test_macvtap_hotplug() { 6547 _test_macvtap(true, "guestmacvtap1", "hostmacvtap1") 6548 } 6549 6550 #[test] 6551 #[cfg(not(feature = "mshv"))] 6552 fn test_ovs_dpdk() { 6553 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6554 let guest1 = Guest::new(Box::new(focal1)); 6555 6556 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6557 let guest2 = Guest::new(Box::new(focal2)); 6558 let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir)); 6559 6560 let (mut child1, mut child2) = 6561 setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false); 6562 6563 // Create the snapshot directory 6564 let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir); 6565 6566 let r = std::panic::catch_unwind(|| { 6567 // Remove one of the two ports from the OVS bridge 6568 assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success()); 6569 6570 // Spawn a new netcat listener in the first VM 6571 let guest_ip = guest1.network.guest_ip.clone(); 6572 thread::spawn(move || { 6573 ssh_command_ip( 6574 "nc -l 12345", 6575 &guest_ip, 6576 DEFAULT_SSH_RETRIES, 6577 DEFAULT_SSH_TIMEOUT, 6578 ) 6579 .unwrap(); 6580 }); 6581 6582 // Wait for the server to be listening 6583 thread::sleep(std::time::Duration::new(5, 0)); 6584 6585 // Check the connection fails this time 6586 assert!(guest2.ssh_command("nc -vz 172.100.0.1 12345").is_err()); 6587 6588 // Add the OVS port back 6589 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 6590 6591 // And finally check the connection is functional again 6592 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6593 6594 // Pause the 
VM 6595 assert!(remote_command(&api_socket_source, "pause", None)); 6596 6597 // Take a snapshot from the VM 6598 assert!(remote_command( 6599 &api_socket_source, 6600 "snapshot", 6601 Some(format!("file://{snapshot_dir}").as_str()), 6602 )); 6603 6604 // Wait to make sure the snapshot is completed 6605 thread::sleep(std::time::Duration::new(10, 0)); 6606 }); 6607 6608 // Shutdown the source VM 6609 kill_child(&mut child2); 6610 let output = child2.wait_with_output().unwrap(); 6611 handle_child_output(r, &output); 6612 6613 // Remove the vhost-user socket file. 6614 Command::new("rm") 6615 .arg("-f") 6616 .arg("/tmp/dpdkvhostclient2") 6617 .output() 6618 .unwrap(); 6619 6620 let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir)); 6621 // Restore the VM from the snapshot 6622 let mut child2 = GuestCommand::new(&guest2) 6623 .args(["--api-socket", &api_socket_restored]) 6624 .args([ 6625 "--restore", 6626 format!("source_url=file://{snapshot_dir}").as_str(), 6627 ]) 6628 .capture_output() 6629 .spawn() 6630 .unwrap(); 6631 6632 // Wait for the VM to be restored 6633 thread::sleep(std::time::Duration::new(10, 0)); 6634 6635 let r = std::panic::catch_unwind(|| { 6636 // Resume the VM 6637 assert!(remote_command(&api_socket_restored, "resume", None)); 6638 6639 // Spawn a new netcat listener in the first VM 6640 let guest_ip = guest1.network.guest_ip.clone(); 6641 thread::spawn(move || { 6642 ssh_command_ip( 6643 "nc -l 12345", 6644 &guest_ip, 6645 DEFAULT_SSH_RETRIES, 6646 DEFAULT_SSH_TIMEOUT, 6647 ) 6648 .unwrap(); 6649 }); 6650 6651 // Wait for the server to be listening 6652 thread::sleep(std::time::Duration::new(5, 0)); 6653 6654 // And check the connection is still functional after restore 6655 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6656 }); 6657 6658 kill_child(&mut child1); 6659 kill_child(&mut child2); 6660 6661 let output = child1.wait_with_output().unwrap(); 6662 child2.wait().unwrap(); 6663 6664 cleanup_ovs_dpdk(); 6665 6666 handle_child_output(r, &output); 6667 } 6668 6669 fn setup_spdk_nvme(nvme_dir: &std::path::Path) { 6670 cleanup_spdk_nvme(); 6671 6672 assert!(exec_host_command_status(&format!( 6673 "mkdir -p {}", 6674 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6675 )) 6676 .success()); 6677 assert!(exec_host_command_status(&format!( 6678 "truncate {} -s 128M", 6679 nvme_dir.join("test-disk.raw").to_str().unwrap() 6680 )) 6681 .success()); 6682 assert!(exec_host_command_status(&format!( 6683 "mkfs.ext4 {}", 6684 nvme_dir.join("test-disk.raw").to_str().unwrap() 6685 )) 6686 .success()); 6687 6688 // Start the SPDK nvmf_tgt daemon to present NVMe device as a VFIO user device 6689 Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt") 6690 .args(["-i", "0", "-m", "0x1"]) 6691 .spawn() 6692 .unwrap(); 6693 thread::sleep(std::time::Duration::new(2, 0)); 6694 6695 assert!(exec_host_command_with_retries( 6696 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER", 6697 3, 6698 std::time::Duration::new(5, 0), 6699 )); 6700 assert!(exec_host_command_status(&format!( 6701 "/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512", 6702 nvme_dir.join("test-disk.raw").to_str().unwrap() 6703 )) 6704 .success()); 6705 assert!(exec_host_command_status( 6706 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test" 6707 ) 6708 .success()); 6709 assert!(exec_host_command_status( 6710 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode test" 6711 ) 6712 .success()); 6713 
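        // Finally, expose the subsystem over the vfio-user transport. The `-a`
        // argument points at the directory where SPDK creates its vfio-user
        // control socket (`cntrl`), which test_vfio_user() later hands to the
        // VMM through `add-user-device`.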
assert!(exec_host_command_status(&format!( 6714 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0", 6715 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6716 )) 6717 .success()); 6718 } 6719 6720 fn cleanup_spdk_nvme() { 6721 exec_host_command_status("pkill -f nvmf_tgt"); 6722 } 6723 6724 #[test] 6725 fn test_vfio_user() { 6726 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 6727 let jammy = UbuntuDiskConfig::new(jammy_image); 6728 let guest = Guest::new(Box::new(jammy)); 6729 6730 let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user"); 6731 setup_spdk_nvme(spdk_nvme_dir.as_path()); 6732 6733 let api_socket = temp_api_path(&guest.tmp_dir); 6734 let mut child = GuestCommand::new(&guest) 6735 .args(["--api-socket", &api_socket]) 6736 .args(["--cpus", "boot=1"]) 6737 .args(["--memory", "size=512M,shared=on,hugepages=on"]) 6738 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6739 .args(["--serial", "tty", "--console", "off"]) 6740 .default_disks() 6741 .default_net() 6742 .capture_output() 6743 .spawn() 6744 .unwrap(); 6745 6746 let r = std::panic::catch_unwind(|| { 6747 guest.wait_vm_boot(None).unwrap(); 6748 6749 // Hotplug the SPDK-NVMe device to the VM 6750 let (cmd_success, cmd_output) = remote_command_w_output( 6751 &api_socket, 6752 "add-user-device", 6753 Some(&format!( 6754 "socket={},id=vfio_user0", 6755 spdk_nvme_dir 6756 .as_path() 6757 .join("nvme-vfio-user/cntrl") 6758 .to_str() 6759 .unwrap(), 6760 )), 6761 ); 6762 assert!(cmd_success); 6763 assert!(String::from_utf8_lossy(&cmd_output) 6764 .contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}")); 6765 6766 thread::sleep(std::time::Duration::new(10, 0)); 6767 6768 // Check both if /dev/nvme exists and if the block size is 128M. 6769 assert_eq!( 6770 guest 6771 .ssh_command("lsblk | grep nvme0n1 | grep -c 128M") 6772 .unwrap() 6773 .trim() 6774 .parse::<u32>() 6775 .unwrap_or_default(), 6776 1 6777 ); 6778 6779 // Check changes persist after reboot 6780 assert_eq!( 6781 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6782 "" 6783 ); 6784 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 6785 guest 6786 .ssh_command("echo test123 | sudo tee /mnt/test") 6787 .unwrap(); 6788 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 6789 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 6790 6791 guest.reboot_linux(0, None); 6792 assert_eq!( 6793 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6794 "" 6795 ); 6796 assert_eq!( 6797 guest.ssh_command("sudo cat /mnt/test").unwrap().trim(), 6798 "test123" 6799 ); 6800 }); 6801 6802 cleanup_spdk_nvme(); 6803 6804 kill_child(&mut child); 6805 let output = child.wait_with_output().unwrap(); 6806 6807 handle_child_output(r, &output); 6808 } 6809 6810 #[test] 6811 #[cfg(target_arch = "x86_64")] 6812 fn test_vdpa_block() { 6813 // Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded. 
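        // The host is expected to have prepared the simulator up front,
        // typically with something along these lines (illustrative setup, not
        // executed by this test):
        //   modprobe vdpa_sim_blk
        //   vdpa dev add name blk0 mgmtdev vdpasim_blk
        // which is what makes the /dev/vhost-vdpa-* nodes used below appear.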
6814 assert!(exec_host_command_status("lsmod | grep vdpa_sim_blk").success()); 6815 6816 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6817 let guest = Guest::new(Box::new(focal)); 6818 let api_socket = temp_api_path(&guest.tmp_dir); 6819 6820 let kernel_path = direct_kernel_boot_path(); 6821 6822 let mut child = GuestCommand::new(&guest) 6823 .args(["--cpus", "boot=2"]) 6824 .args(["--memory", "size=512M,hugepages=on"]) 6825 .args(["--kernel", kernel_path.to_str().unwrap()]) 6826 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6827 .default_disks() 6828 .default_net() 6829 .args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"]) 6830 .args(["--platform", "num_pci_segments=2,iommu_segments=1"]) 6831 .args(["--api-socket", &api_socket]) 6832 .capture_output() 6833 .spawn() 6834 .unwrap(); 6835 6836 let r = std::panic::catch_unwind(|| { 6837 guest.wait_vm_boot(None).unwrap(); 6838 6839 // Check both if /dev/vdc exists and if the block size is 128M. 6840 assert_eq!( 6841 guest 6842 .ssh_command("lsblk | grep vdc | grep -c 128M") 6843 .unwrap() 6844 .trim() 6845 .parse::<u32>() 6846 .unwrap_or_default(), 6847 1 6848 ); 6849 6850 // Check the content of the block device after we wrote to it. 6851 // The vpda-sim-blk should let us read what we previously wrote. 6852 guest 6853 .ssh_command("sudo bash -c 'echo foobar > /dev/vdc'") 6854 .unwrap(); 6855 assert_eq!( 6856 guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(), 6857 "foobar" 6858 ); 6859 6860 // Hotplug an extra vDPA block device behind the vIOMMU 6861 // Add a new vDPA device to the VM 6862 let (cmd_success, cmd_output) = remote_command_w_output( 6863 &api_socket, 6864 "add-vdpa", 6865 Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"), 6866 ); 6867 assert!(cmd_success); 6868 assert!(String::from_utf8_lossy(&cmd_output) 6869 .contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}")); 6870 6871 thread::sleep(std::time::Duration::new(10, 0)); 6872 6873 // Check IOMMU setup 6874 assert!(guest 6875 .does_device_vendor_pair_match("0x1057", "0x1af4") 6876 .unwrap_or_default()); 6877 assert_eq!( 6878 guest 6879 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 6880 .unwrap() 6881 .trim(), 6882 "0001:00:01.0" 6883 ); 6884 6885 // Check both if /dev/vdd exists and if the block size is 128M. 6886 assert_eq!( 6887 guest 6888 .ssh_command("lsblk | grep vdd | grep -c 128M") 6889 .unwrap() 6890 .trim() 6891 .parse::<u32>() 6892 .unwrap_or_default(), 6893 1 6894 ); 6895 6896 // Write some content to the block device we've just plugged. 6897 guest 6898 .ssh_command("sudo bash -c 'echo foobar > /dev/vdd'") 6899 .unwrap(); 6900 6901 // Check we can read the content back. 
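            // (vdpa_sim_blk is a memory-backed simulator, so the data written
            // above only needs to survive until the device is unplugged.)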
6902 assert_eq!( 6903 guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(), 6904 "foobar" 6905 ); 6906 6907 // Unplug the device 6908 let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0")); 6909 assert!(cmd_success); 6910 thread::sleep(std::time::Duration::new(10, 0)); 6911 6912 // Check /dev/vdd doesn't exist anymore 6913 assert_eq!( 6914 guest 6915 .ssh_command("lsblk | grep -c vdd || true") 6916 .unwrap() 6917 .trim() 6918 .parse::<u32>() 6919 .unwrap_or(1), 6920 0 6921 ); 6922 }); 6923 6924 kill_child(&mut child); 6925 let output = child.wait_with_output().unwrap(); 6926 6927 handle_child_output(r, &output); 6928 } 6929 6930 #[test] 6931 #[cfg(target_arch = "x86_64")] 6932 #[ignore = "See #5756"] 6933 fn test_vdpa_net() { 6934 // Before trying to run the test, verify the vdpa_sim_net module is correctly loaded. 6935 if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() { 6936 return; 6937 } 6938 6939 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6940 let guest = Guest::new(Box::new(focal)); 6941 6942 let kernel_path = direct_kernel_boot_path(); 6943 6944 let mut child = GuestCommand::new(&guest) 6945 .args(["--cpus", "boot=2"]) 6946 .args(["--memory", "size=512M,hugepages=on"]) 6947 .args(["--kernel", kernel_path.to_str().unwrap()]) 6948 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6949 .default_disks() 6950 .default_net() 6951 .args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"]) 6952 .capture_output() 6953 .spawn() 6954 .unwrap(); 6955 6956 let r = std::panic::catch_unwind(|| { 6957 guest.wait_vm_boot(None).unwrap(); 6958 6959 // Check we can find network interface related to vDPA device 6960 assert_eq!( 6961 guest 6962 .ssh_command("ip -o link | grep -c ens6") 6963 .unwrap() 6964 .trim() 6965 .parse::<u32>() 6966 .unwrap_or(0), 6967 1 6968 ); 6969 6970 guest 6971 .ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6") 6972 .unwrap(); 6973 guest.ssh_command("sudo ip link set up dev ens6").unwrap(); 6974 6975 // Check there is no packet yet on both TX/RX of the network interface 6976 assert_eq!( 6977 guest 6978 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'") 6979 .unwrap() 6980 .trim() 6981 .parse::<u32>() 6982 .unwrap_or(0), 6983 2 6984 ); 6985 6986 // Send 6 packets with ping command 6987 guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap(); 6988 6989 // Check we can find 6 packets on both TX/RX of the network interface 6990 assert_eq!( 6991 guest 6992 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'") 6993 .unwrap() 6994 .trim() 6995 .parse::<u32>() 6996 .unwrap_or(0), 6997 2 6998 ); 6999 7000 // No need to check for hotplug as we already tested it through 7001 // test_vdpa_block() 7002 }); 7003 7004 kill_child(&mut child); 7005 let output = child.wait_with_output().unwrap(); 7006 7007 handle_child_output(r, &output); 7008 } 7009 7010 #[test] 7011 #[cfg(target_arch = "x86_64")] 7012 fn test_tpm() { 7013 let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 7014 let guest = Guest::new(Box::new(focal)); 7015 7016 let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir); 7017 7018 let mut guest_cmd = GuestCommand::new(&guest); 7019 guest_cmd 7020 .args(["--cpus", "boot=1"]) 7021 .args(["--memory", "size=512M"]) 7022 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 7023 .args(["--tpm", &format!("socket={swtpm_socket_path}")]) 7024 .capture_output() 7025 .default_disks() 7026 .default_net(); 7027 7028 // Start 
swtpm daemon 7029 let mut swtpm_child = swtpm_command.spawn().unwrap(); 7030 thread::sleep(std::time::Duration::new(10, 0)); 7031 let mut child = guest_cmd.spawn().unwrap(); 7032 let r = std::panic::catch_unwind(|| { 7033 guest.wait_vm_boot(None).unwrap(); 7034 assert_eq!( 7035 guest.ssh_command("ls /dev/tpm0").unwrap().trim(), 7036 "/dev/tpm0" 7037 ); 7038 guest.ssh_command("sudo tpm2_selftest -f").unwrap(); 7039 guest 7040 .ssh_command("echo 'hello' > /tmp/checksum_test; ") 7041 .unwrap(); 7042 guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap(); 7043 }); 7044 7045 let _ = swtpm_child.kill(); 7046 let _d_out = swtpm_child.wait_with_output().unwrap(); 7047 7048 kill_child(&mut child); 7049 let output = child.wait_with_output().unwrap(); 7050 7051 handle_child_output(r, &output); 7052 } 7053 7054 #[test] 7055 #[cfg(target_arch = "x86_64")] 7056 fn test_double_tty() { 7057 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7058 let guest = Guest::new(Box::new(focal)); 7059 let mut cmd = GuestCommand::new(&guest); 7060 let api_socket = temp_api_path(&guest.tmp_dir); 7061 let tty_str: &str = "console=hvc0 earlyprintk=ttyS0 "; 7062 // linux printk module enable console log. 7063 let con_dis_str: &str = "console [hvc0] enabled"; 7064 // linux printk module disable console log. 7065 let con_enb_str: &str = "bootconsole [earlyser0] disabled"; 7066 7067 let kernel_path = direct_kernel_boot_path(); 7068 7069 cmd.args(["--cpus", "boot=1"]) 7070 .args(["--memory", "size=512M"]) 7071 .args(["--kernel", kernel_path.to_str().unwrap()]) 7072 .args([ 7073 "--cmdline", 7074 DIRECT_KERNEL_BOOT_CMDLINE 7075 .replace("console=hvc0 ", tty_str) 7076 .as_str(), 7077 ]) 7078 .capture_output() 7079 .default_disks() 7080 .default_net() 7081 .args(["--serial", "tty"]) 7082 .args(["--console", "tty"]) 7083 .args(["--api-socket", &api_socket]); 7084 7085 let mut child = cmd.spawn().unwrap(); 7086 7087 let mut r = std::panic::catch_unwind(|| { 7088 guest.wait_vm_boot(None).unwrap(); 7089 }); 7090 7091 kill_child(&mut child); 7092 let output = child.wait_with_output().unwrap(); 7093 7094 if r.is_ok() { 7095 r = std::panic::catch_unwind(|| { 7096 let s = String::from_utf8_lossy(&output.stdout); 7097 assert!(s.contains(tty_str)); 7098 assert!(s.contains(con_dis_str)); 7099 assert!(s.contains(con_enb_str)); 7100 }); 7101 } 7102 7103 handle_child_output(r, &output); 7104 } 7105 7106 #[test] 7107 #[cfg(target_arch = "x86_64")] 7108 fn test_nmi() { 7109 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 7110 let guest = Guest::new(Box::new(jammy)); 7111 let api_socket = temp_api_path(&guest.tmp_dir); 7112 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7113 7114 let kernel_path = direct_kernel_boot_path(); 7115 let cmd_line = format!("{} {}", DIRECT_KERNEL_BOOT_CMDLINE, "unknown_nmi_panic=1"); 7116 7117 let mut cmd = GuestCommand::new(&guest); 7118 cmd.args(["--cpus", "boot=4"]) 7119 .args(["--memory", "size=512M"]) 7120 .args(["--kernel", kernel_path.to_str().unwrap()]) 7121 .args(["--cmdline", cmd_line.as_str()]) 7122 .default_disks() 7123 .args(["--net", guest.default_net_string().as_str()]) 7124 .args(["--pvpanic"]) 7125 .args(["--api-socket", &api_socket]) 7126 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7127 .capture_output(); 7128 7129 let mut child = cmd.spawn().unwrap(); 7130 7131 let r = std::panic::catch_unwind(|| { 7132 
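            // The guest kernel was booted with `unknown_nmi_panic=1`, so the
            // NMI injected through the `nmi` API request below should panic
            // the guest, which the pvpanic device then surfaces as a "panic"
            // event in the event-monitor file.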
guest.wait_vm_boot(None).unwrap(); 7133 7134 assert!(remote_command(&api_socket, "nmi", None)); 7135 7136 // Wait a while for guest 7137 thread::sleep(std::time::Duration::new(3, 0)); 7138 7139 let expected_sequential_events = [&MetaEvent { 7140 event: "panic".to_string(), 7141 device_id: None, 7142 }]; 7143 assert!(check_latest_events_exact( 7144 &expected_sequential_events, 7145 &event_path 7146 )); 7147 }); 7148 7149 kill_child(&mut child); 7150 let output = child.wait_with_output().unwrap(); 7151 7152 handle_child_output(r, &output); 7153 } 7154 } 7155 7156 mod dbus_api { 7157 use crate::*; 7158 7159 // Start cloud-hypervisor with no VM parameters, running both the HTTP 7160 // and DBus APIs. Alternate calls to the external APIs (HTTP and DBus) 7161 // to create a VM, boot it, and verify that it can be shut down and then 7162 // booted again. 7163 #[test] 7164 fn test_api_dbus_and_http_interleaved() { 7165 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7166 let guest = Guest::new(Box::new(focal)); 7167 let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir); 7168 let http_api = TargetApi::new_http_api(&guest.tmp_dir); 7169 7170 let mut child = GuestCommand::new(&guest) 7171 .args(dbus_api.guest_args()) 7172 .args(http_api.guest_args()) 7173 .capture_output() 7174 .spawn() 7175 .unwrap(); 7176 7177 thread::sleep(std::time::Duration::new(1, 0)); 7178 7179 // Verify API servers are running 7180 assert!(dbus_api.remote_command("ping", None)); 7181 assert!(http_api.remote_command("ping", None)); 7182 7183 // Create the VM first 7184 let cpu_count: u8 = 4; 7185 let request_body = guest.api_create_body( 7186 cpu_count, 7187 direct_kernel_boot_path().to_str().unwrap(), 7188 DIRECT_KERNEL_BOOT_CMDLINE, 7189 ); 7190 7191 let temp_config_path = guest.tmp_dir.as_path().join("config"); 7192 std::fs::write(&temp_config_path, request_body).unwrap(); 7193 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 7194 7195 let r = std::panic::catch_unwind(|| { 7196 // Create the VM 7197 assert!(dbus_api.remote_command("create", Some(create_config),)); 7198 7199 // Then boot it 7200 assert!(http_api.remote_command("boot", None)); 7201 guest.wait_vm_boot(None).unwrap(); 7202 7203 // Check that the VM booted as expected 7204 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7205 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7206 7207 // Sync and shutdown without powering off to prevent filesystem 7208 // corruption. 
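            // `shutdown -H` halts the guest without powering off the VM, so
            // the VMM process keeps running and the same instance can be shut
            // down and booted again purely through the two API servers.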
7209 guest.ssh_command("sync").unwrap(); 7210 guest.ssh_command("sudo shutdown -H now").unwrap(); 7211 7212 // Wait for the guest to be fully shutdown 7213 thread::sleep(std::time::Duration::new(20, 0)); 7214 7215 // Then shutdown the VM 7216 assert!(dbus_api.remote_command("shutdown", None)); 7217 7218 // Then boot it again 7219 assert!(http_api.remote_command("boot", None)); 7220 guest.wait_vm_boot(None).unwrap(); 7221 7222 // Check that the VM booted as expected 7223 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7224 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7225 }); 7226 7227 kill_child(&mut child); 7228 let output = child.wait_with_output().unwrap(); 7229 7230 handle_child_output(r, &output); 7231 } 7232 7233 #[test] 7234 fn test_api_dbus_create_boot() { 7235 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7236 let guest = Guest::new(Box::new(focal)); 7237 7238 _test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7239 } 7240 7241 #[test] 7242 fn test_api_dbus_shutdown() { 7243 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7244 let guest = Guest::new(Box::new(focal)); 7245 7246 _test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7247 } 7248 7249 #[test] 7250 fn test_api_dbus_delete() { 7251 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7252 let guest = Guest::new(Box::new(focal)); 7253 7254 _test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest); 7255 } 7256 7257 #[test] 7258 fn test_api_dbus_pause_resume() { 7259 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7260 let guest = Guest::new(Box::new(focal)); 7261 7262 _test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7263 } 7264 } 7265 7266 mod common_sequential { 7267 use std::fs::remove_dir_all; 7268 7269 use crate::*; 7270 7271 #[test] 7272 #[cfg(not(feature = "mshv"))] 7273 fn test_memory_mergeable_on() { 7274 test_memory_mergeable(true) 7275 } 7276 7277 fn snapshot_and_check_events(api_socket: &str, snapshot_dir: &str, event_path: &str) { 7278 // Pause the VM 7279 assert!(remote_command(api_socket, "pause", None)); 7280 let latest_events: [&MetaEvent; 2] = [ 7281 &MetaEvent { 7282 event: "pausing".to_string(), 7283 device_id: None, 7284 }, 7285 &MetaEvent { 7286 event: "paused".to_string(), 7287 device_id: None, 7288 }, 7289 ]; 7290 // See: #5938 7291 thread::sleep(std::time::Duration::new(1, 0)); 7292 assert!(check_latest_events_exact(&latest_events, event_path)); 7293 7294 // Take a snapshot from the VM 7295 assert!(remote_command( 7296 api_socket, 7297 "snapshot", 7298 Some(format!("file://{snapshot_dir}").as_str()), 7299 )); 7300 7301 // Wait to make sure the snapshot is completed 7302 thread::sleep(std::time::Duration::new(10, 0)); 7303 7304 let latest_events = [ 7305 &MetaEvent { 7306 event: "snapshotting".to_string(), 7307 device_id: None, 7308 }, 7309 &MetaEvent { 7310 event: "snapshotted".to_string(), 7311 device_id: None, 7312 }, 7313 ]; 7314 // See: #5938 7315 thread::sleep(std::time::Duration::new(1, 0)); 7316 assert!(check_latest_events_exact(&latest_events, event_path)); 7317 } 7318 7319 // One thing to note about this test. The virtio-net device is heavily used 7320 // through each ssh command. There's no need to perform a dedicated test to 7321 // verify the migration went well for virtio-net. 
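    // The hotplug flavour additionally resizes the guest with virtio-mem and
    // virtio-balloon before the snapshot is taken, so the restored VM is
    // expected to come back with the resized memory layout rather than the
    // boot-time one.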
7322 #[test] 7323 #[cfg(not(feature = "mshv"))] 7324 fn test_snapshot_restore_hotplug_virtiomem() { 7325 _test_snapshot_restore(true); 7326 } 7327 7328 #[test] 7329 fn test_snapshot_restore_basic() { 7330 _test_snapshot_restore(false); 7331 } 7332 7333 fn _test_snapshot_restore(use_hotplug: bool) { 7334 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7335 let guest = Guest::new(Box::new(focal)); 7336 let kernel_path = direct_kernel_boot_path(); 7337 7338 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7339 7340 let net_id = "net123"; 7341 let net_params = format!( 7342 "id={},tap=,mac={},ip={},mask=255.255.255.0", 7343 net_id, guest.network.guest_mac, guest.network.host_ip 7344 ); 7345 let mut mem_params = "size=2G"; 7346 7347 if use_hotplug { 7348 mem_params = "size=2G,hotplug_method=virtio-mem,hotplug_size=32G" 7349 } 7350 7351 let cloudinit_params = format!( 7352 "path={},iommu=on", 7353 guest.disk_config.disk(DiskType::CloudInit).unwrap() 7354 ); 7355 7356 let socket = temp_vsock_path(&guest.tmp_dir); 7357 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7358 7359 let mut child = GuestCommand::new(&guest) 7360 .args(["--api-socket", &api_socket_source]) 7361 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7362 .args(["--cpus", "boot=4"]) 7363 .args(["--memory", mem_params]) 7364 .args(["--balloon", "size=0"]) 7365 .args(["--kernel", kernel_path.to_str().unwrap()]) 7366 .args([ 7367 "--disk", 7368 format!( 7369 "path={}", 7370 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 7371 ) 7372 .as_str(), 7373 cloudinit_params.as_str(), 7374 ]) 7375 .args(["--net", net_params.as_str()]) 7376 .args(["--vsock", format!("cid=3,socket={socket}").as_str()]) 7377 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7378 .capture_output() 7379 .spawn() 7380 .unwrap(); 7381 7382 let console_text = String::from("On a branch floating down river a cricket, singing."); 7383 // Create the snapshot directory 7384 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7385 7386 let r = std::panic::catch_unwind(|| { 7387 guest.wait_vm_boot(None).unwrap(); 7388 7389 // Check the number of vCPUs 7390 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 7391 // Check the guest RAM 7392 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 7393 if use_hotplug { 7394 // Increase guest RAM with virtio-mem 7395 resize_command( 7396 &api_socket_source, 7397 None, 7398 Some(6 << 30), 7399 None, 7400 Some(&event_path), 7401 ); 7402 thread::sleep(std::time::Duration::new(5, 0)); 7403 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 7404 // Use balloon to remove RAM from the VM 7405 resize_command( 7406 &api_socket_source, 7407 None, 7408 None, 7409 Some(1 << 30), 7410 Some(&event_path), 7411 ); 7412 thread::sleep(std::time::Duration::new(5, 0)); 7413 let total_memory = guest.get_total_memory().unwrap_or_default(); 7414 assert!(total_memory > 4_800_000); 7415 assert!(total_memory < 5_760_000); 7416 } 7417 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 7418 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7419 7420 // x86_64: We check that removing and adding back the virtio-net device 7421 // does not break the snapshot/restore support for virtio-pci. 7422 // This is an important thing to test as the hotplug will 7423 // trigger a PCI BAR reprogramming, which is a good way of 7424 // checking if the stored resources are correctly restored. 
7425 // Unplug the virtio-net device 7426 // AArch64: Device hotplug is currently not supported, skipping here. 7427 #[cfg(target_arch = "x86_64")] 7428 { 7429 assert!(remote_command( 7430 &api_socket_source, 7431 "remove-device", 7432 Some(net_id), 7433 )); 7434 thread::sleep(std::time::Duration::new(10, 0)); 7435 let latest_events = [&MetaEvent { 7436 event: "device-removed".to_string(), 7437 device_id: Some(net_id.to_string()), 7438 }]; 7439 // See: #5938 7440 thread::sleep(std::time::Duration::new(1, 0)); 7441 assert!(check_latest_events_exact(&latest_events, &event_path)); 7442 7443 // Plug the virtio-net device again 7444 assert!(remote_command( 7445 &api_socket_source, 7446 "add-net", 7447 Some(net_params.as_str()), 7448 )); 7449 thread::sleep(std::time::Duration::new(10, 0)); 7450 } 7451 7452 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7453 }); 7454 7455 // Shutdown the source VM and check console output 7456 kill_child(&mut child); 7457 let output = child.wait_with_output().unwrap(); 7458 handle_child_output(r, &output); 7459 7460 let r = std::panic::catch_unwind(|| { 7461 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7462 }); 7463 7464 handle_child_output(r, &output); 7465 7466 // Remove the vsock socket file. 7467 Command::new("rm") 7468 .arg("-f") 7469 .arg(socket.as_str()) 7470 .output() 7471 .unwrap(); 7472 7473 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7474 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7475 7476 // Restore the VM from the snapshot 7477 let mut child = GuestCommand::new(&guest) 7478 .args(["--api-socket", &api_socket_restored]) 7479 .args([ 7480 "--event-monitor", 7481 format!("path={event_path_restored}").as_str(), 7482 ]) 7483 .args([ 7484 "--restore", 7485 format!("source_url=file://{snapshot_dir}").as_str(), 7486 ]) 7487 .capture_output() 7488 .spawn() 7489 .unwrap(); 7490 7491 // Wait for the VM to be restored 7492 thread::sleep(std::time::Duration::new(20, 0)); 7493 let expected_events = [ 7494 &MetaEvent { 7495 event: "starting".to_string(), 7496 device_id: None, 7497 }, 7498 &MetaEvent { 7499 event: "activated".to_string(), 7500 device_id: Some("__console".to_string()), 7501 }, 7502 &MetaEvent { 7503 event: "activated".to_string(), 7504 device_id: Some("__rng".to_string()), 7505 }, 7506 &MetaEvent { 7507 event: "restoring".to_string(), 7508 device_id: None, 7509 }, 7510 ]; 7511 assert!(check_sequential_events( 7512 &expected_events, 7513 &event_path_restored 7514 )); 7515 let latest_events = [&MetaEvent { 7516 event: "restored".to_string(), 7517 device_id: None, 7518 }]; 7519 assert!(check_latest_events_exact( 7520 &latest_events, 7521 &event_path_restored 7522 )); 7523 7524 // Remove the snapshot dir 7525 let _ = remove_dir_all(snapshot_dir.as_str()); 7526 7527 let r = std::panic::catch_unwind(|| { 7528 // Resume the VM 7529 assert!(remote_command(&api_socket_restored, "resume", None)); 7530 // There is no way that we can ensure the 'write()' to the 7531 // event file is completed when the 'resume' request is 7532 // returned successfully, because the 'write()' was done 7533 // asynchronously from a different thread of Cloud 7534 // Hypervisor (e.g. the event-monitor thread). 
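            // A short sleep therefore gives the event-monitor thread time to
            // flush the "resuming"/"resumed" records before they are checked.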
7535 thread::sleep(std::time::Duration::new(1, 0)); 7536 let latest_events = [ 7537 &MetaEvent { 7538 event: "resuming".to_string(), 7539 device_id: None, 7540 }, 7541 &MetaEvent { 7542 event: "resumed".to_string(), 7543 device_id: None, 7544 }, 7545 ]; 7546 assert!(check_latest_events_exact( 7547 &latest_events, 7548 &event_path_restored 7549 )); 7550 7551 // Perform same checks to validate VM has been properly restored 7552 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 7553 let total_memory = guest.get_total_memory().unwrap_or_default(); 7554 if !use_hotplug { 7555 assert!(total_memory > 1_920_000); 7556 } else { 7557 assert!(total_memory > 4_800_000); 7558 assert!(total_memory < 5_760_000); 7559 // Deflate balloon to restore entire RAM to the VM 7560 resize_command(&api_socket_restored, None, None, Some(0), None); 7561 thread::sleep(std::time::Duration::new(5, 0)); 7562 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 7563 // Decrease guest RAM with virtio-mem 7564 resize_command(&api_socket_restored, None, Some(5 << 30), None, None); 7565 thread::sleep(std::time::Duration::new(5, 0)); 7566 let total_memory = guest.get_total_memory().unwrap_or_default(); 7567 assert!(total_memory > 4_800_000); 7568 assert!(total_memory < 5_760_000); 7569 } 7570 7571 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7572 }); 7573 // Shutdown the target VM and check console output 7574 kill_child(&mut child); 7575 let output = child.wait_with_output().unwrap(); 7576 handle_child_output(r, &output); 7577 7578 let r = std::panic::catch_unwind(|| { 7579 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7580 }); 7581 7582 handle_child_output(r, &output); 7583 } 7584 7585 #[test] 7586 fn test_snapshot_restore_with_fd() { 7587 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7588 let guest = Guest::new(Box::new(focal)); 7589 let kernel_path = direct_kernel_boot_path(); 7590 7591 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7592 7593 let net_id = "net123"; 7594 let num_queue_pairs: usize = 2; 7595 // use a name that does not conflict with tap dev created from other tests 7596 let tap_name = "chtap999"; 7597 use std::str::FromStr; 7598 let taps = net_util::open_tap( 7599 Some(tap_name), 7600 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 7601 None, 7602 &mut None, 7603 None, 7604 num_queue_pairs, 7605 Some(libc::O_RDWR | libc::O_NONBLOCK), 7606 ) 7607 .unwrap(); 7608 let net_params = format!( 7609 "id={},fd=[{},{}],mac={},ip={},mask=255.255.255.0,num_queues={}", 7610 net_id, 7611 taps[0].as_raw_fd(), 7612 taps[1].as_raw_fd(), 7613 guest.network.guest_mac, 7614 guest.network.host_ip, 7615 num_queue_pairs * 2 7616 ); 7617 7618 let cloudinit_params = format!( 7619 "path={},iommu=on", 7620 guest.disk_config.disk(DiskType::CloudInit).unwrap() 7621 ); 7622 7623 let n_cpu = 2; 7624 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7625 7626 let mut child = GuestCommand::new(&guest) 7627 .args(["--api-socket", &api_socket_source]) 7628 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7629 .args(["--cpus", format!("boot={}", n_cpu).as_str()]) 7630 .args(["--memory", "size=1G"]) 7631 .args(["--kernel", kernel_path.to_str().unwrap()]) 7632 .args([ 7633 "--disk", 7634 format!( 7635 "path={}", 7636 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 7637 ) 7638 .as_str(), 7639 cloudinit_params.as_str(), 7640 ]) 7641 .args(["--net", net_params.as_str()]) 7642 
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7643 .capture_output() 7644 .spawn() 7645 .unwrap(); 7646 7647 let console_text = String::from("On a branch floating down river a cricket, singing."); 7648 // Create the snapshot directory 7649 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7650 7651 let r = std::panic::catch_unwind(|| { 7652 guest.wait_vm_boot(None).unwrap(); 7653 7654 // close the fds after VM boots, as CH duplicates them before using 7655 for tap in taps.iter() { 7656 unsafe { libc::close(tap.as_raw_fd()) }; 7657 } 7658 7659 // Check the number of vCPUs 7660 assert_eq!(guest.get_cpu_count().unwrap_or_default(), n_cpu); 7661 // Check the guest RAM 7662 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 7663 7664 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 7665 guest.check_devices_common(None, Some(&console_text), None); 7666 7667 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7668 }); 7669 7670 // Shutdown the source VM and check console output 7671 kill_child(&mut child); 7672 let output = child.wait_with_output().unwrap(); 7673 handle_child_output(r, &output); 7674 7675 let r = std::panic::catch_unwind(|| { 7676 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7677 }); 7678 7679 handle_child_output(r, &output); 7680 7681 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7682 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7683 7684 // Restore the VM from the snapshot 7685 let mut child = GuestCommand::new(&guest) 7686 .args(["--api-socket", &api_socket_restored]) 7687 .args([ 7688 "--event-monitor", 7689 format!("path={event_path_restored}").as_str(), 7690 ]) 7691 .capture_output() 7692 .spawn() 7693 .unwrap(); 7694 thread::sleep(std::time::Duration::new(2, 0)); 7695 7696 let taps = net_util::open_tap( 7697 Some(tap_name), 7698 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 7699 None, 7700 &mut None, 7701 None, 7702 num_queue_pairs, 7703 Some(libc::O_RDWR | libc::O_NONBLOCK), 7704 ) 7705 .unwrap(); 7706 let restore_params = format!( 7707 "source_url=file://{},net_fds=[{}@[{},{}]]", 7708 snapshot_dir, 7709 net_id, 7710 taps[0].as_raw_fd(), 7711 taps[1].as_raw_fd() 7712 ); 7713 assert!(remote_command( 7714 &api_socket_restored, 7715 "restore", 7716 Some(restore_params.as_str()) 7717 )); 7718 7719 // Wait for the VM to be restored 7720 thread::sleep(std::time::Duration::new(20, 0)); 7721 7722 // close the fds as CH duplicates them before using 7723 for tap in taps.iter() { 7724 unsafe { libc::close(tap.as_raw_fd()) }; 7725 } 7726 7727 let expected_events = [ 7728 &MetaEvent { 7729 event: "starting".to_string(), 7730 device_id: None, 7731 }, 7732 &MetaEvent { 7733 event: "activated".to_string(), 7734 device_id: Some("__console".to_string()), 7735 }, 7736 &MetaEvent { 7737 event: "activated".to_string(), 7738 device_id: Some("__rng".to_string()), 7739 }, 7740 &MetaEvent { 7741 event: "restoring".to_string(), 7742 device_id: None, 7743 }, 7744 ]; 7745 assert!(check_sequential_events( 7746 &expected_events, 7747 &event_path_restored 7748 )); 7749 let latest_events = [&MetaEvent { 7750 event: "restored".to_string(), 7751 device_id: None, 7752 }]; 7753 assert!(check_latest_events_exact( 7754 &latest_events, 7755 &event_path_restored 7756 )); 7757 7758 // Remove the snapshot dir 7759 let _ = remove_dir_all(snapshot_dir.as_str()); 7760 7761 let r = std::panic::catch_unwind(|| { 7762 // Resume 
the VM 7763 assert!(remote_command(&api_socket_restored, "resume", None)); 7764 // There is no way that we can ensure the 'write()' to the 7765 // event file is completed when the 'resume' request is 7766 // returned successfully, because the 'write()' was done 7767 // asynchronously from a different thread of Cloud 7768 // Hypervisor (e.g. the event-monitor thread). 7769 thread::sleep(std::time::Duration::new(1, 0)); 7770 let latest_events = [ 7771 &MetaEvent { 7772 event: "resuming".to_string(), 7773 device_id: None, 7774 }, 7775 &MetaEvent { 7776 event: "resumed".to_string(), 7777 device_id: None, 7778 }, 7779 ]; 7780 assert!(check_latest_events_exact( 7781 &latest_events, 7782 &event_path_restored 7783 )); 7784 7785 // Perform same checks to validate VM has been properly restored 7786 assert_eq!(guest.get_cpu_count().unwrap_or_default(), n_cpu); 7787 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 7788 7789 guest.check_devices_common(None, Some(&console_text), None); 7790 }); 7791 // Shutdown the target VM and check console output 7792 kill_child(&mut child); 7793 let output = child.wait_with_output().unwrap(); 7794 handle_child_output(r, &output); 7795 7796 let r = std::panic::catch_unwind(|| { 7797 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7798 }); 7799 7800 handle_child_output(r, &output); 7801 } 7802 7803 #[test] 7804 #[cfg(target_arch = "x86_64")] 7805 fn test_snapshot_restore_pvpanic() { 7806 _test_snapshot_restore_devices(true); 7807 } 7808 7809 fn _test_snapshot_restore_devices(pvpanic: bool) { 7810 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7811 let guest = Guest::new(Box::new(focal)); 7812 let kernel_path = direct_kernel_boot_path(); 7813 7814 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7815 7816 let device_params = { 7817 let mut data = vec![]; 7818 if pvpanic { 7819 data.push("--pvpanic"); 7820 } 7821 data 7822 }; 7823 7824 let socket = temp_vsock_path(&guest.tmp_dir); 7825 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7826 7827 let mut child = GuestCommand::new(&guest) 7828 .args(["--api-socket", &api_socket_source]) 7829 .args(["--event-monitor", format!("path={}", event_path).as_str()]) 7830 .args(["--cpus", "boot=2"]) 7831 .args(["--memory", "size=1G"]) 7832 .args(["--kernel", kernel_path.to_str().unwrap()]) 7833 .default_disks() 7834 .default_net() 7835 .args(["--vsock", format!("cid=3,socket={}", socket).as_str()]) 7836 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7837 .args(device_params) 7838 .capture_output() 7839 .spawn() 7840 .unwrap(); 7841 7842 let console_text = String::from("On a branch floating down river a cricket, singing."); 7843 // Create the snapshot directory 7844 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7845 7846 let r = std::panic::catch_unwind(|| { 7847 guest.wait_vm_boot(None).unwrap(); 7848 7849 // Check the number of vCPUs 7850 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 7851 7852 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7853 }); 7854 7855 // Shutdown the source VM and check console output 7856 kill_child(&mut child); 7857 let output = child.wait_with_output().unwrap(); 7858 handle_child_output(r, &output); 7859 7860 // Remove the vsock socket file. 
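// Shelling out to `rm -f` below does the job; the same clean-up could also be
// done in-process. A minimal hypothetical alternative (the helper name is made
// up and it is not what this test currently uses):
fn _sketch_remove_stale_socket(path: &str) {
    // Ignore the error when the socket never existed, mirroring `rm -f`.
    let _ = std::fs::remove_file(path);
}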
7861 Command::new("rm") 7862 .arg("-f") 7863 .arg(socket.as_str()) 7864 .output() 7865 .unwrap(); 7866 7867 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7868 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7869 7870 // Restore the VM from the snapshot 7871 let mut child = GuestCommand::new(&guest) 7872 .args(["--api-socket", &api_socket_restored]) 7873 .args([ 7874 "--event-monitor", 7875 format!("path={event_path_restored}").as_str(), 7876 ]) 7877 .args([ 7878 "--restore", 7879 format!("source_url=file://{snapshot_dir}").as_str(), 7880 ]) 7881 .capture_output() 7882 .spawn() 7883 .unwrap(); 7884 7885 // Wait for the VM to be restored 7886 thread::sleep(std::time::Duration::new(20, 0)); 7887 7888 let latest_events = [&MetaEvent { 7889 event: "restored".to_string(), 7890 device_id: None, 7891 }]; 7892 assert!(check_latest_events_exact( 7893 &latest_events, 7894 &event_path_restored 7895 )); 7896 7897 // Remove the snapshot dir 7898 let _ = remove_dir_all(snapshot_dir.as_str()); 7899 7900 let r = std::panic::catch_unwind(|| { 7901 // Resume the VM 7902 assert!(remote_command(&api_socket_restored, "resume", None)); 7903 // There is no way that we can ensure the 'write()' to the 7904 // event file is completed when the 'resume' request is 7905 // returned successfully, because the 'write()' was done 7906 // asynchronously from a different thread of Cloud 7907 // Hypervisor (e.g. the event-monitor thread). 7908 thread::sleep(std::time::Duration::new(1, 0)); 7909 let latest_events = [ 7910 &MetaEvent { 7911 event: "resuming".to_string(), 7912 device_id: None, 7913 }, 7914 &MetaEvent { 7915 event: "resumed".to_string(), 7916 device_id: None, 7917 }, 7918 ]; 7919 assert!(check_latest_events_exact( 7920 &latest_events, 7921 &event_path_restored 7922 )); 7923 7924 // Check the number of vCPUs 7925 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 7926 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7927 7928 if pvpanic { 7929 // Trigger guest a panic 7930 make_guest_panic(&guest); 7931 // Wait a while for guest 7932 thread::sleep(std::time::Duration::new(10, 0)); 7933 7934 let expected_sequential_events = [&MetaEvent { 7935 event: "panic".to_string(), 7936 device_id: None, 7937 }]; 7938 assert!(check_latest_events_exact( 7939 &expected_sequential_events, 7940 &event_path_restored 7941 )); 7942 } 7943 }); 7944 // Shutdown the target VM and check console output 7945 kill_child(&mut child); 7946 let output = child.wait_with_output().unwrap(); 7947 handle_child_output(r, &output); 7948 7949 let r = std::panic::catch_unwind(|| { 7950 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7951 }); 7952 7953 handle_child_output(r, &output); 7954 } 7955 } 7956 7957 mod windows { 7958 use crate::*; 7959 use once_cell::sync::Lazy; 7960 7961 static NEXT_DISK_ID: Lazy<Mutex<u8>> = Lazy::new(|| Mutex::new(1)); 7962 7963 struct WindowsGuest { 7964 guest: Guest, 7965 auth: PasswordAuth, 7966 } 7967 7968 trait FsType { 7969 const FS_FAT: u8; 7970 const FS_NTFS: u8; 7971 } 7972 impl FsType for WindowsGuest { 7973 const FS_FAT: u8 = 0; 7974 const FS_NTFS: u8 = 1; 7975 } 7976 7977 impl WindowsGuest { 7978 fn new() -> Self { 7979 let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string()); 7980 let guest = Guest::new(Box::new(disk)); 7981 let auth = PasswordAuth { 7982 username: String::from("administrator"), 7983 password: String::from("Admin123"), 7984 }; 7985 7986 WindowsGuest { guest, auth } 7987 
} 7988 7989 fn guest(&self) -> &Guest { 7990 &self.guest 7991 } 7992 7993 fn ssh_cmd(&self, cmd: &str) -> String { 7994 ssh_command_ip_with_auth( 7995 cmd, 7996 &self.auth, 7997 &self.guest.network.guest_ip, 7998 DEFAULT_SSH_RETRIES, 7999 DEFAULT_SSH_TIMEOUT, 8000 ) 8001 .unwrap() 8002 } 8003 8004 fn cpu_count(&self) -> u8 { 8005 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"") 8006 .trim() 8007 .parse::<u8>() 8008 .unwrap_or(0) 8009 } 8010 8011 fn ram_size(&self) -> usize { 8012 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"") 8013 .trim() 8014 .parse::<usize>() 8015 .unwrap_or(0) 8016 } 8017 8018 fn netdev_count(&self) -> u8 { 8019 self.ssh_cmd("powershell -Command \"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"") 8020 .trim() 8021 .parse::<u8>() 8022 .unwrap_or(0) 8023 } 8024 8025 fn disk_count(&self) -> u8 { 8026 self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"") 8027 .trim() 8028 .parse::<u8>() 8029 .unwrap_or(0) 8030 } 8031 8032 fn reboot(&self) { 8033 let _ = self.ssh_cmd("shutdown /r /t 0"); 8034 } 8035 8036 fn shutdown(&self) { 8037 let _ = self.ssh_cmd("shutdown /s /t 0"); 8038 } 8039 8040 fn run_dnsmasq(&self) -> std::process::Child { 8041 let listen_address = format!("--listen-address={}", self.guest.network.host_ip); 8042 let dhcp_host = format!( 8043 "--dhcp-host={},{}", 8044 self.guest.network.guest_mac, self.guest.network.guest_ip 8045 ); 8046 let dhcp_range = format!( 8047 "--dhcp-range=eth,{},{}", 8048 self.guest.network.guest_ip, self.guest.network.guest_ip 8049 ); 8050 8051 Command::new("dnsmasq") 8052 .arg("--no-daemon") 8053 .arg("--log-queries") 8054 .arg(listen_address.as_str()) 8055 .arg("--except-interface=lo") 8056 .arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet. 8057 .arg("--conf-file=/dev/null") 8058 .arg(dhcp_host.as_str()) 8059 .arg(dhcp_range.as_str()) 8060 .spawn() 8061 .unwrap() 8062 } 8063 8064 // TODO Cleanup image file explicitly after test, if there's some space issues. 
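// A hypothetical stand-alone view of the dnsmasq invocation built by
// `run_dnsmasq()` above: the `--dhcp-host` entry pins the guest MAC to the
// expected guest IP, and the `--dhcp-range` is collapsed to that single
// address so no other lease can ever be handed out. The helper below is a
// sketch for illustration only and is not called by the tests:
fn _sketch_dnsmasq_args(host_ip: &str, guest_mac: &str, guest_ip: &str) -> Vec<String> {
    vec![
        "--no-daemon".to_string(),
        format!("--listen-address={host_ip}"),
        // Allow listening on host_ip before the tap interface is fully up.
        "--bind-dynamic".to_string(),
        "--except-interface=lo".to_string(),
        "--conf-file=/dev/null".to_string(),
        // One fixed lease: this MAC always receives this IP.
        format!("--dhcp-host={guest_mac},{guest_ip}"),
        // A range of exactly one address, so only the guest can get a lease.
        format!("--dhcp-range=eth,{guest_ip},{guest_ip}"),
    ]
}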
8065 fn disk_new(&self, fs: u8, sz: usize) -> String { 8066 let mut guard = NEXT_DISK_ID.lock().unwrap(); 8067 let id = *guard; 8068 *guard = id + 1; 8069 8070 let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw")); 8071 let _ = fs::remove_file(&img); 8072 8073 // Create an image file 8074 let out = Command::new("qemu-img") 8075 .args([ 8076 "create", 8077 "-f", 8078 "raw", 8079 img.to_str().unwrap(), 8080 format!("{sz}m").as_str(), 8081 ]) 8082 .output() 8083 .expect("qemu-img command failed") 8084 .stdout; 8085 println!("{out:?}"); 8086 8087 // Associate image to a loop device 8088 let out = Command::new("losetup") 8089 .args(["--show", "-f", img.to_str().unwrap()]) 8090 .output() 8091 .expect("failed to create loop device") 8092 .stdout; 8093 let _tmp = String::from_utf8_lossy(&out); 8094 let loop_dev = _tmp.trim(); 8095 println!("{out:?}"); 8096 8097 // Create a partition table 8098 // echo 'type=7' | sudo sfdisk "${LOOP}" 8099 let mut child = Command::new("sfdisk") 8100 .args([loop_dev]) 8101 .stdin(Stdio::piped()) 8102 .spawn() 8103 .unwrap(); 8104 let stdin = child.stdin.as_mut().expect("failed to open stdin"); 8105 stdin 8106 .write_all("type=7".as_bytes()) 8107 .expect("failed to write stdin"); 8108 let out = child.wait_with_output().expect("sfdisk failed").stdout; 8109 println!("{out:?}"); 8110 8111 // Disengage the loop device 8112 let out = Command::new("losetup") 8113 .args(["-d", loop_dev]) 8114 .output() 8115 .expect("loop device not found") 8116 .stdout; 8117 println!("{out:?}"); 8118 8119 // Re-associate loop device pointing to the partition only 8120 let out = Command::new("losetup") 8121 .args([ 8122 "--show", 8123 "--offset", 8124 (512 * 2048).to_string().as_str(), 8125 "-f", 8126 img.to_str().unwrap(), 8127 ]) 8128 .output() 8129 .expect("failed to create loop device") 8130 .stdout; 8131 let _tmp = String::from_utf8_lossy(&out); 8132 let loop_dev = _tmp.trim(); 8133 println!("{out:?}"); 8134 8135 // Create filesystem. 8136 let fs_cmd = match fs { 8137 WindowsGuest::FS_FAT => "mkfs.msdos", 8138 WindowsGuest::FS_NTFS => "mkfs.ntfs", 8139 _ => panic!("Unknown filesystem type '{fs}'"), 8140 }; 8141 let out = Command::new(fs_cmd) 8142 .args([&loop_dev]) 8143 .output() 8144 .unwrap_or_else(|_| panic!("{fs_cmd} failed")) 8145 .stdout; 8146 println!("{out:?}"); 8147 8148 // Disengage the loop device 8149 let out = Command::new("losetup") 8150 .args(["-d", loop_dev]) 8151 .output() 8152 .unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found")) 8153 .stdout; 8154 println!("{out:?}"); 8155 8156 img.to_str().unwrap().to_string() 8157 } 8158 8159 fn disks_set_rw(&self) { 8160 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\""); 8161 } 8162 8163 fn disks_online(&self) { 8164 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\""); 8165 } 8166 8167 fn disk_file_put(&self, fname: &str, data: &str) { 8168 let _ = self.ssh_cmd(&format!( 8169 "powershell -Command \"'{data}' | Set-Content -Path {fname}\"" 8170 )); 8171 } 8172 8173 fn disk_file_read(&self, fname: &str) -> String { 8174 self.ssh_cmd(&format!( 8175 "powershell -Command \"Get-Content -Path {fname}\"" 8176 )) 8177 } 8178 8179 fn wait_for_boot(&self) -> bool { 8180 let cmd = "dir /b c:\\ | find \"Windows\""; 8181 let tmo_max = 180; 8182 // The timeout increase by n*1+n*2+n*3+..., therefore the initial 8183 // interval must be small. 
8184 let tmo_int = 2; 8185 let out = ssh_command_ip_with_auth( 8186 cmd, 8187 &self.auth, 8188 &self.guest.network.guest_ip, 8189 { 8190 let mut ret = 1; 8191 let mut tmo_acc = 0; 8192 loop { 8193 tmo_acc += tmo_int * ret; 8194 if tmo_acc >= tmo_max { 8195 break; 8196 } 8197 ret += 1; 8198 } 8199 ret 8200 }, 8201 tmo_int, 8202 ) 8203 .unwrap(); 8204 8205 if "Windows" == out.trim() { 8206 return true; 8207 } 8208 8209 false 8210 } 8211 } 8212 8213 fn vcpu_threads_count(pid: u32) -> u8 { 8214 // ps -T -p 12345 | grep vcpu | wc -l 8215 let out = Command::new("ps") 8216 .args(["-T", "-p", format!("{pid}").as_str()]) 8217 .output() 8218 .expect("ps command failed") 8219 .stdout; 8220 return String::from_utf8_lossy(&out).matches("vcpu").count() as u8; 8221 } 8222 8223 fn netdev_ctrl_threads_count(pid: u32) -> u8 { 8224 // ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l 8225 let out = Command::new("ps") 8226 .args(["-T", "-p", format!("{pid}").as_str()]) 8227 .output() 8228 .expect("ps command failed") 8229 .stdout; 8230 let mut n = 0; 8231 String::from_utf8_lossy(&out) 8232 .split_whitespace() 8233 .for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl 8234 n 8235 } 8236 8237 fn disk_ctrl_threads_count(pid: u32) -> u8 { 8238 // ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l 8239 let out = Command::new("ps") 8240 .args(["-T", "-p", format!("{pid}").as_str()]) 8241 .output() 8242 .expect("ps command failed") 8243 .stdout; 8244 let mut n = 0; 8245 String::from_utf8_lossy(&out) 8246 .split_whitespace() 8247 .for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // _disk0_q0, don't care about multiple queues as they're related to the same hdd 8248 n 8249 } 8250 8251 #[test] 8252 fn test_windows_guest() { 8253 let windows_guest = WindowsGuest::new(); 8254 8255 let mut child = GuestCommand::new(windows_guest.guest()) 8256 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8257 .args(["--memory", "size=4G"]) 8258 .args(["--kernel", edk2_path().to_str().unwrap()]) 8259 .args(["--serial", "tty"]) 8260 .args(["--console", "off"]) 8261 .default_disks() 8262 .default_net() 8263 .capture_output() 8264 .spawn() 8265 .unwrap(); 8266 8267 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8268 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8269 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8270 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8271 8272 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8273 8274 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8275 8276 let r = std::panic::catch_unwind(|| { 8277 // Wait to make sure Windows boots up 8278 assert!(windows_guest.wait_for_boot()); 8279 8280 windows_guest.shutdown(); 8281 }); 8282 8283 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8284 let _ = child.kill(); 8285 let output = child.wait_with_output().unwrap(); 8286 8287 let _ = child_dnsmasq.kill(); 8288 let _ = child_dnsmasq.wait(); 8289 8290 handle_child_output(r, &output); 8291 } 8292 8293 #[test] 8294 fn test_windows_guest_multiple_queues() { 8295 let windows_guest = WindowsGuest::new(); 8296 8297 let mut ovmf_path = dirs::home_dir().unwrap(); 8298 ovmf_path.push("workloads"); 8299 ovmf_path.push(OVMF_NAME); 8300 8301 let mut child = GuestCommand::new(windows_guest.guest()) 8302 .args(["--cpus", "boot=4,kvm_hyperv=on"]) 8303 .args(["--memory", "size=4G"]) 8304 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8305 .args(["--serial", "tty"]) 8306 .args(["--console", 
"off"]) 8307 .args([ 8308 "--disk", 8309 format!( 8310 "path={},num_queues=4", 8311 windows_guest 8312 .guest() 8313 .disk_config 8314 .disk(DiskType::OperatingSystem) 8315 .unwrap() 8316 ) 8317 .as_str(), 8318 ]) 8319 .args([ 8320 "--net", 8321 format!( 8322 "tap=,mac={},ip={},mask=255.255.255.0,num_queues=8", 8323 windows_guest.guest().network.guest_mac, 8324 windows_guest.guest().network.host_ip 8325 ) 8326 .as_str(), 8327 ]) 8328 .capture_output() 8329 .spawn() 8330 .unwrap(); 8331 8332 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8333 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8334 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8335 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8336 8337 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8338 8339 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8340 8341 let r = std::panic::catch_unwind(|| { 8342 // Wait to make sure Windows boots up 8343 assert!(windows_guest.wait_for_boot()); 8344 8345 windows_guest.shutdown(); 8346 }); 8347 8348 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8349 let _ = child.kill(); 8350 let output = child.wait_with_output().unwrap(); 8351 8352 let _ = child_dnsmasq.kill(); 8353 let _ = child_dnsmasq.wait(); 8354 8355 handle_child_output(r, &output); 8356 } 8357 8358 #[test] 8359 #[cfg(not(feature = "mshv"))] 8360 #[ignore = "See #4327"] 8361 fn test_windows_guest_snapshot_restore() { 8362 let windows_guest = WindowsGuest::new(); 8363 8364 let mut ovmf_path = dirs::home_dir().unwrap(); 8365 ovmf_path.push("workloads"); 8366 ovmf_path.push(OVMF_NAME); 8367 8368 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8369 let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir)); 8370 8371 let mut child = GuestCommand::new(windows_guest.guest()) 8372 .args(["--api-socket", &api_socket_source]) 8373 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8374 .args(["--memory", "size=4G"]) 8375 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8376 .args(["--serial", "tty"]) 8377 .args(["--console", "off"]) 8378 .default_disks() 8379 .default_net() 8380 .capture_output() 8381 .spawn() 8382 .unwrap(); 8383 8384 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8385 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8386 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8387 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8388 8389 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8390 8391 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8392 8393 // Wait to make sure Windows boots up 8394 assert!(windows_guest.wait_for_boot()); 8395 8396 let snapshot_dir = temp_snapshot_dir_path(&tmp_dir); 8397 8398 // Pause the VM 8399 assert!(remote_command(&api_socket_source, "pause", None)); 8400 8401 // Take a snapshot from the VM 8402 assert!(remote_command( 8403 &api_socket_source, 8404 "snapshot", 8405 Some(format!("file://{snapshot_dir}").as_str()), 8406 )); 8407 8408 // Wait to make sure the snapshot is completed 8409 thread::sleep(std::time::Duration::new(30, 0)); 8410 8411 let _ = child.kill(); 8412 child.wait().unwrap(); 8413 8414 let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir)); 8415 8416 // Restore the VM from the snapshot 8417 let mut child = GuestCommand::new(windows_guest.guest()) 8418 .args(["--api-socket", &api_socket_restored]) 8419 .args([ 8420 "--restore", 8421 format!("source_url=file://{snapshot_dir}").as_str(), 8422 ]) 8423 
.capture_output() 8424 .spawn() 8425 .unwrap(); 8426 8427 // Wait for the VM to be restored 8428 thread::sleep(std::time::Duration::new(20, 0)); 8429 8430 let r = std::panic::catch_unwind(|| { 8431 // Resume the VM 8432 assert!(remote_command(&api_socket_restored, "resume", None)); 8433 8434 windows_guest.shutdown(); 8435 }); 8436 8437 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8438 let _ = child.kill(); 8439 let output = child.wait_with_output().unwrap(); 8440 8441 let _ = child_dnsmasq.kill(); 8442 let _ = child_dnsmasq.wait(); 8443 8444 handle_child_output(r, &output); 8445 } 8446 8447 #[test] 8448 #[cfg(not(feature = "mshv"))] 8449 #[cfg(not(target_arch = "aarch64"))] 8450 fn test_windows_guest_cpu_hotplug() { 8451 let windows_guest = WindowsGuest::new(); 8452 8453 let mut ovmf_path = dirs::home_dir().unwrap(); 8454 ovmf_path.push("workloads"); 8455 ovmf_path.push(OVMF_NAME); 8456 8457 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8458 let api_socket = temp_api_path(&tmp_dir); 8459 8460 let mut child = GuestCommand::new(windows_guest.guest()) 8461 .args(["--api-socket", &api_socket]) 8462 .args(["--cpus", "boot=2,max=8,kvm_hyperv=on"]) 8463 .args(["--memory", "size=4G"]) 8464 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8465 .args(["--serial", "tty"]) 8466 .args(["--console", "off"]) 8467 .default_disks() 8468 .default_net() 8469 .capture_output() 8470 .spawn() 8471 .unwrap(); 8472 8473 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8474 8475 let r = std::panic::catch_unwind(|| { 8476 // Wait to make sure Windows boots up 8477 assert!(windows_guest.wait_for_boot()); 8478 8479 let vcpu_num = 2; 8480 // Check the initial number of CPUs the guest sees 8481 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8482 // Check the initial number of vcpu threads in the CH process 8483 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8484 8485 let vcpu_num = 6; 8486 // Hotplug some CPUs 8487 resize_command(&api_socket, Some(vcpu_num), None, None, None); 8488 // Wait to make sure CPUs are added 8489 thread::sleep(std::time::Duration::new(10, 0)); 8490 // Check the guest sees the correct number 8491 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8492 // Check the CH process has the correct number of vcpu threads 8493 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8494 8495 let vcpu_num = 4; 8496 // Remove some CPUs. Note that Windows doesn't support hot-remove. 
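// The `resize_command()` calls in this test wrap the vCPU resize request
// against the API socket. Issued by hand it would look roughly like the
// sketch below via ch-remote (the exact `resize --cpus` flag spelling is an
// assumption here, and the helper itself is hypothetical, not used by the
// test):
fn _sketch_resize_vcpus(api_socket: &str, desired_vcpus: u8) -> bool {
    Command::new(clh_command("ch-remote"))
        .args([
            format!("--api-socket={api_socket}"),
            "resize".to_string(),
            format!("--cpus={desired_vcpus}"),
        ])
        .status()
        .map(|status| status.success())
        .unwrap_or(false)
}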
8497 resize_command(&api_socket, Some(vcpu_num), None, None, None); 8498 // Wait to make sure CPUs are removed 8499 thread::sleep(std::time::Duration::new(10, 0)); 8500 // Reboot to let Windows catch up 8501 windows_guest.reboot(); 8502 // Wait to make sure Windows completely rebooted 8503 thread::sleep(std::time::Duration::new(60, 0)); 8504 // Check the guest sees the correct number 8505 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8506 // Check the CH process has the correct number of vcpu threads 8507 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8508 8509 windows_guest.shutdown(); 8510 }); 8511 8512 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8513 let _ = child.kill(); 8514 let output = child.wait_with_output().unwrap(); 8515 8516 let _ = child_dnsmasq.kill(); 8517 let _ = child_dnsmasq.wait(); 8518 8519 handle_child_output(r, &output); 8520 } 8521 8522 #[test] 8523 #[cfg(not(feature = "mshv"))] 8524 #[cfg(not(target_arch = "aarch64"))] 8525 fn test_windows_guest_ram_hotplug() { 8526 let windows_guest = WindowsGuest::new(); 8527 8528 let mut ovmf_path = dirs::home_dir().unwrap(); 8529 ovmf_path.push("workloads"); 8530 ovmf_path.push(OVMF_NAME); 8531 8532 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8533 let api_socket = temp_api_path(&tmp_dir); 8534 8535 let mut child = GuestCommand::new(windows_guest.guest()) 8536 .args(["--api-socket", &api_socket]) 8537 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8538 .args(["--memory", "size=2G,hotplug_size=5G"]) 8539 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8540 .args(["--serial", "tty"]) 8541 .args(["--console", "off"]) 8542 .default_disks() 8543 .default_net() 8544 .capture_output() 8545 .spawn() 8546 .unwrap(); 8547 8548 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8549 8550 let r = std::panic::catch_unwind(|| { 8551 // Wait to make sure Windows boots up 8552 assert!(windows_guest.wait_for_boot()); 8553 8554 let ram_size = 2 * 1024 * 1024 * 1024; 8555 // Check the initial number of RAM the guest sees 8556 let current_ram_size = windows_guest.ram_size(); 8557 // This size seems to be reserved by the system and thus the 8558 // reported amount differs by this constant value. 8559 let reserved_ram_size = ram_size - current_ram_size; 8560 // Verify that there's not more than 4mb constant diff wasted 8561 // by the reserved ram. 8562 assert!(reserved_ram_size < 4 * 1024 * 1024); 8563 8564 let ram_size = 4 * 1024 * 1024 * 1024; 8565 // Hotplug some RAM 8566 resize_command(&api_socket, None, Some(ram_size), None, None); 8567 // Wait to make sure RAM has been added 8568 thread::sleep(std::time::Duration::new(10, 0)); 8569 // Check the guest sees the correct number 8570 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 8571 8572 let ram_size = 3 * 1024 * 1024 * 1024; 8573 // Unplug some RAM. Note that hot-remove most likely won't work. 
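// The RAM assertions in this closure rely on Windows reporting
// TotalPhysicalMemory as the configured size minus a small, constant
// firmware/ACPI reservation that is measured once at boot. A hypothetical
// helper spelling out that bookkeeping (names and numbers are illustrative
// only, this is not an existing function):
fn _sketch_expected_reported_ram(
    boot_configured: usize,
    boot_reported: usize,
    resized: usize,
) -> usize {
    // The reservation observed at boot (e.g. 2 GiB configured vs. a report
    // that is a few MiB short) ...
    let reserved = boot_configured - boot_reported;
    // ... is assumed to stay constant, so after a resize the guest is
    // expected to report:
    resized - reserved
}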
8574 resize_command(&api_socket, None, Some(ram_size), None, None); 8575 // Wait to make sure RAM has been added 8576 thread::sleep(std::time::Duration::new(10, 0)); 8577 // Reboot to let Windows catch up 8578 windows_guest.reboot(); 8579 // Wait to make sure guest completely rebooted 8580 thread::sleep(std::time::Duration::new(60, 0)); 8581 // Check the guest sees the correct number 8582 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 8583 8584 windows_guest.shutdown(); 8585 }); 8586 8587 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8588 let _ = child.kill(); 8589 let output = child.wait_with_output().unwrap(); 8590 8591 let _ = child_dnsmasq.kill(); 8592 let _ = child_dnsmasq.wait(); 8593 8594 handle_child_output(r, &output); 8595 } 8596 8597 #[test] 8598 #[cfg(not(feature = "mshv"))] 8599 fn test_windows_guest_netdev_hotplug() { 8600 let windows_guest = WindowsGuest::new(); 8601 8602 let mut ovmf_path = dirs::home_dir().unwrap(); 8603 ovmf_path.push("workloads"); 8604 ovmf_path.push(OVMF_NAME); 8605 8606 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8607 let api_socket = temp_api_path(&tmp_dir); 8608 8609 let mut child = GuestCommand::new(windows_guest.guest()) 8610 .args(["--api-socket", &api_socket]) 8611 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8612 .args(["--memory", "size=4G"]) 8613 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8614 .args(["--serial", "tty"]) 8615 .args(["--console", "off"]) 8616 .default_disks() 8617 .default_net() 8618 .capture_output() 8619 .spawn() 8620 .unwrap(); 8621 8622 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8623 8624 let r = std::panic::catch_unwind(|| { 8625 // Wait to make sure Windows boots up 8626 assert!(windows_guest.wait_for_boot()); 8627 8628 // Initially present network device 8629 let netdev_num = 1; 8630 assert_eq!(windows_guest.netdev_count(), netdev_num); 8631 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8632 8633 // Hotplug network device 8634 let (cmd_success, cmd_output) = remote_command_w_output( 8635 &api_socket, 8636 "add-net", 8637 Some(windows_guest.guest().default_net_string().as_str()), 8638 ); 8639 assert!(cmd_success); 8640 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\"")); 8641 thread::sleep(std::time::Duration::new(5, 0)); 8642 // Verify the device is on the system 8643 let netdev_num = 2; 8644 assert_eq!(windows_guest.netdev_count(), netdev_num); 8645 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8646 8647 // Remove network device 8648 let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2")); 8649 assert!(cmd_success); 8650 thread::sleep(std::time::Duration::new(5, 0)); 8651 // Verify the device has been removed 8652 let netdev_num = 1; 8653 assert_eq!(windows_guest.netdev_count(), netdev_num); 8654 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8655 8656 windows_guest.shutdown(); 8657 }); 8658 8659 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8660 let _ = child.kill(); 8661 let output = child.wait_with_output().unwrap(); 8662 8663 let _ = child_dnsmasq.kill(); 8664 let _ = child_dnsmasq.wait(); 8665 8666 handle_child_output(r, &output); 8667 } 8668 8669 #[test] 8670 #[ignore = "See #6037"] 8671 #[cfg(not(feature = "mshv"))] 8672 #[cfg(not(target_arch = "aarch64"))] 8673 fn test_windows_guest_disk_hotplug() { 8674 let windows_guest = WindowsGuest::new(); 8675 8676 let mut ovmf_path = dirs::home_dir().unwrap(); 8677 ovmf_path.push("workloads"); 
8678 ovmf_path.push(OVMF_NAME); 8679 8680 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8681 let api_socket = temp_api_path(&tmp_dir); 8682 8683 let mut child = GuestCommand::new(windows_guest.guest()) 8684 .args(["--api-socket", &api_socket]) 8685 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8686 .args(["--memory", "size=4G"]) 8687 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8688 .args(["--serial", "tty"]) 8689 .args(["--console", "off"]) 8690 .default_disks() 8691 .default_net() 8692 .capture_output() 8693 .spawn() 8694 .unwrap(); 8695 8696 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8697 8698 let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100); 8699 8700 let r = std::panic::catch_unwind(|| { 8701 // Wait to make sure Windows boots up 8702 assert!(windows_guest.wait_for_boot()); 8703 8704 // Initially present disk device 8705 let disk_num = 1; 8706 assert_eq!(windows_guest.disk_count(), disk_num); 8707 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8708 8709 // Hotplug disk device 8710 let (cmd_success, cmd_output) = remote_command_w_output( 8711 &api_socket, 8712 "add-disk", 8713 Some(format!("path={disk},readonly=off").as_str()), 8714 ); 8715 assert!(cmd_success); 8716 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\"")); 8717 thread::sleep(std::time::Duration::new(5, 0)); 8718 // Online disk device 8719 windows_guest.disks_set_rw(); 8720 windows_guest.disks_online(); 8721 // Verify the device is on the system 8722 let disk_num = 2; 8723 assert_eq!(windows_guest.disk_count(), disk_num); 8724 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8725 8726 let data = "hello"; 8727 let fname = "d:\\world"; 8728 windows_guest.disk_file_put(fname, data); 8729 8730 // Unmount disk device 8731 let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2")); 8732 assert!(cmd_success); 8733 thread::sleep(std::time::Duration::new(5, 0)); 8734 // Verify the device has been removed 8735 let disk_num = 1; 8736 assert_eq!(windows_guest.disk_count(), disk_num); 8737 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8738 8739 // Remount and check the file exists with the expected contents 8740 let (cmd_success, _cmd_output) = remote_command_w_output( 8741 &api_socket, 8742 "add-disk", 8743 Some(format!("path={disk},readonly=off").as_str()), 8744 ); 8745 assert!(cmd_success); 8746 thread::sleep(std::time::Duration::new(5, 0)); 8747 let out = windows_guest.disk_file_read(fname); 8748 assert_eq!(data, out.trim()); 8749 8750 // Intentionally no unmount, it'll happen at shutdown. 
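// The add-disk/add-net replies checked in this test are small JSON fragments
// such as {"id":"_disk2",...}; the assertions simply substring-match them. A
// hypothetical helper that extracts the assigned id without pulling in a JSON
// parser (sketch only, not an existing function):
fn _sketch_device_id_from_reply(reply: &str) -> Option<String> {
    let key = "\"id\":\"";
    let start = reply.find(key)? + key.len();
    let len = reply[start..].find('"')?;
    Some(reply[start..start + len].to_string())
}
// e.g. _sketch_device_id_from_reply("{\"id\":\"_disk2\"}") yields Some("_disk2").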
8751 8752 windows_guest.shutdown(); 8753 }); 8754 8755 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8756 let _ = child.kill(); 8757 let output = child.wait_with_output().unwrap(); 8758 8759 let _ = child_dnsmasq.kill(); 8760 let _ = child_dnsmasq.wait(); 8761 8762 handle_child_output(r, &output); 8763 } 8764 8765 #[test] 8766 #[ignore = "See #6037"] 8767 #[cfg(not(feature = "mshv"))] 8768 #[cfg(not(target_arch = "aarch64"))] 8769 fn test_windows_guest_disk_hotplug_multi() { 8770 let windows_guest = WindowsGuest::new(); 8771 8772 let mut ovmf_path = dirs::home_dir().unwrap(); 8773 ovmf_path.push("workloads"); 8774 ovmf_path.push(OVMF_NAME); 8775 8776 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8777 let api_socket = temp_api_path(&tmp_dir); 8778 8779 let mut child = GuestCommand::new(windows_guest.guest()) 8780 .args(["--api-socket", &api_socket]) 8781 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8782 .args(["--memory", "size=2G"]) 8783 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8784 .args(["--serial", "tty"]) 8785 .args(["--console", "off"]) 8786 .default_disks() 8787 .default_net() 8788 .capture_output() 8789 .spawn() 8790 .unwrap(); 8791 8792 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8793 8794 // Predefined data to used at various test stages 8795 let disk_test_data: [[String; 4]; 2] = [ 8796 [ 8797 "_disk2".to_string(), 8798 windows_guest.disk_new(WindowsGuest::FS_FAT, 123), 8799 "d:\\world".to_string(), 8800 "hello".to_string(), 8801 ], 8802 [ 8803 "_disk3".to_string(), 8804 windows_guest.disk_new(WindowsGuest::FS_NTFS, 333), 8805 "e:\\hello".to_string(), 8806 "world".to_string(), 8807 ], 8808 ]; 8809 8810 let r = std::panic::catch_unwind(|| { 8811 // Wait to make sure Windows boots up 8812 assert!(windows_guest.wait_for_boot()); 8813 8814 // Initially present disk device 8815 let disk_num = 1; 8816 assert_eq!(windows_guest.disk_count(), disk_num); 8817 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8818 8819 for it in &disk_test_data { 8820 let disk_id = it[0].as_str(); 8821 let disk = it[1].as_str(); 8822 // Hotplug disk device 8823 let (cmd_success, cmd_output) = remote_command_w_output( 8824 &api_socket, 8825 "add-disk", 8826 Some(format!("path={disk},readonly=off").as_str()), 8827 ); 8828 assert!(cmd_success); 8829 assert!(String::from_utf8_lossy(&cmd_output) 8830 .contains(format!("\"id\":\"{disk_id}\"").as_str())); 8831 thread::sleep(std::time::Duration::new(5, 0)); 8832 // Online disk devices 8833 windows_guest.disks_set_rw(); 8834 windows_guest.disks_online(); 8835 } 8836 // Verify the devices are on the system 8837 let disk_num = (disk_test_data.len() + 1) as u8; 8838 assert_eq!(windows_guest.disk_count(), disk_num); 8839 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8840 8841 // Put test data 8842 for it in &disk_test_data { 8843 let fname = it[2].as_str(); 8844 let data = it[3].as_str(); 8845 windows_guest.disk_file_put(fname, data); 8846 } 8847 8848 // Unmount disk devices 8849 for it in &disk_test_data { 8850 let disk_id = it[0].as_str(); 8851 let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id)); 8852 assert!(cmd_success); 8853 thread::sleep(std::time::Duration::new(5, 0)); 8854 } 8855 8856 // Verify the devices have been removed 8857 let disk_num = 1; 8858 assert_eq!(windows_guest.disk_count(), disk_num); 8859 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8860 8861 // Remount 8862 for it in &disk_test_data { 8863 let disk = it[1].as_str(); 8864 let (cmd_success, 
_cmd_output) = remote_command_w_output( 8865 &api_socket, 8866 "add-disk", 8867 Some(format!("path={disk},readonly=off").as_str()), 8868 ); 8869 assert!(cmd_success); 8870 thread::sleep(std::time::Duration::new(5, 0)); 8871 } 8872 8873 // Check the files exists with the expected contents 8874 for it in &disk_test_data { 8875 let fname = it[2].as_str(); 8876 let data = it[3].as_str(); 8877 let out = windows_guest.disk_file_read(fname); 8878 assert_eq!(data, out.trim()); 8879 } 8880 8881 // Intentionally no unmount, it'll happen at shutdown. 8882 8883 windows_guest.shutdown(); 8884 }); 8885 8886 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8887 let _ = child.kill(); 8888 let output = child.wait_with_output().unwrap(); 8889 8890 let _ = child_dnsmasq.kill(); 8891 let _ = child_dnsmasq.wait(); 8892 8893 handle_child_output(r, &output); 8894 } 8895 8896 #[test] 8897 #[cfg(not(feature = "mshv"))] 8898 #[cfg(not(target_arch = "aarch64"))] 8899 fn test_windows_guest_netdev_multi() { 8900 let windows_guest = WindowsGuest::new(); 8901 8902 let mut ovmf_path = dirs::home_dir().unwrap(); 8903 ovmf_path.push("workloads"); 8904 ovmf_path.push(OVMF_NAME); 8905 8906 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8907 let api_socket = temp_api_path(&tmp_dir); 8908 8909 let mut child = GuestCommand::new(windows_guest.guest()) 8910 .args(["--api-socket", &api_socket]) 8911 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8912 .args(["--memory", "size=4G"]) 8913 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8914 .args(["--serial", "tty"]) 8915 .args(["--console", "off"]) 8916 .default_disks() 8917 // The multi net dev config is borrowed from test_multiple_network_interfaces 8918 .args([ 8919 "--net", 8920 windows_guest.guest().default_net_string().as_str(), 8921 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 8922 "tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 8923 ]) 8924 .capture_output() 8925 .spawn() 8926 .unwrap(); 8927 8928 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8929 8930 let r = std::panic::catch_unwind(|| { 8931 // Wait to make sure Windows boots up 8932 assert!(windows_guest.wait_for_boot()); 8933 8934 let netdev_num = 3; 8935 assert_eq!(windows_guest.netdev_count(), netdev_num); 8936 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8937 8938 let tap_count = exec_host_command_output("ip link | grep -c mytap42"); 8939 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 8940 8941 windows_guest.shutdown(); 8942 }); 8943 8944 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8945 let _ = child.kill(); 8946 let output = child.wait_with_output().unwrap(); 8947 8948 let _ = child_dnsmasq.kill(); 8949 let _ = child_dnsmasq.wait(); 8950 8951 handle_child_output(r, &output); 8952 } 8953 } 8954 8955 #[cfg(target_arch = "x86_64")] 8956 mod sgx { 8957 use crate::*; 8958 8959 #[test] 8960 fn test_sgx() { 8961 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 8962 let jammy = UbuntuDiskConfig::new(jammy_image); 8963 let guest = Guest::new(Box::new(jammy)); 8964 8965 let mut child = GuestCommand::new(&guest) 8966 .args(["--cpus", "boot=1"]) 8967 .args(["--memory", "size=512M"]) 8968 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8969 .default_disks() 8970 .default_net() 8971 .args(["--sgx-epc", "id=epc0,size=64M"]) 8972 .capture_output() 8973 .spawn() 8974 .unwrap(); 8975 8976 let r = std::panic::catch_unwind(|| { 8977 guest.wait_vm_boot(None).unwrap(); 8978 8979 // Check 
if SGX is correctly detected in the guest. 8980 guest.check_sgx_support().unwrap(); 8981 8982 // Validate the SGX EPC section is 64MiB. 8983 assert_eq!( 8984 guest 8985 .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2") 8986 .unwrap() 8987 .trim(), 8988 "0x0000000004000000" 8989 ); 8990 }); 8991 8992 let _ = child.kill(); 8993 let output = child.wait_with_output().unwrap(); 8994 8995 handle_child_output(r, &output); 8996 } 8997 } 8998 8999 #[cfg(target_arch = "x86_64")] 9000 mod vfio { 9001 use crate::*; 9002 9003 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) { 9004 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 9005 let guest = Guest::new(Box::new(jammy)); 9006 let api_socket = temp_api_path(&guest.tmp_dir); 9007 9008 let mut child = GuestCommand::new(&guest) 9009 .args(["--cpus", "boot=4"]) 9010 .args([ 9011 "--memory", 9012 format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(), 9013 ]) 9014 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9015 .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"]) 9016 .args(["--api-socket", &api_socket]) 9017 .default_disks() 9018 .default_net() 9019 .capture_output() 9020 .spawn() 9021 .unwrap(); 9022 9023 let r = std::panic::catch_unwind(|| { 9024 guest.wait_vm_boot(None).unwrap(); 9025 9026 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9027 9028 guest.enable_memory_hotplug(); 9029 9030 // Add RAM to the VM 9031 let desired_ram = 6 << 30; 9032 resize_command(&api_socket, None, Some(desired_ram), None, None); 9033 thread::sleep(std::time::Duration::new(30, 0)); 9034 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9035 9036 // Check the VFIO device works when RAM is increased to 6GiB 9037 guest.check_nvidia_gpu(); 9038 }); 9039 9040 let _ = child.kill(); 9041 let output = child.wait_with_output().unwrap(); 9042 9043 handle_child_output(r, &output); 9044 } 9045 9046 #[test] 9047 fn test_nvidia_card_memory_hotplug_acpi() { 9048 test_nvidia_card_memory_hotplug("acpi") 9049 } 9050 9051 #[test] 9052 fn test_nvidia_card_memory_hotplug_virtio_mem() { 9053 test_nvidia_card_memory_hotplug("virtio-mem") 9054 } 9055 9056 #[test] 9057 fn test_nvidia_card_pci_hotplug() { 9058 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 9059 let guest = Guest::new(Box::new(jammy)); 9060 let api_socket = temp_api_path(&guest.tmp_dir); 9061 9062 let mut child = GuestCommand::new(&guest) 9063 .args(["--cpus", "boot=4"]) 9064 .args(["--memory", "size=4G"]) 9065 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9066 .args(["--api-socket", &api_socket]) 9067 .default_disks() 9068 .default_net() 9069 .capture_output() 9070 .spawn() 9071 .unwrap(); 9072 9073 let r = std::panic::catch_unwind(|| { 9074 guest.wait_vm_boot(None).unwrap(); 9075 9076 // Hotplug the card to the VM 9077 let (cmd_success, cmd_output) = remote_command_w_output( 9078 &api_socket, 9079 "add-device", 9080 Some("id=vfio0,path=/sys/bus/pci/devices/0000:31:00.0/"), 9081 ); 9082 assert!(cmd_success); 9083 assert!(String::from_utf8_lossy(&cmd_output) 9084 .contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}")); 9085 9086 thread::sleep(std::time::Duration::new(10, 0)); 9087 9088 // Check the VFIO device works after hotplug 9089 guest.check_nvidia_gpu(); 9090 }); 9091 9092 let _ = child.kill(); 9093 let output = child.wait_with_output().unwrap(); 9094 9095 handle_child_output(r, &output); 9096 } 9097 9098 #[test] 9099 fn 
test_nvidia_card_reboot() { 9100 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 9101 let guest = Guest::new(Box::new(jammy)); 9102 let api_socket = temp_api_path(&guest.tmp_dir); 9103 9104 let mut child = GuestCommand::new(&guest) 9105 .args(["--cpus", "boot=4"]) 9106 .args(["--memory", "size=4G"]) 9107 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9108 .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"]) 9109 .args(["--api-socket", &api_socket]) 9110 .default_disks() 9111 .default_net() 9112 .capture_output() 9113 .spawn() 9114 .unwrap(); 9115 9116 let r = std::panic::catch_unwind(|| { 9117 guest.wait_vm_boot(None).unwrap(); 9118 9119 // Check the VFIO device works after boot 9120 guest.check_nvidia_gpu(); 9121 9122 guest.reboot_linux(0, None); 9123 9124 // Check the VFIO device works after reboot 9125 guest.check_nvidia_gpu(); 9126 }); 9127 9128 let _ = child.kill(); 9129 let output = child.wait_with_output().unwrap(); 9130 9131 handle_child_output(r, &output); 9132 } 9133 } 9134 9135 mod live_migration { 9136 use crate::*; 9137 9138 fn start_live_migration( 9139 migration_socket: &str, 9140 src_api_socket: &str, 9141 dest_api_socket: &str, 9142 local: bool, 9143 ) -> bool { 9144 // Start to receive migration from the destination VM 9145 let mut receive_migration = Command::new(clh_command("ch-remote")) 9146 .args([ 9147 &format!("--api-socket={dest_api_socket}"), 9148 "receive-migration", 9149 &format! {"unix:{migration_socket}"}, 9150 ]) 9151 .stderr(Stdio::piped()) 9152 .stdout(Stdio::piped()) 9153 .spawn() 9154 .unwrap(); 9155 // Give it '1s' to make sure the 'migration_socket' file is properly created 9156 thread::sleep(std::time::Duration::new(1, 0)); 9157 // Start to send migration from the source VM 9158 9159 let mut args = [ 9160 format!("--api-socket={}", &src_api_socket), 9161 "send-migration".to_string(), 9162 format! 
{"unix:{migration_socket}"}, 9163 ] 9164 .to_vec(); 9165 9166 if local { 9167 args.insert(2, "--local".to_string()); 9168 } 9169 9170 let mut send_migration = Command::new(clh_command("ch-remote")) 9171 .args(&args) 9172 .stderr(Stdio::piped()) 9173 .stdout(Stdio::piped()) 9174 .spawn() 9175 .unwrap(); 9176 9177 // The 'send-migration' command should be executed successfully within the given timeout 9178 let send_success = if let Some(status) = send_migration 9179 .wait_timeout(std::time::Duration::from_secs(30)) 9180 .unwrap() 9181 { 9182 status.success() 9183 } else { 9184 false 9185 }; 9186 9187 if !send_success { 9188 let _ = send_migration.kill(); 9189 let output = send_migration.wait_with_output().unwrap(); 9190 eprintln!( 9191 "\n\n==== Start 'send_migration' output ==== \ 9192 \n\n---stdout---\n{}\n\n---stderr---\n{} \ 9193 \n\n==== End 'send_migration' output ====\n\n", 9194 String::from_utf8_lossy(&output.stdout), 9195 String::from_utf8_lossy(&output.stderr) 9196 ); 9197 } 9198 9199 // The 'receive-migration' command should be executed successfully within the given timeout 9200 let receive_success = if let Some(status) = receive_migration 9201 .wait_timeout(std::time::Duration::from_secs(30)) 9202 .unwrap() 9203 { 9204 status.success() 9205 } else { 9206 false 9207 }; 9208 9209 if !receive_success { 9210 let _ = receive_migration.kill(); 9211 let output = receive_migration.wait_with_output().unwrap(); 9212 eprintln!( 9213 "\n\n==== Start 'receive_migration' output ==== \ 9214 \n\n---stdout---\n{}\n\n---stderr---\n{} \ 9215 \n\n==== End 'receive_migration' output ====\n\n", 9216 String::from_utf8_lossy(&output.stdout), 9217 String::from_utf8_lossy(&output.stderr) 9218 ); 9219 } 9220 9221 send_success && receive_success 9222 } 9223 9224 fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! { 9225 let mut src_vm = src_vm; 9226 let mut dest_vm = dest_vm; 9227 9228 let _ = src_vm.kill(); 9229 let src_output = src_vm.wait_with_output().unwrap(); 9230 eprintln!( 9231 "\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====", 9232 String::from_utf8_lossy(&src_output.stdout) 9233 ); 9234 eprintln!( 9235 "\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====", 9236 String::from_utf8_lossy(&src_output.stderr) 9237 ); 9238 let _ = dest_vm.kill(); 9239 let dest_output = dest_vm.wait_with_output().unwrap(); 9240 eprintln!( 9241 "\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====", 9242 String::from_utf8_lossy(&dest_output.stdout) 9243 ); 9244 eprintln!( 9245 "\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====", 9246 String::from_utf8_lossy(&dest_output.stderr) 9247 ); 9248 9249 if let Some(ovs_vm) = ovs_vm { 9250 let mut ovs_vm = ovs_vm; 9251 let _ = ovs_vm.kill(); 9252 let ovs_output = ovs_vm.wait_with_output().unwrap(); 9253 eprintln!( 9254 "\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====", 9255 String::from_utf8_lossy(&ovs_output.stdout) 9256 ); 9257 eprintln!( 9258 "\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====", 9259 String::from_utf8_lossy(&ovs_output.stderr) 9260 ); 9261 9262 cleanup_ovs_dpdk(); 9263 } 9264 9265 panic!("Test failed: {message}") 9266 } 9267 9268 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 9269 // same host. It ensures the following behaviors: 9270 // 1. 
The source VM is up and functional (including various virtio-devices are working properly); 9271 // 2. The 'send-migration' and 'receive-migration' command finished successfully; 9272 // 3. The source VM terminated gracefully after live migration; 9273 // 4. The destination VM is functional (including various virtio-devices are working properly) after 9274 // live migration; 9275 // Note: This test does not use vsock as we can't create two identical vsock on the same host. 9276 fn _test_live_migration(upgrade_test: bool, local: bool) { 9277 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9278 let guest = Guest::new(Box::new(focal)); 9279 let kernel_path = direct_kernel_boot_path(); 9280 let console_text = String::from("On a branch floating down river a cricket, singing."); 9281 let net_id = "net123"; 9282 let net_params = format!( 9283 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9284 net_id, guest.network.guest_mac, guest.network.host_ip 9285 ); 9286 9287 let memory_param: &[&str] = if local { 9288 &["--memory", "size=4G,shared=on"] 9289 } else { 9290 &["--memory", "size=4G"] 9291 }; 9292 9293 let boot_vcpus = 2; 9294 let max_vcpus = 4; 9295 9296 let pmem_temp_file = TempFile::new().unwrap(); 9297 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9298 std::process::Command::new("mkfs.ext4") 9299 .arg(pmem_temp_file.as_path()) 9300 .output() 9301 .expect("Expect creating disk image to succeed"); 9302 let pmem_path = String::from("/dev/pmem0"); 9303 9304 // Start the source VM 9305 let src_vm_path = if !upgrade_test { 9306 clh_command("cloud-hypervisor") 9307 } else { 9308 cloud_hypervisor_release_path() 9309 }; 9310 let src_api_socket = temp_api_path(&guest.tmp_dir); 9311 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9312 src_vm_cmd 9313 .args([ 9314 "--cpus", 9315 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9316 ]) 9317 .args(memory_param) 9318 .args(["--kernel", kernel_path.to_str().unwrap()]) 9319 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9320 .default_disks() 9321 .args(["--net", net_params.as_str()]) 9322 .args(["--api-socket", &src_api_socket]) 9323 .args([ 9324 "--pmem", 9325 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9326 ]); 9327 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9328 9329 // Start the destination VM 9330 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9331 dest_api_socket.push_str(".dest"); 9332 let mut dest_child = GuestCommand::new(&guest) 9333 .args(["--api-socket", &dest_api_socket]) 9334 .capture_output() 9335 .spawn() 9336 .unwrap(); 9337 9338 let r = std::panic::catch_unwind(|| { 9339 guest.wait_vm_boot(None).unwrap(); 9340 9341 // Make sure the source VM is functional 9342 // Check the number of vCPUs 9343 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9344 9345 // Check the guest RAM 9346 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9347 9348 // Check the guest virtio-devices, e.g. block, rng, console, and net 9349 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9350 9351 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9352 // to make sure that removing and adding back the virtio-net device does 9353 // not break the live-migration support for virtio-pci. 
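// Note: further down, once the migration has finished, the source VM is given
// a fixed 3 s to exit before a single try_wait() check. A deadline-based wait
// could make that less timing-sensitive; a minimal hypothetical sketch of such
// a wait (not what this test currently does):
fn _sketch_wait_for_graceful_exit(child: &mut Child, deadline: std::time::Duration) -> bool {
    let start = std::time::Instant::now();
    while start.elapsed() < deadline {
        if let Ok(Some(status)) = child.try_wait() {
            return status.success();
        }
        thread::sleep(std::time::Duration::from_millis(200));
    }
    false
}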
9354 #[cfg(target_arch = "x86_64")] 9355 { 9356 assert!(remote_command( 9357 &src_api_socket, 9358 "remove-device", 9359 Some(net_id), 9360 )); 9361 thread::sleep(std::time::Duration::new(10, 0)); 9362 9363 // Plug the virtio-net device again 9364 assert!(remote_command( 9365 &src_api_socket, 9366 "add-net", 9367 Some(net_params.as_str()), 9368 )); 9369 thread::sleep(std::time::Duration::new(10, 0)); 9370 } 9371 9372 // Start the live-migration 9373 let migration_socket = String::from( 9374 guest 9375 .tmp_dir 9376 .as_path() 9377 .join("live-migration.sock") 9378 .to_str() 9379 .unwrap(), 9380 ); 9381 9382 assert!( 9383 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9384 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9385 ); 9386 }); 9387 9388 // Check and report any errors occurred during the live-migration 9389 if r.is_err() { 9390 print_and_panic( 9391 src_child, 9392 dest_child, 9393 None, 9394 "Error occurred during live-migration", 9395 ); 9396 } 9397 9398 // Check the source vm has been terminated successful (give it '3s' to settle) 9399 thread::sleep(std::time::Duration::new(3, 0)); 9400 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9401 print_and_panic( 9402 src_child, 9403 dest_child, 9404 None, 9405 "source VM was not terminated successfully.", 9406 ); 9407 }; 9408 9409 // Post live-migration check to make sure the destination VM is functional 9410 let r = std::panic::catch_unwind(|| { 9411 // Perform same checks to validate VM has been properly migrated 9412 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9413 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9414 9415 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9416 }); 9417 9418 // Clean-up the destination VM and make sure it terminated correctly 9419 let _ = dest_child.kill(); 9420 let dest_output = dest_child.wait_with_output().unwrap(); 9421 handle_child_output(r, &dest_output); 9422 9423 // Check the destination VM has the expected 'console_text' from its output 9424 let r = std::panic::catch_unwind(|| { 9425 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9426 }); 9427 handle_child_output(r, &dest_output); 9428 } 9429 9430 fn _test_live_migration_balloon(upgrade_test: bool, local: bool) { 9431 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9432 let guest = Guest::new(Box::new(focal)); 9433 let kernel_path = direct_kernel_boot_path(); 9434 let console_text = String::from("On a branch floating down river a cricket, singing."); 9435 let net_id = "net123"; 9436 let net_params = format!( 9437 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9438 net_id, guest.network.guest_mac, guest.network.host_ip 9439 ); 9440 9441 let memory_param: &[&str] = if local { 9442 &[ 9443 "--memory", 9444 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on", 9445 "--balloon", 9446 "size=0", 9447 ] 9448 } else { 9449 &[ 9450 "--memory", 9451 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G", 9452 "--balloon", 9453 "size=0", 9454 ] 9455 }; 9456 9457 let boot_vcpus = 2; 9458 let max_vcpus = 4; 9459 9460 let pmem_temp_file = TempFile::new().unwrap(); 9461 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9462 std::process::Command::new("mkfs.ext4") 9463 .arg(pmem_temp_file.as_path()) 9464 .output() 9465 .expect("Expect creating disk image to succeed"); 9466 let pmem_path = String::from("/dev/pmem0"); 9467 9468 // Start the source VM 9469 let 
src_vm_path = if !upgrade_test { 9470 clh_command("cloud-hypervisor") 9471 } else { 9472 cloud_hypervisor_release_path() 9473 }; 9474 let src_api_socket = temp_api_path(&guest.tmp_dir); 9475 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9476 src_vm_cmd 9477 .args([ 9478 "--cpus", 9479 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9480 ]) 9481 .args(memory_param) 9482 .args(["--kernel", kernel_path.to_str().unwrap()]) 9483 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9484 .default_disks() 9485 .args(["--net", net_params.as_str()]) 9486 .args(["--api-socket", &src_api_socket]) 9487 .args([ 9488 "--pmem", 9489 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9490 ]); 9491 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9492 9493 // Start the destination VM 9494 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9495 dest_api_socket.push_str(".dest"); 9496 let mut dest_child = GuestCommand::new(&guest) 9497 .args(["--api-socket", &dest_api_socket]) 9498 .capture_output() 9499 .spawn() 9500 .unwrap(); 9501 9502 let r = std::panic::catch_unwind(|| { 9503 guest.wait_vm_boot(None).unwrap(); 9504 9505 // Make sure the source VM is functional 9506 // Check the number of vCPUs 9507 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9508 9509 // Check the guest RAM 9510 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9511 // Increase the guest RAM 9512 resize_command(&src_api_socket, None, Some(6 << 30), None, None); 9513 thread::sleep(std::time::Duration::new(5, 0)); 9514 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9515 // Use balloon to remove RAM from the VM 9516 resize_command(&src_api_socket, None, None, Some(1 << 30), None); 9517 thread::sleep(std::time::Duration::new(5, 0)); 9518 let total_memory = guest.get_total_memory().unwrap_or_default(); 9519 assert!(total_memory > 4_800_000); 9520 assert!(total_memory < 5_760_000); 9521 9522 // Check the guest virtio-devices, e.g. block, rng, console, and net 9523 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9524 9525 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9526 // to make sure that removing and adding back the virtio-net device does 9527 // not break the live-migration support for virtio-pci. 9528 #[cfg(target_arch = "x86_64")] 9529 { 9530 assert!(remote_command( 9531 &src_api_socket, 9532 "remove-device", 9533 Some(net_id), 9534 )); 9535 thread::sleep(std::time::Duration::new(10, 0)); 9536 9537 // Plug the virtio-net device again 9538 assert!(remote_command( 9539 &src_api_socket, 9540 "add-net", 9541 Some(net_params.as_str()), 9542 )); 9543 thread::sleep(std::time::Duration::new(10, 0)); 9544 } 9545 9546 // Start the live-migration 9547 let migration_socket = String::from( 9548 guest 9549 .tmp_dir 9550 .as_path() 9551 .join("live-migration.sock") 9552 .to_str() 9553 .unwrap(), 9554 ); 9555 9556 assert!( 9557 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9558 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
        );
    });

    // Check and report any errors that occurred during the live-migration
    if r.is_err() {
        print_and_panic(
            src_child,
            dest_child,
            None,
            "Error occurred during live-migration",
        );
    }

    // Check the source VM has terminated successfully (give it '3s' to settle)
    thread::sleep(std::time::Duration::new(3, 0));
    if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
        print_and_panic(
            src_child,
            dest_child,
            None,
            "source VM was not terminated successfully.",
        );
    };

    // Post live-migration check to make sure the destination VM is functional
    let r = std::panic::catch_unwind(|| {
        // Perform the same checks to validate the VM has been properly migrated
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
        assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

        guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));

        // Perform checks on guest RAM using balloon
        let total_memory = guest.get_total_memory().unwrap_or_default();
        assert!(total_memory > 4_800_000);
        assert!(total_memory < 5_760_000);
        // Deflate balloon to restore entire RAM to the VM
        resize_command(&dest_api_socket, None, None, Some(0), None);
        thread::sleep(std::time::Duration::new(5, 0));
        assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
        // Decrease guest RAM with virtio-mem
        resize_command(&dest_api_socket, None, Some(5 << 30), None, None);
        thread::sleep(std::time::Duration::new(5, 0));
        let total_memory = guest.get_total_memory().unwrap_or_default();
        assert!(total_memory > 4_800_000);
        assert!(total_memory < 5_760_000);
    });

    // Clean-up the destination VM and make sure it terminated correctly
    let _ = dest_child.kill();
    let dest_output = dest_child.wait_with_output().unwrap();
    handle_child_output(r, &dest_output);

    // Check the destination VM has the expected 'console_text' from its output
    let r = std::panic::catch_unwind(|| {
        assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
    });
    handle_child_output(r, &dest_output);
}

fn _test_live_migration_numa(upgrade_test: bool, local: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let kernel_path = direct_kernel_boot_path();
    let console_text = String::from("On a branch floating down river a cricket, singing.");
    let net_id = "net123";
    let net_params = format!(
        "id={},tap=,mac={},ip={},mask=255.255.255.0",
        net_id, guest.network.guest_mac, guest.network.host_ip
    );

    let memory_param: &[&str] = if local {
        &[
            "--memory",
            "size=0,hotplug_method=virtio-mem,shared=on",
            "--memory-zone",
            "id=mem0,size=1G,hotplug_size=4G,shared=on",
            "id=mem1,size=1G,hotplug_size=4G,shared=on",
            "id=mem2,size=2G,hotplug_size=4G,shared=on",
            "--numa",
            "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
            "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
            "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
        ]
    } else {
        &[
            "--memory",
            "size=0,hotplug_method=virtio-mem",
            "--memory-zone",
            "id=mem0,size=1G,hotplug_size=4G",
            "id=mem1,size=1G,hotplug_size=4G",
"id=mem2,size=2G,hotplug_size=4G", 9651 "--numa", 9652 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9653 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9654 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9655 ] 9656 }; 9657 9658 let boot_vcpus = 6; 9659 let max_vcpus = 12; 9660 9661 let pmem_temp_file = TempFile::new().unwrap(); 9662 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9663 std::process::Command::new("mkfs.ext4") 9664 .arg(pmem_temp_file.as_path()) 9665 .output() 9666 .expect("Expect creating disk image to succeed"); 9667 let pmem_path = String::from("/dev/pmem0"); 9668 9669 // Start the source VM 9670 let src_vm_path = if !upgrade_test { 9671 clh_command("cloud-hypervisor") 9672 } else { 9673 cloud_hypervisor_release_path() 9674 }; 9675 let src_api_socket = temp_api_path(&guest.tmp_dir); 9676 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9677 src_vm_cmd 9678 .args([ 9679 "--cpus", 9680 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9681 ]) 9682 .args(memory_param) 9683 .args(["--kernel", kernel_path.to_str().unwrap()]) 9684 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9685 .default_disks() 9686 .args(["--net", net_params.as_str()]) 9687 .args(["--api-socket", &src_api_socket]) 9688 .args([ 9689 "--pmem", 9690 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9691 ]); 9692 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9693 9694 // Start the destination VM 9695 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9696 dest_api_socket.push_str(".dest"); 9697 let mut dest_child = GuestCommand::new(&guest) 9698 .args(["--api-socket", &dest_api_socket]) 9699 .capture_output() 9700 .spawn() 9701 .unwrap(); 9702 9703 let r = std::panic::catch_unwind(|| { 9704 guest.wait_vm_boot(None).unwrap(); 9705 9706 // Make sure the source VM is functional 9707 // Check the number of vCPUs 9708 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9709 9710 // Check the guest RAM 9711 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 9712 9713 // Check the guest virtio-devices, e.g. block, rng, console, and net 9714 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9715 9716 // Check the NUMA parameters are applied correctly and resize 9717 // each zone to test the case where we migrate a VM with the 9718 // virtio-mem regions being used. 9719 { 9720 guest.check_numa_common( 9721 Some(&[960_000, 960_000, 1_920_000]), 9722 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9723 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9724 ); 9725 9726 // AArch64 currently does not support hotplug, and therefore we only 9727 // test hotplug-related function on x86_64 here. 9728 #[cfg(target_arch = "x86_64")] 9729 { 9730 guest.enable_memory_hotplug(); 9731 9732 // Resize every memory zone and check each associated NUMA node 9733 // has been assigned the right amount of memory. 
                resize_zone_command(&src_api_socket, "mem0", "2G");
                resize_zone_command(&src_api_socket, "mem1", "2G");
                resize_zone_command(&src_api_socket, "mem2", "3G");
                thread::sleep(std::time::Duration::new(5, 0));

                guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None);
            }
        }

        // x86_64: Following what's done in the `test_snapshot_restore`, we need
        // to make sure that removing and adding back the virtio-net device does
        // not break the live-migration support for virtio-pci.
        #[cfg(target_arch = "x86_64")]
        {
            assert!(remote_command(
                &src_api_socket,
                "remove-device",
                Some(net_id),
            ));
            thread::sleep(std::time::Duration::new(10, 0));

            // Plug the virtio-net device again
            assert!(remote_command(
                &src_api_socket,
                "add-net",
                Some(net_params.as_str()),
            ));
            thread::sleep(std::time::Duration::new(10, 0));
        }

        // Start the live-migration
        let migration_socket = String::from(
            guest
                .tmp_dir
                .as_path()
                .join("live-migration.sock")
                .to_str()
                .unwrap(),
        );

        assert!(
            start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
            "Unsuccessful command: 'send-migration' or 'receive-migration'."
        );
    });

    // Check and report any errors that occurred during the live-migration
    if r.is_err() {
        print_and_panic(
            src_child,
            dest_child,
            None,
            "Error occurred during live-migration",
        );
    }

    // Check the source VM has terminated successfully (give it '3s' to settle)
    thread::sleep(std::time::Duration::new(3, 0));
    if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
        print_and_panic(
            src_child,
            dest_child,
            None,
            "source VM was not terminated successfully.",
        );
    };

    // Post live-migration check to make sure the destination VM is functional
    let r = std::panic::catch_unwind(|| {
        // Perform the same checks to validate the VM has been properly migrated
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
        #[cfg(target_arch = "x86_64")]
        assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000);
        #[cfg(target_arch = "aarch64")]
        assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

        guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));

        // Perform NUMA related checks
        {
            #[cfg(target_arch = "aarch64")]
            {
                guest.check_numa_common(
                    Some(&[960_000, 960_000, 1_920_000]),
                    Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
                    Some(&["10 15 20", "20 10 25", "25 30 10"]),
                );
            }

            // AArch64 currently does not support hotplug, and therefore we only
            // test hotplug-related functions on x86_64 here.
            #[cfg(target_arch = "x86_64")]
            {
                guest.check_numa_common(
                    Some(&[1_920_000, 1_920_000, 2_880_000]),
                    Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
                    Some(&["10 15 20", "20 10 25", "25 30 10"]),
                );

                guest.enable_memory_hotplug();

                // Resize every memory zone and check each associated NUMA node
                // has been assigned the right amount of memory.
9837 resize_zone_command(&dest_api_socket, "mem0", "4G"); 9838 resize_zone_command(&dest_api_socket, "mem1", "4G"); 9839 resize_zone_command(&dest_api_socket, "mem2", "4G"); 9840 // Resize to the maximum amount of CPUs and check each NUMA 9841 // node has been assigned the right CPUs set. 9842 resize_command(&dest_api_socket, Some(max_vcpus), None, None, None); 9843 thread::sleep(std::time::Duration::new(5, 0)); 9844 9845 guest.check_numa_common( 9846 Some(&[3_840_000, 3_840_000, 3_840_000]), 9847 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 9848 None, 9849 ); 9850 } 9851 } 9852 }); 9853 9854 // Clean-up the destination VM and make sure it terminated correctly 9855 let _ = dest_child.kill(); 9856 let dest_output = dest_child.wait_with_output().unwrap(); 9857 handle_child_output(r, &dest_output); 9858 9859 // Check the destination VM has the expected 'console_text' from its output 9860 let r = std::panic::catch_unwind(|| { 9861 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9862 }); 9863 handle_child_output(r, &dest_output); 9864 } 9865 9866 fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) { 9867 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9868 let guest = Guest::new(Box::new(focal)); 9869 let kernel_path = direct_kernel_boot_path(); 9870 let console_text = String::from("On a branch floating down river a cricket, singing."); 9871 let net_id = "net123"; 9872 let net_params = format!( 9873 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9874 net_id, guest.network.guest_mac, guest.network.host_ip 9875 ); 9876 9877 let memory_param: &[&str] = if local { 9878 &["--memory", "size=4G,shared=on"] 9879 } else { 9880 &["--memory", "size=4G"] 9881 }; 9882 9883 let boot_vcpus = 2; 9884 let max_vcpus = 4; 9885 9886 let pmem_temp_file = TempFile::new().unwrap(); 9887 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9888 std::process::Command::new("mkfs.ext4") 9889 .arg(pmem_temp_file.as_path()) 9890 .output() 9891 .expect("Expect creating disk image to succeed"); 9892 let pmem_path = String::from("/dev/pmem0"); 9893 9894 // Start the source VM 9895 let src_vm_path = if !upgrade_test { 9896 clh_command("cloud-hypervisor") 9897 } else { 9898 cloud_hypervisor_release_path() 9899 }; 9900 let src_api_socket = temp_api_path(&guest.tmp_dir); 9901 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9902 src_vm_cmd 9903 .args([ 9904 "--cpus", 9905 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9906 ]) 9907 .args(memory_param) 9908 .args(["--kernel", kernel_path.to_str().unwrap()]) 9909 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9910 .default_disks() 9911 .args(["--net", net_params.as_str()]) 9912 .args(["--api-socket", &src_api_socket]) 9913 .args([ 9914 "--pmem", 9915 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9916 ]) 9917 .args(["--watchdog"]); 9918 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9919 9920 // Start the destination VM 9921 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9922 dest_api_socket.push_str(".dest"); 9923 let mut dest_child = GuestCommand::new(&guest) 9924 .args(["--api-socket", &dest_api_socket]) 9925 .capture_output() 9926 .spawn() 9927 .unwrap(); 9928 9929 let r = std::panic::catch_unwind(|| { 9930 guest.wait_vm_boot(None).unwrap(); 9931 9932 // Make sure the source VM is functional 9933 // Check the number of vCPUs 9934 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9935 // 
        // Check the guest RAM
        assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
        // Check the guest virtio-devices, e.g. block, rng, console, and net
        guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
        // x86_64: Following what's done in the `test_snapshot_restore`, we need
        // to make sure that removing and adding back the virtio-net device does
        // not break the live-migration support for virtio-pci.
        #[cfg(target_arch = "x86_64")]
        {
            assert!(remote_command(
                &src_api_socket,
                "remove-device",
                Some(net_id),
            ));
            thread::sleep(std::time::Duration::new(10, 0));

            // Plug the virtio-net device again
            assert!(remote_command(
                &src_api_socket,
                "add-net",
                Some(net_params.as_str()),
            ));
            thread::sleep(std::time::Duration::new(10, 0));
        }

        // Enable the watchdog and ensure it's functional
        let expected_reboot_count = 1;
        // Enable the watchdog with a 15s timeout
        enable_guest_watchdog(&guest, 15);

        assert_eq!(get_reboot_count(&guest), expected_reboot_count);
        assert_eq!(
            guest
                .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            1
        );
        // Allow some normal time to elapse to check we don't get spurious reboots
        thread::sleep(std::time::Duration::new(40, 0));
        // Check no reboot
        assert_eq!(get_reboot_count(&guest), expected_reboot_count);

        // Start the live-migration
        let migration_socket = String::from(
            guest
                .tmp_dir
                .as_path()
                .join("live-migration.sock")
                .to_str()
                .unwrap(),
        );

        assert!(
            start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
            "Unsuccessful command: 'send-migration' or 'receive-migration'."
        );
    });

    // Check and report any errors that occurred during the live-migration
    if r.is_err() {
        print_and_panic(
            src_child,
            dest_child,
            None,
            "Error occurred during live-migration",
        );
    }

    // Check the source VM has terminated successfully (give it '3s' to settle)
    thread::sleep(std::time::Duration::new(3, 0));
    if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
        print_and_panic(
            src_child,
            dest_child,
            None,
            "source VM was not terminated successfully.",
        );
    };

    // Post live-migration check to make sure the destination VM is functional
    let r = std::panic::catch_unwind(|| {
        // Perform the same checks to validate the VM has been properly migrated
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
        assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

        guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));

        // Perform checks on watchdog
        let mut expected_reboot_count = 1;

        // Allow some normal time to elapse to check we don't get spurious reboots
        thread::sleep(std::time::Duration::new(40, 0));
        // Check no reboot
        assert_eq!(get_reboot_count(&guest), expected_reboot_count);

        // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns.
10034 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 10035 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 10036 guest.wait_vm_boot(Some(50)).unwrap(); 10037 // Check a reboot is triggered by the watchdog 10038 expected_reboot_count += 1; 10039 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 10040 10041 #[cfg(target_arch = "x86_64")] 10042 { 10043 // Now pause the VM and remain offline for 30s 10044 assert!(remote_command(&dest_api_socket, "pause", None)); 10045 thread::sleep(std::time::Duration::new(30, 0)); 10046 assert!(remote_command(&dest_api_socket, "resume", None)); 10047 10048 // Check no reboot 10049 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 10050 } 10051 }); 10052 10053 // Clean-up the destination VM and make sure it terminated correctly 10054 let _ = dest_child.kill(); 10055 let dest_output = dest_child.wait_with_output().unwrap(); 10056 handle_child_output(r, &dest_output); 10057 10058 // Check the destination VM has the expected 'console_text' from its output 10059 let r = std::panic::catch_unwind(|| { 10060 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 10061 }); 10062 handle_child_output(r, &dest_output); 10063 } 10064 10065 fn _test_live_migration_ovs_dpdk(upgrade_test: bool, local: bool) { 10066 let ovs_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10067 let ovs_guest = Guest::new(Box::new(ovs_focal)); 10068 10069 let migration_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10070 let migration_guest = Guest::new(Box::new(migration_focal)); 10071 let src_api_socket = temp_api_path(&migration_guest.tmp_dir); 10072 10073 // Start two VMs that are connected through ovs-dpdk and one of the VMs is the source VM for live-migration 10074 let (mut ovs_child, mut src_child) = 10075 setup_ovs_dpdk_guests(&ovs_guest, &migration_guest, &src_api_socket, upgrade_test); 10076 10077 // Start the destination VM 10078 let mut dest_api_socket = temp_api_path(&migration_guest.tmp_dir); 10079 dest_api_socket.push_str(".dest"); 10080 let mut dest_child = GuestCommand::new(&migration_guest) 10081 .args(["--api-socket", &dest_api_socket]) 10082 .capture_output() 10083 .spawn() 10084 .unwrap(); 10085 10086 let r = std::panic::catch_unwind(|| { 10087 // Give it '1s' to make sure the 'dest_api_socket' file is properly created 10088 thread::sleep(std::time::Duration::new(1, 0)); 10089 10090 // Start the live-migration 10091 let migration_socket = String::from( 10092 migration_guest 10093 .tmp_dir 10094 .as_path() 10095 .join("live-migration.sock") 10096 .to_str() 10097 .unwrap(), 10098 ); 10099 10100 assert!( 10101 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 10102 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
        );
    });

    // Check and report any errors that occurred during the live-migration
    if r.is_err() {
        print_and_panic(
            src_child,
            dest_child,
            Some(ovs_child),
            "Error occurred during live-migration",
        );
    }

    // Check the source VM has terminated successfully (give it '3s' to settle)
    thread::sleep(std::time::Duration::new(3, 0));
    if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
        print_and_panic(
            src_child,
            dest_child,
            Some(ovs_child),
            "source VM was not terminated successfully.",
        );
    };

    // Post live-migration check to make sure the destination VM is functional
    let r = std::panic::catch_unwind(|| {
        // Perform the same checks to validate the VM has been properly migrated
        // Spawn a new netcat listener in the OVS VM
        let guest_ip = ovs_guest.network.guest_ip.clone();
        thread::spawn(move || {
            ssh_command_ip(
                "nc -l 12345",
                &guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap();
        });

        // Wait for the server to be listening
        thread::sleep(std::time::Duration::new(5, 0));

        // And check the connection is still functional after live-migration
        migration_guest
            .ssh_command("nc -vz 172.100.0.1 12345")
            .unwrap();
    });

    // Clean-up the destination VM and OVS VM, and make sure they terminated correctly
    let _ = dest_child.kill();
    let _ = ovs_child.kill();
    let dest_output = dest_child.wait_with_output().unwrap();
    let ovs_output = ovs_child.wait_with_output().unwrap();

    cleanup_ovs_dpdk();

    handle_child_output(r, &dest_output);
    handle_child_output(Ok(()), &ovs_output);
}

// This test exercises the local live-migration between two Cloud Hypervisor VMs on the
// same host with Landlock enabled on both VMs. The test validates the following:
// 1. The source VM is up and functional.
// 2. Ensure Landlock is enabled on the source VM by hotplugging a disk. As the path for this
//    disk is not known to the source VM this step will fail.
// 3. The 'send-migration' and 'receive-migration' commands finished successfully.
// 4. The source VM terminated gracefully after live migration.
// 5. The destination VM is functional after live migration.
// 6. Ensure Landlock is enabled on the destination VM by hotplugging a disk. As the path for
//    this disk is not known to the destination VM this step will fail.
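//
// Note: both hot-plug attempts use `blk.img` from the user's home directory, which lies
// outside the only path allowed through `--landlock-rules` (the guest's temporary
// directory), so the `add-disk` requests are expected to fail and the corresponding
// asserts below expect `remote_command` to return false.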
fn _test_live_migration_with_landlock() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let kernel_path = direct_kernel_boot_path();
    let net_id = "net123";
    let net_params = format!(
        "id={},tap=,mac={},ip={},mask=255.255.255.0",
        net_id, guest.network.guest_mac, guest.network.host_ip
    );

    let boot_vcpus = 2;
    let max_vcpus = 4;

    let mut blk_file_path = dirs::home_dir().unwrap();
    blk_file_path.push("workloads");
    blk_file_path.push("blk.img");

    let src_api_socket = temp_api_path(&guest.tmp_dir);
    let mut src_child = GuestCommand::new(&guest)
        .args([
            "--cpus",
            format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
        ])
        .args(["--memory", "size=4G,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--api-socket", &src_api_socket])
        .args(["--landlock"])
        .args(["--net", net_params.as_str()])
        .args([
            "--landlock-rules",
            format!("path={:?},access=rw", guest.tmp_dir.as_path()).as_str(),
        ])
        .capture_output()
        .spawn()
        .unwrap();

    // Start the destination VM
    let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
    dest_api_socket.push_str(".dest");
    let mut dest_child = GuestCommand::new(&guest)
        .args(["--api-socket", &dest_api_socket])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Make sure the source VM is functional
        // Check the number of vCPUs
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);

        // Check the guest RAM
        assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

        // Check Landlock is enabled by hot-plugging a disk.
        assert!(!remote_command(
            &src_api_socket,
            "add-disk",
            Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()),
        ));

        // Start the live-migration
        let migration_socket = String::from(
            guest
                .tmp_dir
                .as_path()
                .join("live-migration.sock")
                .to_str()
                .unwrap(),
        );

        assert!(
            start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, true),
            "Unsuccessful command: 'send-migration' or 'receive-migration'."
        );
    });

    // Check and report any errors that occurred during the live-migration
    if r.is_err() {
        print_and_panic(
            src_child,
            dest_child,
            None,
            "Error occurred during live-migration",
        );
    }

    // Check the source VM has terminated successfully (give it '3s' to settle)
    thread::sleep(std::time::Duration::new(3, 0));
    if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
        print_and_panic(
            src_child,
            dest_child,
            None,
            "source VM was not terminated successfully.",
        );
    };

    // Post live-migration check to make sure the destination VM is functional
    let r = std::panic::catch_unwind(|| {
        // Perform the same checks to validate the VM has been properly migrated
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
        assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
    });

    // Check Landlock is enabled on the destination VM by hot-plugging a disk.
    assert!(!remote_command(
        &dest_api_socket,
        "add-disk",
        Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()),
    ));

    // Clean-up the destination VM and make sure it terminated correctly
    let _ = dest_child.kill();
    let dest_output = dest_child.wait_with_output().unwrap();
    handle_child_output(r, &dest_output);
}

mod live_migration_parallel {
    use super::*;

    #[test]
    fn test_live_migration_basic() {
        _test_live_migration(false, false)
    }

    #[test]
    fn test_live_migration_local() {
        _test_live_migration(false, true)
    }

    #[test]
    fn test_live_migration_watchdog() {
        _test_live_migration_watchdog(false, false)
    }

    #[test]
    fn test_live_migration_watchdog_local() {
        _test_live_migration_watchdog(false, true)
    }

    #[test]
    fn test_live_upgrade_basic() {
        _test_live_migration(true, false)
    }

    #[test]
    fn test_live_upgrade_local() {
        _test_live_migration(true, true)
    }

    #[test]
    fn test_live_upgrade_watchdog() {
        _test_live_migration_watchdog(true, false)
    }

    #[test]
    fn test_live_upgrade_watchdog_local() {
        _test_live_migration_watchdog(true, true)
    }

    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_live_migration_with_landlock() {
        _test_live_migration_with_landlock()
    }
}

mod live_migration_sequential {
    use super::*;

    // NUMA & balloon live migration tests are large so run sequentially

    #[test]
    fn test_live_migration_balloon() {
        _test_live_migration_balloon(false, false)
    }

    #[test]
    fn test_live_migration_balloon_local() {
        _test_live_migration_balloon(false, true)
    }

    #[test]
    fn test_live_upgrade_balloon() {
        _test_live_migration_balloon(true, false)
    }

    #[test]
    fn test_live_upgrade_balloon_local() {
        _test_live_migration_balloon(true, true)
    }

    #[test]
    #[cfg(not(feature = "mshv"))]
    fn test_live_migration_numa() {
        _test_live_migration_numa(false, false)
    }

    #[test]
    #[cfg(not(feature = "mshv"))]
    fn test_live_migration_numa_local() {
        _test_live_migration_numa(false, true)
    }

    #[test]
    #[cfg(not(feature = "mshv"))]
    fn test_live_upgrade_numa() {
        _test_live_migration_numa(true, false)
    }

    #[test]
    #[cfg(not(feature = "mshv"))]
    fn test_live_upgrade_numa_local() {
        _test_live_migration_numa(true, true)
    }

    // The ovs-dpdk tests must run sequentially because they rely on the same ovs-dpdk setup
    #[test]
    #[ignore = "See #5532"]
    #[cfg(target_arch = "x86_64")]
    #[cfg(not(feature = "mshv"))]
    fn test_live_migration_ovs_dpdk() {
        _test_live_migration_ovs_dpdk(false, false);
    }

    #[test]
    #[cfg(target_arch = "x86_64")]
    #[cfg(not(feature = "mshv"))]
    fn test_live_migration_ovs_dpdk_local() {
        _test_live_migration_ovs_dpdk(false, true);
    }

    #[test]
    #[ignore = "See #5532"]
    #[cfg(target_arch = "x86_64")]
    #[cfg(not(feature = "mshv"))]
    fn test_live_upgrade_ovs_dpdk() {
        _test_live_migration_ovs_dpdk(true, false);
    }

    #[test]
    #[ignore = "See #5532"]
    #[cfg(target_arch = "x86_64")]
    #[cfg(not(feature = "mshv"))]
    fn test_live_upgrade_ovs_dpdk_local() {
        _test_live_migration_ovs_dpdk(true, true);
    }
}
}

#[cfg(target_arch = "aarch64")]
mod aarch64_acpi {
    use crate::*;

    #[test]
    fn test_simple_launch_acpi() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());

        vec![Box::new(focal)].drain(..).for_each(|disk_config| {
            let guest = Guest::new(disk_config);

            let mut child = GuestCommand::new(&guest)
                .args(["--cpus", "boot=1"])
                .args(["--memory", "size=512M"])
                .args(["--kernel", edk2_path().to_str().unwrap()])
                .default_disks()
                .default_net()
                .args(["--serial", "tty", "--console", "off"])
                .capture_output()
                .spawn()
                .unwrap();

            let r = std::panic::catch_unwind(|| {
                guest.wait_vm_boot(Some(120)).unwrap();

                assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
                assert!(guest.get_total_memory().unwrap_or_default() > 400_000);
                assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000");
            });

            let _ = child.kill();
            let output = child.wait_with_output().unwrap();

            handle_child_output(r, &output);
        });
    }

    #[test]
    fn test_guest_numa_nodes_acpi() {
        _test_guest_numa_nodes(true);
    }

    #[test]
    fn test_cpu_topology_421_acpi() {
        test_cpu_topology(4, 2, 1, true);
    }

    #[test]
    fn test_cpu_topology_142_acpi() {
        test_cpu_topology(1, 4, 2, true);
    }

    #[test]
    fn test_cpu_topology_262_acpi() {
        test_cpu_topology(2, 6, 2, true);
    }

    #[test]
    fn test_power_button_acpi() {
        _test_power_button(true);
    }

    #[test]
    fn test_virtio_iommu() {
        _test_virtio_iommu(true)
    }
}

mod rate_limiter {
    use super::*;

    // Check if the 'measured' rate is within the expected 'difference' (in percentage)
    // compared to the given 'limit' rate.
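    //
    // For example, with the parameters used by `_test_rate_limiter_net` below
    // (bw_size = 10485760 bytes and bw_refill_time = 100 ms, i.e.
    // limit = 10485760 * 8 * 1000 / 100 = 838_860_800 bps) and difference = 0.1,
    // only measured rates strictly between 754_974_720 and 922_746_880 bps pass the check.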
    fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool {
        let upper_limit = limit * (1_f64 + difference);
        let lower_limit = limit * (1_f64 - difference);

        if measured > lower_limit && measured < upper_limit {
            return true;
        }

        eprintln!(
            "\n\n==== Start 'check_rate_limit' failed ==== \
            \n\nmeasured={measured}, lower_limit={lower_limit}, upper_limit={upper_limit} \
            \n\n==== End 'check_rate_limit' failed ====\n\n"
        );

        false
    }

    fn _test_rate_limiter_net(rx: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));

        let test_timeout = 10;
        let num_queues = 2;
        let queue_size = 256;
        let bw_size = 10485760_u64; // bytes
        let bw_refill_time = 100; // ms
        let limit_bps = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64;

        let net_params = format!(
            "tap=,mac={},ip={},mask=255.255.255.0,num_queues={},queue_size={},bw_size={},bw_refill_time={}",
            guest.network.guest_mac,
            guest.network.host_ip,
            num_queues,
            queue_size,
            bw_size,
            bw_refill_time,
        );

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={}", num_queues / 2)])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();
            let measured_bps =
                measure_virtio_net_throughput(test_timeout, num_queues / 2, &guest, rx, true)
                    .unwrap();
            assert!(check_rate_limit(measured_bps, limit_bps, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_net_rx() {
        _test_rate_limiter_net(true);
    }

    #[test]
    fn test_rate_limiter_net_tx() {
        _test_rate_limiter_net(false);
    }

    fn _test_rate_limiter_block(bandwidth: bool, num_queues: u32) {
        let test_timeout = 10;
        let fio_ops = FioOps::RandRW;

        let bw_size = if bandwidth {
            10485760_u64 // bytes
        } else {
            100_u64 // I/O
        };
        let bw_refill_time = 100; // ms
        let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);
        let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();
        let blk_rate_limiter_test_img =
            String::from(test_img_dir.as_path().join("blk.img").to_str().unwrap());

        // Create the test block image
        assert!(exec_host_command_output(&format!(
            "dd if=/dev/zero of={blk_rate_limiter_test_img} bs=1M count=1024"
        ))
        .status
        .success());

        let test_blk_params = if bandwidth {
            format!(
                "path={blk_rate_limiter_test_img},num_queues={num_queues},bw_size={bw_size},bw_refill_time={bw_refill_time}"
            )
        } else {
            format!(
                "path={blk_rate_limiter_test_img},num_queues={num_queues},ops_size={bw_size},ops_refill_time={bw_refill_time}"
            )
        };
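
        // With the values above, the token bucket should cap the device at roughly
        // bw_size * 1000 / bw_refill_time per second, i.e. about 104_857_600 bytes/s
        // (100 MiB/s) in bandwidth mode or 1_000 IOPS in ops mode, which is what
        // `limit_rate` encodes for the fio measurement below.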
10605 10606 let mut child = GuestCommand::new(&guest) 10607 .args(["--cpus", &format!("boot={num_queues}")]) 10608 .args(["--memory", "size=4G"]) 10609 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 10610 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10611 .args([ 10612 "--disk", 10613 format!( 10614 "path={}", 10615 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 10616 ) 10617 .as_str(), 10618 format!( 10619 "path={}", 10620 guest.disk_config.disk(DiskType::CloudInit).unwrap() 10621 ) 10622 .as_str(), 10623 test_blk_params.as_str(), 10624 ]) 10625 .default_net() 10626 .args(["--api-socket", &api_socket]) 10627 .capture_output() 10628 .spawn() 10629 .unwrap(); 10630 10631 let r = std::panic::catch_unwind(|| { 10632 guest.wait_vm_boot(None).unwrap(); 10633 10634 let fio_command = format!( 10635 "sudo fio --filename=/dev/vdc --name=test --output-format=json \ 10636 --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \ 10637 --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}" 10638 ); 10639 let output = guest.ssh_command(&fio_command).unwrap(); 10640 10641 // Parse fio output 10642 let measured_rate = if bandwidth { 10643 parse_fio_output(&output, &fio_ops, num_queues).unwrap() 10644 } else { 10645 parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap() 10646 }; 10647 assert!(check_rate_limit(measured_rate, limit_rate, 0.1)); 10648 }); 10649 10650 let _ = child.kill(); 10651 let output = child.wait_with_output().unwrap(); 10652 handle_child_output(r, &output); 10653 } 10654 10655 fn _test_rate_limiter_group_block(bandwidth: bool, num_queues: u32, num_disks: u32) { 10656 let test_timeout = 10; 10657 let fio_ops = FioOps::RandRW; 10658 10659 let bw_size = if bandwidth { 10660 10485760_u64 // bytes 10661 } else { 10662 100_u64 // I/O 10663 }; 10664 let bw_refill_time = 100; // ms 10665 let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64; 10666 10667 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10668 let guest = Guest::new(Box::new(focal)); 10669 let api_socket = temp_api_path(&guest.tmp_dir); 10670 let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap(); 10671 10672 let rate_limit_group_arg = if bandwidth { 10673 format!("id=group0,bw_size={bw_size},bw_refill_time={bw_refill_time}") 10674 } else { 10675 format!("id=group0,ops_size={bw_size},ops_refill_time={bw_refill_time}") 10676 }; 10677 10678 let mut disk_args = vec![ 10679 "--disk".to_string(), 10680 format!( 10681 "path={}", 10682 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 10683 ), 10684 format!( 10685 "path={}", 10686 guest.disk_config.disk(DiskType::CloudInit).unwrap() 10687 ), 10688 ]; 10689 10690 for i in 0..num_disks { 10691 let test_img_path = String::from( 10692 test_img_dir 10693 .as_path() 10694 .join(format!("blk{}.img", i)) 10695 .to_str() 10696 .unwrap(), 10697 ); 10698 10699 assert!(exec_host_command_output(&format!( 10700 "dd if=/dev/zero of={test_img_path} bs=1M count=1024" 10701 )) 10702 .status 10703 .success()); 10704 10705 disk_args.push(format!( 10706 "path={test_img_path},num_queues={num_queues},rate_limit_group=group0" 10707 )); 10708 } 10709 10710 let mut child = GuestCommand::new(&guest) 10711 .args(["--cpus", &format!("boot={}", num_queues * num_disks)]) 10712 .args(["--memory", "size=4G"]) 10713 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 10714 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10715 .args(["--rate-limit-group", &rate_limit_group_arg]) 10716 .args(disk_args) 10717 
.default_net() 10718 .args(["--api-socket", &api_socket]) 10719 .capture_output() 10720 .spawn() 10721 .unwrap(); 10722 10723 let r = std::panic::catch_unwind(|| { 10724 guest.wait_vm_boot(None).unwrap(); 10725 10726 let mut fio_command = format!( 10727 "sudo fio --name=global --output-format=json \ 10728 --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \ 10729 --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}" 10730 ); 10731 10732 // Generate additional argument for each disk: 10733 // --name=job0 --filename=/dev/vdc \ 10734 // --name=job1 --filename=/dev/vdd \ 10735 // --name=job2 --filename=/dev/vde \ 10736 // ... 10737 for i in 0..num_disks { 10738 let c: char = 'c'; 10739 let arg = format!( 10740 " --name=job{i} --filename=/dev/vd{}", 10741 char::from_u32((c as u32) + i).unwrap() 10742 ); 10743 fio_command += &arg; 10744 } 10745 let output = guest.ssh_command(&fio_command).unwrap(); 10746 10747 // Parse fio output 10748 let measured_rate = if bandwidth { 10749 parse_fio_output(&output, &fio_ops, num_queues * num_disks).unwrap() 10750 } else { 10751 parse_fio_output_iops(&output, &fio_ops, num_queues * num_disks).unwrap() 10752 }; 10753 assert!(check_rate_limit(measured_rate, limit_rate, 0.2)); 10754 }); 10755 10756 let _ = child.kill(); 10757 let output = child.wait_with_output().unwrap(); 10758 handle_child_output(r, &output); 10759 } 10760 10761 #[test] 10762 fn test_rate_limiter_block_bandwidth() { 10763 _test_rate_limiter_block(true, 1); 10764 _test_rate_limiter_block(true, 2) 10765 } 10766 10767 #[test] 10768 fn test_rate_limiter_group_block_bandwidth() { 10769 _test_rate_limiter_group_block(true, 1, 1); 10770 _test_rate_limiter_group_block(true, 2, 1); 10771 _test_rate_limiter_group_block(true, 1, 2); 10772 _test_rate_limiter_group_block(true, 2, 2); 10773 } 10774 10775 #[test] 10776 fn test_rate_limiter_block_iops() { 10777 _test_rate_limiter_block(false, 1); 10778 _test_rate_limiter_block(false, 2); 10779 } 10780 10781 #[test] 10782 fn test_rate_limiter_group_block_iops() { 10783 _test_rate_limiter_group_block(false, 1, 1); 10784 _test_rate_limiter_group_block(false, 2, 1); 10785 _test_rate_limiter_group_block(false, 1, 2); 10786 _test_rate_limiter_group_block(false, 2, 2); 10787 } 10788 } 10789