// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::undocumented_unsafe_blocks)]
// When enabling the `mshv` feature, we skip quite some tests and
// hence have known dead-code. This annotation silences dead-code
// related warnings for our quality workflow to pass.
#![allow(dead_code)]

extern crate test_infra;

use net_util::MacAddr;
use std::collections::HashMap;
use std::fs;
use std::io;
use std::io::BufRead;
use std::io::Read;
use std::io::Seek;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::Receiver;
use std::sync::Mutex;
use std::thread;
use test_infra::*;
use vmm_sys_util::{tempdir::TempDir, tempfile::TempFile};
use wait_timeout::ChildExt;

// Constant taken from the VMM crate.
const MAX_NUM_PCI_SEGMENTS: u16 = 96;

#[cfg(target_arch = "x86_64")]
mod x86_64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
    pub const JAMMY_NVIDIA_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-nvidia.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20230119-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw";
    pub const OVMF_NAME: &str = "CLOUDHV.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
}

#[cfg(target_arch = "x86_64")]
use x86_64::*;

#[cfg(target_arch = "aarch64")]
mod aarch64 {
    pub const BIONIC_IMAGE_NAME: &str = "bionic-server-cloudimg-arm64.raw";
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
    pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw";
    pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
    pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true";
}

#[cfg(target_arch = "aarch64")]
use aarch64::*;

const DIRECT_KERNEL_BOOT_CMDLINE: &str =
    "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1";

const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server";

// This enum exists to make it more convenient to
// implement tests for both D-Bus and REST APIs.
enum TargetApi {
    // API socket
    HttpApi(String),
    // Well-known service name, object path
    DBusApi(String, String),
}

impl TargetApi {
    fn new_http_api(tmp_dir: &TempDir) -> Self {
        Self::HttpApi(temp_api_path(tmp_dir))
    }

    fn new_dbus_api(tmp_dir: &TempDir) -> Self {
        // `tmp_dir` is in the form of "/tmp/chXXXXXX"
        // and we take the `chXXXXXX` part as a unique identifier for the guest
        let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap();

        Self::DBusApi(
            format!("org.cloudhypervisor.{id}"),
            format!("/org/cloudhypervisor/{id}"),
        )
    }

    fn guest_args(&self) -> Vec<&str> {
        match self {
            TargetApi::HttpApi(api_socket) => {
                vec!["--api-socket", api_socket.as_str()]
            }
            TargetApi::DBusApi(service_name, object_path) => {
                vec![
                    "--dbus-service-name",
                    service_name.as_str(),
                    "--dbus-object-path",
                    object_path.as_str(),
                ]
            }
        }
    }

    fn remote_args(&self) -> Vec<&str> {
        // `guest_args` and `remote_args` are consistent with each other
        self.guest_args()
    }

    fn remote_command(&self, command: &str, arg: Option<&str>) -> bool {
        let mut cmd = Command::new(clh_command("ch-remote"));
        cmd.args(self.remote_args());
        cmd.arg(command);

        if let Some(arg) = arg {
            cmd.arg(arg);
        }

        let output = cmd.output().unwrap();
        if output.status.success() {
            true
        } else {
            eprintln!("Error running ch-remote command: {:?}", &cmd);
            let stderr = String::from_utf8_lossy(&output.stderr);
            eprintln!("stderr: {stderr}");
            false
        }
    }
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
fn _test_api_create_boot(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config)));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be shut down and then
// booted again
fn _test_api_shutdown(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shut down
        thread::sleep(std::time::Duration::new(20, 0));

        // Then shut it down
        assert!(target_api.remote_command("shutdown", None));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be deleted and then
// recreated and booted again.
fn _test_api_delete(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );
    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shut down
        thread::sleep(std::time::Duration::new(20, 0));

        // Then delete it
        assert!(target_api.remote_command("delete", None));

        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
// Then we pause the VM, check that it's no longer available.
// Finally we resume the VM and check that it's available.
fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config)));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // We now pause the VM
        assert!(target_api.remote_command("pause", None));

        // Check pausing again fails
        assert!(!target_api.remote_command("pause", None));

        thread::sleep(std::time::Duration::new(2, 0));

        // SSH into the VM should fail
        assert!(ssh_command_ip(
            "grep -c processor /proc/cpuinfo",
            &guest.network.guest_ip,
            2,
            5
        )
        .is_err());

        // Resume the VM
        assert!(target_api.remote_command("resume", None));

        // Check resuming again fails
        assert!(!target_api.remote_command("resume", None));

        thread::sleep(std::time::Duration::new(2, 0));

        // Now we should be able to SSH back in and get the right number of CPUs
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

fn _test_pty_interaction(pty_path: PathBuf) {
    let mut cf = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .open(pty_path)
        .unwrap();

    // Some dumb sleeps, but we don't want to write
    // before the console is up and we don't want
    // to try and write the next line before the
    // login process is ready.
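    // The asserted lengths below are simply the byte counts of the strings
    // being written: "cloud\n" is 6 bytes, "cloud123\n" is 9 and
    // "echo test_pty_console\n" is 22.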
    thread::sleep(std::time::Duration::new(5, 0));
    assert_eq!(cf.write(b"cloud\n").unwrap(), 6);
    thread::sleep(std::time::Duration::new(2, 0));
    assert_eq!(cf.write(b"cloud123\n").unwrap(), 9);
    thread::sleep(std::time::Duration::new(2, 0));
    assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22);
    thread::sleep(std::time::Duration::new(2, 0));

    // Read the pty and ensure we have a login shell.
    // Some fairly hacky workarounds to avoid looping
    // forever in case the channel is blocked getting output.
    let ptyc = pty_read(cf);
    let mut empty = 0;
    let mut prev = String::new();
    loop {
        thread::sleep(std::time::Duration::new(2, 0));
        match ptyc.try_recv() {
            Ok(line) => {
                empty = 0;
                prev = prev + &line;
                if prev.contains("test_pty_console") {
                    break;
                }
            }
            Err(mpsc::TryRecvError::Empty) => {
                empty += 1;
                assert!(empty <= 5, "No login on pty");
            }
            _ => {
                panic!("No login on pty")
            }
        }
    }
}

fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut virtiofsd_path = workload_path;
    virtiofsd_path.push("virtiofsd");
    let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap());

    let virtiofsd_socket_path =
        String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap());

    // Start the daemon
    let child = Command::new(virtiofsd_path.as_str())
        .args(["--shared-dir", shared_dir])
        .args(["--socket-path", virtiofsd_socket_path.as_str()])
        .args(["--cache", "never"])
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(10, 0));

    (child, virtiofsd_socket_path)
}

fn prepare_vubd(
    tmp_dir: &TempDir,
    blk_img: &str,
    num_queues: usize,
    rdonly: bool,
    direct: bool,
) -> (std::process::Child, String) {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut blk_file_path = workload_path;
    blk_file_path.push(blk_img);
    let blk_file_path = String::from(blk_file_path.to_str().unwrap());

    let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap());

    // Start the daemon
    let child = Command::new(clh_command("vhost_user_block"))
        .args([
            "--block-backend",
            format!(
                "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}"
            )
            .as_str(),
        ])
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(10, 0));

    (child, vubd_socket_path)
}

fn temp_vsock_path(tmp_dir: &TempDir) -> String {
    String::from(tmp_dir.as_path().join("vsock").to_str().unwrap())
}

fn temp_api_path(tmp_dir: &TempDir) -> String {
    String::from(
        tmp_dir
            .as_path()
            .join("cloud-hypervisor.sock")
            .to_str()
            .unwrap(),
    )
}

fn temp_event_monitor_path(tmp_dir: &TempDir) -> String {
    String::from(tmp_dir.as_path().join("event.json").to_str().unwrap())
}

// Creates the directory and returns the path.
fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String {
    let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap());
    std::fs::create_dir(&snapshot_dir).unwrap();
    snapshot_dir
}

fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String {
    let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap());
    vmcore_file
}

// Creates the path for direct kernel boot and returns the path.
// For x86_64, this function returns the vmlinux kernel path.
// For AArch64, this function returns the PE kernel path.
fn direct_kernel_boot_path() -> PathBuf {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut kernel_path = workload_path;
    #[cfg(target_arch = "x86_64")]
    kernel_path.push("vmlinux");
    #[cfg(target_arch = "aarch64")]
    kernel_path.push("Image");

    kernel_path
}

fn edk2_path() -> PathBuf {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");
    let mut edk2_path = workload_path;
    edk2_path.push(OVMF_NAME);

    edk2_path
}

fn cloud_hypervisor_release_path() -> String {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut ch_release_path = workload_path;
    #[cfg(target_arch = "x86_64")]
    ch_release_path.push("cloud-hypervisor-static");
    #[cfg(target_arch = "aarch64")]
    ch_release_path.push("cloud-hypervisor-static-aarch64");

    ch_release_path.into_os_string().into_string().unwrap()
}

fn prepare_vhost_user_net_daemon(
    tmp_dir: &TempDir,
    ip: &str,
    tap: Option<&str>,
    mtu: Option<u16>,
    num_queues: usize,
    client_mode: bool,
) -> (std::process::Command, String) {
    let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap());

    // Start the daemon
    let mut net_params = format!(
        "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}"
    );

    if let Some(tap) = tap {
        net_params.push_str(format!(",tap={tap}").as_str());
    }

    if let Some(mtu) = mtu {
        net_params.push_str(format!(",mtu={mtu}").as_str());
    }

    let mut command = Command::new(clh_command("vhost_user_net"));
    command.args(["--net-backend", net_params.as_str()]);

    (command, vunet_socket_path)
}

fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) {
    let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap());
    let swtpm_socket_path = String::from(
        tmp_dir
            .as_path()
            .join("swtpm")
            .join("swtpm.sock")
            .to_str()
            .unwrap(),
    );
    std::fs::create_dir(&swtpm_tpm_dir).unwrap();

    let mut swtpm_command = Command::new("swtpm");
    let swtpm_args = [
        "socket",
        "--tpmstate",
        &format!("dir={swtpm_tpm_dir}"),
        "--ctrl",
        &format!("type=unixio,path={swtpm_socket_path}"),
        "--flags",
        "startup-clear",
        "--tpm2",
    ];
    swtpm_command.args(swtpm_args);

    (swtpm_command, swtpm_socket_path)
}

fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args(["--api-socket", api_socket, command]);

    if let Some(arg) = arg {
        cmd.arg(arg);
    }
    let output = cmd.output().unwrap();
    if output.status.success() {
        true
    } else {
        eprintln!("Error running ch-remote command: {:?}", &cmd);
        let stderr = String::from_utf8_lossy(&output.stderr);
        eprintln!("stderr: {stderr}");
        false
    }
}

fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args(["--api-socket", api_socket, command]);

    if let Some(arg) = arg {
        cmd.arg(arg);
    }

    let output = cmd.output().expect("Failed to launch ch-remote");

    (output.status.success(), output.stdout)
}

fn resize_command(
    api_socket: &str,
    desired_vcpus: Option<u8>,
    desired_ram: Option<usize>,
    desired_balloon: Option<usize>,
    event_file: Option<&str>,
) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args(["--api-socket", api_socket, "resize"]);

    if let Some(desired_vcpus) = desired_vcpus {
        cmd.args(["--cpus", &format!("{desired_vcpus}")]);
    }

    if let Some(desired_ram) = desired_ram {
        cmd.args(["--memory", &format!("{desired_ram}")]);
    }

    if let Some(desired_balloon) = desired_balloon {
        cmd.args(["--balloon", &format!("{desired_balloon}")]);
    }

    let ret = cmd.status().expect("Failed to launch ch-remote").success();

    if let Some(event_path) = event_file {
        let latest_events = [
            &MetaEvent {
                event: "resizing".to_string(),
                device_id: None,
            },
            &MetaEvent {
                event: "resized".to_string(),
                device_id: None,
            },
        ];
        assert!(check_latest_events_exact(&latest_events, event_path));
    }

    ret
}

fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool {
    let mut cmd = Command::new(clh_command("ch-remote"));
    cmd.args([
        "--api-socket",
        api_socket,
        "resize-zone",
        "--id",
        id,
        "--size",
        desired_size,
    ]);

    cmd.status().expect("Failed to launch ch-remote").success()
}

// Setup OVS-DPDK bridge and ports
fn setup_ovs_dpdk() {
    // Setup OVS-DPDK
    assert!(exec_host_command_status("service openvswitch-switch start").success());
    assert!(exec_host_command_status("ovs-vsctl init").success());
    assert!(
        exec_host_command_status("ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true")
            .success()
    );
    assert!(exec_host_command_status("service openvswitch-switch restart").success());

    // Create OVS-DPDK bridge and ports
    assert!(exec_host_command_status(
        "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev",
    )
    .success());
    assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success());
    assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success());
    assert!(exec_host_command_status("ip link set up dev ovsbr0").success());
    assert!(exec_host_command_status("service openvswitch-switch restart").success());
}

fn cleanup_ovs_dpdk() {
    assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success());
    exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2");
}

// Setup two guests and ensure they are connected through ovs-dpdk
fn setup_ovs_dpdk_guests(
    guest1: &Guest,
    guest2: &Guest,
    api_socket: &str,
    release_binary: bool,
) -> (Child, Child) {
    setup_ovs_dpdk();

    let clh_path = if !release_binary {
        clh_command("cloud-hypervisor")
    } else {
        cloud_hypervisor_release_path()
    };

    let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path)
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=0,shared=on"])
        .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest1.default_net_string().as_str(), "--net", "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"])
        .capture_output()
        .spawn()
        .unwrap();

    #[cfg(target_arch = "x86_64")]
    let guest_net_iface = "ens5";
    #[cfg(target_arch = "aarch64")]
    let guest_net_iface = "enp0s5";

    let r = std::panic::catch_unwind(|| {
        guest1.wait_vm_boot(None).unwrap();

        guest1
            .ssh_command(&format!(
                "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}"
            ))
            .unwrap();
        guest1
            .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
            .unwrap();

        let guest_ip = guest1.network.guest_ip.clone();
        thread::spawn(move || {
            ssh_command_ip(
                "nc -l 12345",
                &guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap();
        });
    });
    if r.is_err() {
        cleanup_ovs_dpdk();

        let _ = child1.kill();
        let output = child1.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark this block never returns
    }

    let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path)
        .args(["--api-socket", api_socket])
        .args(["--cpus", "boot=2"])
        .args(["--memory", "size=0,shared=on"])
        .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", guest2.default_net_string().as_str(), "--net", "vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest2.wait_vm_boot(None).unwrap();

        guest2
            .ssh_command(&format!(
                "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}"
            ))
            .unwrap();
        guest2
            .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
            .unwrap();

        // Check the connection works properly between the two VMs
        guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap();
    });
    if r.is_err() {
        cleanup_ovs_dpdk();

        let _ = child1.kill();
        let _ = child2.kill();
        let output = child2.wait_with_output().unwrap();
        handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark this block never returns
    }

    (child1, child2)
}

enum FwType {
    Ovmf,
    RustHypervisorFirmware,
}

fn fw_path(_fw_type: FwType) -> String {
    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut fw_path = workload_path;
    #[cfg(target_arch = "aarch64")]
    fw_path.push("CLOUDHV_EFI.fd");
    #[cfg(target_arch = "x86_64")]
    {
        match _fw_type {
            FwType::Ovmf => fw_path.push(OVMF_NAME),
            FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"),
        }
    }

    fw_path.to_str().unwrap().to_string()
}

struct MetaEvent {
    event: String,
    device_id: Option<String>,
}

impl MetaEvent {
    pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool {
        let mut matched = false;
        if v["event"].as_str().unwrap() == self.event {
            if let Some(device_id) = &self.device_id {
                if v["properties"]["id"].as_str().unwrap() == device_id {
                    matched = true
                }
            } else {
                matched = true;
            }
        }
        matched
    }
}

// Parse the event_monitor file based on the format where each event
// is followed by a double newline
fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> {
    let content = fs::read(event_file).unwrap();
    let mut ret = Vec::new();
    for entry in String::from_utf8_lossy(&content)
        .trim()
        .split("\n\n")
        .collect::<Vec<&str>>()
    {
        ret.push(serde_json::from_str(entry).unwrap());
    }

    ret
}

// Return true if all events from the input 'expected_events' are matched sequentially
// with events from the 'event_file'
fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    let len = expected_events.len();
    let mut idx = 0;
    for e in &json_events {
        if idx == len {
            break;
        }
        if expected_events[idx].match_with_json_event(e) {
            idx += 1;
        }
    }

    idx == len
}

// Return true if all events from the input 'expected_events' are matched exactly
// with events from the 'event_file'
fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    assert!(expected_events.len() <= json_events.len());
    let json_events = &json_events[..expected_events.len()];

    for (idx, e) in json_events.iter().enumerate() {
        if !expected_events[idx].match_with_json_event(e) {
            return false;
        }
    }

    true
}

// Return true if events from the input 'latest_events' are matched exactly
// with the most recent events from the 'event_file'
fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool {
    let json_events = parse_event_file(event_file);
    assert!(latest_events.len() <= json_events.len());
    let json_events = &json_events[(json_events.len() - latest_events.len())..];

    for (idx, e) in json_events.iter().enumerate() {
        if !latest_events[idx].match_with_json_event(e) {
            return false;
        }
    }

    true
}

fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let total_vcpus = threads_per_core * cores_per_package * packages;
    let direct_kernel_boot_path = direct_kernel_boot_path();
    let mut kernel_path = direct_kernel_boot_path.to_str().unwrap();
    let fw_path = fw_path(FwType::RustHypervisorFirmware);
    if use_fw {
        kernel_path = fw_path.as_str();
    }

    let mut child = GuestCommand::new(&guest)
        .args([
            "--cpus",
            &format!(
                "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}"
            ),
        ])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();
        assert_eq!(
            guest.get_cpu_count().unwrap_or_default(),
            u32::from(total_vcpus)
        );
        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            threads_per_core
        );

        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            cores_per_package
        );

        assert_eq!(
            guest
                .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
                .unwrap()
                .trim()
                .parse::<u8>()
                .unwrap_or(0),
            packages
        );
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[allow(unused_variables)]
fn _test_guest_numa_nodes(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);
    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=6,max=12"])
        .args(["--memory", "size=0,hotplug_method=virtio-mem"])
        .args([
            "--memory-zone",
            "id=mem0,size=1G,hotplug_size=3G",
            "--memory-zone",
            "id=mem1,size=2G,hotplug_size=3G",
            "--memory-zone",
            "id=mem2,size=3G,hotplug_size=3G",
        ])
        .args([
            "--numa",
            "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
            "--numa",
            "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
            "--numa",
            "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
        ])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--api-socket", &api_socket])
        .capture_output()
        .default_disks()
        .default_net()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        guest.check_numa_common(
            Some(&[960_000, 1_920_000, 2_880_000]),
            Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
            Some(&["10 15 20", "20 10 25", "25 30 10"]),
        );

        // AArch64 currently does not support hotplug, and therefore we only
        // test hotplug-related functions on x86_64 here.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Resize every memory zone and check each associated NUMA node
            // has been assigned the right amount of memory.
            resize_zone_command(&api_socket, "mem0", "4G");
            resize_zone_command(&api_socket, "mem1", "4G");
            resize_zone_command(&api_socket, "mem2", "4G");
            // Resize to the maximum amount of CPUs and check each NUMA
            // node has been assigned the right CPUs set.
            resize_command(&api_socket, Some(12), None, None, None);
            thread::sleep(std::time::Duration::new(5, 0));

            guest.check_numa_common(
                Some(&[3_840_000, 3_840_000, 3_840_000]),
                Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]),
                None,
            );
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[allow(unused_variables)]
fn _test_power_button(acpi: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let mut cmd = GuestCommand::new(&guest);
    let api_socket = temp_api_path(&guest.tmp_dir);

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    cmd.args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .capture_output()
        .default_disks()
        .default_net()
        .args(["--api-socket", &api_socket]);

    let child = cmd.spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();
        assert!(remote_command(&api_socket, "power-button", None));
    });

    let output = child.wait_with_output().unwrap();
    assert!(output.status.success());
    handle_child_output(r, &output);
}

type PrepareNetDaemon = dyn Fn(
    &TempDir,
    &str,
    Option<&str>,
    Option<u16>,
    usize,
    bool,
) -> (std::process::Command, String);

fn test_vhost_user_net(
    tap: Option<&str>,
    num_queues: usize,
    prepare_daemon: &PrepareNetDaemon,
    generate_host_mac: bool,
    client_mode_daemon: bool,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();

    let host_mac = if generate_host_mac {
        Some(MacAddr::local_random())
    } else {
        None
    };

    let mtu = Some(3000);

    let (mut daemon_command, vunet_socket_path) = prepare_daemon(
        &guest.tmp_dir,
        &guest.network.host_ip,
        tap,
        mtu,
        num_queues,
        client_mode_daemon,
    );

    let net_params = format!(
        "vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000",
        guest.network.guest_mac,
        vunet_socket_path,
        num_queues,
        if let Some(host_mac) = host_mac {
            format!(",host_mac={host_mac}")
        } else {
            "".to_owned()
        },
        if client_mode_daemon {
            "server"
        } else {
            "client"
        },
    );

    let mut ch_command = GuestCommand::new(&guest);
    ch_command
        .args(["--cpus", format!("boot={}", num_queues / 2).as_str()])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args(["--net", net_params.as_str()])
        .args(["--api-socket", &api_socket])
        .capture_output();

    let mut daemon_child: std::process::Child;
    let mut child: std::process::Child;

    if client_mode_daemon {
        child = ch_command.spawn().unwrap();
        // Make sure the VMM is waiting for the backend to connect
        thread::sleep(std::time::Duration::new(10, 0));
        daemon_child = daemon_command.spawn().unwrap();
    } else {
        daemon_child = daemon_command.spawn().unwrap();
        // Make sure the backend is waiting for the VMM to connect
        thread::sleep(std::time::Duration::new(10, 0));
        child = ch_command.spawn().unwrap();
    }

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if let Some(tap_name) = tap {
            let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}"));
            assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
        }

        if let Some(host_mac) = tap {
            let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}"));
            assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
        }

        #[cfg(target_arch = "aarch64")]
        let iface = "enp0s4";
        #[cfg(target_arch = "x86_64")]
        let iface = "ens4";

        assert_eq!(
            guest
                .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
                .unwrap()
                .trim(),
            "3000"
        );

        // 1 network interface + default localhost ==> 2 interfaces
        // It's important to note that this test is fully exercising the
        // vhost-user-net implementation and the associated backend since
        // it does not define any --net network interface. That means all
        // the ssh communication in that test happens through the network
        // interface backed by vhost-user-net.
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // The following pci devices will appear on guest with PCI-MSI
        // interrupt vectors assigned.
        // 1 virtio-console with 3 vectors: config, Rx, Tx
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-rng with 2 vectors: config, Request
        // Since virtio-net has 2 queue pairs, its vectors are as follows:
        // 1 virtio-net with 5 vectors: config, Rx (2), Tx (2)
        // Based on the above, the total vectors should be 14.
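        // (That is 3 + 2 + 2 + 2 = 9 vectors for the devices above, plus
        // 1 config vector and `num_queues` queue vectors for virtio-net,
        // i.e. 10 + num_queues, which equals 14 when num_queues is 4.)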
        #[cfg(target_arch = "x86_64")]
        let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
        #[cfg(target_arch = "aarch64")]
        let grep_cmd = "grep -c ITS-MSI /proc/interrupts";
        assert_eq!(
            guest
                .ssh_command(grep_cmd)
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            10 + (num_queues as u32)
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            // Here by simply checking the size (through ssh), we validate
            // the connection is still working, which means vhost-user-net
            // keeps working after the resize.
            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
        }
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    thread::sleep(std::time::Duration::new(5, 0));
    let _ = daemon_child.kill();
    let _ = daemon_child.wait();

    handle_child_output(r, &output);
}

type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String);

fn test_vhost_user_blk(
    num_queues: usize,
    readonly: bool,
    direct: bool,
    prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let kernel_path = direct_kernel_boot_path();

    let (blk_params, daemon_child) = {
        let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
        // Start the daemon
        let (daemon_child, vubd_socket_path) =
            prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct);

        (
            format!(
                "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128",
            ),
            Some(daemon_child),
        )
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", format!("boot={num_queues}").as_str()])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
            )
            .as_str(),
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
            "--disk",
            blk_params.as_str(),
        ])
        .default_net()
        .args(["--api-socket", &api_socket])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check both if /dev/vdc exists and if the block size is 16M.
        assert_eq!(
            guest
                .ssh_command("lsblk | grep vdc | grep -c 16M")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            1
        );

        // Check if this block is RO or RW.
        assert_eq!(
            guest
                .ssh_command("lsblk | grep vdc | awk '{print $5}'")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            readonly as u32
        );

        // Check if the number of queues in /sys/block/vdc/mq matches the
        // expected num_queues.
        assert_eq!(
            guest
                .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            num_queues as u32
        );

        // Mount the device
        let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" };
        guest.ssh_command("mkdir mount_image").unwrap();
        guest
            .ssh_command(
                format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(),
            )
            .unwrap();

        // Check the content of the block device. The file "foo" should
        // contain "bar".
        assert_eq!(
            guest.ssh_command("cat mount_image/foo").unwrap().trim(),
            "bar"
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

            // Check again the content of the block device after the resize
            // has been performed.
            assert_eq!(
                guest.ssh_command("cat mount_image/foo").unwrap().trim(),
                "bar"
            );
        }

        // Unmount the device
        guest.ssh_command("sudo umount /dev/vdc").unwrap();
        guest.ssh_command("rm -r mount_image").unwrap();
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    if let Some(mut daemon_child) = daemon_child {
        thread::sleep(std::time::Duration::new(5, 0));
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn test_boot_from_vhost_user_blk(
    num_queues: usize,
    readonly: bool,
    direct: bool,
    prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    let kernel_path = direct_kernel_boot_path();

    let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap();

    let (blk_boot_params, daemon_child) = {
        let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
        // Start the daemon
        let (daemon_child, vubd_socket_path) = prepare_daemon(
            &guest.tmp_dir,
            disk_path.as_str(),
            num_queues,
            readonly,
            direct,
        );

        (
            format!(
                "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128",
            ),
            Some(daemon_child),
        )
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", format!("boot={num_queues}").as_str()])
        .args(["--memory", "size=512M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            blk_boot_params.as_str(),
            "--disk",
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
        ])
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Just check the VM booted correctly.
        assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });
    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    if let Some(mut daemon_child) = daemon_child {
        thread::sleep(std::time::Duration::new(5, 0));
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn _test_virtio_fs(
    prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String),
    hotplug: bool,
    pci_segment: Option<u16>,
) {
    #[cfg(target_arch = "aarch64")]
    let focal_image = if hotplug {
        FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string()
    } else {
        FOCAL_IMAGE_NAME.to_string()
    };
    #[cfg(target_arch = "x86_64")]
    let focal_image = FOCAL_IMAGE_NAME.to_string();
    let focal = UbuntuDiskConfig::new(focal_image);
    let guest = Guest::new(Box::new(focal));
    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut workload_path = dirs::home_dir().unwrap();
    workload_path.push("workloads");

    let mut shared_dir = workload_path;
    shared_dir.push("shared_dir");

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if hotplug {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let (mut daemon_child, virtiofsd_socket_path) =
        prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());

    let mut guest_command = GuestCommand::new(&guest);
    guest_command
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .args(["--api-socket", &api_socket]);
    if pci_segment.is_some() {
        guest_command.args([
            "--platform",
            &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
        ]);
    }

    let fs_params = format!(
        "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
        virtiofsd_socket_path,
        if let Some(pci_segment) = pci_segment {
            format!(",pci_segment={pci_segment}")
        } else {
            "".to_owned()
        }
    );

    if !hotplug {
        guest_command.args(["--fs", fs_params.as_str()]);
    }

    let mut child = guest_command.capture_output().spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if hotplug {
            // Add fs to the VM
            let (cmd_success, cmd_output) =
                remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
            assert!(cmd_success);

            if let Some(pci_segment) = pci_segment {
                assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                    "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
                )));
            } else {
                assert!(String::from_utf8_lossy(&cmd_output)
                    .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
            }

            thread::sleep(std::time::Duration::new(10, 0));
        }

        // Mount shared directory through virtio_fs filesystem
        guest
            .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
            .unwrap();

        // Check file1 exists and its content is "foo"
        assert_eq!(
            guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
            "foo"
        );

        // Check file2 does not exist
        guest
            .ssh_command("[ ! -f 'mount_dir/file2' ] || true")
            .unwrap();

        // Check file3 exists and its content is "bar"
        assert_eq!(
            guest.ssh_command("cat mount_dir/file3").unwrap().trim(),
            "bar"
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(30, 0));
            assert!(guest.get_total_memory().unwrap_or_default() > 960_000);

            // After the resize, check again that file1 exists and its
            // content is "foo".
            assert_eq!(
                guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
                "foo"
            );
        }

        if hotplug {
            // Remove from VM
            guest.ssh_command("sudo umount mount_dir").unwrap();
            assert!(remote_command(&api_socket, "remove-device", Some("myfs0")));
        }
    });

    let (r, hotplug_daemon_child) = if r.is_ok() && hotplug {
        thread::sleep(std::time::Duration::new(10, 0));
        let (daemon_child, virtiofsd_socket_path) =
            prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());

        let r = std::panic::catch_unwind(|| {
            thread::sleep(std::time::Duration::new(10, 0));
            let fs_params = format!(
                "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
                virtiofsd_socket_path,
                if let Some(pci_segment) = pci_segment {
                    format!(",pci_segment={pci_segment}")
                } else {
                    "".to_owned()
                }
            );

            // Add back and check it works
            let (cmd_success, cmd_output) =
                remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
            assert!(cmd_success);
            if let Some(pci_segment) = pci_segment {
                assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
                    "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
                )));
            } else {
                assert!(String::from_utf8_lossy(&cmd_output)
                    .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
            }

            thread::sleep(std::time::Duration::new(10, 0));
            // Mount shared directory through virtio_fs filesystem
            guest
                .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
                .unwrap();

            // Check file1 exists and its content is "foo"
            assert_eq!(
                guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
                "foo"
            );
        });

        (r, Some(daemon_child))
    } else {
        (r, None)
    };

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    let _ = daemon_child.kill();
    let _ = daemon_child.wait();

    if let Some(mut daemon_child) = hotplug_daemon_child {
        let _ = daemon_child.kill();
        let _ = daemon_child.wait();
    }

    handle_child_output(r, &output);
}

fn test_virtio_pmem(discard_writes: bool, specify_size: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    let kernel_path = direct_kernel_boot_path();

    let pmem_temp_file = TempFile::new().unwrap();
    pmem_temp_file.as_file().set_len(128 << 20).unwrap();

    std::process::Command::new("mkfs.ext4")
        .arg(pmem_temp_file.as_path())
        .output()
        .expect("Expect creating disk image to succeed");

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .default_net()
        .args([
            "--pmem",
            format!(
                "file={}{}{}",
                pmem_temp_file.as_path().to_str().unwrap(),
                if specify_size { ",size=128M" } else { "" },
                if discard_writes {
                    ",discard_writes=on"
                } else {
                    ""
                }
            )
            .as_str(),
        ])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check for the presence of /dev/pmem0
        assert_eq!(
            guest.ssh_command("ls /dev/pmem0").unwrap().trim(),
            "/dev/pmem0"
        );

        // Check changes persist after reboot
        assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), "");
        assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n");
        guest
            .ssh_command("echo test123 | sudo tee /mnt/test")
            .unwrap();
        assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), "");
        assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "");

        guest.reboot_linux(0, None);
        assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), "");
        assert_eq!(
            guest
                .ssh_command("sudo cat /mnt/test || true")
                .unwrap()
                .trim(),
            if discard_writes { "" } else { "test123" }
        );
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

fn get_fd_count(pid: u32) -> usize {
    fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count()
}

fn _test_virtio_vsock(hotplug: bool) {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if hotplug {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let socket = temp_vsock_path(&guest.tmp_dir);
    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut cmd = GuestCommand::new(&guest);
    cmd.args(["--api-socket", &api_socket]);
    cmd.args(["--cpus", "boot=1"]);
    cmd.args(["--memory", "size=512M"]);
    cmd.args(["--kernel", kernel_path.to_str().unwrap()]);
    cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]);
    cmd.default_disks();
    cmd.default_net();

    if !hotplug {
        cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]);
    }

    let mut child = cmd.capture_output().spawn().unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if hotplug {
            let (cmd_success, cmd_output) = remote_command_w_output(
                &api_socket,
                "add-vsock",
                Some(format!("cid=3,socket={socket},id=test0").as_str()),
            );
            assert!(cmd_success);
            assert!(String::from_utf8_lossy(&cmd_output)
                .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}"));
            thread::sleep(std::time::Duration::new(10, 0));
            // Check adding a second one fails
            assert!(!remote_command(
                &api_socket,
                "add-vsock",
                Some("cid=1234,socket=/tmp/fail")
            ));
        }

        // Validate vsock works as expected.
        guest.check_vsock(socket.as_str());
        guest.reboot_linux(0, None);
        // Validate vsock still works after a reboot.
1865 guest.check_vsock(socket.as_str()); 1866 1867 if hotplug { 1868 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 1869 } 1870 }); 1871 1872 let _ = child.kill(); 1873 let output = child.wait_with_output().unwrap(); 1874 1875 handle_child_output(r, &output); 1876 } 1877 1878 fn get_ksm_pages_shared() -> u32 { 1879 fs::read_to_string("/sys/kernel/mm/ksm/pages_shared") 1880 .unwrap() 1881 .trim() 1882 .parse::<u32>() 1883 .unwrap() 1884 } 1885 1886 fn test_memory_mergeable(mergeable: bool) { 1887 let memory_param = if mergeable { 1888 "mergeable=on" 1889 } else { 1890 "mergeable=off" 1891 }; 1892 1893 // We are assuming the rest of the system in our CI is not using mergeable memory 1894 let ksm_ps_init = get_ksm_pages_shared(); 1895 assert!(ksm_ps_init == 0); 1896 1897 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1898 let guest1 = Guest::new(Box::new(focal1)); 1899 let mut child1 = GuestCommand::new(&guest1) 1900 .args(["--cpus", "boot=1"]) 1901 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1902 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1903 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1904 .default_disks() 1905 .args(["--net", guest1.default_net_string().as_str()]) 1906 .args(["--serial", "tty", "--console", "off"]) 1907 .capture_output() 1908 .spawn() 1909 .unwrap(); 1910 1911 let r = std::panic::catch_unwind(|| { 1912 guest1.wait_vm_boot(None).unwrap(); 1913 }); 1914 if r.is_err() { 1915 let _ = child1.kill(); 1916 let output = child1.wait_with_output().unwrap(); 1917 handle_child_output(r, &output); 1918 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 1919 } 1920 1921 let ksm_ps_guest1 = get_ksm_pages_shared(); 1922 1923 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1924 let guest2 = Guest::new(Box::new(focal2)); 1925 let mut child2 = GuestCommand::new(&guest2) 1926 .args(["--cpus", "boot=1"]) 1927 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1928 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1929 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1930 .default_disks() 1931 .args(["--net", guest2.default_net_string().as_str()]) 1932 .args(["--serial", "tty", "--console", "off"]) 1933 .capture_output() 1934 .spawn() 1935 .unwrap(); 1936 1937 let r = std::panic::catch_unwind(|| { 1938 guest2.wait_vm_boot(None).unwrap(); 1939 let ksm_ps_guest2 = get_ksm_pages_shared(); 1940 1941 if mergeable { 1942 println!( 1943 "ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'" 1944 ); 1945 // We are expecting the number of shared pages to increase as the number of VM increases 1946 assert!(ksm_ps_guest1 < ksm_ps_guest2); 1947 } else { 1948 assert!(ksm_ps_guest1 == 0); 1949 assert!(ksm_ps_guest2 == 0); 1950 } 1951 }); 1952 1953 let _ = child1.kill(); 1954 let _ = child2.kill(); 1955 1956 let output = child1.wait_with_output().unwrap(); 1957 child2.wait().unwrap(); 1958 1959 handle_child_output(r, &output); 1960 } 1961 1962 fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> { 1963 let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap(); 1964 let reader = io::BufReader::new(smaps); 1965 1966 let mut skip_map: bool = false; 1967 let mut region_name: String = "".to_string(); 1968 let mut region_maps = HashMap::new(); 1969 for line in reader.lines() { 1970 let l = line.unwrap(); 1971 1972 if l.contains('-') { 1973 let 
values: Vec<&str> = l.split_whitespace().collect(); 1974 region_name = values.last().unwrap().trim().to_string(); 1975 if region_name == "0" { 1976 region_name = "anonymous".to_string() 1977 } 1978 } 1979 1980 // Each section begins with something that looks like: 1981 // Size: 2184 kB 1982 if l.starts_with("Size:") { 1983 let values: Vec<&str> = l.split_whitespace().collect(); 1984 let map_size = values[1].parse::<u32>().unwrap(); 1985 // We skip the assigned guest RAM map, its RSS is only 1986 // dependent on the guest actual memory usage. 1987 // Everything else can be added to the VMM overhead. 1988 skip_map = map_size >= guest_memory_size; 1989 continue; 1990 } 1991 1992 // If this is a map we're taking into account, then we only 1993 // count the RSS. The sum of all counted RSS is the VMM overhead. 1994 if !skip_map && l.starts_with("Rss:") { 1995 let values: Vec<&str> = l.split_whitespace().collect(); 1996 let value = values[1].trim().parse::<u32>().unwrap(); 1997 *region_maps.entry(region_name.clone()).or_insert(0) += value; 1998 } 1999 } 2000 2001 region_maps 2002 } 2003 2004 fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 { 2005 let mut total = 0; 2006 2007 for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) { 2008 eprintln!("{region_name}: {value}"); 2009 total += value; 2010 } 2011 2012 total 2013 } 2014 2015 fn process_rss_kib(pid: u32) -> usize { 2016 let command = format!("ps -q {pid} -o rss="); 2017 let rss = exec_host_command_output(&command); 2018 String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap() 2019 } 2020 2021 // 10MB is our maximum accepted overhead. 2022 const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024; 2023 2024 #[derive(PartialEq, Eq, PartialOrd)] 2025 struct Counters { 2026 rx_bytes: u64, 2027 rx_frames: u64, 2028 tx_bytes: u64, 2029 tx_frames: u64, 2030 read_bytes: u64, 2031 write_bytes: u64, 2032 read_ops: u64, 2033 write_ops: u64, 2034 } 2035 2036 fn get_counters(api_socket: &str) -> Counters { 2037 // Get counters 2038 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None); 2039 assert!(cmd_success); 2040 2041 let counters: HashMap<&str, HashMap<&str, u64>> = 2042 serde_json::from_slice(&cmd_output).unwrap_or_default(); 2043 2044 let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap(); 2045 let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap(); 2046 let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap(); 2047 let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap(); 2048 2049 let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap(); 2050 let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap(); 2051 let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap(); 2052 let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap(); 2053 2054 Counters { 2055 rx_bytes, 2056 rx_frames, 2057 tx_bytes, 2058 tx_frames, 2059 read_bytes, 2060 write_bytes, 2061 read_ops, 2062 write_ops, 2063 } 2064 } 2065 2066 fn pty_read(mut pty: std::fs::File) -> Receiver<String> { 2067 let (tx, rx) = mpsc::channel::<String>(); 2068 thread::spawn(move || loop { 2069 thread::sleep(std::time::Duration::new(1, 0)); 2070 let mut buf = [0; 512]; 2071 match pty.read(&mut buf) { 2072 Ok(_) => { 2073 let output = std::str::from_utf8(&buf).unwrap().to_string(); 2074 match tx.send(output) { 2075 Ok(_) => (), 2076 Err(_) => break, 2077 } 2078 } 2079 Err(_) => break, 2080 } 2081 }); 2082 rx 2083 
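// A few notes on the reader loop above: it polls the pty once per second into a
// fixed 512-byte buffer, and `std::str::from_utf8(&buf)` converts the whole
// buffer, so strings shorter than 512 bytes carry trailing NUL bytes; consumers
// therefore match with `contains()` rather than equality. The loop only exits
// when the pty read fails or when the receiving end of the channel has been
// dropped (the `tx.send()` error case); an EOF read (`Ok(0)`) just produces an
// all-NUL string and keeps polling.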
} 2084 2085 fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf { 2086 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2087 assert!(cmd_success); 2088 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2089 assert_eq!("Pty", info["config"][pty_type]["mode"]); 2090 PathBuf::from( 2091 info["config"][pty_type]["file"] 2092 .as_str() 2093 .expect("Missing pty path"), 2094 ) 2095 } 2096 2097 // VFIO test network setup. 2098 // We reserve a different IP class for it: 172.18.0.0/24. 2099 #[cfg(target_arch = "x86_64")] 2100 fn setup_vfio_network_interfaces() { 2101 // 'vfio-br0' 2102 assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success()); 2103 assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success()); 2104 assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success()); 2105 // 'vfio-tap0' 2106 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success()); 2107 assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success()); 2108 assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success()); 2109 // 'vfio-tap1' 2110 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success()); 2111 assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success()); 2112 assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success()); 2113 // 'vfio-tap2' 2114 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success()); 2115 assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success()); 2116 assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success()); 2117 // 'vfio-tap3' 2118 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success()); 2119 assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success()); 2120 assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success()); 2121 } 2122 2123 // Tear VFIO test network down 2124 #[cfg(target_arch = "x86_64")] 2125 fn cleanup_vfio_network_interfaces() { 2126 assert!(exec_host_command_status("sudo ip link del vfio-br0").success()); 2127 assert!(exec_host_command_status("sudo ip link del vfio-tap0").success()); 2128 assert!(exec_host_command_status("sudo ip link del vfio-tap1").success()); 2129 assert!(exec_host_command_status("sudo ip link del vfio-tap2").success()); 2130 assert!(exec_host_command_status("sudo ip link del vfio-tap3").success()); 2131 } 2132 2133 fn balloon_size(api_socket: &str) -> u64 { 2134 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2135 assert!(cmd_success); 2136 2137 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2138 let total_mem = &info["config"]["memory"]["size"] 2139 .to_string() 2140 .parse::<u64>() 2141 .unwrap(); 2142 let actual_mem = &info["memory_actual_size"] 2143 .to_string() 2144 .parse::<u64>() 2145 .unwrap(); 2146 total_mem - actual_mem 2147 } 2148 2149 fn vm_state(api_socket: &str) -> String { 2150 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2151 assert!(cmd_success); 2152 2153 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2154 let state = &info["state"].as_str().unwrap(); 2155 2156 state.to_string() 2157 } 2158 2159 // This test validates that it can find the 
virtio-iommu device at first.
// It also verifies that both disks and the network card are attached to
// the virtual IOMMU by looking at the /sys/kernel/iommu_groups directory.
// The last interesting part of this test is that it exercises the network
// interface attached to the virtual IOMMU since this is the one used to
// send all commands through SSH.
fn _test_virtio_iommu(acpi: bool) {
    // Virtio-iommu support is available in recent kernels (v5.14 and later),
    // but the kernel in the Focal image is older.
    // So if ACPI is enabled on AArch64, we use a modified Focal image in which
    // the kernel binary has been updated.
    #[cfg(target_arch = "aarch64")]
    let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string();
    #[cfg(target_arch = "x86_64")]
    let focal_image = FOCAL_IMAGE_NAME.to_string();
    let focal = UbuntuDiskConfig::new(focal_image);
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = if acpi {
        edk2_path()
    } else {
        direct_kernel_boot_path()
    };

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args([
            "--disk",
            format!(
                "path={},iommu=on",
                guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
            )
            .as_str(),
            "--disk",
            format!(
                "path={},iommu=on",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            )
            .as_str(),
        ])
        .args(["--net", guest.default_net_string_w_iommu().as_str()])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Verify the virtio-iommu device is present.
        assert!(guest
            .does_device_vendor_pair_match("0x1057", "0x1af4")
            .unwrap_or_default());

        // On AArch64, if the guest system boots from FDT, the IOMMU behaves a bit
        // differently than it does with ACPI.
        // All devices on the PCI bus will be attached to the virtual IOMMU, except the
        // virtio-iommu device itself. So these devices will all be added to IOMMU groups,
        // and appear under the folder '/sys/kernel/iommu_groups/'.
        // As a result, in the FDT case, IOMMU group '0' contains "0000:00:01.0",
        // which is the console, and the first disk "0000:00:02.0" is in group '1'.
        // With ACPI, the console device is not attached to the IOMMU, so IOMMU group '0'
        // contains "0000:00:02.0", which is the first disk.
        //
        // Verify the iommu group of the first disk.
        let iommu_group = !acpi as i32;
        assert_eq!(
            guest
                .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str())
                .unwrap()
                .trim(),
            "0000:00:02.0"
        );

        // Verify the iommu group of the second disk.
        let iommu_group = if acpi { 1 } else { 2 };
        assert_eq!(
            guest
                .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str())
                .unwrap()
                .trim(),
            "0000:00:03.0"
        );

        // Verify the iommu group of the network card.
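        // (The network card check itself follows below.) These group checks rely
        // on the sysfs layout where every IOMMU group is a directory
        // /sys/kernel/iommu_groups/<N>/devices containing one entry per attached
        // device, named after its PCI BDF. As an illustration only (a hypothetical
        // helper, not called by the test, and it would have to run inside the
        // guest), the same mapping could be collected without shelling out:
        #[allow(dead_code)]
        fn list_iommu_groups() -> std::io::Result<Vec<(String, Vec<String>)>> {
            let mut groups = Vec::new();
            for group in std::fs::read_dir("/sys/kernel/iommu_groups")? {
                let group = group?;
                let mut devices = Vec::new();
                for dev in std::fs::read_dir(group.path().join("devices"))? {
                    devices.push(dev?.file_name().to_string_lossy().into_owned());
                }
                groups.push((group.file_name().to_string_lossy().into_owned(), devices));
            }
            Ok(groups)
        }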
2249 let iommu_group = if acpi { 2 } else { 3 }; 2250 assert_eq!( 2251 guest 2252 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2253 .unwrap() 2254 .trim(), 2255 "0000:00:04.0" 2256 ); 2257 }); 2258 2259 let _ = child.kill(); 2260 let output = child.wait_with_output().unwrap(); 2261 2262 handle_child_output(r, &output); 2263 } 2264 2265 fn get_reboot_count(guest: &Guest) -> u32 { 2266 guest 2267 .ssh_command("sudo last | grep -c reboot") 2268 .unwrap() 2269 .trim() 2270 .parse::<u32>() 2271 .unwrap_or_default() 2272 } 2273 2274 fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) { 2275 // Check for PCI device 2276 assert!(guest 2277 .does_device_vendor_pair_match("0x1063", "0x1af4") 2278 .unwrap_or_default()); 2279 2280 // Enable systemd watchdog 2281 guest 2282 .ssh_command(&format!( 2283 "echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf" 2284 )) 2285 .unwrap(); 2286 } 2287 2288 fn make_guest_panic(guest: &Guest) { 2289 // Check for pvpanic device 2290 assert!(guest 2291 .does_device_vendor_pair_match("0x0011", "0x1b36") 2292 .unwrap_or_default()); 2293 2294 // Trigger guest a panic 2295 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 2296 } 2297 2298 mod common_parallel { 2299 use std::{fs::OpenOptions, io::SeekFrom}; 2300 2301 use crate::*; 2302 2303 #[test] 2304 #[cfg(target_arch = "x86_64")] 2305 fn test_focal_hypervisor_fw() { 2306 test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME) 2307 } 2308 2309 #[test] 2310 #[cfg(target_arch = "x86_64")] 2311 fn test_focal_ovmf() { 2312 test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME) 2313 } 2314 2315 #[cfg(target_arch = "x86_64")] 2316 fn test_simple_launch(fw_path: String, disk_path: &str) { 2317 let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string())); 2318 let guest = Guest::new(disk_config); 2319 let event_path = temp_event_monitor_path(&guest.tmp_dir); 2320 2321 let mut child = GuestCommand::new(&guest) 2322 .args(["--cpus", "boot=1"]) 2323 .args(["--memory", "size=512M"]) 2324 .args(["--kernel", fw_path.as_str()]) 2325 .default_disks() 2326 .default_net() 2327 .args(["--serial", "tty", "--console", "off"]) 2328 .args(["--event-monitor", format!("path={event_path}").as_str()]) 2329 .capture_output() 2330 .spawn() 2331 .unwrap(); 2332 2333 let r = std::panic::catch_unwind(|| { 2334 guest.wait_vm_boot(Some(120)).unwrap(); 2335 2336 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2337 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2338 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 2339 2340 let expected_sequential_events = [ 2341 &MetaEvent { 2342 event: "starting".to_string(), 2343 device_id: None, 2344 }, 2345 &MetaEvent { 2346 event: "booting".to_string(), 2347 device_id: None, 2348 }, 2349 &MetaEvent { 2350 event: "booted".to_string(), 2351 device_id: None, 2352 }, 2353 &MetaEvent { 2354 event: "activated".to_string(), 2355 device_id: Some("_disk0".to_string()), 2356 }, 2357 &MetaEvent { 2358 event: "reset".to_string(), 2359 device_id: Some("_disk0".to_string()), 2360 }, 2361 ]; 2362 assert!(check_sequential_events( 2363 &expected_sequential_events, 2364 &event_path 2365 )); 2366 2367 // It's been observed on the Bionic image that udev and snapd 2368 // services can cause some delay in the VM's shutdown. Disabling 2369 // them improves the reliability of this test. 
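// The four systemctl commands below are deliberately best-effort: their results
// are ignored (`let _ = ...`) because failing to disable udev or snapd should
// not, by itself, fail the test.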
2370 let _ = guest.ssh_command("sudo systemctl disable udev"); 2371 let _ = guest.ssh_command("sudo systemctl stop udev"); 2372 let _ = guest.ssh_command("sudo systemctl disable snapd"); 2373 let _ = guest.ssh_command("sudo systemctl stop snapd"); 2374 2375 guest.ssh_command("sudo poweroff").unwrap(); 2376 thread::sleep(std::time::Duration::new(20, 0)); 2377 let latest_events = [ 2378 &MetaEvent { 2379 event: "shutdown".to_string(), 2380 device_id: None, 2381 }, 2382 &MetaEvent { 2383 event: "deleted".to_string(), 2384 device_id: None, 2385 }, 2386 &MetaEvent { 2387 event: "shutdown".to_string(), 2388 device_id: None, 2389 }, 2390 ]; 2391 assert!(check_latest_events_exact(&latest_events, &event_path)); 2392 }); 2393 2394 let _ = child.kill(); 2395 let output = child.wait_with_output().unwrap(); 2396 2397 handle_child_output(r, &output); 2398 } 2399 2400 #[test] 2401 fn test_multi_cpu() { 2402 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 2403 let jammy = UbuntuDiskConfig::new(jammy_image); 2404 let guest = Guest::new(Box::new(jammy)); 2405 2406 let mut cmd = GuestCommand::new(&guest); 2407 cmd.args(["--cpus", "boot=2,max=4"]) 2408 .args(["--memory", "size=512M"]) 2409 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2410 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2411 .capture_output() 2412 .default_disks() 2413 .default_net(); 2414 2415 let mut child = cmd.spawn().unwrap(); 2416 2417 let r = std::panic::catch_unwind(|| { 2418 guest.wait_vm_boot(Some(120)).unwrap(); 2419 2420 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 2421 2422 assert_eq!( 2423 guest 2424 .ssh_command( 2425 r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""# 2426 ) 2427 .unwrap() 2428 .trim(), 2429 "smp: Brought up 1 node, 2 CPUs" 2430 ); 2431 }); 2432 2433 let _ = child.kill(); 2434 let output = child.wait_with_output().unwrap(); 2435 2436 handle_child_output(r, &output); 2437 } 2438 2439 #[test] 2440 fn test_cpu_topology_421() { 2441 test_cpu_topology(4, 2, 1, false); 2442 } 2443 2444 #[test] 2445 fn test_cpu_topology_142() { 2446 test_cpu_topology(1, 4, 2, false); 2447 } 2448 2449 #[test] 2450 fn test_cpu_topology_262() { 2451 test_cpu_topology(2, 6, 2, false); 2452 } 2453 2454 #[test] 2455 #[cfg(target_arch = "x86_64")] 2456 #[cfg(not(feature = "mshv"))] 2457 fn test_cpu_physical_bits() { 2458 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2459 let guest = Guest::new(Box::new(focal)); 2460 let max_phys_bits: u8 = 36; 2461 let mut child = GuestCommand::new(&guest) 2462 .args(["--cpus", &format!("max_phys_bits={max_phys_bits}")]) 2463 .args(["--memory", "size=512M"]) 2464 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2465 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2466 .default_disks() 2467 .default_net() 2468 .capture_output() 2469 .spawn() 2470 .unwrap(); 2471 2472 let r = std::panic::catch_unwind(|| { 2473 guest.wait_vm_boot(None).unwrap(); 2474 2475 assert!( 2476 guest 2477 .ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"") 2478 .unwrap() 2479 .trim() 2480 .parse::<u8>() 2481 .unwrap_or(max_phys_bits + 1) <= max_phys_bits, 2482 ); 2483 }); 2484 2485 let _ = child.kill(); 2486 let output = child.wait_with_output().unwrap(); 2487 2488 handle_child_output(r, &output); 2489 } 2490 2491 #[test] 2492 fn test_cpu_affinity() { 2493 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2494 let guest = Guest::new(Box::new(focal)); 2495 2496 // We need the host to 
have at least 4 CPUs if we want to be able 2497 // to run this test. 2498 let host_cpus_count = exec_host_command_output("nproc"); 2499 assert!( 2500 String::from_utf8_lossy(&host_cpus_count.stdout) 2501 .trim() 2502 .parse::<u16>() 2503 .unwrap_or(0) 2504 >= 4 2505 ); 2506 2507 let mut child = GuestCommand::new(&guest) 2508 .args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"]) 2509 .args(["--memory", "size=512M"]) 2510 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2511 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2512 .default_disks() 2513 .default_net() 2514 .capture_output() 2515 .spawn() 2516 .unwrap(); 2517 2518 let r = std::panic::catch_unwind(|| { 2519 guest.wait_vm_boot(None).unwrap(); 2520 let pid = child.id(); 2521 let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2522 assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2"); 2523 let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2524 assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3"); 2525 }); 2526 2527 let _ = child.kill(); 2528 let output = child.wait_with_output().unwrap(); 2529 2530 handle_child_output(r, &output); 2531 } 2532 2533 #[test] 2534 #[cfg(not(feature = "mshv"))] 2535 fn test_large_vm() { 2536 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2537 let guest = Guest::new(Box::new(focal)); 2538 let mut cmd = GuestCommand::new(&guest); 2539 cmd.args(["--cpus", "boot=48"]) 2540 .args(["--memory", "size=5120M"]) 2541 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2542 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2543 .args(["--serial", "tty"]) 2544 .args(["--console", "off"]) 2545 .capture_output() 2546 .default_disks() 2547 .default_net(); 2548 2549 let mut child = cmd.spawn().unwrap(); 2550 2551 guest.wait_vm_boot(None).unwrap(); 2552 2553 let r = std::panic::catch_unwind(|| { 2554 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48); 2555 assert_eq!( 2556 guest 2557 .ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"") 2558 .unwrap() 2559 .trim(), 2560 "0-47" 2561 ); 2562 2563 assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000); 2564 }); 2565 2566 let _ = child.kill(); 2567 let output = child.wait_with_output().unwrap(); 2568 2569 handle_child_output(r, &output); 2570 } 2571 2572 #[test] 2573 #[cfg(not(feature = "mshv"))] 2574 fn test_huge_memory() { 2575 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2576 let guest = Guest::new(Box::new(focal)); 2577 let mut cmd = GuestCommand::new(&guest); 2578 cmd.args(["--cpus", "boot=1"]) 2579 .args(["--memory", "size=128G"]) 2580 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2581 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2582 .capture_output() 2583 .default_disks() 2584 .default_net(); 2585 2586 let mut child = cmd.spawn().unwrap(); 2587 2588 guest.wait_vm_boot(Some(120)).unwrap(); 2589 2590 let r = std::panic::catch_unwind(|| { 2591 assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000); 2592 }); 2593 2594 let _ = child.kill(); 2595 let output = child.wait_with_output().unwrap(); 2596 2597 handle_child_output(r, &output); 2598 } 2599 2600 #[test] 2601 fn test_power_button() { 2602 _test_power_button(false); 2603 } 2604 2605 #[test] 2606 #[cfg(not(feature = "mshv"))] 
2607 fn test_user_defined_memory_regions() { 2608 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2609 let guest = Guest::new(Box::new(focal)); 2610 let api_socket = temp_api_path(&guest.tmp_dir); 2611 2612 let kernel_path = direct_kernel_boot_path(); 2613 2614 let mut child = GuestCommand::new(&guest) 2615 .args(["--cpus", "boot=1"]) 2616 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 2617 .args([ 2618 "--memory-zone", 2619 "id=mem0,size=1G,hotplug_size=2G", 2620 "--memory-zone", 2621 "id=mem1,size=1G,shared=on", 2622 "--memory-zone", 2623 "id=mem2,size=1G,host_numa_node=0,hotplug_size=2G", 2624 ]) 2625 .args(["--kernel", kernel_path.to_str().unwrap()]) 2626 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2627 .args(["--api-socket", &api_socket]) 2628 .capture_output() 2629 .default_disks() 2630 .default_net() 2631 .spawn() 2632 .unwrap(); 2633 2634 let r = std::panic::catch_unwind(|| { 2635 guest.wait_vm_boot(None).unwrap(); 2636 2637 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 2638 2639 guest.enable_memory_hotplug(); 2640 2641 resize_zone_command(&api_socket, "mem0", "3G"); 2642 thread::sleep(std::time::Duration::new(5, 0)); 2643 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2644 resize_zone_command(&api_socket, "mem2", "3G"); 2645 thread::sleep(std::time::Duration::new(5, 0)); 2646 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 2647 resize_zone_command(&api_socket, "mem0", "2G"); 2648 thread::sleep(std::time::Duration::new(5, 0)); 2649 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 2650 resize_zone_command(&api_socket, "mem2", "2G"); 2651 thread::sleep(std::time::Duration::new(5, 0)); 2652 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2653 2654 guest.reboot_linux(0, None); 2655 2656 // Check the amount of RAM after reboot 2657 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2658 assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000); 2659 2660 // Check if we can still resize down to the initial 'boot'size 2661 resize_zone_command(&api_socket, "mem0", "1G"); 2662 thread::sleep(std::time::Duration::new(5, 0)); 2663 assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000); 2664 resize_zone_command(&api_socket, "mem2", "1G"); 2665 thread::sleep(std::time::Duration::new(5, 0)); 2666 assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000); 2667 }); 2668 2669 let _ = child.kill(); 2670 let output = child.wait_with_output().unwrap(); 2671 2672 handle_child_output(r, &output); 2673 } 2674 2675 #[test] 2676 #[cfg(not(feature = "mshv"))] 2677 fn test_guest_numa_nodes() { 2678 _test_guest_numa_nodes(false); 2679 } 2680 2681 #[test] 2682 #[cfg(target_arch = "x86_64")] 2683 fn test_iommu_segments() { 2684 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2685 let focal = UbuntuDiskConfig::new(focal_image); 2686 let guest = Guest::new(Box::new(focal)); 2687 2688 // Prepare another disk file for the virtio-disk device 2689 let test_disk_path = String::from( 2690 guest 2691 .tmp_dir 2692 .as_path() 2693 .join("test-disk.raw") 2694 .to_str() 2695 .unwrap(), 2696 ); 2697 assert!( 2698 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2699 ); 2700 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2701 2702 let api_socket = temp_api_path(&guest.tmp_dir); 2703 let mut cmd = GuestCommand::new(&guest); 2704 2705 cmd.args(["--cpus", "boot=1"]) 2706 
.args(["--api-socket", &api_socket]) 2707 .args(["--memory", "size=512M"]) 2708 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2709 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2710 .args([ 2711 "--platform", 2712 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"), 2713 ]) 2714 .default_disks() 2715 .capture_output() 2716 .default_net(); 2717 2718 let mut child = cmd.spawn().unwrap(); 2719 2720 guest.wait_vm_boot(None).unwrap(); 2721 2722 let r = std::panic::catch_unwind(|| { 2723 let (cmd_success, cmd_output) = remote_command_w_output( 2724 &api_socket, 2725 "add-disk", 2726 Some( 2727 format!( 2728 "path={},id=test0,pci_segment=1,iommu=on", 2729 test_disk_path.as_str() 2730 ) 2731 .as_str(), 2732 ), 2733 ); 2734 assert!(cmd_success); 2735 assert!(String::from_utf8_lossy(&cmd_output) 2736 .contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}")); 2737 2738 // Check IOMMU setup 2739 assert!(guest 2740 .does_device_vendor_pair_match("0x1057", "0x1af4") 2741 .unwrap_or_default()); 2742 assert_eq!( 2743 guest 2744 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 2745 .unwrap() 2746 .trim(), 2747 "0001:00:01.0" 2748 ); 2749 }); 2750 2751 let _ = child.kill(); 2752 let output = child.wait_with_output().unwrap(); 2753 2754 handle_child_output(r, &output); 2755 } 2756 2757 #[test] 2758 fn test_pci_msi() { 2759 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2760 let guest = Guest::new(Box::new(focal)); 2761 let mut cmd = GuestCommand::new(&guest); 2762 cmd.args(["--cpus", "boot=1"]) 2763 .args(["--memory", "size=512M"]) 2764 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2765 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2766 .capture_output() 2767 .default_disks() 2768 .default_net(); 2769 2770 let mut child = cmd.spawn().unwrap(); 2771 2772 guest.wait_vm_boot(None).unwrap(); 2773 2774 #[cfg(target_arch = "x86_64")] 2775 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 2776 #[cfg(target_arch = "aarch64")] 2777 let grep_cmd = "grep -c ITS-MSI /proc/interrupts"; 2778 2779 let r = std::panic::catch_unwind(|| { 2780 assert_eq!( 2781 guest 2782 .ssh_command(grep_cmd) 2783 .unwrap() 2784 .trim() 2785 .parse::<u32>() 2786 .unwrap_or_default(), 2787 12 2788 ); 2789 }); 2790 2791 let _ = child.kill(); 2792 let output = child.wait_with_output().unwrap(); 2793 2794 handle_child_output(r, &output); 2795 } 2796 2797 #[test] 2798 fn test_virtio_net_ctrl_queue() { 2799 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2800 let guest = Guest::new(Box::new(focal)); 2801 let mut cmd = GuestCommand::new(&guest); 2802 cmd.args(["--cpus", "boot=1"]) 2803 .args(["--memory", "size=512M"]) 2804 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2805 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2806 .args(["--net", guest.default_net_string_w_mtu(3000).as_str()]) 2807 .capture_output() 2808 .default_disks(); 2809 2810 let mut child = cmd.spawn().unwrap(); 2811 2812 guest.wait_vm_boot(None).unwrap(); 2813 2814 #[cfg(target_arch = "aarch64")] 2815 let iface = "enp0s4"; 2816 #[cfg(target_arch = "x86_64")] 2817 let iface = "ens4"; 2818 2819 let r = std::panic::catch_unwind(|| { 2820 assert_eq!( 2821 guest 2822 .ssh_command( 2823 format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str() 2824 ) 2825 .unwrap() 2826 .trim(), 2827 "success" 2828 ); 2829 assert_eq!( 2830 guest 2831 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str()) 2832 .unwrap() 2833 .trim(), 2834 "3000" 2835 ); 
2836 }); 2837 2838 let _ = child.kill(); 2839 let output = child.wait_with_output().unwrap(); 2840 2841 handle_child_output(r, &output); 2842 } 2843 2844 #[test] 2845 fn test_pci_multiple_segments() { 2846 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2847 let guest = Guest::new(Box::new(focal)); 2848 2849 // Prepare another disk file for the virtio-disk device 2850 let test_disk_path = String::from( 2851 guest 2852 .tmp_dir 2853 .as_path() 2854 .join("test-disk.raw") 2855 .to_str() 2856 .unwrap(), 2857 ); 2858 assert!( 2859 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2860 ); 2861 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2862 2863 let mut cmd = GuestCommand::new(&guest); 2864 cmd.args(["--cpus", "boot=1"]) 2865 .args(["--memory", "size=512M"]) 2866 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2867 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2868 .args([ 2869 "--platform", 2870 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 2871 ]) 2872 .args([ 2873 "--disk", 2874 format!( 2875 "path={}", 2876 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2877 ) 2878 .as_str(), 2879 "--disk", 2880 format!( 2881 "path={}", 2882 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2883 ) 2884 .as_str(), 2885 "--disk", 2886 format!("path={test_disk_path},pci_segment=15").as_str(), 2887 ]) 2888 .capture_output() 2889 .default_net(); 2890 2891 let mut child = cmd.spawn().unwrap(); 2892 2893 guest.wait_vm_boot(None).unwrap(); 2894 2895 let grep_cmd = "lspci | grep \"Host bridge\" | wc -l"; 2896 2897 let r = std::panic::catch_unwind(|| { 2898 // There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest. 2899 assert_eq!( 2900 guest 2901 .ssh_command(grep_cmd) 2902 .unwrap() 2903 .trim() 2904 .parse::<u16>() 2905 .unwrap_or_default(), 2906 MAX_NUM_PCI_SEGMENTS 2907 ); 2908 2909 // Check both if /dev/vdc exists and if the block size is 4M. 2910 assert_eq!( 2911 guest 2912 .ssh_command("lsblk | grep vdc | grep -c 4M") 2913 .unwrap() 2914 .trim() 2915 .parse::<u32>() 2916 .unwrap_or_default(), 2917 1 2918 ); 2919 2920 // Mount the device. 2921 guest.ssh_command("mkdir mount_image").unwrap(); 2922 guest 2923 .ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/") 2924 .unwrap(); 2925 // Grant all users with write permission. 2926 guest.ssh_command("sudo chmod a+w mount_image/").unwrap(); 2927 2928 // Write something to the device. 2929 guest 2930 .ssh_command("sudo echo \"bar\" >> mount_image/foo") 2931 .unwrap(); 2932 2933 // Check the content of the block device. The file "foo" should 2934 // contain "bar". 
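// (The read-back check follows right below.) Since the extra disk was placed on
// PCI segment 15, it should show up in the guest under PCI domain 000f. As an
// illustration only, a hypothetical guest-side helper could recover the PCI
// address of a block device from sysfs like this (not used by the test):
#[allow(dead_code)]
fn guest_block_pci_address(dev: &str) -> Option<String> {
    let path = std::fs::canonicalize(format!("/sys/block/{dev}/device")).ok()?;
    path.components()
        .filter_map(|c| c.as_os_str().to_str())
        .find(|s| s.len() == 12 && s.matches(':').count() == 2 && s.contains('.'))
        .map(str::to_owned)
}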
2935 assert_eq!( 2936 guest 2937 .ssh_command("sudo cat mount_image/foo") 2938 .unwrap() 2939 .trim(), 2940 "bar" 2941 ); 2942 }); 2943 2944 let _ = child.kill(); 2945 let output = child.wait_with_output().unwrap(); 2946 2947 handle_child_output(r, &output); 2948 } 2949 2950 fn test_pci_multiple_segments_numa_node() { 2951 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2952 let guest = Guest::new(Box::new(focal)); 2953 let api_socket = temp_api_path(&guest.tmp_dir); 2954 #[cfg(target_arch = "x86_64")] 2955 let kernel_path = direct_kernel_boot_path(); 2956 #[cfg(target_arch = "aarch64")] 2957 let kernel_path = edk2_path(); 2958 2959 // Prepare another disk file for the virtio-disk device 2960 let test_disk_path = String::from( 2961 guest 2962 .tmp_dir 2963 .as_path() 2964 .join("test-disk.raw") 2965 .to_str() 2966 .unwrap(), 2967 ); 2968 assert!( 2969 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2970 ); 2971 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2972 const TEST_DISK_NODE: u16 = 1; 2973 2974 let mut child = GuestCommand::new(&guest) 2975 .args(["--platform", "num_pci_segments=2"]) 2976 .args(["--cpus", "boot=2"]) 2977 .args(["--memory", "size=0"]) 2978 .args([ 2979 "--memory-zone", 2980 "id=mem0,size=256M", 2981 "--memory-zone", 2982 "id=mem1,size=256M", 2983 ]) 2984 .args([ 2985 "--numa", 2986 "guest_numa_id=0,cpus=[0],memory_zones=mem0,pci_segments=[0]", 2987 "--numa", 2988 "guest_numa_id=1,cpus=[1],memory_zones=mem1,pci_segments=[1]", 2989 ]) 2990 .args(["--kernel", kernel_path.to_str().unwrap()]) 2991 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2992 .args(["--api-socket", &api_socket]) 2993 .capture_output() 2994 .args([ 2995 "--disk", 2996 format!( 2997 "path={}", 2998 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2999 ) 3000 .as_str(), 3001 "--disk", 3002 format!( 3003 "path={}", 3004 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3005 ) 3006 .as_str(), 3007 "--disk", 3008 format!("path={test_disk_path},pci_segment={TEST_DISK_NODE}").as_str(), 3009 ]) 3010 .default_net() 3011 .spawn() 3012 .unwrap(); 3013 3014 let cmd = "cat /sys/block/vdc/device/../numa_node"; 3015 3016 let r = std::panic::catch_unwind(|| { 3017 guest.wait_vm_boot(None).unwrap(); 3018 3019 assert_eq!( 3020 guest 3021 .ssh_command(cmd) 3022 .unwrap() 3023 .trim() 3024 .parse::<u16>() 3025 .unwrap_or_default(), 3026 TEST_DISK_NODE 3027 ); 3028 }); 3029 3030 let _ = child.kill(); 3031 let output = child.wait_with_output().unwrap(); 3032 3033 handle_child_output(r, &output); 3034 } 3035 3036 #[test] 3037 fn test_direct_kernel_boot() { 3038 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3039 let guest = Guest::new(Box::new(focal)); 3040 3041 let kernel_path = direct_kernel_boot_path(); 3042 3043 let mut child = GuestCommand::new(&guest) 3044 .args(["--cpus", "boot=1"]) 3045 .args(["--memory", "size=512M"]) 3046 .args(["--kernel", kernel_path.to_str().unwrap()]) 3047 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3048 .default_disks() 3049 .default_net() 3050 .capture_output() 3051 .spawn() 3052 .unwrap(); 3053 3054 let r = std::panic::catch_unwind(|| { 3055 guest.wait_vm_boot(None).unwrap(); 3056 3057 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3058 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3059 3060 let grep_cmd = if cfg!(target_arch = "x86_64") { 3061 "grep -c PCI-MSI /proc/interrupts" 3062 } else { 3063 "grep -c ITS-MSI 
/proc/interrupts" 3064 }; 3065 assert_eq!( 3066 guest 3067 .ssh_command(grep_cmd) 3068 .unwrap() 3069 .trim() 3070 .parse::<u32>() 3071 .unwrap_or_default(), 3072 12 3073 ); 3074 }); 3075 3076 let _ = child.kill(); 3077 let output = child.wait_with_output().unwrap(); 3078 3079 handle_child_output(r, &output); 3080 } 3081 3082 fn _test_virtio_block(image_name: &str, disable_io_uring: bool) { 3083 let focal = UbuntuDiskConfig::new(image_name.to_string()); 3084 let guest = Guest::new(Box::new(focal)); 3085 3086 let mut workload_path = dirs::home_dir().unwrap(); 3087 workload_path.push("workloads"); 3088 3089 let mut blk_file_path = workload_path; 3090 blk_file_path.push("blk.img"); 3091 3092 let kernel_path = direct_kernel_boot_path(); 3093 3094 let mut cloud_child = GuestCommand::new(&guest) 3095 .args(["--cpus", "boot=4"]) 3096 .args(["--memory", "size=512M,shared=on"]) 3097 .args(["--kernel", kernel_path.to_str().unwrap()]) 3098 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3099 .args([ 3100 "--disk", 3101 format!( 3102 "path={}", 3103 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3104 ) 3105 .as_str(), 3106 "--disk", 3107 format!( 3108 "path={}", 3109 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3110 ) 3111 .as_str(), 3112 "--disk", 3113 format!( 3114 "path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={}", 3115 blk_file_path.to_str().unwrap(), 3116 disable_io_uring 3117 ) 3118 .as_str(), 3119 ]) 3120 .default_net() 3121 .capture_output() 3122 .spawn() 3123 .unwrap(); 3124 3125 let r = std::panic::catch_unwind(|| { 3126 guest.wait_vm_boot(None).unwrap(); 3127 3128 // Check both if /dev/vdc exists and if the block size is 16M. 3129 assert_eq!( 3130 guest 3131 .ssh_command("lsblk | grep vdc | grep -c 16M") 3132 .unwrap() 3133 .trim() 3134 .parse::<u32>() 3135 .unwrap_or_default(), 3136 1 3137 ); 3138 3139 // Check both if /dev/vdc exists and if this block is RO. 3140 assert_eq!( 3141 guest 3142 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 3143 .unwrap() 3144 .trim() 3145 .parse::<u32>() 3146 .unwrap_or_default(), 3147 1 3148 ); 3149 3150 // Check if the number of queues is 4. 
3151 assert_eq!( 3152 guest 3153 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 3154 .unwrap() 3155 .trim() 3156 .parse::<u32>() 3157 .unwrap_or_default(), 3158 4 3159 ); 3160 }); 3161 3162 let _ = cloud_child.kill(); 3163 let output = cloud_child.wait_with_output().unwrap(); 3164 3165 handle_child_output(r, &output); 3166 } 3167 3168 #[test] 3169 fn test_virtio_block() { 3170 _test_virtio_block(FOCAL_IMAGE_NAME, false) 3171 } 3172 3173 #[test] 3174 fn test_virtio_block_disable_io_uring() { 3175 _test_virtio_block(FOCAL_IMAGE_NAME, true) 3176 } 3177 3178 #[test] 3179 fn test_virtio_block_qcow2() { 3180 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false) 3181 } 3182 3183 #[test] 3184 fn test_virtio_block_qcow2_backing_file() { 3185 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE, false) 3186 } 3187 3188 #[test] 3189 fn test_virtio_block_vhd() { 3190 let mut workload_path = dirs::home_dir().unwrap(); 3191 workload_path.push("workloads"); 3192 3193 let mut raw_file_path = workload_path.clone(); 3194 let mut vhd_file_path = workload_path; 3195 raw_file_path.push(FOCAL_IMAGE_NAME); 3196 vhd_file_path.push(FOCAL_IMAGE_NAME_VHD); 3197 3198 // Generate VHD file from RAW file 3199 std::process::Command::new("qemu-img") 3200 .arg("convert") 3201 .arg("-p") 3202 .args(["-f", "raw"]) 3203 .args(["-O", "vpc"]) 3204 .args(["-o", "subformat=fixed"]) 3205 .arg(raw_file_path.to_str().unwrap()) 3206 .arg(vhd_file_path.to_str().unwrap()) 3207 .output() 3208 .expect("Expect generating VHD image from RAW image"); 3209 3210 _test_virtio_block(FOCAL_IMAGE_NAME_VHD, false) 3211 } 3212 3213 #[test] 3214 fn test_virtio_block_vhdx() { 3215 let mut workload_path = dirs::home_dir().unwrap(); 3216 workload_path.push("workloads"); 3217 3218 let mut raw_file_path = workload_path.clone(); 3219 let mut vhdx_file_path = workload_path; 3220 raw_file_path.push(FOCAL_IMAGE_NAME); 3221 vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX); 3222 3223 // Generate dynamic VHDX file from RAW file 3224 std::process::Command::new("qemu-img") 3225 .arg("convert") 3226 .arg("-p") 3227 .args(["-f", "raw"]) 3228 .args(["-O", "vhdx"]) 3229 .arg(raw_file_path.to_str().unwrap()) 3230 .arg(vhdx_file_path.to_str().unwrap()) 3231 .output() 3232 .expect("Expect generating dynamic VHDx image from RAW image"); 3233 3234 _test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false) 3235 } 3236 3237 #[test] 3238 fn test_virtio_block_dynamic_vhdx_expand() { 3239 const VIRTUAL_DISK_SIZE: u64 = 100 << 20; 3240 const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20; 3241 const FULL_VHDX_FILE_SIZE: u64 = 112 << 20; 3242 const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx"; 3243 3244 let mut workload_path = dirs::home_dir().unwrap(); 3245 workload_path.push("workloads"); 3246 3247 let mut vhdx_file_path = workload_path; 3248 vhdx_file_path.push(DYNAMIC_VHDX_NAME); 3249 let vhdx_path = vhdx_file_path.to_str().unwrap(); 3250 3251 // Generate a 100 MiB dynamic VHDX file 3252 std::process::Command::new("qemu-img") 3253 .arg("create") 3254 .args(["-f", "vhdx"]) 3255 .arg(vhdx_path) 3256 .arg(VIRTUAL_DISK_SIZE.to_string()) 3257 .output() 3258 .expect("Expect generating dynamic VHDx image from RAW image"); 3259 3260 // Check if the size matches with empty VHDx file size 3261 assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE); 3262 3263 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3264 let guest = Guest::new(Box::new(focal)); 3265 let kernel_path = direct_kernel_boot_path(); 3266 3267 let mut cloud_child = GuestCommand::new(&guest) 3268 .args(["--cpus", 
"boot=1"]) 3269 .args(["--memory", "size=512M"]) 3270 .args(["--kernel", kernel_path.to_str().unwrap()]) 3271 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3272 .args([ 3273 "--disk", 3274 format!( 3275 "path={}", 3276 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3277 ) 3278 .as_str(), 3279 "--disk", 3280 format!( 3281 "path={}", 3282 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3283 ) 3284 .as_str(), 3285 "--disk", 3286 format!("path={vhdx_path}").as_str(), 3287 ]) 3288 .default_net() 3289 .capture_output() 3290 .spawn() 3291 .unwrap(); 3292 3293 let r = std::panic::catch_unwind(|| { 3294 guest.wait_vm_boot(None).unwrap(); 3295 3296 // Check both if /dev/vdc exists and if the block size is 100 MiB. 3297 assert_eq!( 3298 guest 3299 .ssh_command("lsblk | grep vdc | grep -c 100M") 3300 .unwrap() 3301 .trim() 3302 .parse::<u32>() 3303 .unwrap_or_default(), 3304 1 3305 ); 3306 3307 // Write 100 MB of data to the VHDx disk 3308 guest 3309 .ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100") 3310 .unwrap(); 3311 }); 3312 3313 // Check if the size matches with expected expanded VHDx file size 3314 assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE); 3315 3316 let _ = cloud_child.kill(); 3317 let output = cloud_child.wait_with_output().unwrap(); 3318 3319 handle_child_output(r, &output); 3320 } 3321 3322 fn vhdx_image_size(disk_name: &str) -> u64 { 3323 std::fs::File::open(disk_name) 3324 .unwrap() 3325 .seek(SeekFrom::End(0)) 3326 .unwrap() 3327 } 3328 3329 #[test] 3330 fn test_virtio_block_direct_and_firmware() { 3331 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3332 let guest = Guest::new(Box::new(focal)); 3333 3334 // The OS disk must be copied to a location that is not backed by 3335 // tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails 3336 // with EINVAL because tmpfs doesn't support this flag. 
3337 let mut workloads_path = dirs::home_dir().unwrap(); 3338 workloads_path.push("workloads"); 3339 let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap(); 3340 let mut os_path = os_dir.as_path().to_path_buf(); 3341 os_path.push("osdisk.img"); 3342 rate_limited_copy( 3343 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3344 os_path.as_path(), 3345 ) 3346 .expect("copying of OS disk failed"); 3347 3348 let mut child = GuestCommand::new(&guest) 3349 .args(["--cpus", "boot=1"]) 3350 .args(["--memory", "size=512M"]) 3351 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 3352 .args([ 3353 "--disk", 3354 format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(), 3355 "--disk", 3356 format!( 3357 "path={}", 3358 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3359 ) 3360 .as_str(), 3361 ]) 3362 .default_net() 3363 .capture_output() 3364 .spawn() 3365 .unwrap(); 3366 3367 let r = std::panic::catch_unwind(|| { 3368 guest.wait_vm_boot(Some(120)).unwrap(); 3369 }); 3370 3371 let _ = child.kill(); 3372 let output = child.wait_with_output().unwrap(); 3373 3374 handle_child_output(r, &output); 3375 } 3376 3377 #[test] 3378 fn test_vhost_user_net_default() { 3379 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false) 3380 } 3381 3382 #[test] 3383 fn test_vhost_user_net_named_tap() { 3384 test_vhost_user_net( 3385 Some("mytap0"), 3386 2, 3387 &prepare_vhost_user_net_daemon, 3388 false, 3389 false, 3390 ) 3391 } 3392 3393 #[test] 3394 fn test_vhost_user_net_existing_tap() { 3395 test_vhost_user_net( 3396 Some("vunet-tap0"), 3397 2, 3398 &prepare_vhost_user_net_daemon, 3399 false, 3400 false, 3401 ) 3402 } 3403 3404 #[test] 3405 fn test_vhost_user_net_multiple_queues() { 3406 test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false) 3407 } 3408 3409 #[test] 3410 fn test_vhost_user_net_tap_multiple_queues() { 3411 test_vhost_user_net( 3412 Some("vunet-tap1"), 3413 4, 3414 &prepare_vhost_user_net_daemon, 3415 false, 3416 false, 3417 ) 3418 } 3419 3420 #[test] 3421 fn test_vhost_user_net_host_mac() { 3422 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false) 3423 } 3424 3425 #[test] 3426 fn test_vhost_user_net_client_mode() { 3427 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true) 3428 } 3429 3430 #[test] 3431 fn test_vhost_user_blk_default() { 3432 test_vhost_user_blk(2, false, false, Some(&prepare_vubd)) 3433 } 3434 3435 #[test] 3436 fn test_vhost_user_blk_readonly() { 3437 test_vhost_user_blk(1, true, false, Some(&prepare_vubd)) 3438 } 3439 3440 #[test] 3441 fn test_vhost_user_blk_direct() { 3442 test_vhost_user_blk(1, false, true, Some(&prepare_vubd)) 3443 } 3444 3445 #[test] 3446 fn test_boot_from_vhost_user_blk_default() { 3447 test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd)) 3448 } 3449 3450 #[test] 3451 #[cfg(target_arch = "x86_64")] 3452 fn test_split_irqchip() { 3453 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3454 let guest = Guest::new(Box::new(focal)); 3455 3456 let mut child = GuestCommand::new(&guest) 3457 .args(["--cpus", "boot=1"]) 3458 .args(["--memory", "size=512M"]) 3459 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3460 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3461 .default_disks() 3462 .default_net() 3463 .capture_output() 3464 .spawn() 3465 .unwrap(); 3466 3467 let r = std::panic::catch_unwind(|| { 3468 guest.wait_vm_boot(None).unwrap(); 3469 3470 assert_eq!( 3471 guest 3472 
.ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true") 3473 .unwrap() 3474 .trim() 3475 .parse::<u32>() 3476 .unwrap_or(1), 3477 0 3478 ); 3479 assert_eq!( 3480 guest 3481 .ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true") 3482 .unwrap() 3483 .trim() 3484 .parse::<u32>() 3485 .unwrap_or(1), 3486 0 3487 ); 3488 }); 3489 3490 let _ = child.kill(); 3491 let output = child.wait_with_output().unwrap(); 3492 3493 handle_child_output(r, &output); 3494 } 3495 3496 #[test] 3497 #[cfg(target_arch = "x86_64")] 3498 fn test_dmi_serial_number() { 3499 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3500 let guest = Guest::new(Box::new(focal)); 3501 3502 let mut child = GuestCommand::new(&guest) 3503 .args(["--cpus", "boot=1"]) 3504 .args(["--memory", "size=512M"]) 3505 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3506 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3507 .args(["--platform", "serial_number=a=b;c=d"]) 3508 .default_disks() 3509 .default_net() 3510 .capture_output() 3511 .spawn() 3512 .unwrap(); 3513 3514 let r = std::panic::catch_unwind(|| { 3515 guest.wait_vm_boot(None).unwrap(); 3516 3517 assert_eq!( 3518 guest 3519 .ssh_command("sudo cat /sys/class/dmi/id/product_serial") 3520 .unwrap() 3521 .trim(), 3522 "a=b;c=d" 3523 ); 3524 }); 3525 3526 let _ = child.kill(); 3527 let output = child.wait_with_output().unwrap(); 3528 3529 handle_child_output(r, &output); 3530 } 3531 3532 #[test] 3533 #[cfg(target_arch = "x86_64")] 3534 fn test_dmi_uuid() { 3535 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3536 let guest = Guest::new(Box::new(focal)); 3537 3538 let mut child = GuestCommand::new(&guest) 3539 .args(["--cpus", "boot=1"]) 3540 .args(["--memory", "size=512M"]) 3541 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3542 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3543 .args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"]) 3544 .default_disks() 3545 .default_net() 3546 .capture_output() 3547 .spawn() 3548 .unwrap(); 3549 3550 let r = std::panic::catch_unwind(|| { 3551 guest.wait_vm_boot(None).unwrap(); 3552 3553 assert_eq!( 3554 guest 3555 .ssh_command("sudo cat /sys/class/dmi/id/product_uuid") 3556 .unwrap() 3557 .trim(), 3558 "1e8aa28a-435d-4027-87f4-40dceff1fa0a" 3559 ); 3560 }); 3561 3562 let _ = child.kill(); 3563 let output = child.wait_with_output().unwrap(); 3564 3565 handle_child_output(r, &output); 3566 } 3567 3568 #[test] 3569 #[cfg(target_arch = "x86_64")] 3570 fn test_dmi_oem_strings() { 3571 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3572 let guest = Guest::new(Box::new(focal)); 3573 3574 let s1 = "io.systemd.credential:xx=yy"; 3575 let s2 = "This is a test string"; 3576 3577 let oem_strings = format!("oem_strings=[{s1},{s2}]"); 3578 3579 let mut child = GuestCommand::new(&guest) 3580 .args(["--cpus", "boot=1"]) 3581 .args(["--memory", "size=512M"]) 3582 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3583 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3584 .args(["--platform", &oem_strings]) 3585 .default_disks() 3586 .default_net() 3587 .capture_output() 3588 .spawn() 3589 .unwrap(); 3590 3591 let r = std::panic::catch_unwind(|| { 3592 guest.wait_vm_boot(None).unwrap(); 3593 3594 assert_eq!( 3595 guest 3596 .ssh_command("sudo dmidecode --oem-string count") 3597 .unwrap() 3598 .trim(), 3599 "2" 3600 ); 3601 3602 assert_eq!( 3603 guest 3604 .ssh_command("sudo dmidecode --oem-string 1") 3605 .unwrap() 3606 .trim(), 3607 s1 
3608 ); 3609 3610 assert_eq!( 3611 guest 3612 .ssh_command("sudo dmidecode --oem-string 2") 3613 .unwrap() 3614 .trim(), 3615 s2 3616 ); 3617 }); 3618 3619 let _ = child.kill(); 3620 let output = child.wait_with_output().unwrap(); 3621 3622 handle_child_output(r, &output); 3623 } 3624 3625 #[test] 3626 fn test_virtio_fs() { 3627 _test_virtio_fs(&prepare_virtiofsd, false, None) 3628 } 3629 3630 #[test] 3631 fn test_virtio_fs_hotplug() { 3632 _test_virtio_fs(&prepare_virtiofsd, true, None) 3633 } 3634 3635 #[test] 3636 #[cfg(not(feature = "mshv"))] 3637 fn test_virtio_fs_multi_segment_hotplug() { 3638 _test_virtio_fs(&prepare_virtiofsd, true, Some(15)) 3639 } 3640 3641 #[test] 3642 #[cfg(not(feature = "mshv"))] 3643 fn test_virtio_fs_multi_segment() { 3644 _test_virtio_fs(&prepare_virtiofsd, false, Some(15)) 3645 } 3646 3647 #[test] 3648 fn test_virtio_pmem_persist_writes() { 3649 test_virtio_pmem(false, false) 3650 } 3651 3652 #[test] 3653 fn test_virtio_pmem_discard_writes() { 3654 test_virtio_pmem(true, false) 3655 } 3656 3657 #[test] 3658 fn test_virtio_pmem_with_size() { 3659 test_virtio_pmem(true, true) 3660 } 3661 3662 #[test] 3663 fn test_boot_from_virtio_pmem() { 3664 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3665 let guest = Guest::new(Box::new(focal)); 3666 3667 let kernel_path = direct_kernel_boot_path(); 3668 3669 let mut child = GuestCommand::new(&guest) 3670 .args(["--cpus", "boot=1"]) 3671 .args(["--memory", "size=512M"]) 3672 .args(["--kernel", kernel_path.to_str().unwrap()]) 3673 .args([ 3674 "--disk", 3675 format!( 3676 "path={}", 3677 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3678 ) 3679 .as_str(), 3680 ]) 3681 .default_net() 3682 .args([ 3683 "--pmem", 3684 format!( 3685 "file={},size={}", 3686 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3687 fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) 3688 .unwrap() 3689 .len() 3690 ) 3691 .as_str(), 3692 ]) 3693 .args([ 3694 "--cmdline", 3695 DIRECT_KERNEL_BOOT_CMDLINE 3696 .replace("vda1", "pmem0p1") 3697 .as_str(), 3698 ]) 3699 .capture_output() 3700 .spawn() 3701 .unwrap(); 3702 3703 let r = std::panic::catch_unwind(|| { 3704 guest.wait_vm_boot(None).unwrap(); 3705 3706 // Simple checks to validate the VM booted properly 3707 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3708 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3709 }); 3710 3711 let _ = child.kill(); 3712 let output = child.wait_with_output().unwrap(); 3713 3714 handle_child_output(r, &output); 3715 } 3716 3717 #[test] 3718 fn test_multiple_network_interfaces() { 3719 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3720 let guest = Guest::new(Box::new(focal)); 3721 3722 let kernel_path = direct_kernel_boot_path(); 3723 3724 let mut child = GuestCommand::new(&guest) 3725 .args(["--cpus", "boot=1"]) 3726 .args(["--memory", "size=512M"]) 3727 .args(["--kernel", kernel_path.to_str().unwrap()]) 3728 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3729 .default_disks() 3730 .args([ 3731 "--net", 3732 guest.default_net_string().as_str(), 3733 "--net", 3734 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 3735 "--net", 3736 "tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 3737 ]) 3738 .capture_output() 3739 .spawn() 3740 .unwrap(); 3741 3742 let r = std::panic::catch_unwind(|| { 3743 guest.wait_vm_boot(None).unwrap(); 3744 3745 let tap_count = exec_host_command_output("ip link | grep -c mytap1"); 3746 
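// `mytap1` can be spotted on the host because it was explicitly named in the
// third --net option; the second interface uses an auto-generated tap name. For
// illustration only, the host-side presence check above could also be done
// without shelling out, e.g. with a hypothetical helper like:
#[allow(dead_code)]
fn host_has_link(name: &str) -> bool {
    std::path::Path::new("/sys/class/net").join(name).exists()
}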
assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 3747 3748 // 3 network interfaces + default localhost ==> 4 interfaces 3749 assert_eq!( 3750 guest 3751 .ssh_command("ip -o link | wc -l") 3752 .unwrap() 3753 .trim() 3754 .parse::<u32>() 3755 .unwrap_or_default(), 3756 4 3757 ); 3758 }); 3759 3760 let _ = child.kill(); 3761 let output = child.wait_with_output().unwrap(); 3762 3763 handle_child_output(r, &output); 3764 } 3765 3766 #[test] 3767 #[cfg(target_arch = "aarch64")] 3768 fn test_pmu_on() { 3769 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3770 let guest = Guest::new(Box::new(focal)); 3771 let mut child = GuestCommand::new(&guest) 3772 .args(["--cpus", "boot=1"]) 3773 .args(["--memory", "size=512M"]) 3774 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3775 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3776 .default_disks() 3777 .default_net() 3778 .capture_output() 3779 .spawn() 3780 .unwrap(); 3781 3782 let r = std::panic::catch_unwind(|| { 3783 guest.wait_vm_boot(None).unwrap(); 3784 3785 // Test that PMU exists. 3786 assert_eq!( 3787 guest 3788 .ssh_command(GREP_PMU_IRQ_CMD) 3789 .unwrap() 3790 .trim() 3791 .parse::<u32>() 3792 .unwrap_or_default(), 3793 1 3794 ); 3795 }); 3796 3797 let _ = child.kill(); 3798 let output = child.wait_with_output().unwrap(); 3799 3800 handle_child_output(r, &output); 3801 } 3802 3803 #[test] 3804 fn test_serial_off() { 3805 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3806 let guest = Guest::new(Box::new(focal)); 3807 let mut child = GuestCommand::new(&guest) 3808 .args(["--cpus", "boot=1"]) 3809 .args(["--memory", "size=512M"]) 3810 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3811 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3812 .default_disks() 3813 .default_net() 3814 .args(["--serial", "off"]) 3815 .capture_output() 3816 .spawn() 3817 .unwrap(); 3818 3819 let r = std::panic::catch_unwind(|| { 3820 guest.wait_vm_boot(None).unwrap(); 3821 3822 // Test that there is no ttyS0 3823 assert_eq!( 3824 guest 3825 .ssh_command(GREP_SERIAL_IRQ_CMD) 3826 .unwrap() 3827 .trim() 3828 .parse::<u32>() 3829 .unwrap_or(1), 3830 0 3831 ); 3832 }); 3833 3834 let _ = child.kill(); 3835 let output = child.wait_with_output().unwrap(); 3836 3837 handle_child_output(r, &output); 3838 } 3839 3840 #[test] 3841 fn test_serial_null() { 3842 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3843 let guest = Guest::new(Box::new(focal)); 3844 let mut cmd = GuestCommand::new(&guest); 3845 #[cfg(target_arch = "x86_64")] 3846 let console_str: &str = "console=ttyS0"; 3847 #[cfg(target_arch = "aarch64")] 3848 let console_str: &str = "console=ttyAMA0"; 3849 3850 cmd.args(["--cpus", "boot=1"]) 3851 .args(["--memory", "size=512M"]) 3852 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3853 .args([ 3854 "--cmdline", 3855 DIRECT_KERNEL_BOOT_CMDLINE 3856 .replace("console=hvc0 ", console_str) 3857 .as_str(), 3858 ]) 3859 .default_disks() 3860 .default_net() 3861 .args(["--serial", "null"]) 3862 .args(["--console", "off"]) 3863 .capture_output(); 3864 3865 let mut child = cmd.spawn().unwrap(); 3866 3867 let r = std::panic::catch_unwind(|| { 3868 guest.wait_vm_boot(None).unwrap(); 3869 3870 // Test that there is a ttyS0 3871 assert_eq!( 3872 guest 3873 .ssh_command(GREP_SERIAL_IRQ_CMD) 3874 .unwrap() 3875 .trim() 3876 .parse::<u32>() 3877 .unwrap_or_default(), 3878 1 3879 ); 3880 }); 3881 3882 let _ = child.kill(); 3883 let output = 
child.wait_with_output().unwrap(); 3884 handle_child_output(r, &output); 3885 3886 let r = std::panic::catch_unwind(|| { 3887 assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 3888 }); 3889 3890 handle_child_output(r, &output); 3891 } 3892 3893 #[test] 3894 fn test_serial_tty() { 3895 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3896 let guest = Guest::new(Box::new(focal)); 3897 3898 let kernel_path = direct_kernel_boot_path(); 3899 3900 #[cfg(target_arch = "x86_64")] 3901 let console_str: &str = "console=ttyS0"; 3902 #[cfg(target_arch = "aarch64")] 3903 let console_str: &str = "console=ttyAMA0"; 3904 3905 let mut child = GuestCommand::new(&guest) 3906 .args(["--cpus", "boot=1"]) 3907 .args(["--memory", "size=512M"]) 3908 .args(["--kernel", kernel_path.to_str().unwrap()]) 3909 .args([ 3910 "--cmdline", 3911 DIRECT_KERNEL_BOOT_CMDLINE 3912 .replace("console=hvc0 ", console_str) 3913 .as_str(), 3914 ]) 3915 .default_disks() 3916 .default_net() 3917 .args(["--serial", "tty"]) 3918 .args(["--console", "off"]) 3919 .capture_output() 3920 .spawn() 3921 .unwrap(); 3922 3923 let r = std::panic::catch_unwind(|| { 3924 guest.wait_vm_boot(None).unwrap(); 3925 3926 // Test that there is a ttyS0 3927 assert_eq!( 3928 guest 3929 .ssh_command(GREP_SERIAL_IRQ_CMD) 3930 .unwrap() 3931 .trim() 3932 .parse::<u32>() 3933 .unwrap_or_default(), 3934 1 3935 ); 3936 }); 3937 3938 // This sleep is needed to wait for the login prompt 3939 thread::sleep(std::time::Duration::new(2, 0)); 3940 3941 let _ = child.kill(); 3942 let output = child.wait_with_output().unwrap(); 3943 handle_child_output(r, &output); 3944 3945 let r = std::panic::catch_unwind(|| { 3946 assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 3947 }); 3948 3949 handle_child_output(r, &output); 3950 } 3951 3952 #[test] 3953 fn test_serial_file() { 3954 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3955 let guest = Guest::new(Box::new(focal)); 3956 3957 let serial_path = guest.tmp_dir.as_path().join("/tmp/serial-output"); 3958 #[cfg(target_arch = "x86_64")] 3959 let console_str: &str = "console=ttyS0"; 3960 #[cfg(target_arch = "aarch64")] 3961 let console_str: &str = "console=ttyAMA0"; 3962 3963 let mut child = GuestCommand::new(&guest) 3964 .args(["--cpus", "boot=1"]) 3965 .args(["--memory", "size=512M"]) 3966 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3967 .args([ 3968 "--cmdline", 3969 DIRECT_KERNEL_BOOT_CMDLINE 3970 .replace("console=hvc0 ", console_str) 3971 .as_str(), 3972 ]) 3973 .default_disks() 3974 .default_net() 3975 .args([ 3976 "--serial", 3977 format!("file={}", serial_path.to_str().unwrap()).as_str(), 3978 ]) 3979 .capture_output() 3980 .spawn() 3981 .unwrap(); 3982 3983 let r = std::panic::catch_unwind(|| { 3984 guest.wait_vm_boot(None).unwrap(); 3985 3986 // Test that there is a ttyS0 3987 assert_eq!( 3988 guest 3989 .ssh_command(GREP_SERIAL_IRQ_CMD) 3990 .unwrap() 3991 .trim() 3992 .parse::<u32>() 3993 .unwrap_or_default(), 3994 1 3995 ); 3996 3997 guest.ssh_command("sudo shutdown -h now").unwrap(); 3998 }); 3999 4000 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4001 let _ = child.kill(); 4002 let output = child.wait_with_output().unwrap(); 4003 handle_child_output(r, &output); 4004 4005 let r = std::panic::catch_unwind(|| { 4006 // Check that the cloud-hypervisor binary actually terminated 4007 assert!(output.status.success()); 4008 4009 // Do this check after shutdown of the VM as an easy way to 
ensure 4010 // all writes are flushed to disk 4011 let mut f = std::fs::File::open(serial_path).unwrap(); 4012 let mut buf = String::new(); 4013 f.read_to_string(&mut buf).unwrap(); 4014 assert!(buf.contains(CONSOLE_TEST_STRING)); 4015 }); 4016 4017 handle_child_output(r, &output); 4018 } 4019 4020 #[test] 4021 fn test_pty_interaction() { 4022 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4023 let guest = Guest::new(Box::new(focal)); 4024 let api_socket = temp_api_path(&guest.tmp_dir); 4025 let serial_option = if cfg!(target_arch = "x86_64") { 4026 " console=ttyS0" 4027 } else { 4028 " console=ttyAMA0" 4029 }; 4030 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4031 4032 let mut child = GuestCommand::new(&guest) 4033 .args(["--cpus", "boot=1"]) 4034 .args(["--memory", "size=512M"]) 4035 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4036 .args(["--cmdline", &cmdline]) 4037 .default_disks() 4038 .default_net() 4039 .args(["--serial", "null"]) 4040 .args(["--console", "pty"]) 4041 .args(["--api-socket", &api_socket]) 4042 .spawn() 4043 .unwrap(); 4044 4045 let r = std::panic::catch_unwind(|| { 4046 guest.wait_vm_boot(None).unwrap(); 4047 // Get pty fd for console 4048 let console_path = get_pty_path(&api_socket, "console"); 4049 _test_pty_interaction(console_path); 4050 4051 guest.ssh_command("sudo shutdown -h now").unwrap(); 4052 }); 4053 4054 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4055 let _ = child.kill(); 4056 let output = child.wait_with_output().unwrap(); 4057 handle_child_output(r, &output); 4058 4059 let r = std::panic::catch_unwind(|| { 4060 // Check that the cloud-hypervisor binary actually terminated 4061 assert!(output.status.success()) 4062 }); 4063 handle_child_output(r, &output); 4064 } 4065 4066 #[test] 4067 fn test_serial_socket_interaction() { 4068 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4069 let guest = Guest::new(Box::new(focal)); 4070 let serial_socket = guest.tmp_dir.as_path().join("/tmp/serial.socket"); 4071 let serial_socket_pty = guest.tmp_dir.as_path().join("/tmp/serial.pty"); 4072 let serial_option = if cfg!(target_arch = "x86_64") { 4073 " console=ttyS0" 4074 } else { 4075 " console=ttyAMA0" 4076 }; 4077 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4078 4079 let mut child = GuestCommand::new(&guest) 4080 .args(["--cpus", "boot=1"]) 4081 .args(["--memory", "size=512M"]) 4082 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4083 .args(["--cmdline", &cmdline]) 4084 .default_disks() 4085 .default_net() 4086 .args(["--console", "null"]) 4087 .args([ 4088 "--serial", 4089 format!("socket={}", serial_socket.to_str().unwrap()).as_str(), 4090 ]) 4091 .spawn() 4092 .unwrap(); 4093 4094 let _ = std::panic::catch_unwind(|| { 4095 guest.wait_vm_boot(None).unwrap(); 4096 }); 4097 4098 let mut socat_command = Command::new("socat"); 4099 let socat_args = [ 4100 &format!("pty,link={},raw", serial_socket_pty.display()), 4101 &format!("UNIX-CONNECT:{}", serial_socket.display()), 4102 ]; 4103 socat_command.args(socat_args); 4104 4105 let mut socat_child = socat_command.spawn().unwrap(); 4106 thread::sleep(std::time::Duration::new(1, 0)); 4107 4108 let _ = std::panic::catch_unwind(|| { 4109 _test_pty_interaction(serial_socket_pty); 4110 }); 4111 4112 let _ = socat_child.kill(); 4113 4114 let r = std::panic::catch_unwind(|| { 4115 guest.ssh_command("sudo shutdown -h now").unwrap(); 4116 }); 4117 4118 let _ = 
child.wait_timeout(std::time::Duration::from_secs(20)); 4119 let _ = child.kill(); 4120 let output = child.wait_with_output().unwrap(); 4121 handle_child_output(r, &output); 4122 4123 let r = std::panic::catch_unwind(|| { 4124 // Check that the cloud-hypervisor binary actually terminated 4125 assert!(output.status.success()) 4126 }); 4127 handle_child_output(r, &output); 4128 } 4129 4130 #[test] 4131 fn test_virtio_console() { 4132 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4133 let guest = Guest::new(Box::new(focal)); 4134 4135 let kernel_path = direct_kernel_boot_path(); 4136 4137 let mut child = GuestCommand::new(&guest) 4138 .args(["--cpus", "boot=1"]) 4139 .args(["--memory", "size=512M"]) 4140 .args(["--kernel", kernel_path.to_str().unwrap()]) 4141 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4142 .default_disks() 4143 .default_net() 4144 .args(["--console", "tty"]) 4145 .args(["--serial", "null"]) 4146 .capture_output() 4147 .spawn() 4148 .unwrap(); 4149 4150 let text = String::from("On a branch floating down river a cricket, singing."); 4151 let cmd = format!("echo {text} | sudo tee /dev/hvc0"); 4152 4153 let r = std::panic::catch_unwind(|| { 4154 guest.wait_vm_boot(None).unwrap(); 4155 4156 assert!(guest 4157 .does_device_vendor_pair_match("0x1043", "0x1af4") 4158 .unwrap_or_default()); 4159 4160 guest.ssh_command(&cmd).unwrap(); 4161 }); 4162 4163 let _ = child.kill(); 4164 let output = child.wait_with_output().unwrap(); 4165 handle_child_output(r, &output); 4166 4167 let r = std::panic::catch_unwind(|| { 4168 assert!(String::from_utf8_lossy(&output.stdout).contains(&text)); 4169 }); 4170 4171 handle_child_output(r, &output); 4172 } 4173 4174 #[test] 4175 fn test_console_file() { 4176 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4177 let guest = Guest::new(Box::new(focal)); 4178 4179 let console_path = guest.tmp_dir.as_path().join("/tmp/console-output"); 4180 let mut child = GuestCommand::new(&guest) 4181 .args(["--cpus", "boot=1"]) 4182 .args(["--memory", "size=512M"]) 4183 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4184 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4185 .default_disks() 4186 .default_net() 4187 .args([ 4188 "--console", 4189 format!("file={}", console_path.to_str().unwrap()).as_str(), 4190 ]) 4191 .capture_output() 4192 .spawn() 4193 .unwrap(); 4194 4195 guest.wait_vm_boot(None).unwrap(); 4196 4197 guest.ssh_command("sudo shutdown -h now").unwrap(); 4198 4199 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4200 let _ = child.kill(); 4201 let output = child.wait_with_output().unwrap(); 4202 4203 let r = std::panic::catch_unwind(|| { 4204 // Check that the cloud-hypervisor binary actually terminated 4205 assert!(output.status.success()); 4206 4207 // Do this check after shutdown of the VM as an easy way to ensure 4208 // all writes are flushed to disk 4209 let mut f = std::fs::File::open(console_path).unwrap(); 4210 let mut buf = String::new(); 4211 f.read_to_string(&mut buf).unwrap(); 4212 4213 if !buf.contains(CONSOLE_TEST_STRING) { 4214 eprintln!( 4215 "\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ====" 4216 ); 4217 } 4218 assert!(buf.contains(CONSOLE_TEST_STRING)); 4219 }); 4220 4221 handle_child_output(r, &output); 4222 } 4223 4224 #[test] 4225 #[cfg(target_arch = "x86_64")] 4226 #[cfg(not(feature = "mshv"))] 4227 // The VFIO integration test starts cloud-hypervisor guest with 3 TAP 4228 // backed networking interfaces, bound through a simple 
bridge on the host. 4229 // So if the nested cloud-hypervisor succeeds in getting a directly 4230 // assigned interface from its cloud-hypervisor host, we should be able to 4231 // ssh into it, and verify that it's running with the right kernel command 4232 // line (We tag the command line from cloud-hypervisor for that purpose). 4233 // The third device is added to validate that hotplug works correctly since 4234 // it is being added to the L2 VM through hotplugging mechanism. 4235 // Also, we pass-through a virtio-blk device to the L2 VM to test the 32-bit 4236 // vfio device support 4237 fn test_vfio() { 4238 setup_vfio_network_interfaces(); 4239 4240 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4241 let guest = Guest::new_from_ip_range(Box::new(focal), "172.18", 0); 4242 4243 let mut workload_path = dirs::home_dir().unwrap(); 4244 workload_path.push("workloads"); 4245 4246 let kernel_path = direct_kernel_boot_path(); 4247 4248 let mut vfio_path = workload_path.clone(); 4249 vfio_path.push("vfio"); 4250 4251 let mut cloud_init_vfio_base_path = vfio_path.clone(); 4252 cloud_init_vfio_base_path.push("cloudinit.img"); 4253 4254 // We copy our cloudinit into the vfio mount point, for the nested 4255 // cloud-hypervisor guest to use. 4256 rate_limited_copy( 4257 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 4258 &cloud_init_vfio_base_path, 4259 ) 4260 .expect("copying of cloud-init disk failed"); 4261 4262 let mut vfio_disk_path = workload_path.clone(); 4263 vfio_disk_path.push("vfio.img"); 4264 4265 // Create the vfio disk image 4266 let output = Command::new("mkfs.ext4") 4267 .arg("-d") 4268 .arg(vfio_path.to_str().unwrap()) 4269 .arg(vfio_disk_path.to_str().unwrap()) 4270 .arg("2g") 4271 .output() 4272 .unwrap(); 4273 if !output.status.success() { 4274 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 4275 panic!("mkfs.ext4 command generated an error"); 4276 } 4277 4278 let mut blk_file_path = workload_path; 4279 blk_file_path.push("blk.img"); 4280 4281 let vfio_tap0 = "vfio-tap0"; 4282 let vfio_tap1 = "vfio-tap1"; 4283 let vfio_tap2 = "vfio-tap2"; 4284 let vfio_tap3 = "vfio-tap3"; 4285 4286 let mut child = GuestCommand::new(&guest) 4287 .args(["--cpus", "boot=4"]) 4288 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 4289 .args(["--kernel", kernel_path.to_str().unwrap()]) 4290 .args([ 4291 "--disk", 4292 format!( 4293 "path={}", 4294 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 4295 ) 4296 .as_str(), 4297 "--disk", 4298 format!( 4299 "path={}", 4300 guest.disk_config.disk(DiskType::CloudInit).unwrap() 4301 ) 4302 .as_str(), 4303 "--disk", 4304 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 4305 "--disk", 4306 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 4307 ]) 4308 .args([ 4309 "--cmdline", 4310 format!( 4311 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 4312 ) 4313 .as_str(), 4314 ]) 4315 .args([ 4316 "--net", 4317 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 4318 "--net", 4319 format!( 4320 "tap={},mac={},iommu=on", 4321 vfio_tap1, guest.network.l2_guest_mac1 4322 ) 4323 .as_str(), 4324 "--net", 4325 format!( 4326 "tap={},mac={},iommu=on", 4327 vfio_tap2, guest.network.l2_guest_mac2 4328 ) 4329 .as_str(), 4330 "--net", 4331 format!( 4332 "tap={},mac={},iommu=on", 4333 vfio_tap3, guest.network.l2_guest_mac3 4334 ) 4335 .as_str(), 4336 ]) 4337 .capture_output() 4338 .spawn() 4339 .unwrap(); 4340 4341 
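// Give the L1 guest ample time to boot before we ask it (over SSH,
// below) to start the "vfio" systemd service that launches the nested
// L2 cloud-hypervisor.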
thread::sleep(std::time::Duration::new(30, 0)); 4342 4343 let r = std::panic::catch_unwind(|| { 4344 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 4345 thread::sleep(std::time::Duration::new(120, 0)); 4346 4347 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 4348 // added to its kernel command line. 4349 // Let's ssh into it and verify that it's there. If it is it means 4350 // we're in the right guest (The L2 one) because the QEMU L1 guest 4351 // does not have this command line tag. 4352 assert_eq!( 4353 guest 4354 .ssh_command_l2_1("grep -c VFIOTAG /proc/cmdline") 4355 .unwrap() 4356 .trim() 4357 .parse::<u32>() 4358 .unwrap_or_default(), 4359 1 4360 ); 4361 4362 // Let's also verify from the second virtio-net device passed to 4363 // the L2 VM. 4364 assert_eq!( 4365 guest 4366 .ssh_command_l2_2("grep -c VFIOTAG /proc/cmdline") 4367 .unwrap() 4368 .trim() 4369 .parse::<u32>() 4370 .unwrap_or_default(), 4371 1 4372 ); 4373 4374 // Check the amount of PCI devices appearing in L2 VM. 4375 assert_eq!( 4376 guest 4377 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4378 .unwrap() 4379 .trim() 4380 .parse::<u32>() 4381 .unwrap_or_default(), 4382 8, 4383 ); 4384 4385 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 4386 assert_eq!( 4387 guest 4388 .ssh_command_l2_1("lsblk | grep vdc | grep -c 16M") 4389 .unwrap() 4390 .trim() 4391 .parse::<u32>() 4392 .unwrap_or_default(), 4393 1 4394 ); 4395 4396 // Hotplug an extra virtio-net device through L2 VM. 4397 guest 4398 .ssh_command_l1( 4399 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 4400 ) 4401 .unwrap(); 4402 guest 4403 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 4404 .unwrap(); 4405 let vfio_hotplug_output = guest 4406 .ssh_command_l1( 4407 "sudo /mnt/ch-remote \ 4408 --api-socket /tmp/ch_api.sock \ 4409 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 4410 ) 4411 .unwrap(); 4412 assert!(vfio_hotplug_output.contains("{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}")); 4413 4414 thread::sleep(std::time::Duration::new(10, 0)); 4415 4416 // Let's also verify from the third virtio-net device passed to 4417 // the L2 VM. This third device has been hotplugged through the L2 4418 // VM, so this is our way to validate hotplug works for VFIO PCI. 4419 assert_eq!( 4420 guest 4421 .ssh_command_l2_3("grep -c VFIOTAG /proc/cmdline") 4422 .unwrap() 4423 .trim() 4424 .parse::<u32>() 4425 .unwrap_or_default(), 4426 1 4427 ); 4428 4429 // Check the amount of PCI devices appearing in L2 VM. 4430 // There should be one more device than before, raising the count 4431 // up to 9 PCI devices. 4432 assert_eq!( 4433 guest 4434 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4435 .unwrap() 4436 .trim() 4437 .parse::<u32>() 4438 .unwrap_or_default(), 4439 9, 4440 ); 4441 4442 // Let's now verify that we can correctly remove the virtio-net 4443 // device through the "remove-device" command responsible for 4444 // unplugging VFIO devices. 4445 guest 4446 .ssh_command_l1( 4447 "sudo /mnt/ch-remote \ 4448 --api-socket /tmp/ch_api.sock \ 4449 remove-device vfio123", 4450 ) 4451 .unwrap(); 4452 thread::sleep(std::time::Duration::new(10, 0)); 4453 4454 // Check the amount of PCI devices appearing in L2 VM is back down 4455 // to 8 devices. 
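// The VFIO device hotplugged earlier (id "vfio123") has just been
// removed, so the L2 guest should be back to its original device count.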
4456 assert_eq!( 4457 guest 4458 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 4459 .unwrap() 4460 .trim() 4461 .parse::<u32>() 4462 .unwrap_or_default(), 4463 8, 4464 ); 4465 4466 // Perform memory hotplug in L2 and validate the memory is showing 4467 // up as expected. In order to check, we will use the virtio-net 4468 // device already passed through L2 as a VFIO device, this will 4469 // verify that VFIO devices are functional with memory hotplug. 4470 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 4471 guest 4472 .ssh_command_l2_1( 4473 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 4474 ) 4475 .unwrap(); 4476 guest 4477 .ssh_command_l1( 4478 "sudo /mnt/ch-remote \ 4479 --api-socket /tmp/ch_api.sock \ 4480 resize --memory 1073741824", 4481 ) 4482 .unwrap(); 4483 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 4484 }); 4485 4486 let _ = child.kill(); 4487 let output = child.wait_with_output().unwrap(); 4488 4489 cleanup_vfio_network_interfaces(); 4490 4491 handle_child_output(r, &output); 4492 } 4493 4494 #[test] 4495 fn test_direct_kernel_boot_noacpi() { 4496 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4497 let guest = Guest::new(Box::new(focal)); 4498 4499 let kernel_path = direct_kernel_boot_path(); 4500 4501 let mut child = GuestCommand::new(&guest) 4502 .args(["--cpus", "boot=1"]) 4503 .args(["--memory", "size=512M"]) 4504 .args(["--kernel", kernel_path.to_str().unwrap()]) 4505 .args([ 4506 "--cmdline", 4507 format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(), 4508 ]) 4509 .default_disks() 4510 .default_net() 4511 .capture_output() 4512 .spawn() 4513 .unwrap(); 4514 4515 let r = std::panic::catch_unwind(|| { 4516 guest.wait_vm_boot(None).unwrap(); 4517 4518 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 4519 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4520 }); 4521 4522 let _ = child.kill(); 4523 let output = child.wait_with_output().unwrap(); 4524 4525 handle_child_output(r, &output); 4526 } 4527 4528 #[test] 4529 fn test_virtio_vsock() { 4530 _test_virtio_vsock(false) 4531 } 4532 4533 #[test] 4534 fn test_virtio_vsock_hotplug() { 4535 _test_virtio_vsock(true); 4536 } 4537 4538 #[test] 4539 fn test_api_http_shutdown() { 4540 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4541 let guest = Guest::new(Box::new(focal)); 4542 4543 _test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest) 4544 } 4545 4546 #[test] 4547 fn test_api_http_delete() { 4548 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4549 let guest = Guest::new(Box::new(focal)); 4550 4551 _test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest); 4552 } 4553 4554 #[test] 4555 fn test_api_http_pause_resume() { 4556 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4557 let guest = Guest::new(Box::new(focal)); 4558 4559 _test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest) 4560 } 4561 4562 #[test] 4563 fn test_api_http_create_boot() { 4564 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4565 let guest = Guest::new(Box::new(focal)); 4566 4567 _test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest) 4568 } 4569 4570 #[test] 4571 fn test_virtio_iommu() { 4572 _test_virtio_iommu(cfg!(target_arch = "x86_64")) 4573 } 4574 4575 #[test] 4576 // We cannot force the software running in the guest to reprogram the BAR 4577 // with some different addresses, but we have a reliable way of 
testing it 4578 // with a standard Linux kernel. 4579 // By removing a device from the PCI tree, and then rescanning the tree, 4580 // Linux consistently chooses to reorganize the PCI device BARs to other 4581 // locations in the guest address space. 4582 // This test creates a dedicated PCI network device to be checked as being 4583 // properly probed first, then removing it, and adding it again by doing a 4584 // rescan. 4585 fn test_pci_bar_reprogramming() { 4586 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4587 let guest = Guest::new(Box::new(focal)); 4588 4589 #[cfg(target_arch = "x86_64")] 4590 let kernel_path = direct_kernel_boot_path(); 4591 #[cfg(target_arch = "aarch64")] 4592 let kernel_path = edk2_path(); 4593 4594 let mut child = GuestCommand::new(&guest) 4595 .args(["--cpus", "boot=1"]) 4596 .args(["--memory", "size=512M"]) 4597 .args(["--kernel", kernel_path.to_str().unwrap()]) 4598 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4599 .default_disks() 4600 .args([ 4601 "--net", 4602 guest.default_net_string().as_str(), 4603 "--net", 4604 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 4605 ]) 4606 .capture_output() 4607 .spawn() 4608 .unwrap(); 4609 4610 let r = std::panic::catch_unwind(|| { 4611 guest.wait_vm_boot(None).unwrap(); 4612 4613 // 2 network interfaces + default localhost ==> 3 interfaces 4614 assert_eq!( 4615 guest 4616 .ssh_command("ip -o link | wc -l") 4617 .unwrap() 4618 .trim() 4619 .parse::<u32>() 4620 .unwrap_or_default(), 4621 3 4622 ); 4623 4624 let init_bar_addr = guest 4625 .ssh_command( 4626 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource", 4627 ) 4628 .unwrap(); 4629 4630 // Remove the PCI device 4631 guest 4632 .ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove") 4633 .unwrap(); 4634 4635 // Only 1 network interface left + default localhost ==> 2 interfaces 4636 assert_eq!( 4637 guest 4638 .ssh_command("ip -o link | wc -l") 4639 .unwrap() 4640 .trim() 4641 .parse::<u32>() 4642 .unwrap_or_default(), 4643 2 4644 ); 4645 4646 // Remove the PCI device 4647 guest 4648 .ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan") 4649 .unwrap(); 4650 4651 // Back to 2 network interface + default localhost ==> 3 interfaces 4652 assert_eq!( 4653 guest 4654 .ssh_command("ip -o link | wc -l") 4655 .unwrap() 4656 .trim() 4657 .parse::<u32>() 4658 .unwrap_or_default(), 4659 3 4660 ); 4661 4662 let new_bar_addr = guest 4663 .ssh_command( 4664 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource", 4665 ) 4666 .unwrap(); 4667 4668 // Let's compare the BAR addresses for our virtio-net device. 4669 // They should be different as we expect the BAR reprogramming 4670 // to have happened. 
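// For reference, each line of that sysfs "resource" file carries the
// start address, end address and flags of one BAR in hex; the awk
// invocation above only keeps the first field (the start address) of
// the first line, e.g. something like 0x00000000fea00000 (illustrative
// value only).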
4671 assert_ne!(init_bar_addr, new_bar_addr); 4672 }); 4673 4674 let _ = child.kill(); 4675 let output = child.wait_with_output().unwrap(); 4676 4677 handle_child_output(r, &output); 4678 } 4679 4680 #[test] 4681 fn test_memory_mergeable_off() { 4682 test_memory_mergeable(false) 4683 } 4684 4685 #[test] 4686 #[cfg(target_arch = "x86_64")] 4687 fn test_cpu_hotplug() { 4688 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4689 let guest = Guest::new(Box::new(focal)); 4690 let api_socket = temp_api_path(&guest.tmp_dir); 4691 4692 let kernel_path = direct_kernel_boot_path(); 4693 4694 let mut child = GuestCommand::new(&guest) 4695 .args(["--cpus", "boot=2,max=4"]) 4696 .args(["--memory", "size=512M"]) 4697 .args(["--kernel", kernel_path.to_str().unwrap()]) 4698 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4699 .default_disks() 4700 .default_net() 4701 .args(["--api-socket", &api_socket]) 4702 .capture_output() 4703 .spawn() 4704 .unwrap(); 4705 4706 let r = std::panic::catch_unwind(|| { 4707 guest.wait_vm_boot(None).unwrap(); 4708 4709 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4710 4711 // Resize the VM 4712 let desired_vcpus = 4; 4713 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4714 4715 guest 4716 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4717 .unwrap(); 4718 guest 4719 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4720 .unwrap(); 4721 thread::sleep(std::time::Duration::new(10, 0)); 4722 assert_eq!( 4723 guest.get_cpu_count().unwrap_or_default(), 4724 u32::from(desired_vcpus) 4725 ); 4726 4727 guest.reboot_linux(0, None); 4728 4729 assert_eq!( 4730 guest.get_cpu_count().unwrap_or_default(), 4731 u32::from(desired_vcpus) 4732 ); 4733 4734 // Resize the VM 4735 let desired_vcpus = 2; 4736 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4737 4738 thread::sleep(std::time::Duration::new(10, 0)); 4739 assert_eq!( 4740 guest.get_cpu_count().unwrap_or_default(), 4741 u32::from(desired_vcpus) 4742 ); 4743 4744 // Resize the VM back up to 4 4745 let desired_vcpus = 4; 4746 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4747 4748 guest 4749 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4750 .unwrap(); 4751 guest 4752 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4753 .unwrap(); 4754 thread::sleep(std::time::Duration::new(10, 0)); 4755 assert_eq!( 4756 guest.get_cpu_count().unwrap_or_default(), 4757 u32::from(desired_vcpus) 4758 ); 4759 }); 4760 4761 let _ = child.kill(); 4762 let output = child.wait_with_output().unwrap(); 4763 4764 handle_child_output(r, &output); 4765 } 4766 4767 #[test] 4768 fn test_memory_hotplug() { 4769 #[cfg(target_arch = "aarch64")] 4770 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 4771 #[cfg(target_arch = "x86_64")] 4772 let focal_image = FOCAL_IMAGE_NAME.to_string(); 4773 let focal = UbuntuDiskConfig::new(focal_image); 4774 let guest = Guest::new(Box::new(focal)); 4775 let api_socket = temp_api_path(&guest.tmp_dir); 4776 4777 #[cfg(target_arch = "aarch64")] 4778 let kernel_path = edk2_path(); 4779 #[cfg(target_arch = "x86_64")] 4780 let kernel_path = direct_kernel_boot_path(); 4781 4782 let mut child = GuestCommand::new(&guest) 4783 .args(["--cpus", "boot=2,max=4"]) 4784 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4785 .args(["--kernel", kernel_path.to_str().unwrap()]) 4786 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4787 .default_disks() 4788 .default_net() 
4789 .args(["--balloon", "size=0"]) 4790 .args(["--api-socket", &api_socket]) 4791 .capture_output() 4792 .spawn() 4793 .unwrap(); 4794 4795 let r = std::panic::catch_unwind(|| { 4796 guest.wait_vm_boot(None).unwrap(); 4797 4798 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4799 4800 guest.enable_memory_hotplug(); 4801 4802 // Add RAM to the VM 4803 let desired_ram = 1024 << 20; 4804 resize_command(&api_socket, None, Some(desired_ram), None, None); 4805 4806 thread::sleep(std::time::Duration::new(10, 0)); 4807 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4808 4809 // Use balloon to remove RAM from the VM 4810 let desired_balloon = 512 << 20; 4811 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4812 4813 thread::sleep(std::time::Duration::new(10, 0)); 4814 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4815 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4816 4817 guest.reboot_linux(0, None); 4818 4819 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4820 4821 // Use balloon add RAM to the VM 4822 let desired_balloon = 0; 4823 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4824 4825 thread::sleep(std::time::Duration::new(10, 0)); 4826 4827 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4828 4829 guest.enable_memory_hotplug(); 4830 4831 // Add RAM to the VM 4832 let desired_ram = 2048 << 20; 4833 resize_command(&api_socket, None, Some(desired_ram), None, None); 4834 4835 thread::sleep(std::time::Duration::new(10, 0)); 4836 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4837 4838 // Remove RAM to the VM (only applies after reboot) 4839 let desired_ram = 1024 << 20; 4840 resize_command(&api_socket, None, Some(desired_ram), None, None); 4841 4842 guest.reboot_linux(1, None); 4843 4844 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4845 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4846 }); 4847 4848 let _ = child.kill(); 4849 let output = child.wait_with_output().unwrap(); 4850 4851 handle_child_output(r, &output); 4852 } 4853 4854 #[test] 4855 #[cfg(not(feature = "mshv"))] 4856 fn test_virtio_mem() { 4857 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4858 let guest = Guest::new(Box::new(focal)); 4859 let api_socket = temp_api_path(&guest.tmp_dir); 4860 4861 let kernel_path = direct_kernel_boot_path(); 4862 4863 let mut child = GuestCommand::new(&guest) 4864 .args(["--cpus", "boot=2,max=4"]) 4865 .args([ 4866 "--memory", 4867 "size=512M,hotplug_method=virtio-mem,hotplug_size=8192M", 4868 ]) 4869 .args(["--kernel", kernel_path.to_str().unwrap()]) 4870 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4871 .default_disks() 4872 .default_net() 4873 .args(["--api-socket", &api_socket]) 4874 .capture_output() 4875 .spawn() 4876 .unwrap(); 4877 4878 let r = std::panic::catch_unwind(|| { 4879 guest.wait_vm_boot(None).unwrap(); 4880 4881 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4882 4883 guest.enable_memory_hotplug(); 4884 4885 // Add RAM to the VM 4886 let desired_ram = 1024 << 20; 4887 resize_command(&api_socket, None, Some(desired_ram), None, None); 4888 4889 thread::sleep(std::time::Duration::new(10, 0)); 4890 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4891 4892 // Add RAM to the VM 4893 let desired_ram = 2048 << 20; 4894 resize_command(&api_socket, None, Some(desired_ram), None, None); 4895 4896 
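// Resizing through virtio-mem is asynchronous: the guest driver still
// has to plug the newly offered memory blocks, hence the sleep before
// checking the total. The resize_command() helper is roughly the
// equivalent of running e.g.
//   ch-remote --api-socket <api_socket> resize --memory 2147483648
// by hand (2048 << 20 bytes).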
thread::sleep(std::time::Duration::new(10, 0)); 4897 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4898 4899 // Remove RAM from the VM 4900 let desired_ram = 1024 << 20; 4901 resize_command(&api_socket, None, Some(desired_ram), None, None); 4902 4903 thread::sleep(std::time::Duration::new(10, 0)); 4904 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4905 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4906 4907 guest.reboot_linux(0, None); 4908 4909 // Check the amount of memory after reboot is 1GiB 4910 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4911 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4912 4913 // Check we can still resize to 512MiB 4914 let desired_ram = 512 << 20; 4915 resize_command(&api_socket, None, Some(desired_ram), None, None); 4916 thread::sleep(std::time::Duration::new(10, 0)); 4917 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4918 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4919 }); 4920 4921 let _ = child.kill(); 4922 let output = child.wait_with_output().unwrap(); 4923 4924 handle_child_output(r, &output); 4925 } 4926 4927 #[test] 4928 #[cfg(target_arch = "x86_64")] 4929 #[cfg(not(feature = "mshv"))] 4930 // Test both vCPU and memory resizing together 4931 fn test_resize() { 4932 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4933 let guest = Guest::new(Box::new(focal)); 4934 let api_socket = temp_api_path(&guest.tmp_dir); 4935 4936 let kernel_path = direct_kernel_boot_path(); 4937 4938 let mut child = GuestCommand::new(&guest) 4939 .args(["--cpus", "boot=2,max=4"]) 4940 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4941 .args(["--kernel", kernel_path.to_str().unwrap()]) 4942 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4943 .default_disks() 4944 .default_net() 4945 .args(["--api-socket", &api_socket]) 4946 .capture_output() 4947 .spawn() 4948 .unwrap(); 4949 4950 let r = std::panic::catch_unwind(|| { 4951 guest.wait_vm_boot(None).unwrap(); 4952 4953 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4954 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4955 4956 guest.enable_memory_hotplug(); 4957 4958 // Resize the VM 4959 let desired_vcpus = 4; 4960 let desired_ram = 1024 << 20; 4961 resize_command( 4962 &api_socket, 4963 Some(desired_vcpus), 4964 Some(desired_ram), 4965 None, 4966 None, 4967 ); 4968 4969 guest 4970 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4971 .unwrap(); 4972 guest 4973 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4974 .unwrap(); 4975 thread::sleep(std::time::Duration::new(10, 0)); 4976 assert_eq!( 4977 guest.get_cpu_count().unwrap_or_default(), 4978 u32::from(desired_vcpus) 4979 ); 4980 4981 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4982 }); 4983 4984 let _ = child.kill(); 4985 let output = child.wait_with_output().unwrap(); 4986 4987 handle_child_output(r, &output); 4988 } 4989 4990 #[test] 4991 fn test_memory_overhead() { 4992 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4993 let guest = Guest::new(Box::new(focal)); 4994 4995 let kernel_path = direct_kernel_boot_path(); 4996 4997 let guest_memory_size_kb = 512 * 1024; 4998 4999 let mut child = GuestCommand::new(&guest) 5000 .args(["--cpus", "boot=1"]) 5001 .args(["--memory", format!("size={guest_memory_size_kb}K").as_str()]) 5002 .args(["--kernel", kernel_path.to_str().unwrap()]) 5003 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE]) 5004 .default_net() 5005 .default_disks() 5006 .capture_output() 5007 .spawn() 5008 .unwrap(); 5009 5010 guest.wait_vm_boot(None).unwrap(); 5011 5012 let r = std::panic::catch_unwind(|| { 5013 let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb); 5014 eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}"); 5015 assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB); 5016 }); 5017 5018 let _ = child.kill(); 5019 let output = child.wait_with_output().unwrap(); 5020 5021 handle_child_output(r, &output); 5022 } 5023 5024 #[test] 5025 fn test_disk_hotplug() { 5026 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5027 let guest = Guest::new(Box::new(focal)); 5028 5029 #[cfg(target_arch = "x86_64")] 5030 let kernel_path = direct_kernel_boot_path(); 5031 #[cfg(target_arch = "aarch64")] 5032 let kernel_path = edk2_path(); 5033 5034 let api_socket = temp_api_path(&guest.tmp_dir); 5035 5036 let mut child = GuestCommand::new(&guest) 5037 .args(["--api-socket", &api_socket]) 5038 .args(["--cpus", "boot=1"]) 5039 .args(["--memory", "size=512M"]) 5040 .args(["--kernel", kernel_path.to_str().unwrap()]) 5041 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5042 .default_disks() 5043 .default_net() 5044 .capture_output() 5045 .spawn() 5046 .unwrap(); 5047 5048 let r = std::panic::catch_unwind(|| { 5049 guest.wait_vm_boot(None).unwrap(); 5050 5051 // Check /dev/vdc is not there 5052 assert_eq!( 5053 guest 5054 .ssh_command("lsblk | grep -c vdc.*16M || true") 5055 .unwrap() 5056 .trim() 5057 .parse::<u32>() 5058 .unwrap_or(1), 5059 0 5060 ); 5061 5062 // Now let's add the extra disk. 5063 let mut blk_file_path = dirs::home_dir().unwrap(); 5064 blk_file_path.push("workloads"); 5065 blk_file_path.push("blk.img"); 5066 let (cmd_success, cmd_output) = remote_command_w_output( 5067 &api_socket, 5068 "add-disk", 5069 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5070 ); 5071 assert!(cmd_success); 5072 assert!(String::from_utf8_lossy(&cmd_output) 5073 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5074 5075 thread::sleep(std::time::Duration::new(10, 0)); 5076 5077 // Check that /dev/vdc exists and the block size is 16M. 5078 assert_eq!( 5079 guest 5080 .ssh_command("lsblk | grep vdc | grep -c 16M") 5081 .unwrap() 5082 .trim() 5083 .parse::<u32>() 5084 .unwrap_or_default(), 5085 1 5086 ); 5087 // And check the block device can be read. 5088 guest 5089 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5090 .unwrap(); 5091 5092 // Let's remove it the extra disk. 5093 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5094 thread::sleep(std::time::Duration::new(5, 0)); 5095 // And check /dev/vdc is not there 5096 assert_eq!( 5097 guest 5098 .ssh_command("lsblk | grep -c vdc.*16M || true") 5099 .unwrap() 5100 .trim() 5101 .parse::<u32>() 5102 .unwrap_or(1), 5103 0 5104 ); 5105 5106 // And add it back to validate unplug did work correctly. 5107 let (cmd_success, cmd_output) = remote_command_w_output( 5108 &api_socket, 5109 "add-disk", 5110 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5111 ); 5112 assert!(cmd_success); 5113 assert!(String::from_utf8_lossy(&cmd_output) 5114 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5115 5116 thread::sleep(std::time::Duration::new(10, 0)); 5117 5118 // Check that /dev/vdc exists and the block size is 16M. 
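// The disk was re-added with the same id and shows up at the same BDF
// (0000:00:06.0), confirming the earlier unplug properly freed the
// PCI slot.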
5119 assert_eq!( 5120 guest 5121 .ssh_command("lsblk | grep vdc | grep -c 16M") 5122 .unwrap() 5123 .trim() 5124 .parse::<u32>() 5125 .unwrap_or_default(), 5126 1 5127 ); 5128 // And check the block device can be read. 5129 guest 5130 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5131 .unwrap(); 5132 5133 // Reboot the VM. 5134 guest.reboot_linux(0, None); 5135 5136 // Check still there after reboot 5137 assert_eq!( 5138 guest 5139 .ssh_command("lsblk | grep vdc | grep -c 16M") 5140 .unwrap() 5141 .trim() 5142 .parse::<u32>() 5143 .unwrap_or_default(), 5144 1 5145 ); 5146 5147 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5148 5149 thread::sleep(std::time::Duration::new(20, 0)); 5150 5151 // Check device has gone away 5152 assert_eq!( 5153 guest 5154 .ssh_command("lsblk | grep -c vdc.*16M || true") 5155 .unwrap() 5156 .trim() 5157 .parse::<u32>() 5158 .unwrap_or(1), 5159 0 5160 ); 5161 5162 guest.reboot_linux(1, None); 5163 5164 // Check device still absent 5165 assert_eq!( 5166 guest 5167 .ssh_command("lsblk | grep -c vdc.*16M || true") 5168 .unwrap() 5169 .trim() 5170 .parse::<u32>() 5171 .unwrap_or(1), 5172 0 5173 ); 5174 }); 5175 5176 let _ = child.kill(); 5177 let output = child.wait_with_output().unwrap(); 5178 5179 handle_child_output(r, &output); 5180 } 5181 5182 fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String { 5183 const LOOP_CONFIGURE: u64 = 0x4c0a; 5184 const LOOP_CTL_GET_FREE: u64 = 0x4c82; 5185 const LOOP_CTL_PATH: &str = "/dev/loop-control"; 5186 const LOOP_DEVICE_PREFIX: &str = "/dev/loop"; 5187 5188 #[repr(C)] 5189 struct LoopInfo64 { 5190 lo_device: u64, 5191 lo_inode: u64, 5192 lo_rdevice: u64, 5193 lo_offset: u64, 5194 lo_sizelimit: u64, 5195 lo_number: u32, 5196 lo_encrypt_type: u32, 5197 lo_encrypt_key_size: u32, 5198 lo_flags: u32, 5199 lo_file_name: [u8; 64], 5200 lo_crypt_name: [u8; 64], 5201 lo_encrypt_key: [u8; 32], 5202 lo_init: [u64; 2], 5203 } 5204 5205 impl Default for LoopInfo64 { 5206 fn default() -> Self { 5207 LoopInfo64 { 5208 lo_device: 0, 5209 lo_inode: 0, 5210 lo_rdevice: 0, 5211 lo_offset: 0, 5212 lo_sizelimit: 0, 5213 lo_number: 0, 5214 lo_encrypt_type: 0, 5215 lo_encrypt_key_size: 0, 5216 lo_flags: 0, 5217 lo_file_name: [0; 64], 5218 lo_crypt_name: [0; 64], 5219 lo_encrypt_key: [0; 32], 5220 lo_init: [0; 2], 5221 } 5222 } 5223 } 5224 5225 #[derive(Default)] 5226 #[repr(C)] 5227 struct LoopConfig { 5228 fd: u32, 5229 block_size: u32, 5230 info: LoopInfo64, 5231 _reserved: [u64; 8], 5232 } 5233 5234 // Open loop-control device 5235 let loop_ctl_file = OpenOptions::new() 5236 .read(true) 5237 .write(true) 5238 .open(LOOP_CTL_PATH) 5239 .unwrap(); 5240 5241 // Request a free loop device 5242 let loop_device_number = 5243 unsafe { libc::ioctl(loop_ctl_file.as_raw_fd(), LOOP_CTL_GET_FREE as _) }; 5244 5245 if loop_device_number < 0 { 5246 panic!("Couldn't find a free loop device"); 5247 } 5248 5249 // Create loop device path 5250 let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}"); 5251 5252 // Open loop device 5253 let loop_device_file = OpenOptions::new() 5254 .read(true) 5255 .write(true) 5256 .open(&loop_device_path) 5257 .unwrap(); 5258 5259 // Open backing file 5260 let backing_file = OpenOptions::new() 5261 .read(true) 5262 .write(true) 5263 .open(backing_file_path) 5264 .unwrap(); 5265 5266 let loop_config = LoopConfig { 5267 fd: backing_file.as_raw_fd() as u32, 5268 block_size, 5269 ..Default::default() 5270 }; 5271 5272 
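// Attach the backing file with the LOOP_CONFIGURE ioctl. Together with
// the LOOP_CTL_GET_FREE call above, this is roughly what
// "losetup --sector-size 4096 --find --show <file>" does (assuming a
// recent util-linux). The ioctl can fail transiently, e.g. if the
// freshly allocated device is still busy, so retry a few times before
// giving up.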
for i in 0..num_retries { 5273 let ret = unsafe { 5274 libc::ioctl( 5275 loop_device_file.as_raw_fd(), 5276 LOOP_CONFIGURE as _, 5277 &loop_config, 5278 ) 5279 }; 5280 if ret != 0 { 5281 if i < num_retries - 1 { 5282 println!( 5283 "Iteration {}: Failed to configure the loop device {}: {}", 5284 i, 5285 loop_device_path, 5286 std::io::Error::last_os_error() 5287 ); 5288 } else { 5289 panic!( 5290 "Failed {} times trying to configure the loop device {}: {}", 5291 num_retries, 5292 loop_device_path, 5293 std::io::Error::last_os_error() 5294 ); 5295 } 5296 } else { 5297 break; 5298 } 5299 5300 // Wait for a bit before retrying 5301 thread::sleep(std::time::Duration::new(5, 0)); 5302 } 5303 5304 loop_device_path 5305 } 5306 5307 #[test] 5308 fn test_virtio_block_topology() { 5309 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5310 let guest = Guest::new(Box::new(focal)); 5311 5312 let kernel_path = direct_kernel_boot_path(); 5313 let test_disk_path = guest.tmp_dir.as_path().join("test.img"); 5314 5315 let output = exec_host_command_output( 5316 format!( 5317 "qemu-img create -f raw {} 16M", 5318 test_disk_path.to_str().unwrap() 5319 ) 5320 .as_str(), 5321 ); 5322 if !output.status.success() { 5323 let stdout = String::from_utf8_lossy(&output.stdout); 5324 let stderr = String::from_utf8_lossy(&output.stderr); 5325 panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}"); 5326 } 5327 5328 let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5); 5329 5330 let mut child = GuestCommand::new(&guest) 5331 .args(["--cpus", "boot=1"]) 5332 .args(["--memory", "size=512M"]) 5333 .args(["--kernel", kernel_path.to_str().unwrap()]) 5334 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5335 .args([ 5336 "--disk", 5337 format!( 5338 "path={}", 5339 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5340 ) 5341 .as_str(), 5342 "--disk", 5343 format!( 5344 "path={}", 5345 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5346 ) 5347 .as_str(), 5348 "--disk", 5349 format!("path={}", &loop_dev).as_str(), 5350 ]) 5351 .default_net() 5352 .capture_output() 5353 .spawn() 5354 .unwrap(); 5355 5356 let r = std::panic::catch_unwind(|| { 5357 guest.wait_vm_boot(None).unwrap(); 5358 5359 // MIN-IO column 5360 assert_eq!( 5361 guest 5362 .ssh_command("lsblk -t| grep vdc | awk '{print $3}'") 5363 .unwrap() 5364 .trim() 5365 .parse::<u32>() 5366 .unwrap_or_default(), 5367 4096 5368 ); 5369 // PHY-SEC column 5370 assert_eq!( 5371 guest 5372 .ssh_command("lsblk -t| grep vdc | awk '{print $5}'") 5373 .unwrap() 5374 .trim() 5375 .parse::<u32>() 5376 .unwrap_or_default(), 5377 4096 5378 ); 5379 // LOG-SEC column 5380 assert_eq!( 5381 guest 5382 .ssh_command("lsblk -t| grep vdc | awk '{print $6}'") 5383 .unwrap() 5384 .trim() 5385 .parse::<u32>() 5386 .unwrap_or_default(), 5387 4096 5388 ); 5389 }); 5390 5391 let _ = child.kill(); 5392 let output = child.wait_with_output().unwrap(); 5393 5394 handle_child_output(r, &output); 5395 5396 Command::new("losetup") 5397 .args(["-d", &loop_dev]) 5398 .output() 5399 .expect("loop device not found"); 5400 } 5401 5402 #[test] 5403 fn test_virtio_balloon_deflate_on_oom() { 5404 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5405 let guest = Guest::new(Box::new(focal)); 5406 5407 let kernel_path = direct_kernel_boot_path(); 5408 5409 let api_socket = temp_api_path(&guest.tmp_dir); 5410 5411 //Let's start a 4G guest with balloon occupied 2G memory 5412 let mut child = GuestCommand::new(&guest) 5413 
.args(["--api-socket", &api_socket]) 5414 .args(["--cpus", "boot=1"]) 5415 .args(["--memory", "size=4G"]) 5416 .args(["--kernel", kernel_path.to_str().unwrap()]) 5417 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5418 .args(["--balloon", "size=2G,deflate_on_oom=on"]) 5419 .default_disks() 5420 .default_net() 5421 .capture_output() 5422 .spawn() 5423 .unwrap(); 5424 5425 let r = std::panic::catch_unwind(|| { 5426 guest.wait_vm_boot(None).unwrap(); 5427 5428 // Wait for balloon memory's initialization and check its size. 5429 // The virtio-balloon driver might take a few seconds to report the 5430 // balloon effective size back to the VMM. 5431 thread::sleep(std::time::Duration::new(20, 0)); 5432 5433 let orig_balloon = balloon_size(&api_socket); 5434 println!("The original balloon memory size is {orig_balloon} bytes"); 5435 assert!(orig_balloon == 2147483648); 5436 5437 // Two steps to verify if the 'deflate_on_oom' parameter works. 5438 // 1st: run a command to trigger an OOM in the guest. 5439 guest 5440 .ssh_command("echo f | sudo tee /proc/sysrq-trigger") 5441 .unwrap(); 5442 5443 // Give some time for the OOM to happen in the guest and be reported 5444 // back to the host. 5445 thread::sleep(std::time::Duration::new(20, 0)); 5446 5447 // 2nd: check balloon_mem's value to verify balloon has been automatically deflated 5448 let deflated_balloon = balloon_size(&api_socket); 5449 println!("After deflating, balloon memory size is {deflated_balloon} bytes"); 5450 // Verify the balloon size deflated 5451 assert!(deflated_balloon < 2147483648); 5452 }); 5453 5454 let _ = child.kill(); 5455 let output = child.wait_with_output().unwrap(); 5456 5457 handle_child_output(r, &output); 5458 } 5459 5460 #[test] 5461 #[cfg(not(feature = "mshv"))] 5462 fn test_virtio_balloon_free_page_reporting() { 5463 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5464 let guest = Guest::new(Box::new(focal)); 5465 5466 //Let's start a 4G guest with balloon occupied 2G memory 5467 let mut child = GuestCommand::new(&guest) 5468 .args(["--cpus", "boot=1"]) 5469 .args(["--memory", "size=4G"]) 5470 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 5471 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5472 .args(["--balloon", "size=0,free_page_reporting=on"]) 5473 .default_disks() 5474 .default_net() 5475 .capture_output() 5476 .spawn() 5477 .unwrap(); 5478 5479 let pid = child.id(); 5480 let r = std::panic::catch_unwind(|| { 5481 guest.wait_vm_boot(None).unwrap(); 5482 5483 // Check the initial RSS is less than 1GiB 5484 let rss = process_rss_kib(pid); 5485 println!("RSS {rss} < 1048576"); 5486 assert!(rss < 1048576); 5487 5488 // Spawn a command inside the guest to consume 2GiB of RAM for 60 5489 // seconds 5490 let guest_ip = guest.network.guest_ip.clone(); 5491 thread::spawn(move || { 5492 ssh_command_ip( 5493 "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60", 5494 &guest_ip, 5495 DEFAULT_SSH_RETRIES, 5496 DEFAULT_SSH_TIMEOUT, 5497 ) 5498 .unwrap(); 5499 }); 5500 5501 // Wait for 50 seconds to make sure the stress command is consuming 5502 // the expected amount of memory. 5503 thread::sleep(std::time::Duration::new(50, 0)); 5504 let rss = process_rss_kib(pid); 5505 println!("RSS {rss} >= 2097152"); 5506 assert!(rss >= 2097152); 5507 5508 // Wait for an extra minute to make sure the stress command has 5509 // completed and that the guest reported the free pages to the VMM 5510 // through the virtio-balloon device. We expect the RSS to be under 5511 // 2GiB. 
5512 thread::sleep(std::time::Duration::new(60, 0)); 5513 let rss = process_rss_kib(pid); 5514 println!("RSS {rss} < 2097152"); 5515 assert!(rss < 2097152); 5516 }); 5517 5518 let _ = child.kill(); 5519 let output = child.wait_with_output().unwrap(); 5520 5521 handle_child_output(r, &output); 5522 } 5523 5524 #[test] 5525 fn test_pmem_hotplug() { 5526 _test_pmem_hotplug(None) 5527 } 5528 5529 #[test] 5530 fn test_pmem_multi_segment_hotplug() { 5531 _test_pmem_hotplug(Some(15)) 5532 } 5533 5534 fn _test_pmem_hotplug(pci_segment: Option<u16>) { 5535 #[cfg(target_arch = "aarch64")] 5536 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 5537 #[cfg(target_arch = "x86_64")] 5538 let focal_image = FOCAL_IMAGE_NAME.to_string(); 5539 let focal = UbuntuDiskConfig::new(focal_image); 5540 let guest = Guest::new(Box::new(focal)); 5541 5542 #[cfg(target_arch = "x86_64")] 5543 let kernel_path = direct_kernel_boot_path(); 5544 #[cfg(target_arch = "aarch64")] 5545 let kernel_path = edk2_path(); 5546 5547 let api_socket = temp_api_path(&guest.tmp_dir); 5548 5549 let mut cmd = GuestCommand::new(&guest); 5550 5551 cmd.args(["--api-socket", &api_socket]) 5552 .args(["--cpus", "boot=1"]) 5553 .args(["--memory", "size=512M"]) 5554 .args(["--kernel", kernel_path.to_str().unwrap()]) 5555 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5556 .default_disks() 5557 .default_net() 5558 .capture_output(); 5559 5560 if pci_segment.is_some() { 5561 cmd.args([ 5562 "--platform", 5563 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5564 ]); 5565 } 5566 5567 let mut child = cmd.spawn().unwrap(); 5568 5569 let r = std::panic::catch_unwind(|| { 5570 guest.wait_vm_boot(None).unwrap(); 5571 5572 // Check /dev/pmem0 is not there 5573 assert_eq!( 5574 guest 5575 .ssh_command("lsblk | grep -c pmem0 || true") 5576 .unwrap() 5577 .trim() 5578 .parse::<u32>() 5579 .unwrap_or(1), 5580 0 5581 ); 5582 5583 let pmem_temp_file = TempFile::new().unwrap(); 5584 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 5585 let (cmd_success, cmd_output) = remote_command_w_output( 5586 &api_socket, 5587 "add-pmem", 5588 Some(&format!( 5589 "file={},id=test0{}", 5590 pmem_temp_file.as_path().to_str().unwrap(), 5591 if let Some(pci_segment) = pci_segment { 5592 format!(",pci_segment={pci_segment}") 5593 } else { 5594 "".to_owned() 5595 } 5596 )), 5597 ); 5598 assert!(cmd_success); 5599 if let Some(pci_segment) = pci_segment { 5600 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5601 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5602 ))); 5603 } else { 5604 assert!(String::from_utf8_lossy(&cmd_output) 5605 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5606 } 5607 5608 // Check that /dev/pmem0 exists and the block size is 128M 5609 assert_eq!( 5610 guest 5611 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5612 .unwrap() 5613 .trim() 5614 .parse::<u32>() 5615 .unwrap_or_default(), 5616 1 5617 ); 5618 5619 guest.reboot_linux(0, None); 5620 5621 // Check still there after reboot 5622 assert_eq!( 5623 guest 5624 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5625 .unwrap() 5626 .trim() 5627 .parse::<u32>() 5628 .unwrap_or_default(), 5629 1 5630 ); 5631 5632 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5633 5634 thread::sleep(std::time::Duration::new(20, 0)); 5635 5636 // Check device has gone away 5637 assert_eq!( 5638 guest 5639 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5640 .unwrap() 5641 .trim() 5642 .parse::<u32>() 5643 .unwrap_or(1), 5644 0 5645 ); 
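// Reboot once more so we can verify that the removed pmem device does
// not reappear after a fresh boot.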
5646 5647 guest.reboot_linux(1, None); 5648 5649 // Check still absent after reboot 5650 assert_eq!( 5651 guest 5652 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5653 .unwrap() 5654 .trim() 5655 .parse::<u32>() 5656 .unwrap_or(1), 5657 0 5658 ); 5659 }); 5660 5661 let _ = child.kill(); 5662 let output = child.wait_with_output().unwrap(); 5663 5664 handle_child_output(r, &output); 5665 } 5666 5667 #[test] 5668 fn test_net_hotplug() { 5669 _test_net_hotplug(None) 5670 } 5671 5672 #[test] 5673 fn test_net_multi_segment_hotplug() { 5674 _test_net_hotplug(Some(15)) 5675 } 5676 5677 fn _test_net_hotplug(pci_segment: Option<u16>) { 5678 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5679 let guest = Guest::new(Box::new(focal)); 5680 5681 #[cfg(target_arch = "x86_64")] 5682 let kernel_path = direct_kernel_boot_path(); 5683 #[cfg(target_arch = "aarch64")] 5684 let kernel_path = edk2_path(); 5685 5686 let api_socket = temp_api_path(&guest.tmp_dir); 5687 5688 // Boot without network 5689 let mut cmd = GuestCommand::new(&guest); 5690 5691 cmd.args(["--api-socket", &api_socket]) 5692 .args(["--cpus", "boot=1"]) 5693 .args(["--memory", "size=512M"]) 5694 .args(["--kernel", kernel_path.to_str().unwrap()]) 5695 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5696 .default_disks() 5697 .capture_output(); 5698 5699 if pci_segment.is_some() { 5700 cmd.args([ 5701 "--platform", 5702 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5703 ]); 5704 } 5705 5706 let mut child = cmd.spawn().unwrap(); 5707 5708 thread::sleep(std::time::Duration::new(20, 0)); 5709 5710 let r = std::panic::catch_unwind(|| { 5711 // Add network 5712 let (cmd_success, cmd_output) = remote_command_w_output( 5713 &api_socket, 5714 "add-net", 5715 Some( 5716 format!( 5717 "{}{},id=test0", 5718 guest.default_net_string(), 5719 if let Some(pci_segment) = pci_segment { 5720 format!(",pci_segment={pci_segment}") 5721 } else { 5722 "".to_owned() 5723 } 5724 ) 5725 .as_str(), 5726 ), 5727 ); 5728 assert!(cmd_success); 5729 5730 if let Some(pci_segment) = pci_segment { 5731 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5732 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5733 ))); 5734 } else { 5735 assert!(String::from_utf8_lossy(&cmd_output) 5736 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}")); 5737 } 5738 5739 thread::sleep(std::time::Duration::new(5, 0)); 5740 5741 // 1 network interfaces + default localhost ==> 2 interfaces 5742 assert_eq!( 5743 guest 5744 .ssh_command("ip -o link | wc -l") 5745 .unwrap() 5746 .trim() 5747 .parse::<u32>() 5748 .unwrap_or_default(), 5749 2 5750 ); 5751 5752 // Remove network 5753 assert!(remote_command(&api_socket, "remove-device", Some("test0"),)); 5754 thread::sleep(std::time::Duration::new(5, 0)); 5755 5756 let (cmd_success, cmd_output) = remote_command_w_output( 5757 &api_socket, 5758 "add-net", 5759 Some( 5760 format!( 5761 "{}{},id=test1", 5762 guest.default_net_string(), 5763 if let Some(pci_segment) = pci_segment { 5764 format!(",pci_segment={pci_segment}") 5765 } else { 5766 "".to_owned() 5767 } 5768 ) 5769 .as_str(), 5770 ), 5771 ); 5772 assert!(cmd_success); 5773 5774 if let Some(pci_segment) = pci_segment { 5775 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5776 "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5777 ))); 5778 } else { 5779 assert!(String::from_utf8_lossy(&cmd_output) 5780 .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}")); 5781 } 5782 5783 
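// Whichever segment is used, the re-added interface ("test1") lands in
// the slot that "test0" previously occupied, so the guest should again
// see a single NIC next to the loopback interface.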
thread::sleep(std::time::Duration::new(5, 0)); 5784 5785 // 1 network interfaces + default localhost ==> 2 interfaces 5786 assert_eq!( 5787 guest 5788 .ssh_command("ip -o link | wc -l") 5789 .unwrap() 5790 .trim() 5791 .parse::<u32>() 5792 .unwrap_or_default(), 5793 2 5794 ); 5795 5796 guest.reboot_linux(0, None); 5797 5798 // Check still there after reboot 5799 // 1 network interfaces + default localhost ==> 2 interfaces 5800 assert_eq!( 5801 guest 5802 .ssh_command("ip -o link | wc -l") 5803 .unwrap() 5804 .trim() 5805 .parse::<u32>() 5806 .unwrap_or_default(), 5807 2 5808 ); 5809 }); 5810 5811 let _ = child.kill(); 5812 let output = child.wait_with_output().unwrap(); 5813 5814 handle_child_output(r, &output); 5815 } 5816 5817 #[test] 5818 fn test_initramfs() { 5819 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5820 let guest = Guest::new(Box::new(focal)); 5821 let mut workload_path = dirs::home_dir().unwrap(); 5822 workload_path.push("workloads"); 5823 5824 #[cfg(target_arch = "x86_64")] 5825 let mut kernels = vec![direct_kernel_boot_path()]; 5826 #[cfg(target_arch = "aarch64")] 5827 let kernels = vec![direct_kernel_boot_path()]; 5828 5829 #[cfg(target_arch = "x86_64")] 5830 { 5831 let mut pvh_kernel_path = workload_path.clone(); 5832 pvh_kernel_path.push("vmlinux"); 5833 kernels.push(pvh_kernel_path); 5834 } 5835 5836 let mut initramfs_path = workload_path; 5837 initramfs_path.push("alpine_initramfs.img"); 5838 5839 let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg"); 5840 let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}"); 5841 5842 kernels.iter().for_each(|k_path| { 5843 let mut child = GuestCommand::new(&guest) 5844 .args(["--kernel", k_path.to_str().unwrap()]) 5845 .args(["--initramfs", initramfs_path.to_str().unwrap()]) 5846 .args(["--cmdline", &cmdline]) 5847 .capture_output() 5848 .spawn() 5849 .unwrap(); 5850 5851 thread::sleep(std::time::Duration::new(20, 0)); 5852 5853 let _ = child.kill(); 5854 let output = child.wait_with_output().unwrap(); 5855 5856 let r = std::panic::catch_unwind(|| { 5857 let s = String::from_utf8_lossy(&output.stdout); 5858 5859 assert_ne!(s.lines().position(|line| line == test_string), None); 5860 }); 5861 5862 handle_child_output(r, &output); 5863 }); 5864 } 5865 5866 // One thing to note about this test. The virtio-net device is heavily used 5867 // through each ssh command. There's no need to perform a dedicated test to 5868 // verify the migration went well for virtio-net. 
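// Both snapshot/restore variants below go through the same
// _test_snapshot_restore() helper: boot a source VM (optionally
// resizing it with virtio-mem and the balloon), exercise the common
// virtio devices, pause the VM, take a snapshot and check the events
// emitted along the way.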
5869 #[test] 5870 #[cfg(not(feature = "mshv"))] 5871 fn test_snapshot_restore_hotplug_virtiomem() { 5872 _test_snapshot_restore(true); 5873 } 5874 5875 #[test] 5876 fn test_snapshot_restore_basic() { 5877 _test_snapshot_restore(false); 5878 } 5879 5880 fn _test_snapshot_restore(use_hotplug: bool) { 5881 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5882 let guest = Guest::new(Box::new(focal)); 5883 let kernel_path = direct_kernel_boot_path(); 5884 5885 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 5886 5887 let net_id = "net123"; 5888 let net_params = format!( 5889 "id={},tap=,mac={},ip={},mask=255.255.255.0", 5890 net_id, guest.network.guest_mac, guest.network.host_ip 5891 ); 5892 let mut mem_params = "size=4G"; 5893 5894 if use_hotplug { 5895 mem_params = "size=4G,hotplug_method=virtio-mem,hotplug_size=32G" 5896 } 5897 5898 let cloudinit_params = format!( 5899 "path={},iommu=on", 5900 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5901 ); 5902 5903 let socket = temp_vsock_path(&guest.tmp_dir); 5904 let event_path = temp_event_monitor_path(&guest.tmp_dir); 5905 5906 let mut child = GuestCommand::new(&guest) 5907 .args(["--api-socket", &api_socket_source]) 5908 .args(["--event-monitor", format!("path={event_path}").as_str()]) 5909 .args(["--cpus", "boot=4"]) 5910 .args(["--memory", mem_params]) 5911 .args(["--balloon", "size=0"]) 5912 .args(["--kernel", kernel_path.to_str().unwrap()]) 5913 .args([ 5914 "--disk", 5915 format!( 5916 "path={}", 5917 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5918 ) 5919 .as_str(), 5920 "--disk", 5921 cloudinit_params.as_str(), 5922 ]) 5923 .args(["--net", net_params.as_str()]) 5924 .args(["--vsock", format!("cid=3,socket={socket}").as_str()]) 5925 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5926 .capture_output() 5927 .spawn() 5928 .unwrap(); 5929 5930 let console_text = String::from("On a branch floating down river a cricket, singing."); 5931 // Create the snapshot directory 5932 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 5933 5934 let r = std::panic::catch_unwind(|| { 5935 guest.wait_vm_boot(None).unwrap(); 5936 5937 // Check the number of vCPUs 5938 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 5939 // Check the guest RAM 5940 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 5941 if use_hotplug { 5942 // Increase guest RAM with virtio-mem 5943 resize_command( 5944 &api_socket_source, 5945 None, 5946 Some(6 << 30), 5947 None, 5948 Some(&event_path), 5949 ); 5950 thread::sleep(std::time::Duration::new(5, 0)); 5951 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 5952 // Use balloon to remove RAM from the VM 5953 resize_command( 5954 &api_socket_source, 5955 None, 5956 None, 5957 Some(1 << 30), 5958 Some(&event_path), 5959 ); 5960 thread::sleep(std::time::Duration::new(5, 0)); 5961 let total_memory = guest.get_total_memory().unwrap_or_default(); 5962 assert!(total_memory > 4_800_000); 5963 assert!(total_memory < 5_760_000); 5964 } 5965 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 5966 guest.check_devices_common(Some(&socket), Some(&console_text), None); 5967 5968 // x86_64: We check that removing and adding back the virtio-net device 5969 // does not break the snapshot/restore support for virtio-pci. 5970 // This is an important thing to test as the hotplug will 5971 // trigger a PCI BAR reprogramming, which is a good way of 5972 // checking if the stored resources are correctly restored. 
5973 // Unplug the virtio-net device 5974 // AArch64: Device hotplug is currently not supported, skipping here. 5975 #[cfg(target_arch = "x86_64")] 5976 { 5977 assert!(remote_command( 5978 &api_socket_source, 5979 "remove-device", 5980 Some(net_id), 5981 )); 5982 thread::sleep(std::time::Duration::new(10, 0)); 5983 let latest_events = [&MetaEvent { 5984 event: "device-removed".to_string(), 5985 device_id: Some(net_id.to_string()), 5986 }]; 5987 assert!(check_latest_events_exact(&latest_events, &event_path)); 5988 5989 // Plug the virtio-net device again 5990 assert!(remote_command( 5991 &api_socket_source, 5992 "add-net", 5993 Some(net_params.as_str()), 5994 )); 5995 thread::sleep(std::time::Duration::new(10, 0)); 5996 } 5997 5998 // Pause the VM 5999 assert!(remote_command(&api_socket_source, "pause", None)); 6000 let latest_events = [ 6001 &MetaEvent { 6002 event: "pausing".to_string(), 6003 device_id: None, 6004 }, 6005 &MetaEvent { 6006 event: "paused".to_string(), 6007 device_id: None, 6008 }, 6009 ]; 6010 assert!(check_latest_events_exact(&latest_events, &event_path)); 6011 6012 // Take a snapshot from the VM 6013 assert!(remote_command( 6014 &api_socket_source, 6015 "snapshot", 6016 Some(format!("file://{snapshot_dir}").as_str()), 6017 )); 6018 6019 // Wait to make sure the snapshot is completed 6020 thread::sleep(std::time::Duration::new(10, 0)); 6021 6022 let latest_events = [ 6023 &MetaEvent { 6024 event: "snapshotting".to_string(), 6025 device_id: None, 6026 }, 6027 &MetaEvent { 6028 event: "snapshotted".to_string(), 6029 device_id: None, 6030 }, 6031 ]; 6032 assert!(check_latest_events_exact(&latest_events, &event_path)); 6033 }); 6034 6035 // Shutdown the source VM and check console output 6036 let _ = child.kill(); 6037 let output = child.wait_with_output().unwrap(); 6038 handle_child_output(r, &output); 6039 6040 let r = std::panic::catch_unwind(|| { 6041 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 6042 }); 6043 6044 handle_child_output(r, &output); 6045 6046 // Remove the vsock socket file. 
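// (The killed source VM leaves the vsock Unix socket file behind; clearing
// it here presumably lets the restored VM re-create it cleanly. The shell
// call below could equivalently be `let _ = std::fs::remove_file(&socket);`,
// mentioned only as an illustrative alternative.)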
6047 Command::new("rm") 6048 .arg("-f") 6049 .arg(socket.as_str()) 6050 .output() 6051 .unwrap(); 6052 6053 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 6054 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 6055 6056 // Restore the VM from the snapshot 6057 let mut child = GuestCommand::new(&guest) 6058 .args(["--api-socket", &api_socket_restored]) 6059 .args([ 6060 "--event-monitor", 6061 format!("path={event_path_restored}").as_str(), 6062 ]) 6063 .args([ 6064 "--restore", 6065 format!("source_url=file://{snapshot_dir}").as_str(), 6066 ]) 6067 .capture_output() 6068 .spawn() 6069 .unwrap(); 6070 6071 // Wait for the VM to be restored 6072 thread::sleep(std::time::Duration::new(10, 0)); 6073 let expected_events = [ 6074 &MetaEvent { 6075 event: "starting".to_string(), 6076 device_id: None, 6077 }, 6078 &MetaEvent { 6079 event: "activated".to_string(), 6080 device_id: Some("__console".to_string()), 6081 }, 6082 &MetaEvent { 6083 event: "activated".to_string(), 6084 device_id: Some("__rng".to_string()), 6085 }, 6086 &MetaEvent { 6087 event: "restoring".to_string(), 6088 device_id: None, 6089 }, 6090 ]; 6091 assert!(check_sequential_events( 6092 &expected_events, 6093 &event_path_restored 6094 )); 6095 let latest_events = [&MetaEvent { 6096 event: "restored".to_string(), 6097 device_id: None, 6098 }]; 6099 assert!(check_latest_events_exact( 6100 &latest_events, 6101 &event_path_restored 6102 )); 6103 6104 let r = std::panic::catch_unwind(|| { 6105 // Resume the VM 6106 assert!(remote_command(&api_socket_restored, "resume", None)); 6107 let latest_events = [ 6108 &MetaEvent { 6109 event: "resuming".to_string(), 6110 device_id: None, 6111 }, 6112 &MetaEvent { 6113 event: "resumed".to_string(), 6114 device_id: None, 6115 }, 6116 ]; 6117 assert!(check_latest_events_exact( 6118 &latest_events, 6119 &event_path_restored 6120 )); 6121 6122 // Perform same checks to validate VM has been properly restored 6123 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 6124 let total_memory = guest.get_total_memory().unwrap_or_default(); 6125 if !use_hotplug { 6126 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 6127 } else { 6128 assert!(total_memory > 4_800_000); 6129 assert!(total_memory < 5_760_000); 6130 // Deflate balloon to restore entire RAM to the VM 6131 resize_command(&api_socket_restored, None, None, Some(0), None); 6132 thread::sleep(std::time::Duration::new(5, 0)); 6133 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 6134 // Decrease guest RAM with virtio-mem 6135 resize_command(&api_socket_restored, None, Some(5 << 30), None, None); 6136 thread::sleep(std::time::Duration::new(5, 0)); 6137 let total_memory = guest.get_total_memory().unwrap_or_default(); 6138 assert!(total_memory > 4_800_000); 6139 assert!(total_memory < 5_760_000); 6140 } 6141 6142 guest.check_devices_common(Some(&socket), Some(&console_text), None); 6143 }); 6144 // Shutdown the target VM and check console output 6145 let _ = child.kill(); 6146 let output = child.wait_with_output().unwrap(); 6147 handle_child_output(r, &output); 6148 6149 let r = std::panic::catch_unwind(|| { 6150 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 6151 }); 6152 6153 handle_child_output(r, &output); 6154 } 6155 6156 #[test] 6157 fn test_counters() { 6158 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6159 let guest = Guest::new(Box::new(focal)); 6160 let api_socket = temp_api_path(&guest.tmp_dir); 
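// This assumes `get_counters()` returns a snapshot of the VMM's per-device
// statistics (queried over the API socket) that supports ordered comparison:
// the `dd` write below, plus the SSH traffic carrying it, should bump both
// the block and net counters, which is why a strict `>` on the whole
// snapshot is expected to hold.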
6161 6162 let mut cmd = GuestCommand::new(&guest); 6163 cmd.args(["--cpus", "boot=1"]) 6164 .args(["--memory", "size=512M"]) 6165 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 6166 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6167 .default_disks() 6168 .args(["--net", guest.default_net_string().as_str()]) 6169 .args(["--api-socket", &api_socket]) 6170 .capture_output(); 6171 6172 let mut child = cmd.spawn().unwrap(); 6173 6174 let r = std::panic::catch_unwind(|| { 6175 guest.wait_vm_boot(None).unwrap(); 6176 6177 let orig_counters = get_counters(&api_socket); 6178 guest 6179 .ssh_command("dd if=/dev/zero of=test count=8 bs=1M") 6180 .unwrap(); 6181 6182 let new_counters = get_counters(&api_socket); 6183 6184 // Check that all the counters have increased 6185 assert!(new_counters > orig_counters); 6186 }); 6187 6188 let _ = child.kill(); 6189 let output = child.wait_with_output().unwrap(); 6190 6191 handle_child_output(r, &output); 6192 } 6193 6194 #[test] 6195 #[cfg(feature = "guest_debug")] 6196 fn test_coredump() { 6197 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6198 let guest = Guest::new(Box::new(focal)); 6199 let api_socket = temp_api_path(&guest.tmp_dir); 6200 6201 let mut cmd = GuestCommand::new(&guest); 6202 cmd.args(["--cpus", "boot=4"]) 6203 .args(["--memory", "size=4G"]) 6204 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6205 .default_disks() 6206 .args(["--net", guest.default_net_string().as_str()]) 6207 .args(["--api-socket", &api_socket]) 6208 .capture_output(); 6209 6210 let mut child = cmd.spawn().unwrap(); 6211 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6212 6213 let r = std::panic::catch_unwind(|| { 6214 guest.wait_vm_boot(None).unwrap(); 6215 6216 assert!(remote_command(&api_socket, "pause", None)); 6217 6218 assert!(remote_command( 6219 &api_socket, 6220 "coredump", 6221 Some(format!("file://{vmcore_file}").as_str()), 6222 )); 6223 6224 // the num of CORE notes should equals to vcpu 6225 let readelf_core_num_cmd = 6226 format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l"); 6227 let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd); 6228 assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4"); 6229 6230 // the num of QEMU notes should equals to vcpu 6231 let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l"); 6232 let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd); 6233 assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4"); 6234 }); 6235 6236 let _ = child.kill(); 6237 let output = child.wait_with_output().unwrap(); 6238 6239 handle_child_output(r, &output); 6240 } 6241 6242 #[test] 6243 #[cfg(feature = "guest_debug")] 6244 fn test_coredump_no_pause() { 6245 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6246 let guest = Guest::new(Box::new(focal)); 6247 let api_socket = temp_api_path(&guest.tmp_dir); 6248 6249 let mut cmd = GuestCommand::new(&guest); 6250 cmd.args(["--cpus", "boot=4"]) 6251 .args(["--memory", "size=4G"]) 6252 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6253 .default_disks() 6254 .args(["--net", guest.default_net_string().as_str()]) 6255 .args(["--api-socket", &api_socket]) 6256 .capture_output(); 6257 6258 let mut child = cmd.spawn().unwrap(); 6259 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6260 6261 let r = std::panic::catch_unwind(|| { 6262 guest.wait_vm_boot(None).unwrap(); 6263 6264 
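// Unlike test_coredump() above, this variant deliberately skips the explicit
// "pause": the point is that taking a coredump of a running VM leaves it in
// the "Running" state afterwards (checked below), i.e. the VMM presumably
// handles any pausing it needs internally. As an optional extra sanity check
// (not part of the original test), the state can also be queried up front:
assert_eq!(vm_state(&api_socket), "Running");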
assert!(remote_command( 6265 &api_socket, 6266 "coredump", 6267 Some(format!("file://{vmcore_file}").as_str()), 6268 )); 6269 6270 assert_eq!(vm_state(&api_socket), "Running"); 6271 }); 6272 6273 let _ = child.kill(); 6274 let output = child.wait_with_output().unwrap(); 6275 6276 handle_child_output(r, &output); 6277 } 6278 6279 #[test] 6280 fn test_watchdog() { 6281 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6282 let guest = Guest::new(Box::new(focal)); 6283 let api_socket = temp_api_path(&guest.tmp_dir); 6284 6285 let kernel_path = direct_kernel_boot_path(); 6286 6287 let mut cmd = GuestCommand::new(&guest); 6288 cmd.args(["--cpus", "boot=1"]) 6289 .args(["--memory", "size=512M"]) 6290 .args(["--kernel", kernel_path.to_str().unwrap()]) 6291 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6292 .default_disks() 6293 .args(["--net", guest.default_net_string().as_str()]) 6294 .args(["--watchdog"]) 6295 .args(["--api-socket", &api_socket]) 6296 .capture_output(); 6297 6298 let mut child = cmd.spawn().unwrap(); 6299 6300 let r = std::panic::catch_unwind(|| { 6301 guest.wait_vm_boot(None).unwrap(); 6302 6303 let mut expected_reboot_count = 1; 6304 6305 // Enable the watchdog with a 15s timeout 6306 enable_guest_watchdog(&guest, 15); 6307 6308 // Reboot and check that systemd has activated the watchdog 6309 guest.ssh_command("sudo reboot").unwrap(); 6310 guest.wait_vm_boot(None).unwrap(); 6311 expected_reboot_count += 1; 6312 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6313 assert_eq!( 6314 guest 6315 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 6316 .unwrap() 6317 .trim() 6318 .parse::<u32>() 6319 .unwrap_or_default(), 6320 2 6321 ); 6322 6323 // Allow some normal time to elapse to check we don't get spurious reboots 6324 thread::sleep(std::time::Duration::new(40, 0)); 6325 // Check no reboot 6326 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6327 6328 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 
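// Breakdown of the command below: `screen -dmS reboot` runs the payload in a
// detached session so the SSH call returns immediately; after the 5s delay,
// `echo s > /proc/sysrq-trigger` syncs filesystems and `echo c` crashes the
// kernel, so the guest stops servicing the watchdog and the watchdog-driven
// reboot counted afterwards is expected to fire.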
6329 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 6330 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 6331 guest.wait_vm_boot(Some(50)).unwrap(); 6332 // Check a reboot is triggered by the watchdog 6333 expected_reboot_count += 1; 6334 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6335 6336 #[cfg(target_arch = "x86_64")] 6337 { 6338 // Now pause the VM and remain offline for 30s 6339 assert!(remote_command(&api_socket, "pause", None)); 6340 thread::sleep(std::time::Duration::new(30, 0)); 6341 assert!(remote_command(&api_socket, "resume", None)); 6342 6343 // Check no reboot 6344 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6345 } 6346 }); 6347 6348 let _ = child.kill(); 6349 let output = child.wait_with_output().unwrap(); 6350 6351 handle_child_output(r, &output); 6352 } 6353 6354 #[test] 6355 fn test_pvpanic() { 6356 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 6357 let guest = Guest::new(Box::new(jammy)); 6358 let api_socket = temp_api_path(&guest.tmp_dir); 6359 let event_path = temp_event_monitor_path(&guest.tmp_dir); 6360 6361 let kernel_path = direct_kernel_boot_path(); 6362 6363 let mut cmd = GuestCommand::new(&guest); 6364 cmd.args(["--cpus", "boot=1"]) 6365 .args(["--memory", "size=512M"]) 6366 .args(["--kernel", kernel_path.to_str().unwrap()]) 6367 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6368 .default_disks() 6369 .args(["--net", guest.default_net_string().as_str()]) 6370 .args(["--pvpanic"]) 6371 .args(["--api-socket", &api_socket]) 6372 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6373 .capture_output(); 6374 6375 let mut child = cmd.spawn().unwrap(); 6376 6377 let r = std::panic::catch_unwind(|| { 6378 guest.wait_vm_boot(None).unwrap(); 6379 6380 // Trigger guest a panic 6381 make_guest_panic(&guest); 6382 6383 // Wait a while for guest 6384 thread::sleep(std::time::Duration::new(10, 0)); 6385 6386 let expected_sequential_events = [&MetaEvent { 6387 event: "panic".to_string(), 6388 device_id: None, 6389 }]; 6390 assert!(check_latest_events_exact( 6391 &expected_sequential_events, 6392 &event_path 6393 )); 6394 }); 6395 6396 let _ = child.kill(); 6397 let output = child.wait_with_output().unwrap(); 6398 6399 handle_child_output(r, &output); 6400 } 6401 6402 #[test] 6403 fn test_tap_from_fd() { 6404 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6405 let guest = Guest::new(Box::new(focal)); 6406 let kernel_path = direct_kernel_boot_path(); 6407 6408 // Create a TAP interface with multi-queue enabled 6409 let num_queue_pairs: usize = 2; 6410 6411 use std::str::FromStr; 6412 let taps = net_util::open_tap( 6413 Some("chtap0"), 6414 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 6415 None, 6416 &mut None, 6417 None, 6418 num_queue_pairs, 6419 Some(libc::O_RDWR | libc::O_NONBLOCK), 6420 ) 6421 .unwrap(); 6422 6423 let mut child = GuestCommand::new(&guest) 6424 .args(["--cpus", &format!("boot={num_queue_pairs}")]) 6425 .args(["--memory", "size=512M"]) 6426 .args(["--kernel", kernel_path.to_str().unwrap()]) 6427 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6428 .default_disks() 6429 .args([ 6430 "--net", 6431 &format!( 6432 "fd=[{},{}],mac={},num_queues={}", 6433 taps[0].as_raw_fd(), 6434 taps[1].as_raw_fd(), 6435 guest.network.guest_mac, 6436 num_queue_pairs * 2 6437 ), 6438 ]) 6439 .capture_output() 6440 .spawn() 6441 .unwrap(); 6442 
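// The TAP file descriptors held in `taps` are handed to the VMM purely by
// number through the `fd=[..]` parameter above, so they must still be open
// in this process when the child is spawned (the child inherits them);
// dropping `taps` earlier would leave the guest with dead descriptors. The
// `num_queues` value is twice the number of queue pairs because each pair
// contributes one RX and one TX queue.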
6443 let r = std::panic::catch_unwind(|| { 6444 guest.wait_vm_boot(None).unwrap(); 6445 6446 assert_eq!( 6447 guest 6448 .ssh_command("ip -o link | wc -l") 6449 .unwrap() 6450 .trim() 6451 .parse::<u32>() 6452 .unwrap_or_default(), 6453 2 6454 ); 6455 6456 guest.reboot_linux(0, None); 6457 6458 assert_eq!( 6459 guest 6460 .ssh_command("ip -o link | wc -l") 6461 .unwrap() 6462 .trim() 6463 .parse::<u32>() 6464 .unwrap_or_default(), 6465 2 6466 ); 6467 }); 6468 6469 let _ = child.kill(); 6470 let output = child.wait_with_output().unwrap(); 6471 6472 handle_child_output(r, &output); 6473 } 6474 6475 // By design, a guest VM won't be able to connect to the host 6476 // machine when using a macvtap network interface (while it can 6477 // communicate externally). As a workaround, this integration 6478 // test creates two macvtap interfaces in 'bridge' mode on the 6479 // same physical net interface, one for the guest and one for 6480 // the host. With additional setup on the IP address and the 6481 // routing table, it enables the communications between the 6482 // guest VM and the host machine. 6483 // Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail 6484 fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) { 6485 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6486 let guest = Guest::new(Box::new(focal)); 6487 let api_socket = temp_api_path(&guest.tmp_dir); 6488 6489 #[cfg(target_arch = "x86_64")] 6490 let kernel_path = direct_kernel_boot_path(); 6491 #[cfg(target_arch = "aarch64")] 6492 let kernel_path = edk2_path(); 6493 6494 let phy_net = "eth0"; 6495 6496 // Create a macvtap interface for the guest VM to use 6497 assert!(exec_host_command_status(&format!( 6498 "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mod bridge" 6499 )) 6500 .success()); 6501 assert!(exec_host_command_status(&format!( 6502 "sudo ip link set {} address {} up", 6503 guest_macvtap_name, guest.network.guest_mac 6504 )) 6505 .success()); 6506 assert!( 6507 exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success() 6508 ); 6509 6510 let tap_index = 6511 fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap(); 6512 let tap_device = format!("/dev/tap{}", tap_index.trim()); 6513 6514 assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success()); 6515 6516 let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap(); 6517 let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6518 assert!(tap_fd1 > 0); 6519 let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6520 assert!(tap_fd2 > 0); 6521 6522 // Create a macvtap on the same physical net interface for 6523 // the host machine to use 6524 assert!(exec_host_command_status(&format!( 6525 "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mod bridge" 6526 )) 6527 .success()); 6528 // Use default mask "255.255.255.0" 6529 assert!(exec_host_command_status(&format!( 6530 "sudo ip address add {}/24 dev {}", 6531 guest.network.host_ip, host_macvtap_name 6532 )) 6533 .success()); 6534 assert!( 6535 exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up")) 6536 .success() 6537 ); 6538 6539 let mut guest_command = GuestCommand::new(&guest); 6540 guest_command 6541 .args(["--cpus", "boot=2"]) 6542 .args(["--memory", "size=512M"]) 6543 .args(["--kernel", kernel_path.to_str().unwrap()]) 6544 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE]) 6545 .default_disks() 6546 .args(["--api-socket", &api_socket]); 6547 6548 let net_params = format!( 6549 "fd=[{},{}],mac={},num_queues=4", 6550 tap_fd1, tap_fd2, guest.network.guest_mac 6551 ); 6552 6553 if !hotplug { 6554 guest_command.args(["--net", &net_params]); 6555 } 6556 6557 let mut child = guest_command.capture_output().spawn().unwrap(); 6558 6559 if hotplug { 6560 // Give some time to the VMM process to listen to the API 6561 // socket. This is the only requirement to avoid the following 6562 // call to ch-remote from failing. 6563 thread::sleep(std::time::Duration::new(10, 0)); 6564 // Hotplug the virtio-net device 6565 let (cmd_success, cmd_output) = 6566 remote_command_w_output(&api_socket, "add-net", Some(&net_params)); 6567 assert!(cmd_success); 6568 #[cfg(target_arch = "x86_64")] 6569 assert!(String::from_utf8_lossy(&cmd_output) 6570 .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}")); 6571 #[cfg(target_arch = "aarch64")] 6572 assert!(String::from_utf8_lossy(&cmd_output) 6573 .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}")); 6574 } 6575 6576 // The functional connectivity provided by the virtio-net device 6577 // gets tested through wait_vm_boot() as it expects to receive a 6578 // HTTP request, and through the SSH command as well. 6579 let r = std::panic::catch_unwind(|| { 6580 guest.wait_vm_boot(None).unwrap(); 6581 6582 assert_eq!( 6583 guest 6584 .ssh_command("ip -o link | wc -l") 6585 .unwrap() 6586 .trim() 6587 .parse::<u32>() 6588 .unwrap_or_default(), 6589 2 6590 ); 6591 6592 guest.reboot_linux(0, None); 6593 6594 assert_eq!( 6595 guest 6596 .ssh_command("ip -o link | wc -l") 6597 .unwrap() 6598 .trim() 6599 .parse::<u32>() 6600 .unwrap_or_default(), 6601 2 6602 ); 6603 }); 6604 6605 let _ = child.kill(); 6606 6607 exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}")); 6608 exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}")); 6609 6610 let output = child.wait_with_output().unwrap(); 6611 6612 handle_child_output(r, &output); 6613 } 6614 6615 #[test] 6616 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6617 fn test_macvtap() { 6618 _test_macvtap(false, "guestmacvtap0", "hostmacvtap0") 6619 } 6620 6621 #[test] 6622 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6623 fn test_macvtap_hotplug() { 6624 _test_macvtap(true, "guestmacvtap1", "hostmacvtap1") 6625 } 6626 6627 #[test] 6628 #[cfg(not(feature = "mshv"))] 6629 fn test_ovs_dpdk() { 6630 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6631 let guest1 = Guest::new(Box::new(focal1)); 6632 6633 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6634 let guest2 = Guest::new(Box::new(focal2)); 6635 let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir)); 6636 6637 let (mut child1, mut child2) = 6638 setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false); 6639 6640 // Create the snapshot directory 6641 let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir); 6642 6643 let r = std::panic::catch_unwind(|| { 6644 // Remove one of the two ports from the OVS bridge 6645 assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success()); 6646 6647 // Spawn a new netcat listener in the first VM 6648 let guest_ip = guest1.network.guest_ip.clone(); 6649 thread::spawn(move || { 6650 ssh_command_ip( 6651 "nc -l 12345", 6652 &guest_ip, 6653 DEFAULT_SSH_RETRIES, 6654 DEFAULT_SSH_TIMEOUT, 6655 ) 6656 .unwrap(); 6657 }); 6658 6659 // Wait for the 
server to be listening 6660 thread::sleep(std::time::Duration::new(5, 0)); 6661 6662 // Check the connection fails this time 6663 assert!(guest2.ssh_command("nc -vz 172.100.0.1 12345").is_err()); 6664 6665 // Add the OVS port back 6666 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 6667 6668 // And finally check the connection is functional again 6669 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6670 6671 // Pause the VM 6672 assert!(remote_command(&api_socket_source, "pause", None)); 6673 6674 // Take a snapshot from the VM 6675 assert!(remote_command( 6676 &api_socket_source, 6677 "snapshot", 6678 Some(format!("file://{snapshot_dir}").as_str()), 6679 )); 6680 6681 // Wait to make sure the snapshot is completed 6682 thread::sleep(std::time::Duration::new(10, 0)); 6683 }); 6684 6685 // Shutdown the source VM 6686 let _ = child2.kill(); 6687 let output = child2.wait_with_output().unwrap(); 6688 handle_child_output(r, &output); 6689 6690 // Remove the vhost-user socket file. 6691 Command::new("rm") 6692 .arg("-f") 6693 .arg("/tmp/dpdkvhostclient2") 6694 .output() 6695 .unwrap(); 6696 6697 let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir)); 6698 // Restore the VM from the snapshot 6699 let mut child2 = GuestCommand::new(&guest2) 6700 .args(["--api-socket", &api_socket_restored]) 6701 .args([ 6702 "--restore", 6703 format!("source_url=file://{snapshot_dir}").as_str(), 6704 ]) 6705 .capture_output() 6706 .spawn() 6707 .unwrap(); 6708 6709 // Wait for the VM to be restored 6710 thread::sleep(std::time::Duration::new(10, 0)); 6711 6712 let r = std::panic::catch_unwind(|| { 6713 // Resume the VM 6714 assert!(remote_command(&api_socket_restored, "resume", None)); 6715 6716 // Spawn a new netcat listener in the first VM 6717 let guest_ip = guest1.network.guest_ip.clone(); 6718 thread::spawn(move || { 6719 ssh_command_ip( 6720 "nc -l 12345", 6721 &guest_ip, 6722 DEFAULT_SSH_RETRIES, 6723 DEFAULT_SSH_TIMEOUT, 6724 ) 6725 .unwrap(); 6726 }); 6727 6728 // Wait for the server to be listening 6729 thread::sleep(std::time::Duration::new(5, 0)); 6730 6731 // And check the connection is still functional after restore 6732 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6733 }); 6734 6735 let _ = child1.kill(); 6736 let _ = child2.kill(); 6737 6738 let output = child1.wait_with_output().unwrap(); 6739 child2.wait().unwrap(); 6740 6741 cleanup_ovs_dpdk(); 6742 6743 handle_child_output(r, &output); 6744 } 6745 6746 fn setup_spdk_nvme(nvme_dir: &std::path::Path) { 6747 cleanup_spdk_nvme(); 6748 6749 assert!(exec_host_command_status(&format!( 6750 "mkdir -p {}", 6751 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6752 )) 6753 .success()); 6754 assert!(exec_host_command_status(&format!( 6755 "truncate {} -s 128M", 6756 nvme_dir.join("test-disk.raw").to_str().unwrap() 6757 )) 6758 .success()); 6759 assert!(exec_host_command_status(&format!( 6760 "mkfs.ext4 {}", 6761 nvme_dir.join("test-disk.raw").to_str().unwrap() 6762 )) 6763 .success()); 6764 6765 // Start the SPDK nvmf_tgt daemon to present NVMe device as a VFIO user device 6766 Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt") 6767 .args(["-i", "0", "-m", "0x1"]) 6768 .spawn() 6769 .unwrap(); 6770 thread::sleep(std::time::Duration::new(2, 0)); 6771 6772 assert!(exec_host_command_status( 6773 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER" 6774 ) 6775 
.success()); 6776 assert!(exec_host_command_status(&format!( 6777 "/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512", 6778 nvme_dir.join("test-disk.raw").to_str().unwrap() 6779 )) 6780 .success()); 6781 assert!(exec_host_command_status( 6782 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test" 6783 ) 6784 .success()); 6785 assert!(exec_host_command_status( 6786 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode test" 6787 ) 6788 .success()); 6789 assert!(exec_host_command_status(&format!( 6790 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0", 6791 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6792 )) 6793 .success()); 6794 } 6795 6796 fn cleanup_spdk_nvme() { 6797 exec_host_command_status("pkill -f nvmf_tgt"); 6798 } 6799 6800 #[test] 6801 fn test_vfio_user() { 6802 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 6803 let jammy = UbuntuDiskConfig::new(jammy_image); 6804 let guest = Guest::new(Box::new(jammy)); 6805 6806 let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user"); 6807 setup_spdk_nvme(spdk_nvme_dir.as_path()); 6808 6809 let api_socket = temp_api_path(&guest.tmp_dir); 6810 let mut child = GuestCommand::new(&guest) 6811 .args(["--api-socket", &api_socket]) 6812 .args(["--cpus", "boot=1"]) 6813 .args(["--memory", "size=512M,shared=on,hugepages=on"]) 6814 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6815 .args(["--serial", "tty", "--console", "off"]) 6816 .default_disks() 6817 .default_net() 6818 .capture_output() 6819 .spawn() 6820 .unwrap(); 6821 6822 let r = std::panic::catch_unwind(|| { 6823 guest.wait_vm_boot(None).unwrap(); 6824 6825 // Hotplug the SPDK-NVMe device to the VM 6826 let (cmd_success, cmd_output) = remote_command_w_output( 6827 &api_socket, 6828 "add-user-device", 6829 Some(&format!( 6830 "socket={},id=vfio_user0", 6831 spdk_nvme_dir 6832 .as_path() 6833 .join("nvme-vfio-user/cntrl") 6834 .to_str() 6835 .unwrap(), 6836 )), 6837 ); 6838 assert!(cmd_success); 6839 assert!(String::from_utf8_lossy(&cmd_output) 6840 .contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}")); 6841 6842 thread::sleep(std::time::Duration::new(10, 0)); 6843 6844 // Check both if /dev/nvme exists and if the block size is 128M. 6845 assert_eq!( 6846 guest 6847 .ssh_command("lsblk | grep nvme0n1 | grep -c 128M") 6848 .unwrap() 6849 .trim() 6850 .parse::<u32>() 6851 .unwrap_or_default(), 6852 1 6853 ); 6854 6855 // Check changes persist after reboot 6856 assert_eq!( 6857 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6858 "" 6859 ); 6860 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 6861 guest 6862 .ssh_command("echo test123 | sudo tee /mnt/test") 6863 .unwrap(); 6864 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 6865 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 6866 6867 guest.reboot_linux(0, None); 6868 assert_eq!( 6869 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6870 "" 6871 ); 6872 assert_eq!( 6873 guest.ssh_command("sudo cat /mnt/test").unwrap().trim(), 6874 "test123" 6875 ); 6876 }); 6877 6878 cleanup_spdk_nvme(); 6879 6880 let _ = child.kill(); 6881 let output = child.wait_with_output().unwrap(); 6882 6883 handle_child_output(r, &output); 6884 } 6885 6886 #[test] 6887 #[cfg(target_arch = "x86_64")] 6888 fn test_vdpa_block() { 6889 // Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded. 
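// If the module is absent the test silently returns early instead of
// failing, so hosts without the vDPA block simulator (typically provided by
// `modprobe vdpa_sim_blk`, with the /dev/vhost-vdpa-* nodes then created via
// the iproute2 `vdpa` tool) simply skip it; exact device provisioning is
// host-setup dependent and outside the scope of this test.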
6890 if !exec_host_command_status("lsmod | grep vdpa_sim_blk").success() { 6891 return; 6892 } 6893 6894 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6895 let guest = Guest::new(Box::new(focal)); 6896 let api_socket = temp_api_path(&guest.tmp_dir); 6897 6898 let kernel_path = direct_kernel_boot_path(); 6899 6900 let mut child = GuestCommand::new(&guest) 6901 .args(["--cpus", "boot=2"]) 6902 .args(["--memory", "size=512M,hugepages=on"]) 6903 .args(["--kernel", kernel_path.to_str().unwrap()]) 6904 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6905 .default_disks() 6906 .default_net() 6907 .args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"]) 6908 .args(["--platform", "num_pci_segments=2,iommu_segments=1"]) 6909 .args(["--api-socket", &api_socket]) 6910 .capture_output() 6911 .spawn() 6912 .unwrap(); 6913 6914 let r = std::panic::catch_unwind(|| { 6915 guest.wait_vm_boot(None).unwrap(); 6916 6917 // Check both if /dev/vdc exists and if the block size is 128M. 6918 assert_eq!( 6919 guest 6920 .ssh_command("lsblk | grep vdc | grep -c 128M") 6921 .unwrap() 6922 .trim() 6923 .parse::<u32>() 6924 .unwrap_or_default(), 6925 1 6926 ); 6927 6928 // Check the content of the block device after we wrote to it. 6929 // The vpda-sim-blk should let us read what we previously wrote. 6930 guest 6931 .ssh_command("sudo bash -c 'echo foobar > /dev/vdc'") 6932 .unwrap(); 6933 assert_eq!( 6934 guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(), 6935 "foobar" 6936 ); 6937 6938 // Hotplug an extra vDPA block device behind the vIOMMU 6939 // Add a new vDPA device to the VM 6940 let (cmd_success, cmd_output) = remote_command_w_output( 6941 &api_socket, 6942 "add-vdpa", 6943 Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"), 6944 ); 6945 assert!(cmd_success); 6946 assert!(String::from_utf8_lossy(&cmd_output) 6947 .contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}")); 6948 6949 thread::sleep(std::time::Duration::new(10, 0)); 6950 6951 // Check IOMMU setup 6952 assert!(guest 6953 .does_device_vendor_pair_match("0x1057", "0x1af4") 6954 .unwrap_or_default()); 6955 assert_eq!( 6956 guest 6957 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 6958 .unwrap() 6959 .trim(), 6960 "0001:00:01.0" 6961 ); 6962 6963 // Check both if /dev/vdd exists and if the block size is 128M. 6964 assert_eq!( 6965 guest 6966 .ssh_command("lsblk | grep vdd | grep -c 128M") 6967 .unwrap() 6968 .trim() 6969 .parse::<u32>() 6970 .unwrap_or_default(), 6971 1 6972 ); 6973 6974 // Write some content to the block device we've just plugged. 6975 guest 6976 .ssh_command("sudo bash -c 'echo foobar > /dev/vdd'") 6977 .unwrap(); 6978 6979 // Check we can read the content back. 
6980 assert_eq!( 6981 guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(), 6982 "foobar" 6983 ); 6984 6985 // Unplug the device 6986 let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0")); 6987 assert!(cmd_success); 6988 thread::sleep(std::time::Duration::new(10, 0)); 6989 6990 // Check /dev/vdd doesn't exist anymore 6991 assert_eq!( 6992 guest 6993 .ssh_command("lsblk | grep -c vdd || true") 6994 .unwrap() 6995 .trim() 6996 .parse::<u32>() 6997 .unwrap_or(1), 6998 0 6999 ); 7000 }); 7001 7002 let _ = child.kill(); 7003 let output = child.wait_with_output().unwrap(); 7004 7005 handle_child_output(r, &output); 7006 } 7007 7008 #[test] 7009 #[cfg(target_arch = "x86_64")] 7010 #[ignore = "See #5756"] 7011 fn test_vdpa_net() { 7012 // Before trying to run the test, verify the vdpa_sim_net module is correctly loaded. 7013 if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() { 7014 return; 7015 } 7016 7017 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7018 let guest = Guest::new(Box::new(focal)); 7019 7020 let kernel_path = direct_kernel_boot_path(); 7021 7022 let mut child = GuestCommand::new(&guest) 7023 .args(["--cpus", "boot=2"]) 7024 .args(["--memory", "size=512M,hugepages=on"]) 7025 .args(["--kernel", kernel_path.to_str().unwrap()]) 7026 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7027 .default_disks() 7028 .default_net() 7029 .args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"]) 7030 .capture_output() 7031 .spawn() 7032 .unwrap(); 7033 7034 let r = std::panic::catch_unwind(|| { 7035 guest.wait_vm_boot(None).unwrap(); 7036 7037 // Check we can find network interface related to vDPA device 7038 assert_eq!( 7039 guest 7040 .ssh_command("ip -o link | grep -c ens6") 7041 .unwrap() 7042 .trim() 7043 .parse::<u32>() 7044 .unwrap_or(0), 7045 1 7046 ); 7047 7048 guest 7049 .ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6") 7050 .unwrap(); 7051 guest.ssh_command("sudo ip link set up dev ens6").unwrap(); 7052 7053 // Check there is no packet yet on both TX/RX of the network interface 7054 assert_eq!( 7055 guest 7056 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'") 7057 .unwrap() 7058 .trim() 7059 .parse::<u32>() 7060 .unwrap_or(0), 7061 2 7062 ); 7063 7064 // Send 6 packets with ping command 7065 guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap(); 7066 7067 // Check we can find 6 packets on both TX/RX of the network interface 7068 assert_eq!( 7069 guest 7070 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'") 7071 .unwrap() 7072 .trim() 7073 .parse::<u32>() 7074 .unwrap_or(0), 7075 2 7076 ); 7077 7078 // No need to check for hotplug as we already tested it through 7079 // test_vdpa_block() 7080 }); 7081 7082 let _ = child.kill(); 7083 let output = child.wait_with_output().unwrap(); 7084 7085 handle_child_output(r, &output); 7086 } 7087 7088 #[test] 7089 #[cfg(target_arch = "x86_64")] 7090 fn test_tpm() { 7091 let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 7092 let guest = Guest::new(Box::new(focal)); 7093 7094 let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir); 7095 7096 let mut guest_cmd = GuestCommand::new(&guest); 7097 guest_cmd 7098 .args(["--cpus", "boot=1"]) 7099 .args(["--memory", "size=512M"]) 7100 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 7101 .args(["--tpm", &format!("socket={swtpm_socket_path}")]) 7102 .capture_output() 7103 .default_disks() 7104 .default_net(); 7105 7106 // Start swtpm 
daemon 7107 let mut swtpm_child = swtpm_command.spawn().unwrap(); 7108 thread::sleep(std::time::Duration::new(10, 0)); 7109 let mut child = guest_cmd.spawn().unwrap(); 7110 let r = std::panic::catch_unwind(|| { 7111 guest.wait_vm_boot(None).unwrap(); 7112 assert_eq!( 7113 guest.ssh_command("ls /dev/tpm0").unwrap().trim(), 7114 "/dev/tpm0" 7115 ); 7116 guest.ssh_command("sudo tpm2_selftest -f").unwrap(); 7117 guest 7118 .ssh_command("echo 'hello' > /tmp/checksum_test; ") 7119 .unwrap(); 7120 guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap(); 7121 }); 7122 7123 let _ = swtpm_child.kill(); 7124 let _d_out = swtpm_child.wait_with_output().unwrap(); 7125 7126 let _ = child.kill(); 7127 let output = child.wait_with_output().unwrap(); 7128 7129 handle_child_output(r, &output); 7130 } 7131 } 7132 7133 mod dbus_api { 7134 use crate::*; 7135 7136 // Start cloud-hypervisor with no VM parameters, running both the HTTP 7137 // and DBus APIs. Alternate calls to the external APIs (HTTP and DBus) 7138 // to create a VM, boot it, and verify that it can be shut down and then 7139 // booted again. 7140 #[test] 7141 fn test_api_dbus_and_http_interleaved() { 7142 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7143 let guest = Guest::new(Box::new(focal)); 7144 let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir); 7145 let http_api = TargetApi::new_http_api(&guest.tmp_dir); 7146 7147 let mut child = GuestCommand::new(&guest) 7148 .args(dbus_api.guest_args()) 7149 .args(http_api.guest_args()) 7150 .capture_output() 7151 .spawn() 7152 .unwrap(); 7153 7154 thread::sleep(std::time::Duration::new(1, 0)); 7155 7156 // Verify API servers are running 7157 assert!(dbus_api.remote_command("ping", None)); 7158 assert!(http_api.remote_command("ping", None)); 7159 7160 // Create the VM first 7161 let cpu_count: u8 = 4; 7162 let request_body = guest.api_create_body( 7163 cpu_count, 7164 direct_kernel_boot_path().to_str().unwrap(), 7165 DIRECT_KERNEL_BOOT_CMDLINE, 7166 ); 7167 7168 let temp_config_path = guest.tmp_dir.as_path().join("config"); 7169 std::fs::write(&temp_config_path, request_body).unwrap(); 7170 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 7171 7172 let r = std::panic::catch_unwind(|| { 7173 // Create the VM 7174 assert!(dbus_api.remote_command("create", Some(create_config),)); 7175 7176 // Then boot it 7177 assert!(http_api.remote_command("boot", None)); 7178 guest.wait_vm_boot(None).unwrap(); 7179 7180 // Check that the VM booted as expected 7181 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7182 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7183 7184 // Sync and shutdown without powering off to prevent filesystem 7185 // corruption. 
7186 guest.ssh_command("sync").unwrap(); 7187 guest.ssh_command("sudo shutdown -H now").unwrap(); 7188 7189 // Wait for the guest to be fully shutdown 7190 thread::sleep(std::time::Duration::new(20, 0)); 7191 7192 // Then shutdown the VM 7193 assert!(dbus_api.remote_command("shutdown", None)); 7194 7195 // Then boot it again 7196 assert!(http_api.remote_command("boot", None)); 7197 guest.wait_vm_boot(None).unwrap(); 7198 7199 // Check that the VM booted as expected 7200 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7201 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7202 }); 7203 7204 let _ = child.kill(); 7205 let output = child.wait_with_output().unwrap(); 7206 7207 handle_child_output(r, &output); 7208 } 7209 7210 #[test] 7211 fn test_api_dbus_create_boot() { 7212 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7213 let guest = Guest::new(Box::new(focal)); 7214 7215 _test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7216 } 7217 7218 #[test] 7219 fn test_api_dbus_shutdown() { 7220 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7221 let guest = Guest::new(Box::new(focal)); 7222 7223 _test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7224 } 7225 7226 #[test] 7227 fn test_api_dbus_delete() { 7228 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7229 let guest = Guest::new(Box::new(focal)); 7230 7231 _test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest); 7232 } 7233 7234 #[test] 7235 fn test_api_dbus_pause_resume() { 7236 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7237 let guest = Guest::new(Box::new(focal)); 7238 7239 _test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7240 } 7241 } 7242 7243 mod common_sequential { 7244 #[cfg(not(feature = "mshv"))] 7245 use crate::*; 7246 7247 #[test] 7248 #[cfg(not(feature = "mshv"))] 7249 fn test_memory_mergeable_on() { 7250 test_memory_mergeable(true) 7251 } 7252 } 7253 7254 mod windows { 7255 use crate::*; 7256 use once_cell::sync::Lazy; 7257 7258 static NEXT_DISK_ID: Lazy<Mutex<u8>> = Lazy::new(|| Mutex::new(1)); 7259 7260 struct WindowsGuest { 7261 guest: Guest, 7262 auth: PasswordAuth, 7263 } 7264 7265 trait FsType { 7266 const FS_FAT: u8; 7267 const FS_NTFS: u8; 7268 } 7269 impl FsType for WindowsGuest { 7270 const FS_FAT: u8 = 0; 7271 const FS_NTFS: u8 = 1; 7272 } 7273 7274 impl WindowsGuest { 7275 fn new() -> Self { 7276 let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string()); 7277 let guest = Guest::new(Box::new(disk)); 7278 let auth = PasswordAuth { 7279 username: String::from("administrator"), 7280 password: String::from("Admin123"), 7281 }; 7282 7283 WindowsGuest { guest, auth } 7284 } 7285 7286 fn guest(&self) -> &Guest { 7287 &self.guest 7288 } 7289 7290 fn ssh_cmd(&self, cmd: &str) -> String { 7291 ssh_command_ip_with_auth( 7292 cmd, 7293 &self.auth, 7294 &self.guest.network.guest_ip, 7295 DEFAULT_SSH_RETRIES, 7296 DEFAULT_SSH_TIMEOUT, 7297 ) 7298 .unwrap() 7299 } 7300 7301 fn cpu_count(&self) -> u8 { 7302 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"") 7303 .trim() 7304 .parse::<u8>() 7305 .unwrap_or(0) 7306 } 7307 7308 fn ram_size(&self) -> usize { 7309 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"") 7310 .trim() 7311 .parse::<usize>() 7312 .unwrap_or(0) 7313 } 7314 7315 fn netdev_count(&self) -> u8 { 7316 self.ssh_cmd("powershell -Command 
\"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"") 7317 .trim() 7318 .parse::<u8>() 7319 .unwrap_or(0) 7320 } 7321 7322 fn disk_count(&self) -> u8 { 7323 self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"") 7324 .trim() 7325 .parse::<u8>() 7326 .unwrap_or(0) 7327 } 7328 7329 fn reboot(&self) { 7330 let _ = self.ssh_cmd("shutdown /r /t 0"); 7331 } 7332 7333 fn shutdown(&self) { 7334 let _ = self.ssh_cmd("shutdown /s /t 0"); 7335 } 7336 7337 fn run_dnsmasq(&self) -> std::process::Child { 7338 let listen_address = format!("--listen-address={}", self.guest.network.host_ip); 7339 let dhcp_host = format!( 7340 "--dhcp-host={},{}", 7341 self.guest.network.guest_mac, self.guest.network.guest_ip 7342 ); 7343 let dhcp_range = format!( 7344 "--dhcp-range=eth,{},{}", 7345 self.guest.network.guest_ip, self.guest.network.guest_ip 7346 ); 7347 7348 Command::new("dnsmasq") 7349 .arg("--no-daemon") 7350 .arg("--log-queries") 7351 .arg(listen_address.as_str()) 7352 .arg("--except-interface=lo") 7353 .arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet. 7354 .arg("--conf-file=/dev/null") 7355 .arg(dhcp_host.as_str()) 7356 .arg(dhcp_range.as_str()) 7357 .spawn() 7358 .unwrap() 7359 } 7360 7361 // TODO Cleanup image file explicitly after test, if there's some space issues. 7362 fn disk_new(&self, fs: u8, sz: usize) -> String { 7363 let mut guard = NEXT_DISK_ID.lock().unwrap(); 7364 let id = *guard; 7365 *guard = id + 1; 7366 7367 let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw")); 7368 let _ = fs::remove_file(&img); 7369 7370 // Create an image file 7371 let out = Command::new("qemu-img") 7372 .args([ 7373 "create", 7374 "-f", 7375 "raw", 7376 img.to_str().unwrap(), 7377 format!("{sz}m").as_str(), 7378 ]) 7379 .output() 7380 .expect("qemu-img command failed") 7381 .stdout; 7382 println!("{out:?}"); 7383 7384 // Associate image to a loop device 7385 let out = Command::new("losetup") 7386 .args(["--show", "-f", img.to_str().unwrap()]) 7387 .output() 7388 .expect("failed to create loop device") 7389 .stdout; 7390 let _tmp = String::from_utf8_lossy(&out); 7391 let loop_dev = _tmp.trim(); 7392 println!("{out:?}"); 7393 7394 // Create a partition table 7395 // echo 'type=7' | sudo sfdisk "${LOOP}" 7396 let mut child = Command::new("sfdisk") 7397 .args([loop_dev]) 7398 .stdin(Stdio::piped()) 7399 .spawn() 7400 .unwrap(); 7401 let stdin = child.stdin.as_mut().expect("failed to open stdin"); 7402 stdin 7403 .write_all("type=7".as_bytes()) 7404 .expect("failed to write stdin"); 7405 let out = child.wait_with_output().expect("sfdisk failed").stdout; 7406 println!("{out:?}"); 7407 7408 // Disengage the loop device 7409 let out = Command::new("losetup") 7410 .args(["-d", loop_dev]) 7411 .output() 7412 .expect("loop device not found") 7413 .stdout; 7414 println!("{out:?}"); 7415 7416 // Re-associate loop device pointing to the partition only 7417 let out = Command::new("losetup") 7418 .args([ 7419 "--show", 7420 "--offset", 7421 (512 * 2048).to_string().as_str(), 7422 "-f", 7423 img.to_str().unwrap(), 7424 ]) 7425 .output() 7426 .expect("failed to create loop device") 7427 .stdout; 7428 let _tmp = String::from_utf8_lossy(&out); 7429 let loop_dev = _tmp.trim(); 7430 println!("{out:?}"); 7431 7432 // Create filesystem. 
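// Note on the magic offset used above: sfdisk places the first partition at
// sector 2048 by default, so the second losetup maps the image at an offset
// of 512 * 2048 bytes, i.e. the start of that partition. The filesystem
// created below therefore ends up inside the partition rather than
// overwriting the partition table.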
7433 let fs_cmd = match fs { 7434 WindowsGuest::FS_FAT => "mkfs.msdos", 7435 WindowsGuest::FS_NTFS => "mkfs.ntfs", 7436 _ => panic!("Unknown filesystem type '{fs}'"), 7437 }; 7438 let out = Command::new(fs_cmd) 7439 .args([&loop_dev]) 7440 .output() 7441 .unwrap_or_else(|_| panic!("{fs_cmd} failed")) 7442 .stdout; 7443 println!("{out:?}"); 7444 7445 // Disengage the loop device 7446 let out = Command::new("losetup") 7447 .args(["-d", loop_dev]) 7448 .output() 7449 .unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found")) 7450 .stdout; 7451 println!("{out:?}"); 7452 7453 img.to_str().unwrap().to_string() 7454 } 7455 7456 fn disks_set_rw(&self) { 7457 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\""); 7458 } 7459 7460 fn disks_online(&self) { 7461 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\""); 7462 } 7463 7464 fn disk_file_put(&self, fname: &str, data: &str) { 7465 let _ = self.ssh_cmd(&format!( 7466 "powershell -Command \"'{data}' | Set-Content -Path {fname}\"" 7467 )); 7468 } 7469 7470 fn disk_file_read(&self, fname: &str) -> String { 7471 self.ssh_cmd(&format!( 7472 "powershell -Command \"Get-Content -Path {fname}\"" 7473 )) 7474 } 7475 7476 fn wait_for_boot(&self) -> bool { 7477 let cmd = "dir /b c:\\ | find \"Windows\""; 7478 let tmo_max = 180; 7479 // The timeout increase by n*1+n*2+n*3+..., therefore the initial 7480 // interval must be small. 7481 let tmo_int = 2; 7482 let out = ssh_command_ip_with_auth( 7483 cmd, 7484 &self.auth, 7485 &self.guest.network.guest_ip, 7486 { 7487 let mut ret = 1; 7488 let mut tmo_acc = 0; 7489 loop { 7490 tmo_acc += tmo_int * ret; 7491 if tmo_acc >= tmo_max { 7492 break; 7493 } 7494 ret += 1; 7495 } 7496 ret 7497 }, 7498 tmo_int, 7499 ) 7500 .unwrap(); 7501 7502 if "Windows" == out.trim() { 7503 return true; 7504 } 7505 7506 false 7507 } 7508 } 7509 7510 fn vcpu_threads_count(pid: u32) -> u8 { 7511 // ps -T -p 12345 | grep vcpu | wc -l 7512 let out = Command::new("ps") 7513 .args(["-T", "-p", format!("{pid}").as_str()]) 7514 .output() 7515 .expect("ps command failed") 7516 .stdout; 7517 return String::from_utf8_lossy(&out).matches("vcpu").count() as u8; 7518 } 7519 7520 fn netdev_ctrl_threads_count(pid: u32) -> u8 { 7521 // ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l 7522 let out = Command::new("ps") 7523 .args(["-T", "-p", format!("{pid}").as_str()]) 7524 .output() 7525 .expect("ps command failed") 7526 .stdout; 7527 let mut n = 0; 7528 String::from_utf8_lossy(&out) 7529 .split_whitespace() 7530 .for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl 7531 n 7532 } 7533 7534 fn disk_ctrl_threads_count(pid: u32) -> u8 { 7535 // ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l 7536 let out = Command::new("ps") 7537 .args(["-T", "-p", format!("{pid}").as_str()]) 7538 .output() 7539 .expect("ps command failed") 7540 .stdout; 7541 let mut n = 0; 7542 String::from_utf8_lossy(&out) 7543 .split_whitespace() 7544 .for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // _disk0_q0, don't care about multiple queues as they're related to the same hdd 7545 n 7546 } 7547 7548 #[test] 7549 fn test_windows_guest() { 7550 let windows_guest = WindowsGuest::new(); 7551 7552 let mut child = GuestCommand::new(windows_guest.guest()) 7553 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7554 .args(["--memory", "size=4G"]) 7555 .args(["--kernel", edk2_path().to_str().unwrap()]) 
7556 .args(["--serial", "tty"]) 7557 .args(["--console", "off"]) 7558 .default_disks() 7559 .default_net() 7560 .capture_output() 7561 .spawn() 7562 .unwrap(); 7563 7564 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7565 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7566 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7567 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7568 7569 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7570 7571 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7572 7573 let r = std::panic::catch_unwind(|| { 7574 // Wait to make sure Windows boots up 7575 assert!(windows_guest.wait_for_boot()); 7576 7577 windows_guest.shutdown(); 7578 }); 7579 7580 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7581 let _ = child.kill(); 7582 let output = child.wait_with_output().unwrap(); 7583 7584 let _ = child_dnsmasq.kill(); 7585 let _ = child_dnsmasq.wait(); 7586 7587 handle_child_output(r, &output); 7588 } 7589 7590 #[test] 7591 fn test_windows_guest_multiple_queues() { 7592 let windows_guest = WindowsGuest::new(); 7593 7594 let mut ovmf_path = dirs::home_dir().unwrap(); 7595 ovmf_path.push("workloads"); 7596 ovmf_path.push(OVMF_NAME); 7597 7598 let mut child = GuestCommand::new(windows_guest.guest()) 7599 .args(["--cpus", "boot=4,kvm_hyperv=on"]) 7600 .args(["--memory", "size=4G"]) 7601 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7602 .args(["--serial", "tty"]) 7603 .args(["--console", "off"]) 7604 .args([ 7605 "--disk", 7606 format!( 7607 "path={},num_queues=4", 7608 windows_guest 7609 .guest() 7610 .disk_config 7611 .disk(DiskType::OperatingSystem) 7612 .unwrap() 7613 ) 7614 .as_str(), 7615 ]) 7616 .args([ 7617 "--net", 7618 format!( 7619 "tap=,mac={},ip={},mask=255.255.255.0,num_queues=8", 7620 windows_guest.guest().network.guest_mac, 7621 windows_guest.guest().network.host_ip 7622 ) 7623 .as_str(), 7624 ]) 7625 .capture_output() 7626 .spawn() 7627 .unwrap(); 7628 7629 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7630 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7631 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7632 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7633 7634 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7635 7636 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7637 7638 let r = std::panic::catch_unwind(|| { 7639 // Wait to make sure Windows boots up 7640 assert!(windows_guest.wait_for_boot()); 7641 7642 windows_guest.shutdown(); 7643 }); 7644 7645 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7646 let _ = child.kill(); 7647 let output = child.wait_with_output().unwrap(); 7648 7649 let _ = child_dnsmasq.kill(); 7650 let _ = child_dnsmasq.wait(); 7651 7652 handle_child_output(r, &output); 7653 } 7654 7655 #[test] 7656 #[cfg(not(feature = "mshv"))] 7657 #[ignore = "See #4327"] 7658 fn test_windows_guest_snapshot_restore() { 7659 let windows_guest = WindowsGuest::new(); 7660 7661 let mut ovmf_path = dirs::home_dir().unwrap(); 7662 ovmf_path.push("workloads"); 7663 ovmf_path.push(OVMF_NAME); 7664 7665 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7666 let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir)); 7667 7668 let mut child = GuestCommand::new(windows_guest.guest()) 7669 .args(["--api-socket", &api_socket_source]) 7670 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7671 .args(["--memory", "size=4G"]) 7672 .args(["--kernel", 
ovmf_path.to_str().unwrap()]) 7673 .args(["--serial", "tty"]) 7674 .args(["--console", "off"]) 7675 .default_disks() 7676 .default_net() 7677 .capture_output() 7678 .spawn() 7679 .unwrap(); 7680 7681 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7682 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7683 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7684 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7685 7686 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7687 7688 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7689 7690 // Wait to make sure Windows boots up 7691 assert!(windows_guest.wait_for_boot()); 7692 7693 let snapshot_dir = temp_snapshot_dir_path(&tmp_dir); 7694 7695 // Pause the VM 7696 assert!(remote_command(&api_socket_source, "pause", None)); 7697 7698 // Take a snapshot from the VM 7699 assert!(remote_command( 7700 &api_socket_source, 7701 "snapshot", 7702 Some(format!("file://{snapshot_dir}").as_str()), 7703 )); 7704 7705 // Wait to make sure the snapshot is completed 7706 thread::sleep(std::time::Duration::new(30, 0)); 7707 7708 let _ = child.kill(); 7709 child.wait().unwrap(); 7710 7711 let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir)); 7712 7713 // Restore the VM from the snapshot 7714 let mut child = GuestCommand::new(windows_guest.guest()) 7715 .args(["--api-socket", &api_socket_restored]) 7716 .args([ 7717 "--restore", 7718 format!("source_url=file://{snapshot_dir}").as_str(), 7719 ]) 7720 .capture_output() 7721 .spawn() 7722 .unwrap(); 7723 7724 // Wait for the VM to be restored 7725 thread::sleep(std::time::Duration::new(20, 0)); 7726 7727 let r = std::panic::catch_unwind(|| { 7728 // Resume the VM 7729 assert!(remote_command(&api_socket_restored, "resume", None)); 7730 7731 windows_guest.shutdown(); 7732 }); 7733 7734 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7735 let _ = child.kill(); 7736 let output = child.wait_with_output().unwrap(); 7737 7738 let _ = child_dnsmasq.kill(); 7739 let _ = child_dnsmasq.wait(); 7740 7741 handle_child_output(r, &output); 7742 } 7743 7744 #[test] 7745 #[cfg(not(feature = "mshv"))] 7746 #[cfg(not(target_arch = "aarch64"))] 7747 fn test_windows_guest_cpu_hotplug() { 7748 let windows_guest = WindowsGuest::new(); 7749 7750 let mut ovmf_path = dirs::home_dir().unwrap(); 7751 ovmf_path.push("workloads"); 7752 ovmf_path.push(OVMF_NAME); 7753 7754 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7755 let api_socket = temp_api_path(&tmp_dir); 7756 7757 let mut child = GuestCommand::new(windows_guest.guest()) 7758 .args(["--api-socket", &api_socket]) 7759 .args(["--cpus", "boot=2,max=8,kvm_hyperv=on"]) 7760 .args(["--memory", "size=4G"]) 7761 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7762 .args(["--serial", "tty"]) 7763 .args(["--console", "off"]) 7764 .default_disks() 7765 .default_net() 7766 .capture_output() 7767 .spawn() 7768 .unwrap(); 7769 7770 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7771 7772 let r = std::panic::catch_unwind(|| { 7773 // Wait to make sure Windows boots up 7774 assert!(windows_guest.wait_for_boot()); 7775 7776 let vcpu_num = 2; 7777 // Check the initial number of CPUs the guest sees 7778 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7779 // Check the initial number of vcpu threads in the CH process 7780 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7781 7782 let vcpu_num = 6; 7783 // Hotplug some CPUs 7784 resize_command(&api_socket, Some(vcpu_num), None, None, 
None); 7785 // Wait to make sure CPUs are added 7786 thread::sleep(std::time::Duration::new(10, 0)); 7787 // Check the guest sees the correct number 7788 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7789 // Check the CH process has the correct number of vcpu threads 7790 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7791 7792 let vcpu_num = 4; 7793 // Remove some CPUs. Note that Windows doesn't support hot-remove. 7794 resize_command(&api_socket, Some(vcpu_num), None, None, None); 7795 // Wait to make sure CPUs are removed 7796 thread::sleep(std::time::Duration::new(10, 0)); 7797 // Reboot to let Windows catch up 7798 windows_guest.reboot(); 7799 // Wait to make sure Windows completely rebooted 7800 thread::sleep(std::time::Duration::new(60, 0)); 7801 // Check the guest sees the correct number 7802 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7803 // Check the CH process has the correct number of vcpu threads 7804 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7805 7806 windows_guest.shutdown(); 7807 }); 7808 7809 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7810 let _ = child.kill(); 7811 let output = child.wait_with_output().unwrap(); 7812 7813 let _ = child_dnsmasq.kill(); 7814 let _ = child_dnsmasq.wait(); 7815 7816 handle_child_output(r, &output); 7817 } 7818 7819 #[test] 7820 #[cfg(not(feature = "mshv"))] 7821 #[cfg(not(target_arch = "aarch64"))] 7822 fn test_windows_guest_ram_hotplug() { 7823 let windows_guest = WindowsGuest::new(); 7824 7825 let mut ovmf_path = dirs::home_dir().unwrap(); 7826 ovmf_path.push("workloads"); 7827 ovmf_path.push(OVMF_NAME); 7828 7829 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7830 let api_socket = temp_api_path(&tmp_dir); 7831 7832 let mut child = GuestCommand::new(windows_guest.guest()) 7833 .args(["--api-socket", &api_socket]) 7834 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7835 .args(["--memory", "size=2G,hotplug_size=5G"]) 7836 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7837 .args(["--serial", "tty"]) 7838 .args(["--console", "off"]) 7839 .default_disks() 7840 .default_net() 7841 .capture_output() 7842 .spawn() 7843 .unwrap(); 7844 7845 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7846 7847 let r = std::panic::catch_unwind(|| { 7848 // Wait to make sure Windows boots up 7849 assert!(windows_guest.wait_for_boot()); 7850 7851 let ram_size = 2 * 1024 * 1024 * 1024; 7852 // Check the initial number of RAM the guest sees 7853 let current_ram_size = windows_guest.ram_size(); 7854 // This size seems to be reserved by the system and thus the 7855 // reported amount differs by this constant value. 7856 let reserved_ram_size = ram_size - current_ram_size; 7857 // Verify that there's not more than 4mb constant diff wasted 7858 // by the reserved ram. 7859 assert!(reserved_ram_size < 4 * 1024 * 1024); 7860 7861 let ram_size = 4 * 1024 * 1024 * 1024; 7862 // Hotplug some RAM 7863 resize_command(&api_socket, None, Some(ram_size), None, None); 7864 // Wait to make sure RAM has been added 7865 thread::sleep(std::time::Duration::new(10, 0)); 7866 // Check the guest sees the correct number 7867 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 7868 7869 let ram_size = 3 * 1024 * 1024 * 1024; 7870 // Unplug some RAM. Note that hot-remove most likely won't work. 
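            // For reference: `resize_command()` is a helper defined elsewhere in this
            // file that drives the resize through the HTTP API socket. Assuming it
            // wraps ch-remote (an assumption; the wrapper is not shown here), the call
            // below is roughly equivalent to:
            //
            //   ch-remote --api-socket <api_socket> resize --memory 3221225472
            //
            // i.e. asking the VMM for 3 GiB of guest RAM.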
7871 resize_command(&api_socket, None, Some(ram_size), None, None); 7872 // Wait to make sure RAM has been added 7873 thread::sleep(std::time::Duration::new(10, 0)); 7874 // Reboot to let Windows catch up 7875 windows_guest.reboot(); 7876 // Wait to make sure guest completely rebooted 7877 thread::sleep(std::time::Duration::new(60, 0)); 7878 // Check the guest sees the correct number 7879 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 7880 7881 windows_guest.shutdown(); 7882 }); 7883 7884 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7885 let _ = child.kill(); 7886 let output = child.wait_with_output().unwrap(); 7887 7888 let _ = child_dnsmasq.kill(); 7889 let _ = child_dnsmasq.wait(); 7890 7891 handle_child_output(r, &output); 7892 } 7893 7894 #[test] 7895 #[cfg(not(feature = "mshv"))] 7896 fn test_windows_guest_netdev_hotplug() { 7897 let windows_guest = WindowsGuest::new(); 7898 7899 let mut ovmf_path = dirs::home_dir().unwrap(); 7900 ovmf_path.push("workloads"); 7901 ovmf_path.push(OVMF_NAME); 7902 7903 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7904 let api_socket = temp_api_path(&tmp_dir); 7905 7906 let mut child = GuestCommand::new(windows_guest.guest()) 7907 .args(["--api-socket", &api_socket]) 7908 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7909 .args(["--memory", "size=4G"]) 7910 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7911 .args(["--serial", "tty"]) 7912 .args(["--console", "off"]) 7913 .default_disks() 7914 .default_net() 7915 .capture_output() 7916 .spawn() 7917 .unwrap(); 7918 7919 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7920 7921 let r = std::panic::catch_unwind(|| { 7922 // Wait to make sure Windows boots up 7923 assert!(windows_guest.wait_for_boot()); 7924 7925 // Initially present network device 7926 let netdev_num = 1; 7927 assert_eq!(windows_guest.netdev_count(), netdev_num); 7928 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 7929 7930 // Hotplug network device 7931 let (cmd_success, cmd_output) = remote_command_w_output( 7932 &api_socket, 7933 "add-net", 7934 Some(windows_guest.guest().default_net_string().as_str()), 7935 ); 7936 assert!(cmd_success); 7937 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\"")); 7938 thread::sleep(std::time::Duration::new(5, 0)); 7939 // Verify the device is on the system 7940 let netdev_num = 2; 7941 assert_eq!(windows_guest.netdev_count(), netdev_num); 7942 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 7943 7944 // Remove network device 7945 let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2")); 7946 assert!(cmd_success); 7947 thread::sleep(std::time::Duration::new(5, 0)); 7948 // Verify the device has been removed 7949 let netdev_num = 1; 7950 assert_eq!(windows_guest.netdev_count(), netdev_num); 7951 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 7952 7953 windows_guest.shutdown(); 7954 }); 7955 7956 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7957 let _ = child.kill(); 7958 let output = child.wait_with_output().unwrap(); 7959 7960 let _ = child_dnsmasq.kill(); 7961 let _ = child_dnsmasq.wait(); 7962 7963 handle_child_output(r, &output); 7964 } 7965 7966 #[test] 7967 #[cfg(not(feature = "mshv"))] 7968 #[cfg(not(target_arch = "aarch64"))] 7969 fn test_windows_guest_disk_hotplug() { 7970 let windows_guest = WindowsGuest::new(); 7971 7972 let mut ovmf_path = dirs::home_dir().unwrap(); 7973 ovmf_path.push("workloads"); 7974 
ovmf_path.push(OVMF_NAME); 7975 7976 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7977 let api_socket = temp_api_path(&tmp_dir); 7978 7979 let mut child = GuestCommand::new(windows_guest.guest()) 7980 .args(["--api-socket", &api_socket]) 7981 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7982 .args(["--memory", "size=4G"]) 7983 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7984 .args(["--serial", "tty"]) 7985 .args(["--console", "off"]) 7986 .default_disks() 7987 .default_net() 7988 .capture_output() 7989 .spawn() 7990 .unwrap(); 7991 7992 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7993 7994 let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100); 7995 7996 let r = std::panic::catch_unwind(|| { 7997 // Wait to make sure Windows boots up 7998 assert!(windows_guest.wait_for_boot()); 7999 8000 // Initially present disk device 8001 let disk_num = 1; 8002 assert_eq!(windows_guest.disk_count(), disk_num); 8003 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8004 8005 // Hotplug disk device 8006 let (cmd_success, cmd_output) = remote_command_w_output( 8007 &api_socket, 8008 "add-disk", 8009 Some(format!("path={disk},readonly=off").as_str()), 8010 ); 8011 assert!(cmd_success); 8012 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\"")); 8013 thread::sleep(std::time::Duration::new(5, 0)); 8014 // Online disk device 8015 windows_guest.disks_set_rw(); 8016 windows_guest.disks_online(); 8017 // Verify the device is on the system 8018 let disk_num = 2; 8019 assert_eq!(windows_guest.disk_count(), disk_num); 8020 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8021 8022 let data = "hello"; 8023 let fname = "d:\\world"; 8024 windows_guest.disk_file_put(fname, data); 8025 8026 // Unmount disk device 8027 let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2")); 8028 assert!(cmd_success); 8029 thread::sleep(std::time::Duration::new(5, 0)); 8030 // Verify the device has been removed 8031 let disk_num = 1; 8032 assert_eq!(windows_guest.disk_count(), disk_num); 8033 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8034 8035 // Remount and check the file exists with the expected contents 8036 let (cmd_success, _cmd_output) = remote_command_w_output( 8037 &api_socket, 8038 "add-disk", 8039 Some(format!("path={disk},readonly=off").as_str()), 8040 ); 8041 assert!(cmd_success); 8042 thread::sleep(std::time::Duration::new(5, 0)); 8043 let out = windows_guest.disk_file_read(fname); 8044 assert_eq!(data, out.trim()); 8045 8046 // Intentionally no unmount, it'll happen at shutdown. 
8047 8048 windows_guest.shutdown(); 8049 }); 8050 8051 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8052 let _ = child.kill(); 8053 let output = child.wait_with_output().unwrap(); 8054 8055 let _ = child_dnsmasq.kill(); 8056 let _ = child_dnsmasq.wait(); 8057 8058 handle_child_output(r, &output); 8059 } 8060 8061 #[test] 8062 #[cfg(not(feature = "mshv"))] 8063 #[cfg(not(target_arch = "aarch64"))] 8064 fn test_windows_guest_disk_hotplug_multi() { 8065 let windows_guest = WindowsGuest::new(); 8066 8067 let mut ovmf_path = dirs::home_dir().unwrap(); 8068 ovmf_path.push("workloads"); 8069 ovmf_path.push(OVMF_NAME); 8070 8071 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8072 let api_socket = temp_api_path(&tmp_dir); 8073 8074 let mut child = GuestCommand::new(windows_guest.guest()) 8075 .args(["--api-socket", &api_socket]) 8076 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8077 .args(["--memory", "size=2G"]) 8078 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8079 .args(["--serial", "tty"]) 8080 .args(["--console", "off"]) 8081 .default_disks() 8082 .default_net() 8083 .capture_output() 8084 .spawn() 8085 .unwrap(); 8086 8087 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8088 8089 // Predefined data to used at various test stages 8090 let disk_test_data: [[String; 4]; 2] = [ 8091 [ 8092 "_disk2".to_string(), 8093 windows_guest.disk_new(WindowsGuest::FS_FAT, 123), 8094 "d:\\world".to_string(), 8095 "hello".to_string(), 8096 ], 8097 [ 8098 "_disk3".to_string(), 8099 windows_guest.disk_new(WindowsGuest::FS_NTFS, 333), 8100 "e:\\hello".to_string(), 8101 "world".to_string(), 8102 ], 8103 ]; 8104 8105 let r = std::panic::catch_unwind(|| { 8106 // Wait to make sure Windows boots up 8107 assert!(windows_guest.wait_for_boot()); 8108 8109 // Initially present disk device 8110 let disk_num = 1; 8111 assert_eq!(windows_guest.disk_count(), disk_num); 8112 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8113 8114 for it in &disk_test_data { 8115 let disk_id = it[0].as_str(); 8116 let disk = it[1].as_str(); 8117 // Hotplug disk device 8118 let (cmd_success, cmd_output) = remote_command_w_output( 8119 &api_socket, 8120 "add-disk", 8121 Some(format!("path={disk},readonly=off").as_str()), 8122 ); 8123 assert!(cmd_success); 8124 assert!(String::from_utf8_lossy(&cmd_output) 8125 .contains(format!("\"id\":\"{disk_id}\"").as_str())); 8126 thread::sleep(std::time::Duration::new(5, 0)); 8127 // Online disk devices 8128 windows_guest.disks_set_rw(); 8129 windows_guest.disks_online(); 8130 } 8131 // Verify the devices are on the system 8132 let disk_num = (disk_test_data.len() + 1) as u8; 8133 assert_eq!(windows_guest.disk_count(), disk_num); 8134 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8135 8136 // Put test data 8137 for it in &disk_test_data { 8138 let fname = it[2].as_str(); 8139 let data = it[3].as_str(); 8140 windows_guest.disk_file_put(fname, data); 8141 } 8142 8143 // Unmount disk devices 8144 for it in &disk_test_data { 8145 let disk_id = it[0].as_str(); 8146 let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id)); 8147 assert!(cmd_success); 8148 thread::sleep(std::time::Duration::new(5, 0)); 8149 } 8150 8151 // Verify the devices have been removed 8152 let disk_num = 1; 8153 assert_eq!(windows_guest.disk_count(), disk_num); 8154 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8155 8156 // Remount 8157 for it in &disk_test_data { 8158 let disk = it[1].as_str(); 8159 let (cmd_success, _cmd_output) = 
remote_command_w_output( 8160 &api_socket, 8161 "add-disk", 8162 Some(format!("path={disk},readonly=off").as_str()), 8163 ); 8164 assert!(cmd_success); 8165 thread::sleep(std::time::Duration::new(5, 0)); 8166 } 8167 8168 // Check the files exists with the expected contents 8169 for it in &disk_test_data { 8170 let fname = it[2].as_str(); 8171 let data = it[3].as_str(); 8172 let out = windows_guest.disk_file_read(fname); 8173 assert_eq!(data, out.trim()); 8174 } 8175 8176 // Intentionally no unmount, it'll happen at shutdown. 8177 8178 windows_guest.shutdown(); 8179 }); 8180 8181 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8182 let _ = child.kill(); 8183 let output = child.wait_with_output().unwrap(); 8184 8185 let _ = child_dnsmasq.kill(); 8186 let _ = child_dnsmasq.wait(); 8187 8188 handle_child_output(r, &output); 8189 } 8190 8191 #[test] 8192 #[cfg(not(feature = "mshv"))] 8193 #[cfg(not(target_arch = "aarch64"))] 8194 fn test_windows_guest_netdev_multi() { 8195 let windows_guest = WindowsGuest::new(); 8196 8197 let mut ovmf_path = dirs::home_dir().unwrap(); 8198 ovmf_path.push("workloads"); 8199 ovmf_path.push(OVMF_NAME); 8200 8201 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8202 let api_socket = temp_api_path(&tmp_dir); 8203 8204 let mut child = GuestCommand::new(windows_guest.guest()) 8205 .args(["--api-socket", &api_socket]) 8206 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8207 .args(["--memory", "size=4G"]) 8208 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8209 .args(["--serial", "tty"]) 8210 .args(["--console", "off"]) 8211 .default_disks() 8212 // The multi net dev config is borrowed from test_multiple_network_interfaces 8213 .args([ 8214 "--net", 8215 windows_guest.guest().default_net_string().as_str(), 8216 "--net", 8217 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 8218 "--net", 8219 "tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 8220 ]) 8221 .capture_output() 8222 .spawn() 8223 .unwrap(); 8224 8225 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8226 8227 let r = std::panic::catch_unwind(|| { 8228 // Wait to make sure Windows boots up 8229 assert!(windows_guest.wait_for_boot()); 8230 8231 let netdev_num = 3; 8232 assert_eq!(windows_guest.netdev_count(), netdev_num); 8233 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8234 8235 let tap_count = exec_host_command_output("ip link | grep -c mytap42"); 8236 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 8237 8238 windows_guest.shutdown(); 8239 }); 8240 8241 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8242 let _ = child.kill(); 8243 let output = child.wait_with_output().unwrap(); 8244 8245 let _ = child_dnsmasq.kill(); 8246 let _ = child_dnsmasq.wait(); 8247 8248 handle_child_output(r, &output); 8249 } 8250 } 8251 8252 #[cfg(target_arch = "x86_64")] 8253 mod sgx { 8254 use crate::*; 8255 8256 #[test] 8257 fn test_sgx() { 8258 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 8259 let jammy = UbuntuDiskConfig::new(jammy_image); 8260 let guest = Guest::new(Box::new(jammy)); 8261 8262 let mut child = GuestCommand::new(&guest) 8263 .args(["--cpus", "boot=1"]) 8264 .args(["--memory", "size=512M"]) 8265 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8266 .default_disks() 8267 .default_net() 8268 .args(["--sgx-epc", "id=epc0,size=64M"]) 8269 .capture_output() 8270 .spawn() 8271 .unwrap(); 8272 8273 let r = std::panic::catch_unwind(|| { 8274 guest.wait_vm_boot(None).unwrap(); 8275 
            // Check if SGX is correctly detected in the guest.
            guest.check_sgx_support().unwrap();

            // Validate the SGX EPC section is 64MiB.
            assert_eq!(
                guest
                    .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2")
                    .unwrap()
                    .trim(),
                "0x0000000004000000"
            );
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }
}

#[cfg(target_arch = "x86_64")]
mod vfio {
    use crate::*;

    #[test]
    // The VFIO integration test starts a cloud-hypervisor guest with 3 TAP
    // backed networking interfaces, bound through a simple bridge on the host.
    // So if the nested cloud-hypervisor succeeds in getting a directly
    // assigned interface from its cloud-hypervisor host, we should be able to
    // ssh into it, and verify that it's running with the right kernel command
    // line (we tag the command line from cloud-hypervisor for that purpose).
    // The third device is added to validate that hotplug works correctly since
    // it is added to the L2 VM through the hotplug mechanism.
    // Also, we pass through a virtio-blk device to the L2 VM to test the
    // 32-bit VFIO device support.
    fn test_vfio() {
        setup_vfio_network_interfaces();

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new_from_ip_range(Box::new(focal), "172.18", 0);

        let mut workload_path = dirs::home_dir().unwrap();
        workload_path.push("workloads");

        let kernel_path = direct_kernel_boot_path();

        let mut vfio_path = workload_path.clone();
        vfio_path.push("vfio");

        let mut cloud_init_vfio_base_path = vfio_path.clone();
        cloud_init_vfio_base_path.push("cloudinit.img");

        // We copy our cloudinit into the vfio mount point, for the nested
        // cloud-hypervisor guest to use.
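        // For orientation, a rough sketch of the nested setup exercised below
        // (derived from the guest configuration used in this test):
        //
        //   host (runs this test)
        //    └── L1 cloud-hypervisor guest (boot=4, 2G hugepages, shared memory)
        //         ├── virtio-net  tap=vfio-tap0            -> L1 connectivity
        //         ├── virtio-net  tap=vfio-tap1, iommu=on  -> assigned to L2 via VFIO
        //         ├── virtio-net  tap=vfio-tap2, iommu=on  -> assigned to L2 via VFIO
        //         ├── virtio-net  tap=vfio-tap3, iommu=on  -> hotplugged into L2 via VFIO
        //         └── virtio-blk  iommu=on                 -> assigned to L2 (32-bit VFIO check)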
8330 rate_limited_copy( 8331 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 8332 &cloud_init_vfio_base_path, 8333 ) 8334 .expect("copying of cloud-init disk failed"); 8335 8336 let mut vfio_disk_path = workload_path.clone(); 8337 vfio_disk_path.push("vfio.img"); 8338 8339 // Create the vfio disk image 8340 let output = Command::new("mkfs.ext4") 8341 .arg("-d") 8342 .arg(vfio_path.to_str().unwrap()) 8343 .arg(vfio_disk_path.to_str().unwrap()) 8344 .arg("2g") 8345 .output() 8346 .unwrap(); 8347 if !output.status.success() { 8348 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 8349 panic!("mkfs.ext4 command generated an error"); 8350 } 8351 8352 let mut blk_file_path = workload_path; 8353 blk_file_path.push("blk.img"); 8354 8355 let vfio_tap0 = "vfio-tap0"; 8356 let vfio_tap1 = "vfio-tap1"; 8357 let vfio_tap2 = "vfio-tap2"; 8358 let vfio_tap3 = "vfio-tap3"; 8359 8360 let mut child = GuestCommand::new(&guest) 8361 .args(["--cpus", "boot=4"]) 8362 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 8363 .args(["--kernel", kernel_path.to_str().unwrap()]) 8364 .args([ 8365 "--disk", 8366 format!( 8367 "path={}", 8368 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 8369 ) 8370 .as_str(), 8371 "--disk", 8372 format!( 8373 "path={}", 8374 guest.disk_config.disk(DiskType::CloudInit).unwrap() 8375 ) 8376 .as_str(), 8377 "--disk", 8378 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 8379 "--disk", 8380 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 8381 ]) 8382 .args([ 8383 "--cmdline", 8384 format!( 8385 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 8386 ) 8387 .as_str(), 8388 ]) 8389 .args([ 8390 "--net", 8391 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 8392 "--net", 8393 format!( 8394 "tap={},mac={},iommu=on", 8395 vfio_tap1, guest.network.l2_guest_mac1 8396 ) 8397 .as_str(), 8398 "--net", 8399 format!( 8400 "tap={},mac={},iommu=on", 8401 vfio_tap2, guest.network.l2_guest_mac2 8402 ) 8403 .as_str(), 8404 "--net", 8405 format!( 8406 "tap={},mac={},iommu=on", 8407 vfio_tap3, guest.network.l2_guest_mac3 8408 ) 8409 .as_str(), 8410 ]) 8411 .capture_output() 8412 .spawn() 8413 .unwrap(); 8414 8415 thread::sleep(std::time::Duration::new(30, 0)); 8416 8417 let r = std::panic::catch_unwind(|| { 8418 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 8419 thread::sleep(std::time::Duration::new(120, 0)); 8420 8421 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 8422 // added to its kernel command line. 8423 // Let's ssh into it and verify that it's there. If it is it means 8424 // we're in the right guest (The L2 one) because the QEMU L1 guest 8425 // does not have this command line tag. 8426 assert_eq!( 8427 guest 8428 .ssh_command_l2_1("grep -c VFIOTAG /proc/cmdline") 8429 .unwrap() 8430 .trim() 8431 .parse::<u32>() 8432 .unwrap_or_default(), 8433 1 8434 ); 8435 8436 // Let's also verify from the second virtio-net device passed to 8437 // the L2 VM. 8438 assert_eq!( 8439 guest 8440 .ssh_command_l2_2("grep -c VFIOTAG /proc/cmdline") 8441 .unwrap() 8442 .trim() 8443 .parse::<u32>() 8444 .unwrap_or_default(), 8445 1 8446 ); 8447 8448 // Check the amount of PCI devices appearing in L2 VM. 
8449 assert_eq!( 8450 guest 8451 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8452 .unwrap() 8453 .trim() 8454 .parse::<u32>() 8455 .unwrap_or_default(), 8456 8, 8457 ); 8458 8459 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 8460 assert_eq!( 8461 guest 8462 .ssh_command_l2_1("lsblk | grep vdc | grep -c 16M") 8463 .unwrap() 8464 .trim() 8465 .parse::<u32>() 8466 .unwrap_or_default(), 8467 1 8468 ); 8469 8470 // Hotplug an extra virtio-net device through L2 VM. 8471 guest 8472 .ssh_command_l1( 8473 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 8474 ) 8475 .unwrap(); 8476 guest 8477 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 8478 .unwrap(); 8479 let vfio_hotplug_output = guest 8480 .ssh_command_l1( 8481 "sudo /mnt/ch-remote \ 8482 --api-socket /tmp/ch_api.sock \ 8483 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 8484 ) 8485 .unwrap(); 8486 assert!(vfio_hotplug_output.contains("{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}")); 8487 8488 thread::sleep(std::time::Duration::new(10, 0)); 8489 8490 // Let's also verify from the third virtio-net device passed to 8491 // the L2 VM. This third device has been hotplugged through the L2 8492 // VM, so this is our way to validate hotplug works for VFIO PCI. 8493 assert_eq!( 8494 guest 8495 .ssh_command_l2_3("grep -c VFIOTAG /proc/cmdline") 8496 .unwrap() 8497 .trim() 8498 .parse::<u32>() 8499 .unwrap_or_default(), 8500 1 8501 ); 8502 8503 // Check the amount of PCI devices appearing in L2 VM. 8504 // There should be one more device than before, raising the count 8505 // up to 9 PCI devices. 8506 assert_eq!( 8507 guest 8508 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8509 .unwrap() 8510 .trim() 8511 .parse::<u32>() 8512 .unwrap_or_default(), 8513 9, 8514 ); 8515 8516 // Let's now verify that we can correctly remove the virtio-net 8517 // device through the "remove-device" command responsible for 8518 // unplugging VFIO devices. 8519 guest 8520 .ssh_command_l1( 8521 "sudo /mnt/ch-remote \ 8522 --api-socket /tmp/ch_api.sock \ 8523 remove-device vfio123", 8524 ) 8525 .unwrap(); 8526 thread::sleep(std::time::Duration::new(10, 0)); 8527 8528 // Check the amount of PCI devices appearing in L2 VM is back down 8529 // to 8 devices. 8530 assert_eq!( 8531 guest 8532 .ssh_command_l2_1("ls /sys/bus/pci/devices | wc -l") 8533 .unwrap() 8534 .trim() 8535 .parse::<u32>() 8536 .unwrap_or_default(), 8537 8, 8538 ); 8539 8540 // Perform memory hotplug in L2 and validate the memory is showing 8541 // up as expected. In order to check, we will use the virtio-net 8542 // device already passed through L2 as a VFIO device, this will 8543 // verify that VFIO devices are functional with memory hotplug. 
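            // Illustrative outline of the steps the code below performs:
            //   1. L2 guest: auto-online hotplugged memory blocks
            //        echo online > /sys/devices/system/memory/auto_online_blocks
            //   2. L1 guest: resize the nested (L2) VM to 1 GiB through its API socket
            //        sudo /mnt/ch-remote --api-socket /tmp/ch_api.sock resize --memory 1073741824
            //   3. Verify the L2 guest now reports roughly 1 GiB of RAM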
8544 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 8545 guest 8546 .ssh_command_l2_1( 8547 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 8548 ) 8549 .unwrap(); 8550 guest 8551 .ssh_command_l1( 8552 "sudo /mnt/ch-remote \ 8553 --api-socket /tmp/ch_api.sock \ 8554 resize --memory 1073741824", 8555 ) 8556 .unwrap(); 8557 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 8558 }); 8559 8560 let _ = child.kill(); 8561 let output = child.wait_with_output().unwrap(); 8562 8563 cleanup_vfio_network_interfaces(); 8564 8565 handle_child_output(r, &output); 8566 } 8567 8568 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) { 8569 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8570 let guest = Guest::new(Box::new(jammy)); 8571 let api_socket = temp_api_path(&guest.tmp_dir); 8572 8573 let mut child = GuestCommand::new(&guest) 8574 .args(["--cpus", "boot=4"]) 8575 .args([ 8576 "--memory", 8577 format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(), 8578 ]) 8579 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8580 .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"]) 8581 .args(["--api-socket", &api_socket]) 8582 .default_disks() 8583 .default_net() 8584 .capture_output() 8585 .spawn() 8586 .unwrap(); 8587 8588 let r = std::panic::catch_unwind(|| { 8589 guest.wait_vm_boot(None).unwrap(); 8590 8591 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8592 8593 guest.enable_memory_hotplug(); 8594 8595 // Add RAM to the VM 8596 let desired_ram = 6 << 30; 8597 resize_command(&api_socket, None, Some(desired_ram), None, None); 8598 thread::sleep(std::time::Duration::new(30, 0)); 8599 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8600 8601 // Check the VFIO device works when RAM is increased to 6GiB 8602 guest.check_nvidia_gpu(); 8603 }); 8604 8605 let _ = child.kill(); 8606 let output = child.wait_with_output().unwrap(); 8607 8608 handle_child_output(r, &output); 8609 } 8610 8611 #[test] 8612 fn test_nvidia_card_memory_hotplug_acpi() { 8613 test_nvidia_card_memory_hotplug("acpi") 8614 } 8615 8616 #[test] 8617 fn test_nvidia_card_memory_hotplug_virtio_mem() { 8618 test_nvidia_card_memory_hotplug("virtio-mem") 8619 } 8620 8621 #[test] 8622 fn test_nvidia_card_pci_hotplug() { 8623 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8624 let guest = Guest::new(Box::new(jammy)); 8625 let api_socket = temp_api_path(&guest.tmp_dir); 8626 8627 let mut child = GuestCommand::new(&guest) 8628 .args(["--cpus", "boot=4"]) 8629 .args(["--memory", "size=4G"]) 8630 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8631 .args(["--api-socket", &api_socket]) 8632 .default_disks() 8633 .default_net() 8634 .capture_output() 8635 .spawn() 8636 .unwrap(); 8637 8638 let r = std::panic::catch_unwind(|| { 8639 guest.wait_vm_boot(None).unwrap(); 8640 8641 // Hotplug the card to the VM 8642 let (cmd_success, cmd_output) = remote_command_w_output( 8643 &api_socket, 8644 "add-device", 8645 Some("id=vfio0,path=/sys/bus/pci/devices/0000:31:00.0/"), 8646 ); 8647 assert!(cmd_success); 8648 assert!(String::from_utf8_lossy(&cmd_output) 8649 .contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}")); 8650 8651 thread::sleep(std::time::Duration::new(10, 0)); 8652 8653 // Check the VFIO device works after hotplug 8654 guest.check_nvidia_gpu(); 8655 }); 8656 8657 let _ = child.kill(); 8658 let output = 
child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }

    #[test]
    fn test_nvidia_card_reboot() {
        let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(jammy));
        let api_socket = temp_api_path(&guest.tmp_dir);

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", "boot=4"])
            .args(["--memory", "size=4G"])
            .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
            .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"])
            .args(["--api-socket", &api_socket])
            .default_disks()
            .default_net()
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Check the VFIO device works after boot
            guest.check_nvidia_gpu();

            guest.reboot_linux(0, None);

            // Check the VFIO device works after reboot
            guest.check_nvidia_gpu();
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }
}

mod live_migration {
    use crate::*;

    fn start_live_migration(
        migration_socket: &str,
        src_api_socket: &str,
        dest_api_socket: &str,
        local: bool,
    ) -> bool {
        // Start receiving the migration on the destination VM
        let mut receive_migration = Command::new(clh_command("ch-remote"))
            .args([
                "--api-socket",
                dest_api_socket,
                "receive-migration",
                &format! {"unix:{migration_socket}"},
            ])
            .stderr(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()
            .unwrap();
        // Give it '1s' to make sure the 'migration_socket' file is properly created
        thread::sleep(std::time::Duration::new(1, 0));
        // Start sending the migration from the source VM

        let mut args = [
            "--api-socket".to_string(),
            src_api_socket.to_string(),
            "send-migration".to_string(),
            format!
{"unix:{migration_socket}"}, 8730 ] 8731 .to_vec(); 8732 8733 if local { 8734 args.insert(3, "--local".to_string()); 8735 } 8736 8737 let mut send_migration = Command::new(clh_command("ch-remote")) 8738 .args(&args) 8739 .stderr(Stdio::piped()) 8740 .stdout(Stdio::piped()) 8741 .spawn() 8742 .unwrap(); 8743 8744 // The 'send-migration' command should be executed successfully within the given timeout 8745 let send_success = if let Some(status) = send_migration 8746 .wait_timeout(std::time::Duration::from_secs(30)) 8747 .unwrap() 8748 { 8749 status.success() 8750 } else { 8751 false 8752 }; 8753 8754 if !send_success { 8755 let _ = send_migration.kill(); 8756 let output = send_migration.wait_with_output().unwrap(); 8757 eprintln!("\n\n==== Start 'send_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'send_migration' output ====\n\n", 8758 String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr)); 8759 } 8760 8761 // The 'receive-migration' command should be executed successfully within the given timeout 8762 let receive_success = if let Some(status) = receive_migration 8763 .wait_timeout(std::time::Duration::from_secs(30)) 8764 .unwrap() 8765 { 8766 status.success() 8767 } else { 8768 false 8769 }; 8770 8771 if !receive_success { 8772 let _ = receive_migration.kill(); 8773 let output = receive_migration.wait_with_output().unwrap(); 8774 eprintln!("\n\n==== Start 'receive_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'receive_migration' output ====\n\n", 8775 String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr)); 8776 } 8777 8778 send_success && receive_success 8779 } 8780 8781 fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! { 8782 let mut src_vm = src_vm; 8783 let mut dest_vm = dest_vm; 8784 8785 let _ = src_vm.kill(); 8786 let src_output = src_vm.wait_with_output().unwrap(); 8787 eprintln!( 8788 "\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====", 8789 String::from_utf8_lossy(&src_output.stdout) 8790 ); 8791 eprintln!( 8792 "\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====", 8793 String::from_utf8_lossy(&src_output.stderr) 8794 ); 8795 let _ = dest_vm.kill(); 8796 let dest_output = dest_vm.wait_with_output().unwrap(); 8797 eprintln!( 8798 "\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====", 8799 String::from_utf8_lossy(&dest_output.stdout) 8800 ); 8801 eprintln!( 8802 "\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====", 8803 String::from_utf8_lossy(&dest_output.stderr) 8804 ); 8805 8806 if let Some(ovs_vm) = ovs_vm { 8807 let mut ovs_vm = ovs_vm; 8808 let _ = ovs_vm.kill(); 8809 let ovs_output = ovs_vm.wait_with_output().unwrap(); 8810 eprintln!( 8811 "\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====", 8812 String::from_utf8_lossy(&ovs_output.stdout) 8813 ); 8814 eprintln!( 8815 "\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====", 8816 String::from_utf8_lossy(&ovs_output.stderr) 8817 ); 8818 8819 cleanup_ovs_dpdk(); 8820 } 8821 8822 panic!("Test failed: {message}") 8823 } 8824 8825 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 8826 // same host. It ensures the following behaviors: 8827 // 1. The source VM is up and functional (including various virtio-devices are working properly); 8828 // 2. 
The 'send-migration' and 'receive-migration' commands finished successfully;
    // 3. The source VM terminated gracefully after live migration;
    // 4. The destination VM is functional (including various virtio-devices are working properly) after
    // live migration;
    // Note: This test does not use vsock as we can't create two identical vsock on the same host.
    fn _test_live_migration(upgrade_test: bool, local: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let kernel_path = direct_kernel_boot_path();
        let console_text = String::from("On a branch floating down river a cricket, singing.");
        let net_id = "net123";
        let net_params = format!(
            "id={},tap=,mac={},ip={},mask=255.255.255.0",
            net_id, guest.network.guest_mac, guest.network.host_ip
        );

        let memory_param: &[&str] = if local {
            &["--memory", "size=4G,shared=on"]
        } else {
            &["--memory", "size=4G"]
        };

        let boot_vcpus = 2;
        let max_vcpus = 4;

        let pmem_temp_file = TempFile::new().unwrap();
        pmem_temp_file.as_file().set_len(128 << 20).unwrap();
        std::process::Command::new("mkfs.ext4")
            .arg(pmem_temp_file.as_path())
            .output()
            .expect("Expect creating disk image to succeed");
        let pmem_path = String::from("/dev/pmem0");

        // Start the source VM
        let src_vm_path = if !upgrade_test {
            clh_command("cloud-hypervisor")
        } else {
            cloud_hypervisor_release_path()
        };
        let src_api_socket = temp_api_path(&guest.tmp_dir);
        let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
        src_vm_cmd
            .args([
                "--cpus",
                format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
            ])
            .args(memory_param)
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .args(["--api-socket", &src_api_socket])
            .args([
                "--pmem",
                format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
            ]);
        let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();

        // Start the destination VM
        let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
        dest_api_socket.push_str(".dest");
        let mut dest_child = GuestCommand::new(&guest)
            .args(["--api-socket", &dest_api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Make sure the source VM is functional
            // Check the number of vCPUs
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);

            // Check the guest RAM
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

            // Check the guest virtio-devices, e.g. block, rng, console, and net
            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));

            // x86_64: Following what's done in the `test_snapshot_restore`, we need
            // to make sure that removing and adding back the virtio-net device does
            // not break the live-migration support for virtio-pci.
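            // The hot-unplug/hot-plug below goes through the source VM's API socket via
            // the `remote_command()` helper. Assuming that helper wraps ch-remote (an
            // assumption; it is defined elsewhere in this file), the two operations are
            // roughly equivalent to:
            //
            //   ch-remote --api-socket <src_api_socket> remove-device net123
            //   ch-remote --api-socket <src_api_socket> add-net \
            //       id=net123,tap=,mac=<guest_mac>,ip=<host_ip>,mask=255.255.255.0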
            #[cfg(target_arch = "x86_64")]
            {
                assert!(remote_command(
                    &src_api_socket,
                    "remove-device",
                    Some(net_id),
                ));
                thread::sleep(std::time::Duration::new(10, 0));

                // Plug the virtio-net device again
                assert!(remote_command(
                    &src_api_socket,
                    "add-net",
                    Some(net_params.as_str()),
                ));
                thread::sleep(std::time::Duration::new(10, 0));
            }

            // Start the live-migration
            let migration_socket = String::from(
                guest
                    .tmp_dir
                    .as_path()
                    .join("live-migration.sock")
                    .to_str()
                    .unwrap(),
            );

            assert!(
                start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
                "Unsuccessful command: 'send-migration' or 'receive-migration'."
            );
        });

        // Check and report any errors that occurred during the live-migration
        if r.is_err() {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "Error occurred during live-migration",
            );
        }

        // Check the source VM has terminated successfully (give it '3s' to settle)
        thread::sleep(std::time::Duration::new(3, 0));
        if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "source VM was not terminated successfully.",
            );
        };

        // Post live-migration check to make sure the destination VM is functional
        let r = std::panic::catch_unwind(|| {
            // Perform same checks to validate VM has been properly migrated
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
        });

        // Clean-up the destination VM and make sure it terminated correctly
        let _ = dest_child.kill();
        let dest_output = dest_child.wait_with_output().unwrap();
        handle_child_output(r, &dest_output);

        // Check the destination VM has the expected 'console_text' in its output
        let r = std::panic::catch_unwind(|| {
            assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
        });
        handle_child_output(r, &dest_output);
    }

    fn _test_live_migration_balloon(upgrade_test: bool, local: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let kernel_path = direct_kernel_boot_path();
        let console_text = String::from("On a branch floating down river a cricket, singing.");
        let net_id = "net123";
        let net_params = format!(
            "id={},tap=,mac={},ip={},mask=255.255.255.0",
            net_id, guest.network.guest_mac, guest.network.host_ip
        );

        let memory_param: &[&str] = if local {
            &[
                "--memory",
                "size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on",
                "--balloon",
                "size=0",
            ]
        } else {
            &[
                "--memory",
                "size=4G,hotplug_method=virtio-mem,hotplug_size=8G",
                "--balloon",
                "size=0",
            ]
        };

        let boot_vcpus = 2;
        let max_vcpus = 4;

        let pmem_temp_file = TempFile::new().unwrap();
        pmem_temp_file.as_file().set_len(128 << 20).unwrap();
        std::process::Command::new("mkfs.ext4")
            .arg(pmem_temp_file.as_path())
            .output()
            .expect("Expect creating disk image to succeed");
        let pmem_path = String::from("/dev/pmem0");

        // Start the source VM
        let
src_vm_path = if !upgrade_test { 9027 clh_command("cloud-hypervisor") 9028 } else { 9029 cloud_hypervisor_release_path() 9030 }; 9031 let src_api_socket = temp_api_path(&guest.tmp_dir); 9032 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9033 src_vm_cmd 9034 .args([ 9035 "--cpus", 9036 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9037 ]) 9038 .args(memory_param) 9039 .args(["--kernel", kernel_path.to_str().unwrap()]) 9040 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9041 .default_disks() 9042 .args(["--net", net_params.as_str()]) 9043 .args(["--api-socket", &src_api_socket]) 9044 .args([ 9045 "--pmem", 9046 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9047 ]); 9048 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9049 9050 // Start the destination VM 9051 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9052 dest_api_socket.push_str(".dest"); 9053 let mut dest_child = GuestCommand::new(&guest) 9054 .args(["--api-socket", &dest_api_socket]) 9055 .capture_output() 9056 .spawn() 9057 .unwrap(); 9058 9059 let r = std::panic::catch_unwind(|| { 9060 guest.wait_vm_boot(None).unwrap(); 9061 9062 // Make sure the source VM is functaionl 9063 // Check the number of vCPUs 9064 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9065 9066 // Check the guest RAM 9067 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9068 // Increase the guest RAM 9069 resize_command(&src_api_socket, None, Some(6 << 30), None, None); 9070 thread::sleep(std::time::Duration::new(5, 0)); 9071 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9072 // Use balloon to remove RAM from the VM 9073 resize_command(&src_api_socket, None, None, Some(1 << 30), None); 9074 thread::sleep(std::time::Duration::new(5, 0)); 9075 let total_memory = guest.get_total_memory().unwrap_or_default(); 9076 assert!(total_memory > 4_800_000); 9077 assert!(total_memory < 5_760_000); 9078 9079 // Check the guest virtio-devices, e.g. block, rng, console, and net 9080 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9081 9082 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9083 // to make sure that removing and adding back the virtio-net device does 9084 // not break the live-migration support for virtio-pci. 9085 #[cfg(target_arch = "x86_64")] 9086 { 9087 assert!(remote_command( 9088 &src_api_socket, 9089 "remove-device", 9090 Some(net_id), 9091 )); 9092 thread::sleep(std::time::Duration::new(10, 0)); 9093 9094 // Plug the virtio-net device again 9095 assert!(remote_command( 9096 &src_api_socket, 9097 "add-net", 9098 Some(net_params.as_str()), 9099 )); 9100 thread::sleep(std::time::Duration::new(10, 0)); 9101 } 9102 9103 // Start the live-migration 9104 let migration_socket = String::from( 9105 guest 9106 .tmp_dir 9107 .as_path() 9108 .join("live-migration.sock") 9109 .to_str() 9110 .unwrap(), 9111 ); 9112 9113 assert!( 9114 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9115 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
9116 ); 9117 }); 9118 9119 // Check and report any errors occurred during the live-migration 9120 if r.is_err() { 9121 print_and_panic( 9122 src_child, 9123 dest_child, 9124 None, 9125 "Error occurred during live-migration", 9126 ); 9127 } 9128 9129 // Check the source vm has been terminated successful (give it '3s' to settle) 9130 thread::sleep(std::time::Duration::new(3, 0)); 9131 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9132 print_and_panic( 9133 src_child, 9134 dest_child, 9135 None, 9136 "source VM was not terminated successfully.", 9137 ); 9138 }; 9139 9140 // Post live-migration check to make sure the destination VM is funcational 9141 let r = std::panic::catch_unwind(|| { 9142 // Perform same checks to validate VM has been properly migrated 9143 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9144 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9145 9146 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9147 9148 // Perform checks on guest RAM using balloon 9149 let total_memory = guest.get_total_memory().unwrap_or_default(); 9150 assert!(total_memory > 4_800_000); 9151 assert!(total_memory < 5_760_000); 9152 // Deflate balloon to restore entire RAM to the VM 9153 resize_command(&dest_api_socket, None, None, Some(0), None); 9154 thread::sleep(std::time::Duration::new(5, 0)); 9155 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9156 // Decrease guest RAM with virtio-mem 9157 resize_command(&dest_api_socket, None, Some(5 << 30), None, None); 9158 thread::sleep(std::time::Duration::new(5, 0)); 9159 let total_memory = guest.get_total_memory().unwrap_or_default(); 9160 assert!(total_memory > 4_800_000); 9161 assert!(total_memory < 5_760_000); 9162 }); 9163 9164 // Clean-up the destination VM and make sure it terminated correctly 9165 let _ = dest_child.kill(); 9166 let dest_output = dest_child.wait_with_output().unwrap(); 9167 handle_child_output(r, &dest_output); 9168 9169 // Check the destination VM has the expected 'concole_text' from its output 9170 let r = std::panic::catch_unwind(|| { 9171 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9172 }); 9173 handle_child_output(r, &dest_output); 9174 } 9175 9176 fn _test_live_migration_numa(upgrade_test: bool, local: bool) { 9177 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9178 let guest = Guest::new(Box::new(focal)); 9179 let kernel_path = direct_kernel_boot_path(); 9180 let console_text = String::from("On a branch floating down river a cricket, singing."); 9181 let net_id = "net123"; 9182 let net_params = format!( 9183 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9184 net_id, guest.network.guest_mac, guest.network.host_ip 9185 ); 9186 9187 let memory_param: &[&str] = if local { 9188 &[ 9189 "--memory", 9190 "size=0,hotplug_method=virtio-mem,shared=on", 9191 "--memory-zone", 9192 "id=mem0,size=1G,hotplug_size=4G,shared=on", 9193 "--memory-zone", 9194 "id=mem1,size=1G,hotplug_size=4G,shared=on", 9195 "--memory-zone", 9196 "id=mem2,size=2G,hotplug_size=4G,shared=on", 9197 "--numa", 9198 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9199 "--numa", 9200 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9201 "--numa", 9202 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9203 ] 9204 } else { 9205 &[ 9206 "--memory", 9207 "size=0,hotplug_method=virtio-mem", 9208 "--memory-zone", 9209 
"id=mem0,size=1G,hotplug_size=4G", 9210 "--memory-zone", 9211 "id=mem1,size=1G,hotplug_size=4G", 9212 "--memory-zone", 9213 "id=mem2,size=2G,hotplug_size=4G", 9214 "--numa", 9215 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9216 "--numa", 9217 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9218 "--numa", 9219 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9220 ] 9221 }; 9222 9223 let boot_vcpus = 6; 9224 let max_vcpus = 12; 9225 9226 let pmem_temp_file = TempFile::new().unwrap(); 9227 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9228 std::process::Command::new("mkfs.ext4") 9229 .arg(pmem_temp_file.as_path()) 9230 .output() 9231 .expect("Expect creating disk image to succeed"); 9232 let pmem_path = String::from("/dev/pmem0"); 9233 9234 // Start the source VM 9235 let src_vm_path = if !upgrade_test { 9236 clh_command("cloud-hypervisor") 9237 } else { 9238 cloud_hypervisor_release_path() 9239 }; 9240 let src_api_socket = temp_api_path(&guest.tmp_dir); 9241 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9242 src_vm_cmd 9243 .args([ 9244 "--cpus", 9245 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9246 ]) 9247 .args(memory_param) 9248 .args(["--kernel", kernel_path.to_str().unwrap()]) 9249 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9250 .default_disks() 9251 .args(["--net", net_params.as_str()]) 9252 .args(["--api-socket", &src_api_socket]) 9253 .args([ 9254 "--pmem", 9255 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9256 ]); 9257 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9258 9259 // Start the destination VM 9260 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9261 dest_api_socket.push_str(".dest"); 9262 let mut dest_child = GuestCommand::new(&guest) 9263 .args(["--api-socket", &dest_api_socket]) 9264 .capture_output() 9265 .spawn() 9266 .unwrap(); 9267 9268 let r = std::panic::catch_unwind(|| { 9269 guest.wait_vm_boot(None).unwrap(); 9270 9271 // Make sure the source VM is functaionl 9272 // Check the number of vCPUs 9273 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9274 9275 // Check the guest RAM 9276 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 9277 9278 // Check the guest virtio-devices, e.g. block, rng, console, and net 9279 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9280 9281 // Check the NUMA parameters are applied correctly and resize 9282 // each zone to test the case where we migrate a VM with the 9283 // virtio-mem regions being used. 9284 { 9285 guest.check_numa_common( 9286 Some(&[960_000, 960_000, 1_920_000]), 9287 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9288 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9289 ); 9290 9291 // AArch64 currently does not support hotplug, and therefore we only 9292 // test hotplug-related function on x86_64 here. 9293 #[cfg(target_arch = "x86_64")] 9294 { 9295 guest.enable_memory_hotplug(); 9296 9297 // Resize every memory zone and check each associated NUMA node 9298 // has been assigned the right amount of memory. 
9299 resize_zone_command(&src_api_socket, "mem0", "2G"); 9300 resize_zone_command(&src_api_socket, "mem1", "2G"); 9301 resize_zone_command(&src_api_socket, "mem2", "3G"); 9302 thread::sleep(std::time::Duration::new(5, 0)); 9303 9304 guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None); 9305 } 9306 } 9307 9308 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9309 // to make sure that removing and adding back the virtio-net device does 9310 // not break the live-migration support for virtio-pci. 9311 #[cfg(target_arch = "x86_64")] 9312 { 9313 assert!(remote_command( 9314 &src_api_socket, 9315 "remove-device", 9316 Some(net_id), 9317 )); 9318 thread::sleep(std::time::Duration::new(10, 0)); 9319 9320 // Plug the virtio-net device again 9321 assert!(remote_command( 9322 &src_api_socket, 9323 "add-net", 9324 Some(net_params.as_str()), 9325 )); 9326 thread::sleep(std::time::Duration::new(10, 0)); 9327 } 9328 9329 // Start the live-migration 9330 let migration_socket = String::from( 9331 guest 9332 .tmp_dir 9333 .as_path() 9334 .join("live-migration.sock") 9335 .to_str() 9336 .unwrap(), 9337 ); 9338 9339 assert!( 9340 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9341 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9342 ); 9343 }); 9344 9345 // Check and report any errors occurred during the live-migration 9346 if r.is_err() { 9347 print_and_panic( 9348 src_child, 9349 dest_child, 9350 None, 9351 "Error occurred during live-migration", 9352 ); 9353 } 9354 9355 // Check the source vm has been terminated successful (give it '3s' to settle) 9356 thread::sleep(std::time::Duration::new(3, 0)); 9357 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9358 print_and_panic( 9359 src_child, 9360 dest_child, 9361 None, 9362 "source VM was not terminated successfully.", 9363 ); 9364 }; 9365 9366 // Post live-migration check to make sure the destination VM is funcational 9367 let r = std::panic::catch_unwind(|| { 9368 // Perform same checks to validate VM has been properly migrated 9369 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9370 #[cfg(target_arch = "x86_64")] 9371 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 9372 #[cfg(target_arch = "aarch64")] 9373 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9374 9375 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9376 9377 // Perform NUMA related checks 9378 { 9379 #[cfg(target_arch = "aarch64")] 9380 { 9381 guest.check_numa_common( 9382 Some(&[960_000, 960_000, 1_920_000]), 9383 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9384 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9385 ); 9386 } 9387 9388 // AArch64 currently does not support hotplug, and therefore we only 9389 // test hotplug-related function on x86_64 here. 9390 #[cfg(target_arch = "x86_64")] 9391 { 9392 guest.check_numa_common( 9393 Some(&[1_920_000, 1_920_000, 2_880_000]), 9394 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9395 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9396 ); 9397 9398 guest.enable_memory_hotplug(); 9399 9400 // Resize every memory zone and check each associated NUMA node 9401 // has been assigned the right amount of memory. 
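                    // For reference: `resize_zone_command()` is a helper (defined elsewhere
                    // in this file) that resizes a single virtio-mem backed memory zone
                    // through the API socket. With ch-remote this presumably maps to
                    // something like the following (an assumption, not shown here):
                    //
                    //   ch-remote --api-socket <dest_api_socket> resize-zone --id mem0 --size 4G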
9402 resize_zone_command(&dest_api_socket, "mem0", "4G"); 9403 resize_zone_command(&dest_api_socket, "mem1", "4G"); 9404 resize_zone_command(&dest_api_socket, "mem2", "4G"); 9405 // Resize to the maximum amount of CPUs and check each NUMA 9406 // node has been assigned the right CPUs set. 9407 resize_command(&dest_api_socket, Some(max_vcpus), None, None, None); 9408 thread::sleep(std::time::Duration::new(5, 0)); 9409 9410 guest.check_numa_common( 9411 Some(&[3_840_000, 3_840_000, 3_840_000]), 9412 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 9413 None, 9414 ); 9415 } 9416 } 9417 }); 9418 9419 // Clean-up the destination VM and make sure it terminated correctly 9420 let _ = dest_child.kill(); 9421 let dest_output = dest_child.wait_with_output().unwrap(); 9422 handle_child_output(r, &dest_output); 9423 9424 // Check the destination VM has the expected 'concole_text' from its output 9425 let r = std::panic::catch_unwind(|| { 9426 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9427 }); 9428 handle_child_output(r, &dest_output); 9429 } 9430 9431 fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) { 9432 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9433 let guest = Guest::new(Box::new(focal)); 9434 let kernel_path = direct_kernel_boot_path(); 9435 let console_text = String::from("On a branch floating down river a cricket, singing."); 9436 let net_id = "net123"; 9437 let net_params = format!( 9438 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9439 net_id, guest.network.guest_mac, guest.network.host_ip 9440 ); 9441 9442 let memory_param: &[&str] = if local { 9443 &["--memory", "size=4G,shared=on"] 9444 } else { 9445 &["--memory", "size=4G"] 9446 }; 9447 9448 let boot_vcpus = 2; 9449 let max_vcpus = 4; 9450 9451 let pmem_temp_file = TempFile::new().unwrap(); 9452 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9453 std::process::Command::new("mkfs.ext4") 9454 .arg(pmem_temp_file.as_path()) 9455 .output() 9456 .expect("Expect creating disk image to succeed"); 9457 let pmem_path = String::from("/dev/pmem0"); 9458 9459 // Start the source VM 9460 let src_vm_path = if !upgrade_test { 9461 clh_command("cloud-hypervisor") 9462 } else { 9463 cloud_hypervisor_release_path() 9464 }; 9465 let src_api_socket = temp_api_path(&guest.tmp_dir); 9466 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9467 src_vm_cmd 9468 .args([ 9469 "--cpus", 9470 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9471 ]) 9472 .args(memory_param) 9473 .args(["--kernel", kernel_path.to_str().unwrap()]) 9474 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9475 .default_disks() 9476 .args(["--net", net_params.as_str()]) 9477 .args(["--api-socket", &src_api_socket]) 9478 .args([ 9479 "--pmem", 9480 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9481 ]) 9482 .args(["--watchdog"]); 9483 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9484 9485 // Start the destination VM 9486 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9487 dest_api_socket.push_str(".dest"); 9488 let mut dest_child = GuestCommand::new(&guest) 9489 .args(["--api-socket", &dest_api_socket]) 9490 .capture_output() 9491 .spawn() 9492 .unwrap(); 9493 9494 let r = std::panic::catch_unwind(|| { 9495 guest.wait_vm_boot(None).unwrap(); 9496 9497 // Make sure the source VM is functaionl 9498 // Check the number of vCPUs 9499 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9500 // 
    fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let kernel_path = direct_kernel_boot_path();
        let console_text = String::from("On a branch floating down river a cricket, singing.");
        let net_id = "net123";
        let net_params = format!(
            "id={},tap=,mac={},ip={},mask=255.255.255.0",
            net_id, guest.network.guest_mac, guest.network.host_ip
        );

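        // Note: for the local (same-host) variant the guest memory is created
        // with `shared=on`, so it is backed by a shareable mapping that the
        // destination process can take over rather than having the memory
        // content copied across the migration socket.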
        let memory_param: &[&str] = if local {
            &["--memory", "size=4G,shared=on"]
        } else {
            &["--memory", "size=4G"]
        };

        let boot_vcpus = 2;
        let max_vcpus = 4;

        let pmem_temp_file = TempFile::new().unwrap();
        pmem_temp_file.as_file().set_len(128 << 20).unwrap();
        std::process::Command::new("mkfs.ext4")
            .arg(pmem_temp_file.as_path())
            .output()
            .expect("Expect creating disk image to succeed");
        let pmem_path = String::from("/dev/pmem0");

        // Start the source VM
        let src_vm_path = if !upgrade_test {
            clh_command("cloud-hypervisor")
        } else {
            cloud_hypervisor_release_path()
        };
        let src_api_socket = temp_api_path(&guest.tmp_dir);
        let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
        src_vm_cmd
            .args([
                "--cpus",
                format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
            ])
            .args(memory_param)
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .args(["--api-socket", &src_api_socket])
            .args([
                "--pmem",
                format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
            ])
            .args(["--watchdog"]);
        let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();

        // Start the destination VM
        let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
        dest_api_socket.push_str(".dest");
        let mut dest_child = GuestCommand::new(&guest)
            .args(["--api-socket", &dest_api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Make sure the source VM is functional
            // Check the number of vCPUs
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
            // Check the guest RAM
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
            // Check the guest virtio-devices, e.g. block, rng, console, and net
            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
            // x86_64: Following what's done in `test_snapshot_restore`, we need
            // to make sure that removing and adding back the virtio-net device
            // does not break the live-migration support for virtio-pci.
            #[cfg(target_arch = "x86_64")]
            {
                assert!(remote_command(
                    &src_api_socket,
                    "remove-device",
                    Some(net_id),
                ));
                thread::sleep(std::time::Duration::new(10, 0));

                // Plug the virtio-net device again
                assert!(remote_command(
                    &src_api_socket,
                    "add-net",
                    Some(net_params.as_str()),
                ));
                thread::sleep(std::time::Duration::new(10, 0));
            }

            // Enable the watchdog and ensure it is functional
            let mut expected_reboot_count = 1;
            // Enable the watchdog with a 15s timeout
            enable_guest_watchdog(&guest, 15);
            // Reboot and check that systemd has activated the watchdog
            guest.ssh_command("sudo reboot").unwrap();
            guest.wait_vm_boot(None).unwrap();
            expected_reboot_count += 1;
            assert_eq!(get_reboot_count(&guest), expected_reboot_count);
            assert_eq!(
                guest
                    .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"")
                    .unwrap()
                    .trim()
                    .parse::<u32>()
                    .unwrap_or_default(),
                2
            );
            // Allow some normal time to elapse to check we don't get spurious reboots
            thread::sleep(std::time::Duration::new(40, 0));
            // Check no reboot
            assert_eq!(get_reboot_count(&guest), expected_reboot_count);

            // Start the live-migration
            let migration_socket = String::from(
                guest
                    .tmp_dir
                    .as_path()
                    .join("live-migration.sock")
                    .to_str()
                    .unwrap(),
            );

            assert!(
                start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
                "Unsuccessful command: 'send-migration' or 'receive-migration'."
            );
        });

        // Check and report any errors that occurred during the live-migration
        if r.is_err() {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "Error occurred during live-migration",
            );
        }

        // Check the source VM has terminated successfully (give it 3s to settle)
        thread::sleep(std::time::Duration::new(3, 0));
        if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "source VM was not terminated successfully.",
            );
        };

        // Post live-migration check to make sure the destination VM is functional
        let r = std::panic::catch_unwind(|| {
            // Perform the same checks to validate the VM has been properly migrated
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));

            // Perform checks on the watchdog
            let mut expected_reboot_count = 2;

            // Allow some normal time to elapse to check we don't get spurious reboots
            thread::sleep(std::time::Duration::new(40, 0));
            // Check no reboot
            assert_eq!(get_reboot_count(&guest), expected_reboot_count);

            // Trigger a panic (sync first). We need to do this inside a screen
            // with a delay so the SSH command returns.
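            // 'echo s > /proc/sysrq-trigger' syncs the filesystems and 'echo c'
            // crashes the kernel, so the guest stops feeding the watchdog and is
            // expected to be rebooted by it.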
            guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap();
            // Allow some time for the watchdog to trigger (max 30s) and the reboot to happen
            guest.wait_vm_boot(Some(50)).unwrap();
            // Check a reboot is triggered by the watchdog
            expected_reboot_count += 1;
            assert_eq!(get_reboot_count(&guest), expected_reboot_count);

            #[cfg(target_arch = "x86_64")]
            {
                // Now pause the VM and remain offline for 30s
                assert!(remote_command(&dest_api_socket, "pause", None));
                thread::sleep(std::time::Duration::new(30, 0));
                assert!(remote_command(&dest_api_socket, "resume", None));

                // Check no reboot
                assert_eq!(get_reboot_count(&guest), expected_reboot_count);
            }
        });

        // Clean up the destination VM and make sure it terminated correctly
        let _ = dest_child.kill();
        let dest_output = dest_child.wait_with_output().unwrap();
        handle_child_output(r, &dest_output);

        // Check the destination VM has the expected 'console_text' in its output
        let r = std::panic::catch_unwind(|| {
            assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
        });
        handle_child_output(r, &dest_output);
    }

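    // Live migration of a VM whose network interface is a vhost-user port on an
    // OVS-DPDK bridge: a second "OVS" VM acts as the traffic peer, and after the
    // migration the test verifies the connection through the bridge is still
    // usable from the migrated guest.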
    fn _test_live_migration_ovs_dpdk(upgrade_test: bool, local: bool) {
        let ovs_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let ovs_guest = Guest::new(Box::new(ovs_focal));

        let migration_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let migration_guest = Guest::new(Box::new(migration_focal));
        let src_api_socket = temp_api_path(&migration_guest.tmp_dir);

        // Start two VMs connected through ovs-dpdk; one of them is the source VM
        // for the live-migration
        let (mut ovs_child, mut src_child) =
            setup_ovs_dpdk_guests(&ovs_guest, &migration_guest, &src_api_socket, upgrade_test);

        // Start the destination VM
        let mut dest_api_socket = temp_api_path(&migration_guest.tmp_dir);
        dest_api_socket.push_str(".dest");
        let mut dest_child = GuestCommand::new(&migration_guest)
            .args(["--api-socket", &dest_api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            // Give it 1s to make sure the 'dest_api_socket' file is properly created
            thread::sleep(std::time::Duration::new(1, 0));

            // Start the live-migration
            let migration_socket = String::from(
                migration_guest
                    .tmp_dir
                    .as_path()
                    .join("live-migration.sock")
                    .to_str()
                    .unwrap(),
            );

            assert!(
                start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
                "Unsuccessful command: 'send-migration' or 'receive-migration'."
            );
        });

        // Check and report any errors that occurred during the live-migration
        if r.is_err() {
            print_and_panic(
                src_child,
                dest_child,
                Some(ovs_child),
                "Error occurred during live-migration",
            );
        }

        // Check the source VM has terminated successfully (give it 3s to settle)
        thread::sleep(std::time::Duration::new(3, 0));
        if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
            print_and_panic(
                src_child,
                dest_child,
                Some(ovs_child),
                "source VM was not terminated successfully.",
            );
        };

        // Post live-migration check to make sure the destination VM is functional
        let r = std::panic::catch_unwind(|| {
            // Perform the same checks to validate the VM has been properly migrated
            // Spawn a new netcat listener in the OVS VM
            let guest_ip = ovs_guest.network.guest_ip.clone();
            thread::spawn(move || {
                ssh_command_ip(
                    "nc -l 12345",
                    &guest_ip,
                    DEFAULT_SSH_RETRIES,
                    DEFAULT_SSH_TIMEOUT,
                )
                .unwrap();
            });

            // Wait for the server to be listening
            thread::sleep(std::time::Duration::new(5, 0));

            // And check the connection is still functional after the live-migration
            migration_guest
                .ssh_command("nc -vz 172.100.0.1 12345")
                .unwrap();
        });

        // Clean up the destination VM and the OVS VM, and make sure they terminated correctly
        let _ = dest_child.kill();
        let _ = ovs_child.kill();
        let dest_output = dest_child.wait_with_output().unwrap();
        let ovs_output = ovs_child.wait_with_output().unwrap();

        cleanup_ovs_dpdk();

        handle_child_output(r, &dest_output);
        handle_child_output(Ok(()), &ovs_output);
    }

    mod live_migration_parallel {
        use super::*;
        #[test]
        fn test_live_migration_basic() {
            _test_live_migration(false, false)
        }

        #[test]
        fn test_live_migration_local() {
            _test_live_migration(false, true)
        }

        #[test]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_numa() {
            _test_live_migration_numa(false, false)
        }

        #[test]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_numa_local() {
            _test_live_migration_numa(false, true)
        }

        #[test]
        fn test_live_migration_watchdog() {
            _test_live_migration_watchdog(false, false)
        }

        #[test]
        fn test_live_migration_watchdog_local() {
            _test_live_migration_watchdog(false, true)
        }

        #[test]
        fn test_live_migration_balloon() {
            _test_live_migration_balloon(false, false)
        }

        #[test]
        fn test_live_migration_balloon_local() {
            _test_live_migration_balloon(false, true)
        }

        #[test]
        fn test_live_upgrade_basic() {
            _test_live_migration(true, false)
        }

        #[test]
        fn test_live_upgrade_local() {
            _test_live_migration(true, true)
        }

        #[test]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_numa() {
            _test_live_migration_numa(true, false)
        }

        #[test]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_numa_local() {
            _test_live_migration_numa(true, true)
        }

        #[test]
        fn test_live_upgrade_watchdog() {
            _test_live_migration_watchdog(true, false)
        }

        #[test]
        fn test_live_upgrade_watchdog_local() {
            _test_live_migration_watchdog(true, true)
        }

        #[test]
        fn test_live_upgrade_balloon() {
            _test_live_migration_balloon(true, false)
        }

        #[test]
        fn test_live_upgrade_balloon_local() {
            _test_live_migration_balloon(true, true)
        }
    }

    mod live_migration_sequential {
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        use super::*;

        // The ovs-dpdk tests must run sequentially because they rely on the same
        // ovs-dpdk setup
        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_ovs_dpdk() {
            _test_live_migration_ovs_dpdk(false, false);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_ovs_dpdk_local() {
            _test_live_migration_ovs_dpdk(false, true);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_ovs_dpdk() {
            _test_live_migration_ovs_dpdk(true, false);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_ovs_dpdk_local() {
            _test_live_migration_ovs_dpdk(true, true);
        }
    }
}

#[cfg(target_arch = "aarch64")]
mod aarch64_acpi {
    use crate::*;

    #[test]
    fn test_simple_launch_acpi() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());

        vec![Box::new(focal)].drain(..).for_each(|disk_config| {
            let guest = Guest::new(disk_config);

            let mut child = GuestCommand::new(&guest)
                .args(["--cpus", "boot=1"])
                .args(["--memory", "size=512M"])
                .args(["--kernel", edk2_path().to_str().unwrap()])
                .default_disks()
                .default_net()
                .args(["--serial", "tty", "--console", "off"])
                .capture_output()
                .spawn()
                .unwrap();

            let r = std::panic::catch_unwind(|| {
                guest.wait_vm_boot(Some(120)).unwrap();

                assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
                assert!(guest.get_total_memory().unwrap_or_default() > 400_000);
                assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000");
            });

            let _ = child.kill();
            let output = child.wait_with_output().unwrap();

            handle_child_output(r, &output);
        });
    }

    #[test]
    fn test_guest_numa_nodes_acpi() {
        _test_guest_numa_nodes(true);
    }

    #[test]
    fn test_cpu_topology_421_acpi() {
        test_cpu_topology(4, 2, 1, true);
    }

    #[test]
    fn test_cpu_topology_142_acpi() {
        test_cpu_topology(1, 4, 2, true);
    }

    #[test]
    fn test_cpu_topology_262_acpi() {
        test_cpu_topology(2, 6, 2, true);
    }

    #[test]
    fn test_power_button_acpi() {
        _test_power_button(true);
    }

    #[test]
    fn test_virtio_iommu() {
        _test_virtio_iommu(true)
    }
}

mod rate_limiter {
    use super::*;

    // Check if the 'measured' rate is within the expected 'difference'
    // (expressed as a fraction, e.g. 0.1 for 10%) of the given 'limit' rate.
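    // For example (illustrative numbers): with limit = 1000.0 and
    // difference = 0.1, any measured value strictly between 900.0 and 1100.0
    // passes the check.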
    fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool {
        let upper_limit = limit * (1_f64 + difference);
        let lower_limit = limit * (1_f64 - difference);

        if measured > lower_limit && measured < upper_limit {
            return true;
        }

        eprintln!(
            "\n\n==== check_rate_limit failed! ====\n\nmeasured={measured}, lower_limit={lower_limit}, upper_limit={upper_limit}\n\n"
        );

        false
    }

    fn _test_rate_limiter_net(rx: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));

        let test_timeout = 10;
        let num_queues = 2;
        let queue_size = 256;
        let bw_size = 10485760_u64; // bytes
        let bw_refill_time = 100; // ms
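        // The rate limiter grants 'bw_size' bytes every 'bw_refill_time' ms, so
        // the expected rate in bits per second is bytes * 8 * (1000 / refill_ms):
        // 10485760 * 8 * 10 = 838_860_800 bit/s (i.e. 100 MiB/s).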
        let limit_bps = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64;

        let net_params = format!(
            "tap=,mac={},ip={},mask=255.255.255.0,num_queues={},queue_size={},bw_size={},bw_refill_time={}",
            guest.network.guest_mac,
            guest.network.host_ip,
            num_queues,
            queue_size,
            bw_size,
            bw_refill_time,
        );

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={}", num_queues / 2)])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();
            let measured_bps =
                measure_virtio_net_throughput(test_timeout, num_queues / 2, &guest, rx, true)
                    .unwrap();
            assert!(check_rate_limit(measured_bps, limit_bps, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_net_rx() {
        _test_rate_limiter_net(true);
    }

    #[test]
    fn test_rate_limiter_net_tx() {
        _test_rate_limiter_net(false);
    }

    fn _test_rate_limiter_block(bandwidth: bool) {
        let test_timeout = 10;
        let num_queues = 1;
        let fio_ops = FioOps::RandRW;

        let bw_size = if bandwidth {
            10485760_u64 // bytes
        } else {
            100_u64 // I/O
        };
        let bw_refill_time = 100; // ms
        let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);
        let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();
        let blk_rate_limiter_test_img =
            String::from(test_img_dir.as_path().join("blk.img").to_str().unwrap());

        // Create the test block image
        assert!(exec_host_command_output(&format!(
            "dd if=/dev/zero of={blk_rate_limiter_test_img} bs=1M count=1024"
        ))
        .status
        .success());

        let test_blk_params = if bandwidth {
            format!(
                "path={blk_rate_limiter_test_img},bw_size={bw_size},bw_refill_time={bw_refill_time}"
            )
        } else {
            format!(
                "path={blk_rate_limiter_test_img},ops_size={bw_size},ops_refill_time={bw_refill_time}"
            )
        };

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={num_queues}")])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .args([
                "--disk",
                format!(
                    "path={}",
                    guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
                )
                .as_str(),
                "--disk",
                format!(
                    "path={}",
                    guest.disk_config.disk(DiskType::CloudInit).unwrap()
                )
                .as_str(),
                "--disk",
                test_blk_params.as_str(),
            ])
            .default_net()
            .args(["--api-socket", &api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            let fio_command = format!(
                "sudo fio --filename=/dev/vdc --name=test --output-format=json \
                --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \
                --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}"
            );
            let output = guest.ssh_command(&fio_command).unwrap();

            // Parse fio output
            let measured_rate = if bandwidth {
                parse_fio_output(&output, &fio_ops, num_queues).unwrap()
            } else {
                parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap()
            };
            assert!(check_rate_limit(measured_rate, limit_rate, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_block_bandwidth() {
        _test_rate_limiter_block(true)
    }

    #[test]
    fn test_rate_limiter_block_iops() {
        _test_rate_limiter_block(false)
    }
}