// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::undocumented_unsafe_blocks)]
// When enabling the `mshv` feature, we skip quite a few tests and
// hence have known dead-code. This annotation silences dead-code
// related warnings for our quality workflow to pass.
#![allow(dead_code)]

extern crate test_infra;

use net_util::MacAddr;
use std::collections::HashMap;
use std::fs;
use std::io;
use std::io::BufRead;
use std::io::Read;
use std::io::Seek;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::Receiver;
use std::sync::Mutex;
use std::thread;
use test_infra::*;
use vmm_sys_util::{tempdir::TempDir, tempfile::TempFile};
use wait_timeout::ChildExt;

// Constant taken from the VMM crate.
const MAX_NUM_PCI_SEGMENTS: u16 = 96;

#[cfg(target_arch = "x86_64")]
mod x86_64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
    pub const JAMMY_NVIDIA_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-nvidia.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20230119-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw";
    pub const OVMF_NAME: &str = "CLOUDHV.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
}

#[cfg(target_arch = "x86_64")]
use x86_64::*;

#[cfg(target_arch = "aarch64")]
mod aarch64 {
    pub const BIONIC_IMAGE_NAME: &str = "bionic-server-cloudimg-arm64.raw";
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
    pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw";
    pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
    pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true";
}

#[cfg(target_arch = "aarch64")]
use aarch64::*;

const DIRECT_KERNEL_BOOT_CMDLINE: &str =
    "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1";

const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server";

// This enum exists to make it more convenient to
// implement tests for both D-Bus and REST APIs.
enum TargetApi {
    // API socket
    HttpApi(String),
    // well known service name, object path
    DBusApi(String, String),
}

impl TargetApi {
    fn new_http_api(tmp_dir: &TempDir) -> Self {
        Self::HttpApi(temp_api_path(tmp_dir))
    }

    fn new_dbus_api(tmp_dir: &TempDir) -> Self {
        // `tmp_dir` is in the form of "/tmp/chXXXXXX"
        // and we take the `chXXXXXX` part as a unique identifier for the guest
        let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap();

        Self::DBusApi(
            format!("org.cloudhypervisor.{id}"),
            format!("/org/cloudhypervisor/{id}"),
        )
    }

    fn guest_args(&self) -> Vec<String> {
        match self {
            TargetApi::HttpApi(api_socket) => {
                vec![format!("--api-socket={}", api_socket.as_str())]
            }
            TargetApi::DBusApi(service_name, object_path) => {
                vec![
                    format!("--dbus-service-name={}", service_name.as_str()),
                    format!("--dbus-object-path={}", object_path.as_str()),
                ]
            }
        }
    }

    fn remote_args(&self) -> Vec<String> {
        // `guest_args` and `remote_args` are consistent with each other
        self.guest_args()
    }

    fn remote_command(&self, command: &str, arg: Option<&str>) -> bool {
        let mut cmd = Command::new(clh_command("ch-remote"));
        cmd.args(self.remote_args());
        cmd.arg(command);

        if let Some(arg) = arg {
            cmd.arg(arg);
        }

        let output = cmd.output().unwrap();
        if output.status.success() {
            true
        } else {
            eprintln!("Error running ch-remote command: {:?}", &cmd);
            let stderr = String::from_utf8_lossy(&output.stderr);
            eprintln!("stderr: {stderr}");
            false
        }
    }
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check that it looks as expected.
fn _test_api_create_boot(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    assert!(target_api.remote_command("create", Some(create_config),));

    // Then boot it
    assert!(target_api.remote_command("boot", None));
    thread::sleep(std::time::Duration::new(20, 0));

    let r = std::panic::catch_unwind(|| {
        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be shut down and then
// booted again
fn _test_api_shutdown(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shutdown without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shut down
        thread::sleep(std::time::Duration::new(20, 0));

        // Then shut it down
        assert!(target_api.remote_command("shutdown", None));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    let _ = child.kill();
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be deleted and then
// re-created and booted again.
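//
// For reference, `TargetApi::remote_command()` drives these steps through the
// `ch-remote` binary. Over the REST socket the sequence is roughly equivalent
// to the following shell session (the socket and config paths are illustrative
// examples, not the exact values used by the tests):
//
//   ch-remote --api-socket=/tmp/chXXXXXX/cloud-hypervisor.sock ping
//   ch-remote --api-socket=/tmp/chXXXXXX/cloud-hypervisor.sock create /tmp/chXXXXXX/config
//   ch-remote --api-socket=/tmp/chXXXXXX/cloud-hypervisor.sock boot
//   ch-remote --api-socket=/tmp/chXXXXXX/cloud-hypervisor.sock delete
//
// The D-Bus variant passes `--dbus-service-name` and `--dbus-object-path`
// instead of `--api-socket`, as built by `TargetApi::remote_args()`.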
258 fn _test_api_delete(target_api: TargetApi, guest: Guest) { 259 let mut child = GuestCommand::new(&guest) 260 .args(target_api.guest_args()) 261 .capture_output() 262 .spawn() 263 .unwrap(); 264 265 thread::sleep(std::time::Duration::new(1, 0)); 266 267 // Verify API server is running 268 assert!(target_api.remote_command("ping", None)); 269 270 // Create the VM first 271 let cpu_count: u8 = 4; 272 let request_body = guest.api_create_body( 273 cpu_count, 274 direct_kernel_boot_path().to_str().unwrap(), 275 DIRECT_KERNEL_BOOT_CMDLINE, 276 ); 277 let temp_config_path = guest.tmp_dir.as_path().join("config"); 278 std::fs::write(&temp_config_path, request_body).unwrap(); 279 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 280 281 let r = std::panic::catch_unwind(|| { 282 assert!(target_api.remote_command("create", Some(create_config))); 283 284 // Then boot it 285 assert!(target_api.remote_command("boot", None)); 286 287 guest.wait_vm_boot(None).unwrap(); 288 289 // Check that the VM booted as expected 290 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 291 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 292 293 // Sync and shutdown without powering off to prevent filesystem 294 // corruption. 295 guest.ssh_command("sync").unwrap(); 296 guest.ssh_command("sudo shutdown -H now").unwrap(); 297 298 // Wait for the guest to be fully shutdown 299 thread::sleep(std::time::Duration::new(20, 0)); 300 301 // Then delete it 302 assert!(target_api.remote_command("delete", None)); 303 304 assert!(target_api.remote_command("create", Some(create_config))); 305 306 // Then boot it again 307 assert!(target_api.remote_command("boot", None)); 308 309 guest.wait_vm_boot(None).unwrap(); 310 311 // Check that the VM booted as expected 312 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 313 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 314 }); 315 316 let _ = child.kill(); 317 let output = child.wait_with_output().unwrap(); 318 319 handle_child_output(r, &output); 320 } 321 322 // Start cloud-hypervisor with no VM parameters, only the API server running. 323 // From the API: Create a VM, boot it and check that it looks as expected. 324 // Then we pause the VM, check that it's no longer available. 325 // Finally we resume the VM and check that it's available. 
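// Note that pausing an already paused VM (or resuming a VM that is already
// running) is expected to fail, which the test below asserts by negating the
// result of the second `remote_command("pause"/"resume")` call.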
326 fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) { 327 let mut child = GuestCommand::new(&guest) 328 .args(target_api.guest_args()) 329 .capture_output() 330 .spawn() 331 .unwrap(); 332 333 thread::sleep(std::time::Duration::new(1, 0)); 334 335 // Verify API server is running 336 assert!(target_api.remote_command("ping", None)); 337 338 // Create the VM first 339 let cpu_count: u8 = 4; 340 let request_body = guest.api_create_body( 341 cpu_count, 342 direct_kernel_boot_path().to_str().unwrap(), 343 DIRECT_KERNEL_BOOT_CMDLINE, 344 ); 345 346 let temp_config_path = guest.tmp_dir.as_path().join("config"); 347 std::fs::write(&temp_config_path, request_body).unwrap(); 348 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 349 350 assert!(target_api.remote_command("create", Some(create_config))); 351 352 // Then boot it 353 assert!(target_api.remote_command("boot", None)); 354 thread::sleep(std::time::Duration::new(20, 0)); 355 356 let r = std::panic::catch_unwind(|| { 357 // Check that the VM booted as expected 358 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 359 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 360 361 // We now pause the VM 362 assert!(target_api.remote_command("pause", None)); 363 364 // Check pausing again fails 365 assert!(!target_api.remote_command("pause", None)); 366 367 thread::sleep(std::time::Duration::new(2, 0)); 368 369 // SSH into the VM should fail 370 assert!(ssh_command_ip( 371 "grep -c processor /proc/cpuinfo", 372 &guest.network.guest_ip, 373 2, 374 5 375 ) 376 .is_err()); 377 378 // Resume the VM 379 assert!(target_api.remote_command("resume", None)); 380 381 // Check resuming again fails 382 assert!(!target_api.remote_command("resume", None)); 383 384 thread::sleep(std::time::Duration::new(2, 0)); 385 386 // Now we should be able to SSH back in and get the right number of CPUs 387 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 388 }); 389 390 let _ = child.kill(); 391 let output = child.wait_with_output().unwrap(); 392 393 handle_child_output(r, &output); 394 } 395 396 fn _test_pty_interaction(pty_path: PathBuf) { 397 let mut cf = std::fs::OpenOptions::new() 398 .write(true) 399 .read(true) 400 .open(pty_path) 401 .unwrap(); 402 403 // Some dumb sleeps but we don't want to write 404 // before the console is up and we don't want 405 // to try and write the next line before the 406 // login process is ready. 
407 thread::sleep(std::time::Duration::new(5, 0)); 408 assert_eq!(cf.write(b"cloud\n").unwrap(), 6); 409 thread::sleep(std::time::Duration::new(2, 0)); 410 assert_eq!(cf.write(b"cloud123\n").unwrap(), 9); 411 thread::sleep(std::time::Duration::new(2, 0)); 412 assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22); 413 thread::sleep(std::time::Duration::new(2, 0)); 414 415 // read pty and ensure they have a login shell 416 // some fairly hacky workarounds to avoid looping 417 // forever in case the channel is blocked getting output 418 let ptyc = pty_read(cf); 419 let mut empty = 0; 420 let mut prev = String::new(); 421 loop { 422 thread::sleep(std::time::Duration::new(2, 0)); 423 match ptyc.try_recv() { 424 Ok(line) => { 425 empty = 0; 426 prev = prev + &line; 427 if prev.contains("test_pty_console") { 428 break; 429 } 430 } 431 Err(mpsc::TryRecvError::Empty) => { 432 empty += 1; 433 assert!(empty <= 5, "No login on pty"); 434 } 435 _ => { 436 panic!("No login on pty") 437 } 438 } 439 } 440 } 441 442 fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) { 443 let mut workload_path = dirs::home_dir().unwrap(); 444 workload_path.push("workloads"); 445 446 let mut virtiofsd_path = workload_path; 447 virtiofsd_path.push("virtiofsd"); 448 let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap()); 449 450 let virtiofsd_socket_path = 451 String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap()); 452 453 // Start the daemon 454 let child = Command::new(virtiofsd_path.as_str()) 455 .args(["--shared-dir", shared_dir]) 456 .args(["--socket-path", virtiofsd_socket_path.as_str()]) 457 .args(["--cache", "never"]) 458 .spawn() 459 .unwrap(); 460 461 thread::sleep(std::time::Duration::new(10, 0)); 462 463 (child, virtiofsd_socket_path) 464 } 465 466 fn prepare_vubd( 467 tmp_dir: &TempDir, 468 blk_img: &str, 469 num_queues: usize, 470 rdonly: bool, 471 direct: bool, 472 ) -> (std::process::Child, String) { 473 let mut workload_path = dirs::home_dir().unwrap(); 474 workload_path.push("workloads"); 475 476 let mut blk_file_path = workload_path; 477 blk_file_path.push(blk_img); 478 let blk_file_path = String::from(blk_file_path.to_str().unwrap()); 479 480 let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap()); 481 482 // Start the daemon 483 let child = Command::new(clh_command("vhost_user_block")) 484 .args([ 485 "--block-backend", 486 format!( 487 "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}" 488 ) 489 .as_str(), 490 ]) 491 .spawn() 492 .unwrap(); 493 494 thread::sleep(std::time::Duration::new(10, 0)); 495 496 (child, vubd_socket_path) 497 } 498 499 fn temp_vsock_path(tmp_dir: &TempDir) -> String { 500 String::from(tmp_dir.as_path().join("vsock").to_str().unwrap()) 501 } 502 503 fn temp_api_path(tmp_dir: &TempDir) -> String { 504 String::from( 505 tmp_dir 506 .as_path() 507 .join("cloud-hypervisor.sock") 508 .to_str() 509 .unwrap(), 510 ) 511 } 512 513 fn temp_event_monitor_path(tmp_dir: &TempDir) -> String { 514 String::from(tmp_dir.as_path().join("event.json").to_str().unwrap()) 515 } 516 517 // Creates the directory and returns the path. 
518 fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String { 519 let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap()); 520 std::fs::create_dir(&snapshot_dir).unwrap(); 521 snapshot_dir 522 } 523 524 fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String { 525 let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap()); 526 vmcore_file 527 } 528 529 // Creates the path for direct kernel boot and return the path. 530 // For x86_64, this function returns the vmlinux kernel path. 531 // For AArch64, this function returns the PE kernel path. 532 fn direct_kernel_boot_path() -> PathBuf { 533 let mut workload_path = dirs::home_dir().unwrap(); 534 workload_path.push("workloads"); 535 536 let mut kernel_path = workload_path; 537 #[cfg(target_arch = "x86_64")] 538 kernel_path.push("vmlinux"); 539 #[cfg(target_arch = "aarch64")] 540 kernel_path.push("Image"); 541 542 kernel_path 543 } 544 545 fn edk2_path() -> PathBuf { 546 let mut workload_path = dirs::home_dir().unwrap(); 547 workload_path.push("workloads"); 548 let mut edk2_path = workload_path; 549 edk2_path.push(OVMF_NAME); 550 551 edk2_path 552 } 553 554 fn cloud_hypervisor_release_path() -> String { 555 let mut workload_path = dirs::home_dir().unwrap(); 556 workload_path.push("workloads"); 557 558 let mut ch_release_path = workload_path; 559 #[cfg(target_arch = "x86_64")] 560 ch_release_path.push("cloud-hypervisor-static"); 561 #[cfg(target_arch = "aarch64")] 562 ch_release_path.push("cloud-hypervisor-static-aarch64"); 563 564 ch_release_path.into_os_string().into_string().unwrap() 565 } 566 567 fn prepare_vhost_user_net_daemon( 568 tmp_dir: &TempDir, 569 ip: &str, 570 tap: Option<&str>, 571 mtu: Option<u16>, 572 num_queues: usize, 573 client_mode: bool, 574 ) -> (std::process::Command, String) { 575 let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap()); 576 577 // Start the daemon 578 let mut net_params = format!( 579 "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}" 580 ); 581 582 if let Some(tap) = tap { 583 net_params.push_str(format!(",tap={tap}").as_str()); 584 } 585 586 if let Some(mtu) = mtu { 587 net_params.push_str(format!(",mtu={mtu}").as_str()); 588 } 589 590 let mut command = Command::new(clh_command("vhost_user_net")); 591 command.args(["--net-backend", net_params.as_str()]); 592 593 (command, vunet_socket_path) 594 } 595 596 fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) { 597 let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap()); 598 let swtpm_socket_path = String::from( 599 tmp_dir 600 .as_path() 601 .join("swtpm") 602 .join("swtpm.sock") 603 .to_str() 604 .unwrap(), 605 ); 606 std::fs::create_dir(&swtpm_tpm_dir).unwrap(); 607 608 let mut swtpm_command = Command::new("swtpm"); 609 let swtpm_args = [ 610 "socket", 611 "--tpmstate", 612 &format!("dir={swtpm_tpm_dir}"), 613 "--ctrl", 614 &format!("type=unixio,path={swtpm_socket_path}"), 615 "--flags", 616 "startup-clear", 617 "--tpm2", 618 ]; 619 swtpm_command.args(swtpm_args); 620 621 (swtpm_command, swtpm_socket_path) 622 } 623 624 fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool { 625 let mut cmd = Command::new(clh_command("ch-remote")); 626 cmd.args([&format!("--api-socket={api_socket}"), command]); 627 628 if let Some(arg) = arg { 629 cmd.arg(arg); 630 } 631 let output = cmd.output().unwrap(); 632 if output.status.success() { 
633 true 634 } else { 635 eprintln!("Error running ch-remote command: {:?}", &cmd); 636 let stderr = String::from_utf8_lossy(&output.stderr); 637 eprintln!("stderr: {stderr}"); 638 false 639 } 640 } 641 642 fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) { 643 let mut cmd = Command::new(clh_command("ch-remote")); 644 cmd.args([&format!("--api-socket={api_socket}"), command]); 645 646 if let Some(arg) = arg { 647 cmd.arg(arg); 648 } 649 650 let output = cmd.output().expect("Failed to launch ch-remote"); 651 652 (output.status.success(), output.stdout) 653 } 654 655 fn resize_command( 656 api_socket: &str, 657 desired_vcpus: Option<u8>, 658 desired_ram: Option<usize>, 659 desired_balloon: Option<usize>, 660 event_file: Option<&str>, 661 ) -> bool { 662 let mut cmd = Command::new(clh_command("ch-remote")); 663 cmd.args([&format!("--api-socket={api_socket}"), "resize"]); 664 665 if let Some(desired_vcpus) = desired_vcpus { 666 cmd.arg(format!("--cpus={desired_vcpus}")); 667 } 668 669 if let Some(desired_ram) = desired_ram { 670 cmd.arg(format!("--memory={desired_ram}")); 671 } 672 673 if let Some(desired_balloon) = desired_balloon { 674 cmd.arg(format!("--balloon={desired_balloon}")); 675 } 676 677 let ret = cmd.status().expect("Failed to launch ch-remote").success(); 678 679 if let Some(event_path) = event_file { 680 let latest_events = [ 681 &MetaEvent { 682 event: "resizing".to_string(), 683 device_id: None, 684 }, 685 &MetaEvent { 686 event: "resized".to_string(), 687 device_id: None, 688 }, 689 ]; 690 // See: #5938 691 thread::sleep(std::time::Duration::new(1, 0)); 692 assert!(check_latest_events_exact(&latest_events, event_path)); 693 } 694 695 ret 696 } 697 698 fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool { 699 let mut cmd = Command::new(clh_command("ch-remote")); 700 cmd.args([ 701 &format!("--api-socket={api_socket}"), 702 "resize-zone", 703 &format!("--id={id}"), 704 &format!("--size={desired_size}"), 705 ]); 706 707 cmd.status().expect("Failed to launch ch-remote").success() 708 } 709 710 // setup OVS-DPDK bridge and ports 711 fn setup_ovs_dpdk() { 712 // setup OVS-DPDK 713 assert!(exec_host_command_status("service openvswitch-switch start").success()); 714 assert!(exec_host_command_status("ovs-vsctl init").success()); 715 assert!( 716 exec_host_command_status("ovs-vsctl set Open_vSwitch . 
other_config:dpdk-init=true") 717 .success() 718 ); 719 assert!(exec_host_command_status("service openvswitch-switch restart").success()); 720 721 // Create OVS-DPDK bridge and ports 722 assert!(exec_host_command_status( 723 "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev", 724 ) 725 .success()); 726 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 727 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success()); 728 assert!(exec_host_command_status("ip link set up dev ovsbr0").success()); 729 assert!(exec_host_command_status("service openvswitch-switch restart").success()); 730 } 731 fn cleanup_ovs_dpdk() { 732 assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success()); 733 exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2"); 734 } 735 // Setup two guests and ensure they are connected through ovs-dpdk 736 fn setup_ovs_dpdk_guests( 737 guest1: &Guest, 738 guest2: &Guest, 739 api_socket: &str, 740 release_binary: bool, 741 ) -> (Child, Child) { 742 setup_ovs_dpdk(); 743 744 let clh_path = if !release_binary { 745 clh_command("cloud-hypervisor") 746 } else { 747 cloud_hypervisor_release_path() 748 }; 749 750 let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path) 751 .args(["--cpus", "boot=2"]) 752 .args(["--memory", "size=0,shared=on"]) 753 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"]) 754 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 755 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 756 .default_disks() 757 .args(["--net", guest1.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"]) 758 .capture_output() 759 .spawn() 760 .unwrap(); 761 762 #[cfg(target_arch = "x86_64")] 763 let guest_net_iface = "ens5"; 764 #[cfg(target_arch = "aarch64")] 765 let guest_net_iface = "enp0s5"; 766 767 let r = std::panic::catch_unwind(|| { 768 guest1.wait_vm_boot(None).unwrap(); 769 770 guest1 771 .ssh_command(&format!( 772 "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}" 773 )) 774 .unwrap(); 775 guest1 776 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 777 .unwrap(); 778 779 let guest_ip = guest1.network.guest_ip.clone(); 780 thread::spawn(move || { 781 ssh_command_ip( 782 "nc -l 12345", 783 &guest_ip, 784 DEFAULT_SSH_RETRIES, 785 DEFAULT_SSH_TIMEOUT, 786 ) 787 .unwrap(); 788 }); 789 }); 790 if r.is_err() { 791 cleanup_ovs_dpdk(); 792 793 let _ = child1.kill(); 794 let output = child1.wait_with_output().unwrap(); 795 handle_child_output(r, &output); 796 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 797 } 798 799 let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path) 800 .args(["--api-socket", api_socket]) 801 .args(["--cpus", "boot=2"]) 802 .args(["--memory", "size=0,shared=on"]) 803 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"]) 804 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 805 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 806 .default_disks() 807 .args(["--net", guest2.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"]) 808 
.capture_output() 809 .spawn() 810 .unwrap(); 811 812 let r = std::panic::catch_unwind(|| { 813 guest2.wait_vm_boot(None).unwrap(); 814 815 guest2 816 .ssh_command(&format!( 817 "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}" 818 )) 819 .unwrap(); 820 guest2 821 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 822 .unwrap(); 823 824 // Check the connection works properly between the two VMs 825 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 826 }); 827 if r.is_err() { 828 cleanup_ovs_dpdk(); 829 830 let _ = child1.kill(); 831 let _ = child2.kill(); 832 let output = child2.wait_with_output().unwrap(); 833 handle_child_output(r, &output); 834 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 835 } 836 837 (child1, child2) 838 } 839 840 enum FwType { 841 Ovmf, 842 RustHypervisorFirmware, 843 } 844 845 fn fw_path(_fw_type: FwType) -> String { 846 let mut workload_path = dirs::home_dir().unwrap(); 847 workload_path.push("workloads"); 848 849 let mut fw_path = workload_path; 850 #[cfg(target_arch = "aarch64")] 851 fw_path.push("CLOUDHV_EFI.fd"); 852 #[cfg(target_arch = "x86_64")] 853 { 854 match _fw_type { 855 FwType::Ovmf => fw_path.push(OVMF_NAME), 856 FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"), 857 } 858 } 859 860 fw_path.to_str().unwrap().to_string() 861 } 862 863 #[derive(Debug)] 864 struct MetaEvent { 865 event: String, 866 device_id: Option<String>, 867 } 868 869 impl MetaEvent { 870 pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool { 871 let mut matched = false; 872 if v["event"].as_str().unwrap() == self.event { 873 if let Some(device_id) = &self.device_id { 874 if v["properties"]["id"].as_str().unwrap() == device_id { 875 matched = true 876 } 877 } else { 878 matched = true; 879 } 880 } 881 matched 882 } 883 } 884 885 // Parse the event_monitor file based on the format that each event 886 // is followed by a double newline 887 fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> { 888 let content = fs::read(event_file).unwrap(); 889 let mut ret = Vec::new(); 890 for entry in String::from_utf8_lossy(&content) 891 .trim() 892 .split("\n\n") 893 .collect::<Vec<&str>>() 894 { 895 ret.push(serde_json::from_str(entry).unwrap()); 896 } 897 898 ret 899 } 900 901 // Return true if all events from the input 'expected_events' are matched sequentially 902 // with events from the 'event_file' 903 fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool { 904 let json_events = parse_event_file(event_file); 905 let len = expected_events.len(); 906 let mut idx = 0; 907 for e in &json_events { 908 if idx == len { 909 break; 910 } 911 if expected_events[idx].match_with_json_event(e) { 912 idx += 1; 913 } 914 } 915 916 let ret = idx == len; 917 918 if !ret { 919 eprintln!( 920 "\n\n==== Start 'check_sequential_events' failed ==== \ 921 \n\nexpected_events={:?}\nactual_events={:?} \ 922 \n\n==== End 'check_sequential_events' failed ====", 923 expected_events, json_events, 924 ); 925 } 926 927 ret 928 } 929 930 // Return true if all events from the input 'expected_events' are matched exactly 931 // with events from the 'event_file' 932 fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool { 933 let json_events = parse_event_file(event_file); 934 assert!(expected_events.len() <= json_events.len()); 935 let json_events = &json_events[..expected_events.len()]; 936 937 for (idx, e) in json_events.iter().enumerate() { 
938 if !expected_events[idx].match_with_json_event(e) { 939 eprintln!( 940 "\n\n==== Start 'check_sequential_events_exact' failed ==== \ 941 \n\nexpected_events={:?}\nactual_events={:?} \ 942 \n\n==== End 'check_sequential_events_exact' failed ====", 943 expected_events, json_events, 944 ); 945 946 return false; 947 } 948 } 949 950 true 951 } 952 953 // Return true if events from the input 'latest_events' are matched exactly 954 // with the most recent events from the 'event_file' 955 fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool { 956 let json_events = parse_event_file(event_file); 957 assert!(latest_events.len() <= json_events.len()); 958 let json_events = &json_events[(json_events.len() - latest_events.len())..]; 959 960 for (idx, e) in json_events.iter().enumerate() { 961 if !latest_events[idx].match_with_json_event(e) { 962 eprintln!( 963 "\n\n==== Start 'check_latest_events_exact' failed ==== \ 964 \n\nexpected_events={:?}\nactual_events={:?} \ 965 \n\n==== End 'check_latest_events_exact' failed ====", 966 latest_events, json_events, 967 ); 968 969 return false; 970 } 971 } 972 973 true 974 } 975 976 fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) { 977 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 978 let guest = Guest::new(Box::new(focal)); 979 let total_vcpus = threads_per_core * cores_per_package * packages; 980 let direct_kernel_boot_path = direct_kernel_boot_path(); 981 let mut kernel_path = direct_kernel_boot_path.to_str().unwrap(); 982 let fw_path = fw_path(FwType::RustHypervisorFirmware); 983 if use_fw { 984 kernel_path = fw_path.as_str(); 985 } 986 987 let mut child = GuestCommand::new(&guest) 988 .args([ 989 "--cpus", 990 &format!( 991 "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}" 992 ), 993 ]) 994 .args(["--memory", "size=512M"]) 995 .args(["--kernel", kernel_path]) 996 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 997 .default_disks() 998 .default_net() 999 .capture_output() 1000 .spawn() 1001 .unwrap(); 1002 1003 let r = std::panic::catch_unwind(|| { 1004 guest.wait_vm_boot(None).unwrap(); 1005 assert_eq!( 1006 guest.get_cpu_count().unwrap_or_default(), 1007 u32::from(total_vcpus) 1008 ); 1009 assert_eq!( 1010 guest 1011 .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1012 .unwrap() 1013 .trim() 1014 .parse::<u8>() 1015 .unwrap_or(0), 1016 threads_per_core 1017 ); 1018 1019 assert_eq!( 1020 guest 1021 .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1022 .unwrap() 1023 .trim() 1024 .parse::<u8>() 1025 .unwrap_or(0), 1026 cores_per_package 1027 ); 1028 1029 assert_eq!( 1030 guest 1031 .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1032 .unwrap() 1033 .trim() 1034 .parse::<u8>() 1035 .unwrap_or(0), 1036 packages 1037 ); 1038 1039 #[cfg(target_arch = "x86_64")] 1040 { 1041 let mut cpu_id = 0; 1042 for package_id in 0..packages { 1043 for core_id in 0..cores_per_package { 1044 for _ in 0..threads_per_core { 1045 assert_eq!( 1046 guest 1047 .ssh_command(&format!("cat /sys/devices/system/cpu/cpu{cpu_id}/topology/physical_package_id")) 1048 .unwrap() 1049 .trim() 1050 .parse::<u8>() 1051 .unwrap_or(0), 1052 package_id 1053 ); 1054 1055 assert_eq!( 1056 guest 1057 .ssh_command(&format!( 1058 "cat /sys/devices/system/cpu/cpu{cpu_id}/topology/core_id" 1059 )) 1060 .unwrap() 1061 .trim() 1062 .parse::<u8>() 1063 .unwrap_or(0), 1064 core_id 1065 ); 
1066 1067 cpu_id += 1; 1068 } 1069 } 1070 } 1071 } 1072 }); 1073 1074 let _ = child.kill(); 1075 let output = child.wait_with_output().unwrap(); 1076 1077 handle_child_output(r, &output); 1078 } 1079 1080 #[allow(unused_variables)] 1081 fn _test_guest_numa_nodes(acpi: bool) { 1082 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1083 let guest = Guest::new(Box::new(focal)); 1084 let api_socket = temp_api_path(&guest.tmp_dir); 1085 #[cfg(target_arch = "x86_64")] 1086 let kernel_path = direct_kernel_boot_path(); 1087 #[cfg(target_arch = "aarch64")] 1088 let kernel_path = if acpi { 1089 edk2_path() 1090 } else { 1091 direct_kernel_boot_path() 1092 }; 1093 1094 let mut child = GuestCommand::new(&guest) 1095 .args(["--cpus", "boot=6,max=12"]) 1096 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 1097 .args([ 1098 "--memory-zone", 1099 "id=mem0,size=1G,hotplug_size=3G", 1100 "id=mem1,size=2G,hotplug_size=3G", 1101 "id=mem2,size=3G,hotplug_size=3G", 1102 ]) 1103 .args([ 1104 "--numa", 1105 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 1106 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 1107 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 1108 ]) 1109 .args(["--kernel", kernel_path.to_str().unwrap()]) 1110 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1111 .args(["--api-socket", &api_socket]) 1112 .capture_output() 1113 .default_disks() 1114 .default_net() 1115 .spawn() 1116 .unwrap(); 1117 1118 let r = std::panic::catch_unwind(|| { 1119 guest.wait_vm_boot(None).unwrap(); 1120 1121 guest.check_numa_common( 1122 Some(&[960_000, 1_920_000, 2_880_000]), 1123 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 1124 Some(&["10 15 20", "20 10 25", "25 30 10"]), 1125 ); 1126 1127 // AArch64 currently does not support hotplug, and therefore we only 1128 // test hotplug-related function on x86_64 here. 1129 #[cfg(target_arch = "x86_64")] 1130 { 1131 guest.enable_memory_hotplug(); 1132 1133 // Resize every memory zone and check each associated NUMA node 1134 // has been assigned the right amount of memory. 1135 resize_zone_command(&api_socket, "mem0", "4G"); 1136 resize_zone_command(&api_socket, "mem1", "4G"); 1137 resize_zone_command(&api_socket, "mem2", "4G"); 1138 // Resize to the maximum amount of CPUs and check each NUMA 1139 // node has been assigned the right CPUs set. 
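            // As a rough illustration, `resize_command()` below shells out to
            // something like `ch-remote --api-socket=<api_socket> resize --cpus=12`
            // (see its definition earlier in this file); the exact invocation is
            // an example, the helper builds the arguments itself.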
1140 resize_command(&api_socket, Some(12), None, None, None); 1141 thread::sleep(std::time::Duration::new(5, 0)); 1142 1143 guest.check_numa_common( 1144 Some(&[3_840_000, 3_840_000, 3_840_000]), 1145 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 1146 None, 1147 ); 1148 } 1149 }); 1150 1151 let _ = child.kill(); 1152 let output = child.wait_with_output().unwrap(); 1153 1154 handle_child_output(r, &output); 1155 } 1156 1157 #[allow(unused_variables)] 1158 fn _test_power_button(acpi: bool) { 1159 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1160 let guest = Guest::new(Box::new(focal)); 1161 let mut cmd = GuestCommand::new(&guest); 1162 let api_socket = temp_api_path(&guest.tmp_dir); 1163 1164 #[cfg(target_arch = "x86_64")] 1165 let kernel_path = direct_kernel_boot_path(); 1166 #[cfg(target_arch = "aarch64")] 1167 let kernel_path = if acpi { 1168 edk2_path() 1169 } else { 1170 direct_kernel_boot_path() 1171 }; 1172 1173 cmd.args(["--cpus", "boot=1"]) 1174 .args(["--memory", "size=512M"]) 1175 .args(["--kernel", kernel_path.to_str().unwrap()]) 1176 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1177 .capture_output() 1178 .default_disks() 1179 .default_net() 1180 .args(["--api-socket", &api_socket]); 1181 1182 let child = cmd.spawn().unwrap(); 1183 1184 let r = std::panic::catch_unwind(|| { 1185 guest.wait_vm_boot(None).unwrap(); 1186 assert!(remote_command(&api_socket, "power-button", None)); 1187 }); 1188 1189 let output = child.wait_with_output().unwrap(); 1190 assert!(output.status.success()); 1191 handle_child_output(r, &output); 1192 } 1193 1194 type PrepareNetDaemon = dyn Fn( 1195 &TempDir, 1196 &str, 1197 Option<&str>, 1198 Option<u16>, 1199 usize, 1200 bool, 1201 ) -> (std::process::Command, String); 1202 1203 fn test_vhost_user_net( 1204 tap: Option<&str>, 1205 num_queues: usize, 1206 prepare_daemon: &PrepareNetDaemon, 1207 generate_host_mac: bool, 1208 client_mode_daemon: bool, 1209 ) { 1210 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1211 let guest = Guest::new(Box::new(focal)); 1212 let api_socket = temp_api_path(&guest.tmp_dir); 1213 1214 let kernel_path = direct_kernel_boot_path(); 1215 1216 let host_mac = if generate_host_mac { 1217 Some(MacAddr::local_random()) 1218 } else { 1219 None 1220 }; 1221 1222 let mtu = Some(3000); 1223 1224 let (mut daemon_command, vunet_socket_path) = prepare_daemon( 1225 &guest.tmp_dir, 1226 &guest.network.host_ip, 1227 tap, 1228 mtu, 1229 num_queues, 1230 client_mode_daemon, 1231 ); 1232 1233 let net_params = format!( 1234 "vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000", 1235 guest.network.guest_mac, 1236 vunet_socket_path, 1237 num_queues, 1238 if let Some(host_mac) = host_mac { 1239 format!(",host_mac={host_mac}") 1240 } else { 1241 "".to_owned() 1242 }, 1243 if client_mode_daemon { 1244 "server" 1245 } else { 1246 "client" 1247 }, 1248 ); 1249 1250 let mut ch_command = GuestCommand::new(&guest); 1251 ch_command 1252 .args(["--cpus", format!("boot={}", num_queues / 2).as_str()]) 1253 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1254 .args(["--kernel", kernel_path.to_str().unwrap()]) 1255 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1256 .default_disks() 1257 .args(["--net", net_params.as_str()]) 1258 .args(["--api-socket", &api_socket]) 1259 .capture_output(); 1260 1261 let mut daemon_child: std::process::Child; 1262 let mut child: std::process::Child; 1263 1264 if client_mode_daemon { 1265 child = 
ch_command.spawn().unwrap();
        // Make sure the VMM is waiting for the backend to connect
        thread::sleep(std::time::Duration::new(10, 0));
        daemon_child = daemon_command.spawn().unwrap();
    } else {
        daemon_child = daemon_command.spawn().unwrap();
        // Make sure the backend is waiting for the VMM to connect
        thread::sleep(std::time::Duration::new(10, 0));
        child = ch_command.spawn().unwrap();
    }

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if let Some(tap_name) = tap {
            let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}"));
            assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
        }

        if let Some(host_mac) = host_mac {
            let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}"));
            assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
        }

        #[cfg(target_arch = "aarch64")]
        let iface = "enp0s4";
        #[cfg(target_arch = "x86_64")]
        let iface = "ens4";

        assert_eq!(
            guest
                .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
                .unwrap()
                .trim(),
            "3000"
        );

        // 1 network interface + default localhost ==> 2 interfaces
        // It's important to note that this test is fully exercising the
        // vhost-user-net implementation and the associated backend since
        // it does not define any --net network interface. That means all
        // the ssh communication in that test happens through the network
        // interface backed by vhost-user-net.
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // The following pci devices will appear on guest with PCI-MSI
        // interrupt vectors assigned.
        // 1 virtio-console with 3 vectors: config, Rx, Tx
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-rng with 2 vectors: config, Request
        // Since virtio-net has 2 queue pairs, its vectors are as follows:
        // 1 virtio-net with 5 vectors: config, Rx (2), Tx (2)
        // Based on the above, the total vectors should be 14.
        #[cfg(target_arch = "x86_64")]
        let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
        #[cfg(target_arch = "aarch64")]
        let grep_cmd = "grep -c ITS-MSI /proc/interrupts";
        assert_eq!(
            guest
                .ssh_command(grep_cmd)
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            10 + (num_queues as u32)
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            // Here by simply checking the size (through ssh), we validate
            // the connection is still working, which means vhost-user-net
            // keeps working after the resize.
1355 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1356 } 1357 }); 1358 1359 let _ = child.kill(); 1360 let output = child.wait_with_output().unwrap(); 1361 1362 thread::sleep(std::time::Duration::new(5, 0)); 1363 let _ = daemon_child.kill(); 1364 let _ = daemon_child.wait(); 1365 1366 handle_child_output(r, &output); 1367 } 1368 1369 type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String); 1370 1371 fn test_vhost_user_blk( 1372 num_queues: usize, 1373 readonly: bool, 1374 direct: bool, 1375 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>, 1376 ) { 1377 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1378 let guest = Guest::new(Box::new(focal)); 1379 let api_socket = temp_api_path(&guest.tmp_dir); 1380 1381 let kernel_path = direct_kernel_boot_path(); 1382 1383 let (blk_params, daemon_child) = { 1384 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap(); 1385 // Start the daemon 1386 let (daemon_child, vubd_socket_path) = 1387 prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct); 1388 1389 ( 1390 format!( 1391 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128", 1392 ), 1393 Some(daemon_child), 1394 ) 1395 }; 1396 1397 let mut child = GuestCommand::new(&guest) 1398 .args(["--cpus", format!("boot={num_queues}").as_str()]) 1399 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1400 .args(["--kernel", kernel_path.to_str().unwrap()]) 1401 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1402 .args([ 1403 "--disk", 1404 format!( 1405 "path={}", 1406 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 1407 ) 1408 .as_str(), 1409 format!( 1410 "path={}", 1411 guest.disk_config.disk(DiskType::CloudInit).unwrap() 1412 ) 1413 .as_str(), 1414 blk_params.as_str(), 1415 ]) 1416 .default_net() 1417 .args(["--api-socket", &api_socket]) 1418 .capture_output() 1419 .spawn() 1420 .unwrap(); 1421 1422 let r = std::panic::catch_unwind(|| { 1423 guest.wait_vm_boot(None).unwrap(); 1424 1425 // Check both if /dev/vdc exists and if the block size is 16M. 1426 assert_eq!( 1427 guest 1428 .ssh_command("lsblk | grep vdc | grep -c 16M") 1429 .unwrap() 1430 .trim() 1431 .parse::<u32>() 1432 .unwrap_or_default(), 1433 1 1434 ); 1435 1436 // Check if this block is RO or RW. 1437 assert_eq!( 1438 guest 1439 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 1440 .unwrap() 1441 .trim() 1442 .parse::<u32>() 1443 .unwrap_or_default(), 1444 readonly as u32 1445 ); 1446 1447 // Check if the number of queues in /sys/block/vdc/mq matches the 1448 // expected num_queues. 1449 assert_eq!( 1450 guest 1451 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 1452 .unwrap() 1453 .trim() 1454 .parse::<u32>() 1455 .unwrap_or_default(), 1456 num_queues as u32 1457 ); 1458 1459 // Mount the device 1460 let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" }; 1461 guest.ssh_command("mkdir mount_image").unwrap(); 1462 guest 1463 .ssh_command( 1464 format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(), 1465 ) 1466 .unwrap(); 1467 1468 // Check the content of the block device. The file "foo" should 1469 // contain "bar". 1470 assert_eq!( 1471 guest.ssh_command("cat mount_image/foo").unwrap().trim(), 1472 "bar" 1473 ); 1474 1475 // ACPI feature is needed. 
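        // As an illustrative note: the resize below asks for 1024 << 20 bytes
        // (1 GiB) via `resize_command()`, which is roughly equivalent to running
        // `ch-remote --api-socket=<api_socket> resize --memory=1073741824`.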
1476 #[cfg(target_arch = "x86_64")] 1477 { 1478 guest.enable_memory_hotplug(); 1479 1480 // Add RAM to the VM 1481 let desired_ram = 1024 << 20; 1482 resize_command(&api_socket, None, Some(desired_ram), None, None); 1483 1484 thread::sleep(std::time::Duration::new(10, 0)); 1485 1486 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1487 1488 // Check again the content of the block device after the resize 1489 // has been performed. 1490 assert_eq!( 1491 guest.ssh_command("cat mount_image/foo").unwrap().trim(), 1492 "bar" 1493 ); 1494 } 1495 1496 // Unmount the device 1497 guest.ssh_command("sudo umount /dev/vdc").unwrap(); 1498 guest.ssh_command("rm -r mount_image").unwrap(); 1499 }); 1500 1501 let _ = child.kill(); 1502 let output = child.wait_with_output().unwrap(); 1503 1504 if let Some(mut daemon_child) = daemon_child { 1505 thread::sleep(std::time::Duration::new(5, 0)); 1506 let _ = daemon_child.kill(); 1507 let _ = daemon_child.wait(); 1508 } 1509 1510 handle_child_output(r, &output); 1511 } 1512 1513 fn test_boot_from_vhost_user_blk( 1514 num_queues: usize, 1515 readonly: bool, 1516 direct: bool, 1517 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>, 1518 ) { 1519 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1520 let guest = Guest::new(Box::new(focal)); 1521 1522 let kernel_path = direct_kernel_boot_path(); 1523 1524 let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap(); 1525 1526 let (blk_boot_params, daemon_child) = { 1527 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap(); 1528 // Start the daemon 1529 let (daemon_child, vubd_socket_path) = prepare_daemon( 1530 &guest.tmp_dir, 1531 disk_path.as_str(), 1532 num_queues, 1533 readonly, 1534 direct, 1535 ); 1536 1537 ( 1538 format!( 1539 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128", 1540 ), 1541 Some(daemon_child), 1542 ) 1543 }; 1544 1545 let mut child = GuestCommand::new(&guest) 1546 .args(["--cpus", format!("boot={num_queues}").as_str()]) 1547 .args(["--memory", "size=512M,shared=on"]) 1548 .args(["--kernel", kernel_path.to_str().unwrap()]) 1549 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1550 .args([ 1551 "--disk", 1552 blk_boot_params.as_str(), 1553 format!( 1554 "path={}", 1555 guest.disk_config.disk(DiskType::CloudInit).unwrap() 1556 ) 1557 .as_str(), 1558 ]) 1559 .default_net() 1560 .capture_output() 1561 .spawn() 1562 .unwrap(); 1563 1564 let r = std::panic::catch_unwind(|| { 1565 guest.wait_vm_boot(None).unwrap(); 1566 1567 // Just check the VM booted correctly. 
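        // (The guest was started with `--cpus boot=<num_queues>` above, so the
        // reported vCPU count is expected to match `num_queues`.)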
1568 assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32); 1569 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 1570 }); 1571 let _ = child.kill(); 1572 let output = child.wait_with_output().unwrap(); 1573 1574 if let Some(mut daemon_child) = daemon_child { 1575 thread::sleep(std::time::Duration::new(5, 0)); 1576 let _ = daemon_child.kill(); 1577 let _ = daemon_child.wait(); 1578 } 1579 1580 handle_child_output(r, &output); 1581 } 1582 1583 fn _test_virtio_fs( 1584 prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String), 1585 hotplug: bool, 1586 pci_segment: Option<u16>, 1587 ) { 1588 #[cfg(target_arch = "aarch64")] 1589 let focal_image = if hotplug { 1590 FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string() 1591 } else { 1592 FOCAL_IMAGE_NAME.to_string() 1593 }; 1594 #[cfg(target_arch = "x86_64")] 1595 let focal_image = FOCAL_IMAGE_NAME.to_string(); 1596 let focal = UbuntuDiskConfig::new(focal_image); 1597 let guest = Guest::new(Box::new(focal)); 1598 let api_socket = temp_api_path(&guest.tmp_dir); 1599 1600 let mut workload_path = dirs::home_dir().unwrap(); 1601 workload_path.push("workloads"); 1602 1603 let mut shared_dir = workload_path; 1604 shared_dir.push("shared_dir"); 1605 1606 #[cfg(target_arch = "x86_64")] 1607 let kernel_path = direct_kernel_boot_path(); 1608 #[cfg(target_arch = "aarch64")] 1609 let kernel_path = if hotplug { 1610 edk2_path() 1611 } else { 1612 direct_kernel_boot_path() 1613 }; 1614 1615 let (mut daemon_child, virtiofsd_socket_path) = 1616 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap()); 1617 1618 let mut guest_command = GuestCommand::new(&guest); 1619 guest_command 1620 .args(["--cpus", "boot=1"]) 1621 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1622 .args(["--kernel", kernel_path.to_str().unwrap()]) 1623 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1624 .default_disks() 1625 .default_net() 1626 .args(["--api-socket", &api_socket]); 1627 if pci_segment.is_some() { 1628 guest_command.args([ 1629 "--platform", 1630 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 1631 ]); 1632 } 1633 1634 let fs_params = format!( 1635 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}", 1636 virtiofsd_socket_path, 1637 if let Some(pci_segment) = pci_segment { 1638 format!(",pci_segment={pci_segment}") 1639 } else { 1640 "".to_owned() 1641 } 1642 ); 1643 1644 if !hotplug { 1645 guest_command.args(["--fs", fs_params.as_str()]); 1646 } 1647 1648 let mut child = guest_command.capture_output().spawn().unwrap(); 1649 1650 let r = std::panic::catch_unwind(|| { 1651 guest.wait_vm_boot(None).unwrap(); 1652 1653 if hotplug { 1654 // Add fs to the VM 1655 let (cmd_success, cmd_output) = 1656 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params)); 1657 assert!(cmd_success); 1658 1659 if let Some(pci_segment) = pci_segment { 1660 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 1661 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 1662 ))); 1663 } else { 1664 assert!(String::from_utf8_lossy(&cmd_output) 1665 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}")); 1666 } 1667 1668 thread::sleep(std::time::Duration::new(10, 0)); 1669 } 1670 1671 // Mount shared directory through virtio_fs filesystem 1672 guest 1673 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/") 1674 .unwrap(); 1675 1676 // Check file1 exists and its content is "foo" 1677 assert_eq!( 1678 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1679 "foo" 1680 ); 1681 
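        // (The CI `shared_dir` fixture is assumed to provide `file1` containing
        // "foo", `file3` containing "bar", and no `file2`; the checks around
        // here rely on that layout.)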
// Check file2 does not exist 1682 guest 1683 .ssh_command("[ ! -f 'mount_dir/file2' ] || true") 1684 .unwrap(); 1685 1686 // Check file3 exists and its content is "bar" 1687 assert_eq!( 1688 guest.ssh_command("cat mount_dir/file3").unwrap().trim(), 1689 "bar" 1690 ); 1691 1692 // ACPI feature is needed. 1693 #[cfg(target_arch = "x86_64")] 1694 { 1695 guest.enable_memory_hotplug(); 1696 1697 // Add RAM to the VM 1698 let desired_ram = 1024 << 20; 1699 resize_command(&api_socket, None, Some(desired_ram), None, None); 1700 1701 thread::sleep(std::time::Duration::new(30, 0)); 1702 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1703 1704 // After the resize, check again that file1 exists and its 1705 // content is "foo". 1706 assert_eq!( 1707 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1708 "foo" 1709 ); 1710 } 1711 1712 if hotplug { 1713 // Remove from VM 1714 guest.ssh_command("sudo umount mount_dir").unwrap(); 1715 assert!(remote_command(&api_socket, "remove-device", Some("myfs0"))); 1716 } 1717 }); 1718 1719 let (r, hotplug_daemon_child) = if r.is_ok() && hotplug { 1720 thread::sleep(std::time::Duration::new(10, 0)); 1721 let (daemon_child, virtiofsd_socket_path) = 1722 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap()); 1723 1724 let r = std::panic::catch_unwind(|| { 1725 thread::sleep(std::time::Duration::new(10, 0)); 1726 let fs_params = format!( 1727 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}", 1728 virtiofsd_socket_path, 1729 if let Some(pci_segment) = pci_segment { 1730 format!(",pci_segment={pci_segment}") 1731 } else { 1732 "".to_owned() 1733 } 1734 ); 1735 1736 // Add back and check it works 1737 let (cmd_success, cmd_output) = 1738 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params)); 1739 assert!(cmd_success); 1740 if let Some(pci_segment) = pci_segment { 1741 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 1742 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 1743 ))); 1744 } else { 1745 assert!(String::from_utf8_lossy(&cmd_output) 1746 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}")); 1747 } 1748 1749 thread::sleep(std::time::Duration::new(10, 0)); 1750 // Mount shared directory through virtio_fs filesystem 1751 guest 1752 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/") 1753 .unwrap(); 1754 1755 // Check file1 exists and its content is "foo" 1756 assert_eq!( 1757 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1758 "foo" 1759 ); 1760 }); 1761 1762 (r, Some(daemon_child)) 1763 } else { 1764 (r, None) 1765 }; 1766 1767 let _ = child.kill(); 1768 let output = child.wait_with_output().unwrap(); 1769 1770 let _ = daemon_child.kill(); 1771 let _ = daemon_child.wait(); 1772 1773 if let Some(mut daemon_child) = hotplug_daemon_child { 1774 let _ = daemon_child.kill(); 1775 let _ = daemon_child.wait(); 1776 } 1777 1778 handle_child_output(r, &output); 1779 } 1780 1781 fn test_virtio_pmem(discard_writes: bool, specify_size: bool) { 1782 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1783 let guest = Guest::new(Box::new(focal)); 1784 1785 let kernel_path = direct_kernel_boot_path(); 1786 1787 let pmem_temp_file = TempFile::new().unwrap(); 1788 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 1789 1790 std::process::Command::new("mkfs.ext4") 1791 .arg(pmem_temp_file.as_path()) 1792 .output() 1793 .expect("Expect creating disk image to succeed"); 1794 1795 let mut child = GuestCommand::new(&guest) 1796 .args(["--cpus", 
"boot=1"]) 1797 .args(["--memory", "size=512M"]) 1798 .args(["--kernel", kernel_path.to_str().unwrap()]) 1799 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1800 .default_disks() 1801 .default_net() 1802 .args([ 1803 "--pmem", 1804 format!( 1805 "file={}{}{}", 1806 pmem_temp_file.as_path().to_str().unwrap(), 1807 if specify_size { ",size=128M" } else { "" }, 1808 if discard_writes { 1809 ",discard_writes=on" 1810 } else { 1811 "" 1812 } 1813 ) 1814 .as_str(), 1815 ]) 1816 .capture_output() 1817 .spawn() 1818 .unwrap(); 1819 1820 let r = std::panic::catch_unwind(|| { 1821 guest.wait_vm_boot(None).unwrap(); 1822 1823 // Check for the presence of /dev/pmem0 1824 assert_eq!( 1825 guest.ssh_command("ls /dev/pmem0").unwrap().trim(), 1826 "/dev/pmem0" 1827 ); 1828 1829 // Check changes persist after reboot 1830 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1831 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 1832 guest 1833 .ssh_command("echo test123 | sudo tee /mnt/test") 1834 .unwrap(); 1835 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 1836 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 1837 1838 guest.reboot_linux(0, None); 1839 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1840 assert_eq!( 1841 guest 1842 .ssh_command("sudo cat /mnt/test || true") 1843 .unwrap() 1844 .trim(), 1845 if discard_writes { "" } else { "test123" } 1846 ); 1847 }); 1848 1849 let _ = child.kill(); 1850 let output = child.wait_with_output().unwrap(); 1851 1852 handle_child_output(r, &output); 1853 } 1854 1855 fn get_fd_count(pid: u32) -> usize { 1856 fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count() 1857 } 1858 1859 fn _test_virtio_vsock(hotplug: bool) { 1860 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1861 let guest = Guest::new(Box::new(focal)); 1862 1863 #[cfg(target_arch = "x86_64")] 1864 let kernel_path = direct_kernel_boot_path(); 1865 #[cfg(target_arch = "aarch64")] 1866 let kernel_path = if hotplug { 1867 edk2_path() 1868 } else { 1869 direct_kernel_boot_path() 1870 }; 1871 1872 let socket = temp_vsock_path(&guest.tmp_dir); 1873 let api_socket = temp_api_path(&guest.tmp_dir); 1874 1875 let mut cmd = GuestCommand::new(&guest); 1876 cmd.args(["--api-socket", &api_socket]); 1877 cmd.args(["--cpus", "boot=1"]); 1878 cmd.args(["--memory", "size=512M"]); 1879 cmd.args(["--kernel", kernel_path.to_str().unwrap()]); 1880 cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]); 1881 cmd.default_disks(); 1882 cmd.default_net(); 1883 1884 if !hotplug { 1885 cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]); 1886 } 1887 1888 let mut child = cmd.capture_output().spawn().unwrap(); 1889 1890 let r = std::panic::catch_unwind(|| { 1891 guest.wait_vm_boot(None).unwrap(); 1892 1893 if hotplug { 1894 let (cmd_success, cmd_output) = remote_command_w_output( 1895 &api_socket, 1896 "add-vsock", 1897 Some(format!("cid=3,socket={socket},id=test0").as_str()), 1898 ); 1899 assert!(cmd_success); 1900 assert!(String::from_utf8_lossy(&cmd_output) 1901 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 1902 thread::sleep(std::time::Duration::new(10, 0)); 1903 // Check adding a second one fails 1904 assert!(!remote_command( 1905 &api_socket, 1906 "add-vsock", 1907 Some("cid=1234,socket=/tmp/fail") 1908 )); 1909 } 1910 1911 // Validate vsock works as expected. 1912 guest.check_vsock(socket.as_str()); 1913 guest.reboot_linux(0, None); 1914 // Validate vsock still works after a reboot. 
1915 guest.check_vsock(socket.as_str()); 1916 1917 if hotplug { 1918 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 1919 } 1920 }); 1921 1922 let _ = child.kill(); 1923 let output = child.wait_with_output().unwrap(); 1924 1925 handle_child_output(r, &output); 1926 } 1927 1928 fn get_ksm_pages_shared() -> u32 { 1929 fs::read_to_string("/sys/kernel/mm/ksm/pages_shared") 1930 .unwrap() 1931 .trim() 1932 .parse::<u32>() 1933 .unwrap() 1934 } 1935 1936 fn test_memory_mergeable(mergeable: bool) { 1937 let memory_param = if mergeable { 1938 "mergeable=on" 1939 } else { 1940 "mergeable=off" 1941 }; 1942 1943 // We are assuming the rest of the system in our CI is not using mergeable memory 1944 let ksm_ps_init = get_ksm_pages_shared(); 1945 assert!(ksm_ps_init == 0); 1946 1947 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1948 let guest1 = Guest::new(Box::new(focal1)); 1949 let mut child1 = GuestCommand::new(&guest1) 1950 .args(["--cpus", "boot=1"]) 1951 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1952 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1953 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1954 .default_disks() 1955 .args(["--net", guest1.default_net_string().as_str()]) 1956 .args(["--serial", "tty", "--console", "off"]) 1957 .capture_output() 1958 .spawn() 1959 .unwrap(); 1960 1961 let r = std::panic::catch_unwind(|| { 1962 guest1.wait_vm_boot(None).unwrap(); 1963 }); 1964 if r.is_err() { 1965 let _ = child1.kill(); 1966 let output = child1.wait_with_output().unwrap(); 1967 handle_child_output(r, &output); 1968 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 1969 } 1970 1971 let ksm_ps_guest1 = get_ksm_pages_shared(); 1972 1973 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1974 let guest2 = Guest::new(Box::new(focal2)); 1975 let mut child2 = GuestCommand::new(&guest2) 1976 .args(["--cpus", "boot=1"]) 1977 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1978 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1979 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1980 .default_disks() 1981 .args(["--net", guest2.default_net_string().as_str()]) 1982 .args(["--serial", "tty", "--console", "off"]) 1983 .capture_output() 1984 .spawn() 1985 .unwrap(); 1986 1987 let r = std::panic::catch_unwind(|| { 1988 guest2.wait_vm_boot(None).unwrap(); 1989 let ksm_ps_guest2 = get_ksm_pages_shared(); 1990 1991 if mergeable { 1992 println!( 1993 "ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'" 1994 ); 1995 // We are expecting the number of shared pages to increase as the number of VM increases 1996 assert!(ksm_ps_guest1 < ksm_ps_guest2); 1997 } else { 1998 assert!(ksm_ps_guest1 == 0); 1999 assert!(ksm_ps_guest2 == 0); 2000 } 2001 }); 2002 2003 let _ = child1.kill(); 2004 let _ = child2.kill(); 2005 2006 let output = child1.wait_with_output().unwrap(); 2007 child2.wait().unwrap(); 2008 2009 handle_child_output(r, &output); 2010 } 2011 2012 fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> { 2013 let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap(); 2014 let reader = io::BufReader::new(smaps); 2015 2016 let mut skip_map: bool = false; 2017 let mut region_name: String = "".to_string(); 2018 let mut region_maps = HashMap::new(); 2019 for line in reader.lines() { 2020 let l = line.unwrap(); 2021 2022 if l.contains('-') { 2023 let 
values: Vec<&str> = l.split_whitespace().collect(); 2024 region_name = values.last().unwrap().trim().to_string(); 2025 if region_name == "0" { 2026 region_name = "anonymous".to_string() 2027 } 2028 } 2029 2030 // Each section begins with something that looks like: 2031 // Size: 2184 kB 2032 if l.starts_with("Size:") { 2033 let values: Vec<&str> = l.split_whitespace().collect(); 2034 let map_size = values[1].parse::<u32>().unwrap(); 2035 // We skip the assigned guest RAM map, its RSS is only 2036 // dependent on the guest actual memory usage. 2037 // Everything else can be added to the VMM overhead. 2038 skip_map = map_size >= guest_memory_size; 2039 continue; 2040 } 2041 2042 // If this is a map we're taking into account, then we only 2043 // count the RSS. The sum of all counted RSS is the VMM overhead. 2044 if !skip_map && l.starts_with("Rss:") { 2045 let values: Vec<&str> = l.split_whitespace().collect(); 2046 let value = values[1].trim().parse::<u32>().unwrap(); 2047 *region_maps.entry(region_name.clone()).or_insert(0) += value; 2048 } 2049 } 2050 2051 region_maps 2052 } 2053 2054 fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 { 2055 let mut total = 0; 2056 2057 for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) { 2058 eprintln!("{region_name}: {value}"); 2059 total += value; 2060 } 2061 2062 total 2063 } 2064 2065 fn process_rss_kib(pid: u32) -> usize { 2066 let command = format!("ps -q {pid} -o rss="); 2067 let rss = exec_host_command_output(&command); 2068 String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap() 2069 } 2070 2071 // 10MB is our maximum accepted overhead. 2072 const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024; 2073 2074 #[derive(PartialEq, Eq, PartialOrd)] 2075 struct Counters { 2076 rx_bytes: u64, 2077 rx_frames: u64, 2078 tx_bytes: u64, 2079 tx_frames: u64, 2080 read_bytes: u64, 2081 write_bytes: u64, 2082 read_ops: u64, 2083 write_ops: u64, 2084 } 2085 2086 fn get_counters(api_socket: &str) -> Counters { 2087 // Get counters 2088 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None); 2089 assert!(cmd_success); 2090 2091 let counters: HashMap<&str, HashMap<&str, u64>> = 2092 serde_json::from_slice(&cmd_output).unwrap_or_default(); 2093 2094 let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap(); 2095 let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap(); 2096 let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap(); 2097 let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap(); 2098 2099 let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap(); 2100 let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap(); 2101 let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap(); 2102 let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap(); 2103 2104 Counters { 2105 rx_bytes, 2106 rx_frames, 2107 tx_bytes, 2108 tx_frames, 2109 read_bytes, 2110 write_bytes, 2111 read_ops, 2112 write_ops, 2113 } 2114 } 2115 2116 fn pty_read(mut pty: std::fs::File) -> Receiver<String> { 2117 let (tx, rx) = mpsc::channel::<String>(); 2118 thread::spawn(move || loop { 2119 thread::sleep(std::time::Duration::new(1, 0)); 2120 let mut buf = [0; 512]; 2121 match pty.read(&mut buf) { 2122 Ok(_) => { 2123 let output = std::str::from_utf8(&buf).unwrap().to_string(); 2124 match tx.send(output) { 2125 Ok(_) => (), 2126 Err(_) => break, 2127 } 2128 } 2129 Err(_) => break, 2130 } 2131 }); 2132 rx 2133 
} 2134 2135 fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf { 2136 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2137 assert!(cmd_success); 2138 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2139 assert_eq!("Pty", info["config"][pty_type]["mode"]); 2140 PathBuf::from( 2141 info["config"][pty_type]["file"] 2142 .as_str() 2143 .expect("Missing pty path"), 2144 ) 2145 } 2146 2147 // VFIO test network setup. 2148 // We reserve a different IP class for it: 172.18.0.0/24. 2149 #[cfg(target_arch = "x86_64")] 2150 fn setup_vfio_network_interfaces() { 2151 // 'vfio-br0' 2152 assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success()); 2153 assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success()); 2154 assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success()); 2155 // 'vfio-tap0' 2156 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success()); 2157 assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success()); 2158 assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success()); 2159 // 'vfio-tap1' 2160 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success()); 2161 assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success()); 2162 assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success()); 2163 // 'vfio-tap2' 2164 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success()); 2165 assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success()); 2166 assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success()); 2167 // 'vfio-tap3' 2168 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success()); 2169 assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success()); 2170 assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success()); 2171 } 2172 2173 // Tear VFIO test network down 2174 #[cfg(target_arch = "x86_64")] 2175 fn cleanup_vfio_network_interfaces() { 2176 assert!(exec_host_command_status("sudo ip link del vfio-br0").success()); 2177 assert!(exec_host_command_status("sudo ip link del vfio-tap0").success()); 2178 assert!(exec_host_command_status("sudo ip link del vfio-tap1").success()); 2179 assert!(exec_host_command_status("sudo ip link del vfio-tap2").success()); 2180 assert!(exec_host_command_status("sudo ip link del vfio-tap3").success()); 2181 } 2182 2183 fn balloon_size(api_socket: &str) -> u64 { 2184 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2185 assert!(cmd_success); 2186 2187 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2188 let total_mem = &info["config"]["memory"]["size"] 2189 .to_string() 2190 .parse::<u64>() 2191 .unwrap(); 2192 let actual_mem = &info["memory_actual_size"] 2193 .to_string() 2194 .parse::<u64>() 2195 .unwrap(); 2196 total_mem - actual_mem 2197 } 2198 2199 fn vm_state(api_socket: &str) -> String { 2200 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2201 assert!(cmd_success); 2202 2203 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2204 let state = &info["state"].as_str().unwrap(); 2205 2206 state.to_string() 2207 } 2208 2209 // This test validates that it can find the 
virtio-iommu device. 2210 // It also verifies that both disks and the network card are attached to 2211 // the virtual IOMMU by looking at /sys/kernel/iommu_groups directory. 2212 // The last interesting part of this test is that it exercises the network 2213 // interface attached to the virtual IOMMU since this is the one used to 2214 // send all commands through SSH. 2215 fn _test_virtio_iommu(acpi: bool) { 2216 // Virtio-iommu support is available in recent kernels (v5.14+), but the kernel 2217 // in the Focal image is older. 2218 // So if ACPI is enabled on AArch64, we use a modified Focal image in which 2219 // the kernel binary has been updated. 2220 #[cfg(target_arch = "aarch64")] 2221 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 2222 #[cfg(target_arch = "x86_64")] 2223 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2224 let focal = UbuntuDiskConfig::new(focal_image); 2225 let guest = Guest::new(Box::new(focal)); 2226 2227 #[cfg(target_arch = "x86_64")] 2228 let kernel_path = direct_kernel_boot_path(); 2229 #[cfg(target_arch = "aarch64")] 2230 let kernel_path = if acpi { 2231 edk2_path() 2232 } else { 2233 direct_kernel_boot_path() 2234 }; 2235 2236 let mut child = GuestCommand::new(&guest) 2237 .args(["--cpus", "boot=1"]) 2238 .args(["--memory", "size=512M"]) 2239 .args(["--kernel", kernel_path.to_str().unwrap()]) 2240 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2241 .args([ 2242 "--disk", 2243 format!( 2244 "path={},iommu=on", 2245 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2246 ) 2247 .as_str(), 2248 format!( 2249 "path={},iommu=on", 2250 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2251 ) 2252 .as_str(), 2253 ]) 2254 .args(["--net", guest.default_net_string_w_iommu().as_str()]) 2255 .capture_output() 2256 .spawn() 2257 .unwrap(); 2258 2259 let r = std::panic::catch_unwind(|| { 2260 guest.wait_vm_boot(None).unwrap(); 2261 2262 // Verify the virtio-iommu device is present. 2263 assert!(guest 2264 .does_device_vendor_pair_match("0x1057", "0x1af4") 2265 .unwrap_or_default()); 2266 2267 // On AArch64, if the guest system boots from FDT, the IOMMU behavior differs 2268 // slightly from the ACPI case. 2269 // All devices on the PCI bus will be attached to the virtual IOMMU, except the 2270 // virtio-iommu device itself. So these devices will all be added to IOMMU groups, 2271 // and appear under the folder '/sys/kernel/iommu_groups/'. 2272 // As a result, with FDT, IOMMU group '0' contains "0000:00:01.0", which is the 2273 // console, and the first disk "0000:00:02.0" is in group '1'. 2274 // With ACPI, the console device is not attached to the IOMMU, so IOMMU group '0' 2275 // contains "0000:00:02.0", which is the first disk. 2276 // 2277 // Verify the iommu group of the first disk. 2278 let iommu_group = !acpi as i32; 2279 assert_eq!( 2280 guest 2281 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2282 .unwrap() 2283 .trim(), 2284 "0000:00:02.0" 2285 ); 2286 2287 // Verify the iommu group of the second disk. 2288 let iommu_group = if acpi { 1 } else { 2 }; 2289 assert_eq!( 2290 guest 2291 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2292 .unwrap() 2293 .trim(), 2294 "0000:00:03.0" 2295 ); 2296 2297 // Verify the iommu group of the network card.
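// Summary of the group assignment checked by the three assertions in this closure
// (comment added for clarity; derived from the explanation above, groups being
// allocated in BDF order):
//
//   device                     FDT boot    ACPI boot
//   console  0000:00:01.0      group 0     (not attached)
//   disk 1   0000:00:02.0      group 1     group 0
//   disk 2   0000:00:03.0      group 2     group 1
//   net      0000:00:04.0      group 3     group 2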
2298 let iommu_group = if acpi { 2 } else { 3 }; 2299 assert_eq!( 2300 guest 2301 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2302 .unwrap() 2303 .trim(), 2304 "0000:00:04.0" 2305 ); 2306 }); 2307 2308 let _ = child.kill(); 2309 let output = child.wait_with_output().unwrap(); 2310 2311 handle_child_output(r, &output); 2312 } 2313 2314 fn get_reboot_count(guest: &Guest) -> u32 { 2315 guest 2316 .ssh_command("sudo last | grep -c reboot") 2317 .unwrap() 2318 .trim() 2319 .parse::<u32>() 2320 .unwrap_or_default() 2321 } 2322 2323 fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) { 2324 // Check for PCI device 2325 assert!(guest 2326 .does_device_vendor_pair_match("0x1063", "0x1af4") 2327 .unwrap_or_default()); 2328 2329 // Enable systemd watchdog 2330 guest 2331 .ssh_command(&format!( 2332 "echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf" 2333 )) 2334 .unwrap(); 2335 } 2336 2337 fn make_guest_panic(guest: &Guest) { 2338 // Check for pvpanic device 2339 assert!(guest 2340 .does_device_vendor_pair_match("0x0011", "0x1b36") 2341 .unwrap_or_default()); 2342 2343 // Trigger a panic in the guest 2344 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 2345 } 2346 2347 mod common_parallel { 2348 use std::{fs::OpenOptions, io::SeekFrom}; 2349 2350 use crate::*; 2351 2352 #[test] 2353 #[cfg(target_arch = "x86_64")] 2354 fn test_focal_hypervisor_fw() { 2355 test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME) 2356 } 2357 2358 #[test] 2359 #[cfg(target_arch = "x86_64")] 2360 fn test_focal_ovmf() { 2361 test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME) 2362 } 2363 2364 #[cfg(target_arch = "x86_64")] 2365 fn test_simple_launch(fw_path: String, disk_path: &str) { 2366 let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string())); 2367 let guest = Guest::new(disk_config); 2368 let event_path = temp_event_monitor_path(&guest.tmp_dir); 2369 2370 let mut child = GuestCommand::new(&guest) 2371 .args(["--cpus", "boot=1"]) 2372 .args(["--memory", "size=512M"]) 2373 .args(["--kernel", fw_path.as_str()]) 2374 .default_disks() 2375 .default_net() 2376 .args(["--serial", "tty", "--console", "off"]) 2377 .args(["--event-monitor", format!("path={event_path}").as_str()]) 2378 .capture_output() 2379 .spawn() 2380 .unwrap(); 2381 2382 let r = std::panic::catch_unwind(|| { 2383 guest.wait_vm_boot(Some(120)).unwrap(); 2384 2385 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2386 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2387 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 2388 2389 let expected_sequential_events = [ 2390 &MetaEvent { 2391 event: "starting".to_string(), 2392 device_id: None, 2393 }, 2394 &MetaEvent { 2395 event: "booting".to_string(), 2396 device_id: None, 2397 }, 2398 &MetaEvent { 2399 event: "booted".to_string(), 2400 device_id: None, 2401 }, 2402 &MetaEvent { 2403 event: "activated".to_string(), 2404 device_id: Some("_disk0".to_string()), 2405 }, 2406 &MetaEvent { 2407 event: "reset".to_string(), 2408 device_id: Some("_disk0".to_string()), 2409 }, 2410 ]; 2411 assert!(check_sequential_events( 2412 &expected_sequential_events, 2413 &event_path 2414 )); 2415 2416 // It's been observed on the Bionic image that udev and snapd 2417 // services can cause some delay in the VM's shutdown. Disabling 2418 // them improves the reliability of this test.
2419 let _ = guest.ssh_command("sudo systemctl disable udev"); 2420 let _ = guest.ssh_command("sudo systemctl stop udev"); 2421 let _ = guest.ssh_command("sudo systemctl disable snapd"); 2422 let _ = guest.ssh_command("sudo systemctl stop snapd"); 2423 2424 guest.ssh_command("sudo poweroff").unwrap(); 2425 thread::sleep(std::time::Duration::new(20, 0)); 2426 let latest_events = [ 2427 &MetaEvent { 2428 event: "shutdown".to_string(), 2429 device_id: None, 2430 }, 2431 &MetaEvent { 2432 event: "deleted".to_string(), 2433 device_id: None, 2434 }, 2435 &MetaEvent { 2436 event: "shutdown".to_string(), 2437 device_id: None, 2438 }, 2439 ]; 2440 assert!(check_latest_events_exact(&latest_events, &event_path)); 2441 }); 2442 2443 let _ = child.kill(); 2444 let output = child.wait_with_output().unwrap(); 2445 2446 handle_child_output(r, &output); 2447 } 2448 2449 #[test] 2450 fn test_multi_cpu() { 2451 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 2452 let jammy = UbuntuDiskConfig::new(jammy_image); 2453 let guest = Guest::new(Box::new(jammy)); 2454 2455 let mut cmd = GuestCommand::new(&guest); 2456 cmd.args(["--cpus", "boot=2,max=4"]) 2457 .args(["--memory", "size=512M"]) 2458 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2459 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2460 .capture_output() 2461 .default_disks() 2462 .default_net(); 2463 2464 let mut child = cmd.spawn().unwrap(); 2465 2466 let r = std::panic::catch_unwind(|| { 2467 guest.wait_vm_boot(Some(120)).unwrap(); 2468 2469 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 2470 2471 assert_eq!( 2472 guest 2473 .ssh_command( 2474 r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""# 2475 ) 2476 .unwrap() 2477 .trim(), 2478 "smp: Brought up 1 node, 2 CPUs" 2479 ); 2480 }); 2481 2482 let _ = child.kill(); 2483 let output = child.wait_with_output().unwrap(); 2484 2485 handle_child_output(r, &output); 2486 } 2487 2488 #[test] 2489 fn test_cpu_topology_421() { 2490 test_cpu_topology(4, 2, 1, false); 2491 } 2492 2493 #[test] 2494 fn test_cpu_topology_142() { 2495 test_cpu_topology(1, 4, 2, false); 2496 } 2497 2498 #[test] 2499 fn test_cpu_topology_262() { 2500 test_cpu_topology(2, 6, 2, false); 2501 } 2502 2503 #[test] 2504 #[cfg(target_arch = "x86_64")] 2505 #[cfg(not(feature = "mshv"))] 2506 fn test_cpu_physical_bits() { 2507 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2508 let guest = Guest::new(Box::new(focal)); 2509 let max_phys_bits: u8 = 36; 2510 let mut child = GuestCommand::new(&guest) 2511 .args(["--cpus", &format!("max_phys_bits={max_phys_bits}")]) 2512 .args(["--memory", "size=512M"]) 2513 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2514 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2515 .default_disks() 2516 .default_net() 2517 .capture_output() 2518 .spawn() 2519 .unwrap(); 2520 2521 let r = std::panic::catch_unwind(|| { 2522 guest.wait_vm_boot(None).unwrap(); 2523 2524 assert!( 2525 guest 2526 .ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"") 2527 .unwrap() 2528 .trim() 2529 .parse::<u8>() 2530 .unwrap_or(max_phys_bits + 1) <= max_phys_bits, 2531 ); 2532 }); 2533 2534 let _ = child.kill(); 2535 let output = child.wait_with_output().unwrap(); 2536 2537 handle_child_output(r, &output); 2538 } 2539 2540 #[test] 2541 fn test_cpu_affinity() { 2542 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2543 let guest = Guest::new(Box::new(focal)); 2544 2545 // We need the host to 
have at least 4 CPUs if we want to be able 2546 // to run this test. 2547 let host_cpus_count = exec_host_command_output("nproc"); 2548 assert!( 2549 String::from_utf8_lossy(&host_cpus_count.stdout) 2550 .trim() 2551 .parse::<u16>() 2552 .unwrap_or(0) 2553 >= 4 2554 ); 2555 2556 let mut child = GuestCommand::new(&guest) 2557 .args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"]) 2558 .args(["--memory", "size=512M"]) 2559 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2560 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2561 .default_disks() 2562 .default_net() 2563 .capture_output() 2564 .spawn() 2565 .unwrap(); 2566 2567 let r = std::panic::catch_unwind(|| { 2568 guest.wait_vm_boot(None).unwrap(); 2569 let pid = child.id(); 2570 let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2571 assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2"); 2572 let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2573 assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3"); 2574 }); 2575 2576 let _ = child.kill(); 2577 let output = child.wait_with_output().unwrap(); 2578 2579 handle_child_output(r, &output); 2580 } 2581 2582 #[test] 2583 #[cfg(not(feature = "mshv"))] 2584 fn test_large_vm() { 2585 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2586 let guest = Guest::new(Box::new(focal)); 2587 let mut cmd = GuestCommand::new(&guest); 2588 cmd.args(["--cpus", "boot=48"]) 2589 .args(["--memory", "size=5120M"]) 2590 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2591 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2592 .args(["--serial", "tty"]) 2593 .args(["--console", "off"]) 2594 .capture_output() 2595 .default_disks() 2596 .default_net(); 2597 2598 let mut child = cmd.spawn().unwrap(); 2599 2600 guest.wait_vm_boot(None).unwrap(); 2601 2602 let r = std::panic::catch_unwind(|| { 2603 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48); 2604 assert_eq!( 2605 guest 2606 .ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"") 2607 .unwrap() 2608 .trim(), 2609 "0-47" 2610 ); 2611 2612 assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000); 2613 }); 2614 2615 let _ = child.kill(); 2616 let output = child.wait_with_output().unwrap(); 2617 2618 handle_child_output(r, &output); 2619 } 2620 2621 #[test] 2622 #[cfg(not(feature = "mshv"))] 2623 fn test_huge_memory() { 2624 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2625 let guest = Guest::new(Box::new(focal)); 2626 let mut cmd = GuestCommand::new(&guest); 2627 cmd.args(["--cpus", "boot=1"]) 2628 .args(["--memory", "size=128G"]) 2629 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2630 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2631 .capture_output() 2632 .default_disks() 2633 .default_net(); 2634 2635 let mut child = cmd.spawn().unwrap(); 2636 2637 guest.wait_vm_boot(Some(120)).unwrap(); 2638 2639 let r = std::panic::catch_unwind(|| { 2640 assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000); 2641 }); 2642 2643 let _ = child.kill(); 2644 let output = child.wait_with_output().unwrap(); 2645 2646 handle_child_output(r, &output); 2647 } 2648 2649 #[test] 2650 fn test_power_button() { 2651 _test_power_button(false); 2652 } 2653 2654 #[test] 2655 #[cfg(not(feature = "mshv"))] 
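// Note on the memory thresholds used in the test below (comment added for clarity;
// the values come from the assertions themselves): judging by the "size=512M" /
// "> 480_000" checks elsewhere in this file, get_total_memory() reports KiB as seen
// by the guest, which is always somewhat less than the configured amount, so each
// assertion uses roughly 90% of the nominal size. For example, the three 1G zones
// configured at boot amount to 3 GiB (3_145_728 KiB) but the test only requires
// > 2_880_000 KiB, and after resizing mem0 to 3G (5 GiB total) it requires
// > 4_800_000 KiB.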
2656 fn test_user_defined_memory_regions() { 2657 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2658 let guest = Guest::new(Box::new(focal)); 2659 let api_socket = temp_api_path(&guest.tmp_dir); 2660 2661 let kernel_path = direct_kernel_boot_path(); 2662 2663 let mut child = GuestCommand::new(&guest) 2664 .args(["--cpus", "boot=1"]) 2665 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 2666 .args([ 2667 "--memory-zone", 2668 "id=mem0,size=1G,hotplug_size=2G", 2669 "id=mem1,size=1G,shared=on", 2670 "id=mem2,size=1G,host_numa_node=0,hotplug_size=2G", 2671 ]) 2672 .args(["--kernel", kernel_path.to_str().unwrap()]) 2673 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2674 .args(["--api-socket", &api_socket]) 2675 .capture_output() 2676 .default_disks() 2677 .default_net() 2678 .spawn() 2679 .unwrap(); 2680 2681 let r = std::panic::catch_unwind(|| { 2682 guest.wait_vm_boot(None).unwrap(); 2683 2684 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 2685 2686 guest.enable_memory_hotplug(); 2687 2688 resize_zone_command(&api_socket, "mem0", "3G"); 2689 thread::sleep(std::time::Duration::new(5, 0)); 2690 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2691 resize_zone_command(&api_socket, "mem2", "3G"); 2692 thread::sleep(std::time::Duration::new(5, 0)); 2693 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 2694 resize_zone_command(&api_socket, "mem0", "2G"); 2695 thread::sleep(std::time::Duration::new(5, 0)); 2696 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 2697 resize_zone_command(&api_socket, "mem2", "2G"); 2698 thread::sleep(std::time::Duration::new(5, 0)); 2699 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2700 2701 guest.reboot_linux(0, None); 2702 2703 // Check the amount of RAM after reboot 2704 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2705 assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000); 2706 2707 // Check if we can still resize down to the initial 'boot' size 2708 resize_zone_command(&api_socket, "mem0", "1G"); 2709 thread::sleep(std::time::Duration::new(5, 0)); 2710 assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000); 2711 resize_zone_command(&api_socket, "mem2", "1G"); 2712 thread::sleep(std::time::Duration::new(5, 0)); 2713 assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000); 2714 }); 2715 2716 let _ = child.kill(); 2717 let output = child.wait_with_output().unwrap(); 2718 2719 handle_child_output(r, &output); 2720 } 2721 2722 #[test] 2723 #[cfg(not(feature = "mshv"))] 2724 fn test_guest_numa_nodes() { 2725 _test_guest_numa_nodes(false); 2726 } 2727 2728 #[test] 2729 #[cfg(target_arch = "x86_64")] 2730 fn test_iommu_segments() { 2731 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2732 let focal = UbuntuDiskConfig::new(focal_image); 2733 let guest = Guest::new(Box::new(focal)); 2734 2735 // Prepare another disk file for the virtio-disk device 2736 let test_disk_path = String::from( 2737 guest 2738 .tmp_dir 2739 .as_path() 2740 .join("test-disk.raw") 2741 .to_str() 2742 .unwrap(), 2743 ); 2744 assert!( 2745 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2746 ); 2747 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2748 2749 let api_socket = temp_api_path(&guest.tmp_dir); 2750 let mut cmd = GuestCommand::new(&guest); 2751 2752 cmd.args(["--cpus", "boot=1"]) 2753 .args(["--api-socket", &api_socket]) 2754
.args(["--memory", "size=512M"]) 2755 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2756 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2757 .args([ 2758 "--platform", 2759 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"), 2760 ]) 2761 .default_disks() 2762 .capture_output() 2763 .default_net(); 2764 2765 let mut child = cmd.spawn().unwrap(); 2766 2767 guest.wait_vm_boot(None).unwrap(); 2768 2769 let r = std::panic::catch_unwind(|| { 2770 let (cmd_success, cmd_output) = remote_command_w_output( 2771 &api_socket, 2772 "add-disk", 2773 Some( 2774 format!( 2775 "path={},id=test0,pci_segment=1,iommu=on", 2776 test_disk_path.as_str() 2777 ) 2778 .as_str(), 2779 ), 2780 ); 2781 assert!(cmd_success); 2782 assert!(String::from_utf8_lossy(&cmd_output) 2783 .contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}")); 2784 2785 // Check IOMMU setup 2786 assert!(guest 2787 .does_device_vendor_pair_match("0x1057", "0x1af4") 2788 .unwrap_or_default()); 2789 assert_eq!( 2790 guest 2791 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 2792 .unwrap() 2793 .trim(), 2794 "0001:00:01.0" 2795 ); 2796 }); 2797 2798 let _ = child.kill(); 2799 let output = child.wait_with_output().unwrap(); 2800 2801 handle_child_output(r, &output); 2802 } 2803 2804 #[test] 2805 fn test_pci_msi() { 2806 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2807 let guest = Guest::new(Box::new(focal)); 2808 let mut cmd = GuestCommand::new(&guest); 2809 cmd.args(["--cpus", "boot=1"]) 2810 .args(["--memory", "size=512M"]) 2811 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2812 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2813 .capture_output() 2814 .default_disks() 2815 .default_net(); 2816 2817 let mut child = cmd.spawn().unwrap(); 2818 2819 guest.wait_vm_boot(None).unwrap(); 2820 2821 #[cfg(target_arch = "x86_64")] 2822 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 2823 #[cfg(target_arch = "aarch64")] 2824 let grep_cmd = "grep -c ITS-MSI /proc/interrupts"; 2825 2826 let r = std::panic::catch_unwind(|| { 2827 assert_eq!( 2828 guest 2829 .ssh_command(grep_cmd) 2830 .unwrap() 2831 .trim() 2832 .parse::<u32>() 2833 .unwrap_or_default(), 2834 12 2835 ); 2836 }); 2837 2838 let _ = child.kill(); 2839 let output = child.wait_with_output().unwrap(); 2840 2841 handle_child_output(r, &output); 2842 } 2843 2844 #[test] 2845 fn test_virtio_net_ctrl_queue() { 2846 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2847 let guest = Guest::new(Box::new(focal)); 2848 let mut cmd = GuestCommand::new(&guest); 2849 cmd.args(["--cpus", "boot=1"]) 2850 .args(["--memory", "size=512M"]) 2851 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2852 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2853 .args(["--net", guest.default_net_string_w_mtu(3000).as_str()]) 2854 .capture_output() 2855 .default_disks(); 2856 2857 let mut child = cmd.spawn().unwrap(); 2858 2859 guest.wait_vm_boot(None).unwrap(); 2860 2861 #[cfg(target_arch = "aarch64")] 2862 let iface = "enp0s4"; 2863 #[cfg(target_arch = "x86_64")] 2864 let iface = "ens4"; 2865 2866 let r = std::panic::catch_unwind(|| { 2867 assert_eq!( 2868 guest 2869 .ssh_command( 2870 format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str() 2871 ) 2872 .unwrap() 2873 .trim(), 2874 "success" 2875 ); 2876 assert_eq!( 2877 guest 2878 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str()) 2879 .unwrap() 2880 .trim(), 2881 "3000" 2882 ); 2883 }); 2884 2885 let _ = child.kill(); 2886 
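// Note on the teardown pattern used throughout these tests (comment added for
// clarity): the VMM child is killed unconditionally once the catch_unwind closure
// has run, wait_with_output() then collects the captured stdout/stderr, and
// handle_child_output() (provided by test_infra) is expected to re-raise any panic
// caught by the closure together with that output, so a failing assertion still
// surfaces with the VMM logs attached.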
let output = child.wait_with_output().unwrap(); 2887 2888 handle_child_output(r, &output); 2889 } 2890 2891 #[test] 2892 fn test_pci_multiple_segments() { 2893 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2894 let guest = Guest::new(Box::new(focal)); 2895 2896 // Prepare another disk file for the virtio-disk device 2897 let test_disk_path = String::from( 2898 guest 2899 .tmp_dir 2900 .as_path() 2901 .join("test-disk.raw") 2902 .to_str() 2903 .unwrap(), 2904 ); 2905 assert!( 2906 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2907 ); 2908 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2909 2910 let mut cmd = GuestCommand::new(&guest); 2911 cmd.args(["--cpus", "boot=1"]) 2912 .args(["--memory", "size=512M"]) 2913 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2914 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2915 .args([ 2916 "--platform", 2917 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 2918 ]) 2919 .args([ 2920 "--disk", 2921 format!( 2922 "path={}", 2923 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2924 ) 2925 .as_str(), 2926 format!( 2927 "path={}", 2928 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2929 ) 2930 .as_str(), 2931 format!("path={test_disk_path},pci_segment=15").as_str(), 2932 ]) 2933 .capture_output() 2934 .default_net(); 2935 2936 let mut child = cmd.spawn().unwrap(); 2937 2938 guest.wait_vm_boot(None).unwrap(); 2939 2940 let grep_cmd = "lspci | grep \"Host bridge\" | wc -l"; 2941 2942 let r = std::panic::catch_unwind(|| { 2943 // There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest. 2944 assert_eq!( 2945 guest 2946 .ssh_command(grep_cmd) 2947 .unwrap() 2948 .trim() 2949 .parse::<u16>() 2950 .unwrap_or_default(), 2951 MAX_NUM_PCI_SEGMENTS 2952 ); 2953 2954 // Check both if /dev/vdc exists and if the block size is 4M. 2955 assert_eq!( 2956 guest 2957 .ssh_command("lsblk | grep vdc | grep -c 4M") 2958 .unwrap() 2959 .trim() 2960 .parse::<u32>() 2961 .unwrap_or_default(), 2962 1 2963 ); 2964 2965 // Mount the device. 2966 guest.ssh_command("mkdir mount_image").unwrap(); 2967 guest 2968 .ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/") 2969 .unwrap(); 2970 // Grant all users with write permission. 2971 guest.ssh_command("sudo chmod a+w mount_image/").unwrap(); 2972 2973 // Write something to the device. 2974 guest 2975 .ssh_command("sudo echo \"bar\" >> mount_image/foo") 2976 .unwrap(); 2977 2978 // Check the content of the block device. The file "foo" should 2979 // contain "bar". 
2980 assert_eq!( 2981 guest 2982 .ssh_command("sudo cat mount_image/foo") 2983 .unwrap() 2984 .trim(), 2985 "bar" 2986 ); 2987 }); 2988 2989 let _ = child.kill(); 2990 let output = child.wait_with_output().unwrap(); 2991 2992 handle_child_output(r, &output); 2993 } 2994 2995 #[test] 2996 fn test_pci_multiple_segments_numa_node() { 2997 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2998 let guest = Guest::new(Box::new(focal)); 2999 let api_socket = temp_api_path(&guest.tmp_dir); 3000 #[cfg(target_arch = "x86_64")] 3001 let kernel_path = direct_kernel_boot_path(); 3002 #[cfg(target_arch = "aarch64")] 3003 let kernel_path = edk2_path(); 3004 3005 // Prepare another disk file for the virtio-disk device 3006 let test_disk_path = String::from( 3007 guest 3008 .tmp_dir 3009 .as_path() 3010 .join("test-disk.raw") 3011 .to_str() 3012 .unwrap(), 3013 ); 3014 assert!( 3015 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 3016 ); 3017 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 3018 const TEST_DISK_NODE: u16 = 1; 3019 3020 let mut child = GuestCommand::new(&guest) 3021 .args(["--platform", "num_pci_segments=2"]) 3022 .args(["--cpus", "boot=2"]) 3023 .args(["--memory", "size=0"]) 3024 .args(["--memory-zone", "id=mem0,size=256M", "id=mem1,size=256M"]) 3025 .args([ 3026 "--numa", 3027 "guest_numa_id=0,cpus=[0],distances=[1@20],memory_zones=mem0,pci_segments=[0]", 3028 "guest_numa_id=1,cpus=[1],distances=[0@20],memory_zones=mem1,pci_segments=[1]", 3029 ]) 3030 .args(["--kernel", kernel_path.to_str().unwrap()]) 3031 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3032 .args(["--api-socket", &api_socket]) 3033 .capture_output() 3034 .args([ 3035 "--disk", 3036 format!( 3037 "path={}", 3038 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3039 ) 3040 .as_str(), 3041 format!( 3042 "path={}", 3043 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3044 ) 3045 .as_str(), 3046 format!("path={test_disk_path},pci_segment={TEST_DISK_NODE}").as_str(), 3047 ]) 3048 .default_net() 3049 .spawn() 3050 .unwrap(); 3051 3052 let cmd = "cat /sys/block/vdc/device/../numa_node"; 3053 3054 let r = std::panic::catch_unwind(|| { 3055 guest.wait_vm_boot(None).unwrap(); 3056 3057 assert_eq!( 3058 guest 3059 .ssh_command(cmd) 3060 .unwrap() 3061 .trim() 3062 .parse::<u16>() 3063 .unwrap_or_default(), 3064 TEST_DISK_NODE 3065 ); 3066 }); 3067 3068 let _ = child.kill(); 3069 let output = child.wait_with_output().unwrap(); 3070 3071 handle_child_output(r, &output); 3072 } 3073 3074 #[test] 3075 fn test_direct_kernel_boot() { 3076 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3077 let guest = Guest::new(Box::new(focal)); 3078 3079 let kernel_path = direct_kernel_boot_path(); 3080 3081 let mut child = GuestCommand::new(&guest) 3082 .args(["--cpus", "boot=1"]) 3083 .args(["--memory", "size=512M"]) 3084 .args(["--kernel", kernel_path.to_str().unwrap()]) 3085 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3086 .default_disks() 3087 .default_net() 3088 .capture_output() 3089 .spawn() 3090 .unwrap(); 3091 3092 let r = std::panic::catch_unwind(|| { 3093 guest.wait_vm_boot(None).unwrap(); 3094 3095 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3096 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3097 3098 let grep_cmd = if cfg!(target_arch = "x86_64") { 3099 "grep -c PCI-MSI /proc/interrupts" 3100 } else { 3101 "grep -c ITS-MSI /proc/interrupts" 3102 }; 3103 assert_eq!( 3104 
guest 3105 .ssh_command(grep_cmd) 3106 .unwrap() 3107 .trim() 3108 .parse::<u32>() 3109 .unwrap_or_default(), 3110 12 3111 ); 3112 }); 3113 3114 let _ = child.kill(); 3115 let output = child.wait_with_output().unwrap(); 3116 3117 handle_child_output(r, &output); 3118 } 3119 3120 fn _test_virtio_block(image_name: &str, disable_io_uring: bool, disable_aio: bool) { 3121 let focal = UbuntuDiskConfig::new(image_name.to_string()); 3122 let guest = Guest::new(Box::new(focal)); 3123 3124 let mut workload_path = dirs::home_dir().unwrap(); 3125 workload_path.push("workloads"); 3126 3127 let mut blk_file_path = workload_path; 3128 blk_file_path.push("blk.img"); 3129 3130 let kernel_path = direct_kernel_boot_path(); 3131 3132 let mut cloud_child = GuestCommand::new(&guest) 3133 .args(["--cpus", "boot=4"]) 3134 .args(["--memory", "size=512M,shared=on"]) 3135 .args(["--kernel", kernel_path.to_str().unwrap()]) 3136 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3137 .args([ 3138 "--disk", 3139 format!( 3140 "path={}", 3141 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3142 ) 3143 .as_str(), 3144 format!( 3145 "path={}", 3146 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3147 ) 3148 .as_str(), 3149 format!( 3150 "path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={},_disable_aio={}", 3151 blk_file_path.to_str().unwrap(), 3152 disable_io_uring, 3153 disable_aio, 3154 ) 3155 .as_str(), 3156 ]) 3157 .default_net() 3158 .capture_output() 3159 .spawn() 3160 .unwrap(); 3161 3162 let r = std::panic::catch_unwind(|| { 3163 guest.wait_vm_boot(None).unwrap(); 3164 3165 // Check both if /dev/vdc exists and if the block size is 16M. 3166 assert_eq!( 3167 guest 3168 .ssh_command("lsblk | grep vdc | grep -c 16M") 3169 .unwrap() 3170 .trim() 3171 .parse::<u32>() 3172 .unwrap_or_default(), 3173 1 3174 ); 3175 3176 // Check both if /dev/vdc exists and if this block is RO. 3177 assert_eq!( 3178 guest 3179 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 3180 .unwrap() 3181 .trim() 3182 .parse::<u32>() 3183 .unwrap_or_default(), 3184 1 3185 ); 3186 3187 // Check if the number of queues is 4. 
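// (Clarifying note.) Each virtio-blk queue is exposed by the guest kernel as a
// numbered subdirectory under /sys/block/<dev>/mq/, so with num_queues=4 the
// expectation is:
//   $ ls /sys/block/vdc/mq
//   0  1  2  3
// hence the directory-count check below.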
3188 assert_eq!( 3189 guest 3190 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 3191 .unwrap() 3192 .trim() 3193 .parse::<u32>() 3194 .unwrap_or_default(), 3195 4 3196 ); 3197 }); 3198 3199 let _ = cloud_child.kill(); 3200 let output = cloud_child.wait_with_output().unwrap(); 3201 3202 handle_child_output(r, &output); 3203 } 3204 3205 #[test] 3206 fn test_virtio_block_io_uring() { 3207 _test_virtio_block(FOCAL_IMAGE_NAME, false, true) 3208 } 3209 3210 #[test] 3211 fn test_virtio_block_aio() { 3212 _test_virtio_block(FOCAL_IMAGE_NAME, true, false) 3213 } 3214 3215 #[test] 3216 fn test_virtio_block_sync() { 3217 _test_virtio_block(FOCAL_IMAGE_NAME, true, true) 3218 } 3219 3220 #[test] 3221 fn test_virtio_block_qcow2() { 3222 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false, false) 3223 } 3224 3225 #[test] 3226 fn test_virtio_block_qcow2_backing_file() { 3227 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE, false, false) 3228 } 3229 3230 #[test] 3231 fn test_virtio_block_vhd() { 3232 let mut workload_path = dirs::home_dir().unwrap(); 3233 workload_path.push("workloads"); 3234 3235 let mut raw_file_path = workload_path.clone(); 3236 let mut vhd_file_path = workload_path; 3237 raw_file_path.push(FOCAL_IMAGE_NAME); 3238 vhd_file_path.push(FOCAL_IMAGE_NAME_VHD); 3239 3240 // Generate VHD file from RAW file 3241 std::process::Command::new("qemu-img") 3242 .arg("convert") 3243 .arg("-p") 3244 .args(["-f", "raw"]) 3245 .args(["-O", "vpc"]) 3246 .args(["-o", "subformat=fixed"]) 3247 .arg(raw_file_path.to_str().unwrap()) 3248 .arg(vhd_file_path.to_str().unwrap()) 3249 .output() 3250 .expect("Expect generating VHD image from RAW image"); 3251 3252 _test_virtio_block(FOCAL_IMAGE_NAME_VHD, false, false) 3253 } 3254 3255 #[test] 3256 fn test_virtio_block_vhdx() { 3257 let mut workload_path = dirs::home_dir().unwrap(); 3258 workload_path.push("workloads"); 3259 3260 let mut raw_file_path = workload_path.clone(); 3261 let mut vhdx_file_path = workload_path; 3262 raw_file_path.push(FOCAL_IMAGE_NAME); 3263 vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX); 3264 3265 // Generate dynamic VHDX file from RAW file 3266 std::process::Command::new("qemu-img") 3267 .arg("convert") 3268 .arg("-p") 3269 .args(["-f", "raw"]) 3270 .args(["-O", "vhdx"]) 3271 .arg(raw_file_path.to_str().unwrap()) 3272 .arg(vhdx_file_path.to_str().unwrap()) 3273 .output() 3274 .expect("Expect generating dynamic VHDx image from RAW image"); 3275 3276 _test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false, false) 3277 } 3278 3279 #[test] 3280 fn test_virtio_block_dynamic_vhdx_expand() { 3281 const VIRTUAL_DISK_SIZE: u64 = 100 << 20; 3282 const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20; 3283 const FULL_VHDX_FILE_SIZE: u64 = 112 << 20; 3284 const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx"; 3285 3286 let mut workload_path = dirs::home_dir().unwrap(); 3287 workload_path.push("workloads"); 3288 3289 let mut vhdx_file_path = workload_path; 3290 vhdx_file_path.push(DYNAMIC_VHDX_NAME); 3291 let vhdx_path = vhdx_file_path.to_str().unwrap(); 3292 3293 // Generate a 100 MiB dynamic VHDX file 3294 std::process::Command::new("qemu-img") 3295 .arg("create") 3296 .args(["-f", "vhdx"]) 3297 .arg(vhdx_path) 3298 .arg(VIRTUAL_DISK_SIZE.to_string()) 3299 .output() 3300 .expect("Expect generating dynamic VHDx image from RAW image"); 3301 3302 // Check if the size matches with empty VHDx file size 3303 assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE); 3304 3305 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3306 let guest = 
Guest::new(Box::new(focal)); 3307 let kernel_path = direct_kernel_boot_path(); 3308 3309 let mut cloud_child = GuestCommand::new(&guest) 3310 .args(["--cpus", "boot=1"]) 3311 .args(["--memory", "size=512M"]) 3312 .args(["--kernel", kernel_path.to_str().unwrap()]) 3313 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3314 .args([ 3315 "--disk", 3316 format!( 3317 "path={}", 3318 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3319 ) 3320 .as_str(), 3321 format!( 3322 "path={}", 3323 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3324 ) 3325 .as_str(), 3326 format!("path={vhdx_path}").as_str(), 3327 ]) 3328 .default_net() 3329 .capture_output() 3330 .spawn() 3331 .unwrap(); 3332 3333 let r = std::panic::catch_unwind(|| { 3334 guest.wait_vm_boot(None).unwrap(); 3335 3336 // Check both if /dev/vdc exists and if the block size is 100 MiB. 3337 assert_eq!( 3338 guest 3339 .ssh_command("lsblk | grep vdc | grep -c 100M") 3340 .unwrap() 3341 .trim() 3342 .parse::<u32>() 3343 .unwrap_or_default(), 3344 1 3345 ); 3346 3347 // Write 100 MB of data to the VHDx disk 3348 guest 3349 .ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100") 3350 .unwrap(); 3351 }); 3352 3353 // Check if the size matches with expected expanded VHDx file size 3354 assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE); 3355 3356 let _ = cloud_child.kill(); 3357 let output = cloud_child.wait_with_output().unwrap(); 3358 3359 handle_child_output(r, &output); 3360 } 3361 3362 fn vhdx_image_size(disk_name: &str) -> u64 { 3363 std::fs::File::open(disk_name) 3364 .unwrap() 3365 .seek(SeekFrom::End(0)) 3366 .unwrap() 3367 } 3368 3369 #[test] 3370 fn test_virtio_block_direct_and_firmware() { 3371 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3372 let guest = Guest::new(Box::new(focal)); 3373 3374 // The OS disk must be copied to a location that is not backed by 3375 // tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails 3376 // with EINVAL because tmpfs doesn't support this flag. 
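// (Illustrative note, not part of the original test.) The failure being avoided
// can be reproduced by hand with something along these lines:
//   $ dd if=/dev/zero of=/tmp/x bs=4k count=1 oflag=direct
//   dd: failed to open '/tmp/x': Invalid argument    (when /tmp is backed by tmpfs)
// which is why the OS disk is first copied under ~/workloads (assumed to live on a
// regular filesystem) before being passed to the VMM with direct=on.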
3377 let mut workloads_path = dirs::home_dir().unwrap(); 3378 workloads_path.push("workloads"); 3379 let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap(); 3380 let mut os_path = os_dir.as_path().to_path_buf(); 3381 os_path.push("osdisk.img"); 3382 rate_limited_copy( 3383 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3384 os_path.as_path(), 3385 ) 3386 .expect("copying of OS disk failed"); 3387 3388 let mut child = GuestCommand::new(&guest) 3389 .args(["--cpus", "boot=1"]) 3390 .args(["--memory", "size=512M"]) 3391 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 3392 .args([ 3393 "--disk", 3394 format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(), 3395 format!( 3396 "path={}", 3397 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3398 ) 3399 .as_str(), 3400 ]) 3401 .default_net() 3402 .capture_output() 3403 .spawn() 3404 .unwrap(); 3405 3406 let r = std::panic::catch_unwind(|| { 3407 guest.wait_vm_boot(Some(120)).unwrap(); 3408 }); 3409 3410 let _ = child.kill(); 3411 let output = child.wait_with_output().unwrap(); 3412 3413 handle_child_output(r, &output); 3414 } 3415 3416 #[test] 3417 fn test_vhost_user_net_default() { 3418 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false) 3419 } 3420 3421 #[test] 3422 fn test_vhost_user_net_named_tap() { 3423 test_vhost_user_net( 3424 Some("mytap0"), 3425 2, 3426 &prepare_vhost_user_net_daemon, 3427 false, 3428 false, 3429 ) 3430 } 3431 3432 #[test] 3433 fn test_vhost_user_net_existing_tap() { 3434 test_vhost_user_net( 3435 Some("vunet-tap0"), 3436 2, 3437 &prepare_vhost_user_net_daemon, 3438 false, 3439 false, 3440 ) 3441 } 3442 3443 #[test] 3444 fn test_vhost_user_net_multiple_queues() { 3445 test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false) 3446 } 3447 3448 #[test] 3449 fn test_vhost_user_net_tap_multiple_queues() { 3450 test_vhost_user_net( 3451 Some("vunet-tap1"), 3452 4, 3453 &prepare_vhost_user_net_daemon, 3454 false, 3455 false, 3456 ) 3457 } 3458 3459 #[test] 3460 fn test_vhost_user_net_host_mac() { 3461 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false) 3462 } 3463 3464 #[test] 3465 fn test_vhost_user_net_client_mode() { 3466 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true) 3467 } 3468 3469 #[test] 3470 #[cfg(not(target_arch = "aarch64"))] 3471 fn test_vhost_user_blk_default() { 3472 test_vhost_user_blk(2, false, false, Some(&prepare_vubd)) 3473 } 3474 3475 #[test] 3476 #[cfg(not(target_arch = "aarch64"))] 3477 fn test_vhost_user_blk_readonly() { 3478 test_vhost_user_blk(1, true, false, Some(&prepare_vubd)) 3479 } 3480 3481 #[test] 3482 #[cfg(not(target_arch = "aarch64"))] 3483 fn test_vhost_user_blk_direct() { 3484 test_vhost_user_blk(1, false, true, Some(&prepare_vubd)) 3485 } 3486 3487 #[test] 3488 fn test_boot_from_vhost_user_blk_default() { 3489 test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd)) 3490 } 3491 3492 #[test] 3493 #[cfg(target_arch = "x86_64")] 3494 fn test_split_irqchip() { 3495 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3496 let guest = Guest::new(Box::new(focal)); 3497 3498 let mut child = GuestCommand::new(&guest) 3499 .args(["--cpus", "boot=1"]) 3500 .args(["--memory", "size=512M"]) 3501 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3502 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3503 .default_disks() 3504 .default_net() 3505 .capture_output() 3506 .spawn() 3507 .unwrap(); 3508 3509 let r = 
std::panic::catch_unwind(|| { 3510 guest.wait_vm_boot(None).unwrap(); 3511 3512 assert_eq!( 3513 guest 3514 .ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true") 3515 .unwrap() 3516 .trim() 3517 .parse::<u32>() 3518 .unwrap_or(1), 3519 0 3520 ); 3521 assert_eq!( 3522 guest 3523 .ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true") 3524 .unwrap() 3525 .trim() 3526 .parse::<u32>() 3527 .unwrap_or(1), 3528 0 3529 ); 3530 }); 3531 3532 let _ = child.kill(); 3533 let output = child.wait_with_output().unwrap(); 3534 3535 handle_child_output(r, &output); 3536 } 3537 3538 #[test] 3539 #[cfg(target_arch = "x86_64")] 3540 fn test_dmi_serial_number() { 3541 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3542 let guest = Guest::new(Box::new(focal)); 3543 3544 let mut child = GuestCommand::new(&guest) 3545 .args(["--cpus", "boot=1"]) 3546 .args(["--memory", "size=512M"]) 3547 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3548 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3549 .args(["--platform", "serial_number=a=b;c=d"]) 3550 .default_disks() 3551 .default_net() 3552 .capture_output() 3553 .spawn() 3554 .unwrap(); 3555 3556 let r = std::panic::catch_unwind(|| { 3557 guest.wait_vm_boot(None).unwrap(); 3558 3559 assert_eq!( 3560 guest 3561 .ssh_command("sudo cat /sys/class/dmi/id/product_serial") 3562 .unwrap() 3563 .trim(), 3564 "a=b;c=d" 3565 ); 3566 }); 3567 3568 let _ = child.kill(); 3569 let output = child.wait_with_output().unwrap(); 3570 3571 handle_child_output(r, &output); 3572 } 3573 3574 #[test] 3575 #[cfg(target_arch = "x86_64")] 3576 fn test_dmi_uuid() { 3577 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3578 let guest = Guest::new(Box::new(focal)); 3579 3580 let mut child = GuestCommand::new(&guest) 3581 .args(["--cpus", "boot=1"]) 3582 .args(["--memory", "size=512M"]) 3583 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3584 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3585 .args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"]) 3586 .default_disks() 3587 .default_net() 3588 .capture_output() 3589 .spawn() 3590 .unwrap(); 3591 3592 let r = std::panic::catch_unwind(|| { 3593 guest.wait_vm_boot(None).unwrap(); 3594 3595 assert_eq!( 3596 guest 3597 .ssh_command("sudo cat /sys/class/dmi/id/product_uuid") 3598 .unwrap() 3599 .trim(), 3600 "1e8aa28a-435d-4027-87f4-40dceff1fa0a" 3601 ); 3602 }); 3603 3604 let _ = child.kill(); 3605 let output = child.wait_with_output().unwrap(); 3606 3607 handle_child_output(r, &output); 3608 } 3609 3610 #[test] 3611 #[cfg(target_arch = "x86_64")] 3612 fn test_dmi_oem_strings() { 3613 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3614 let guest = Guest::new(Box::new(focal)); 3615 3616 let s1 = "io.systemd.credential:xx=yy"; 3617 let s2 = "This is a test string"; 3618 3619 let oem_strings = format!("oem_strings=[{s1},{s2}]"); 3620 3621 let mut child = GuestCommand::new(&guest) 3622 .args(["--cpus", "boot=1"]) 3623 .args(["--memory", "size=512M"]) 3624 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3625 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3626 .args(["--platform", &oem_strings]) 3627 .default_disks() 3628 .default_net() 3629 .capture_output() 3630 .spawn() 3631 .unwrap(); 3632 3633 let r = std::panic::catch_unwind(|| { 3634 guest.wait_vm_boot(None).unwrap(); 3635 3636 assert_eq!( 3637 guest 3638 .ssh_command("sudo dmidecode --oem-string count") 3639 .unwrap() 3640 .trim(), 3641 "2" 3642 ); 3643 3644 
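// (Clarifying note.) The OEM strings are exposed to the guest through the SMBIOS
// type 11 table. The first string deliberately uses the
// "io.systemd.credential:<name>=<value>" form, which recent systemd versions import
// as a system credential; the assertions here and below only verify that both
// strings are passed through verbatim.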
assert_eq!( 3645 guest 3646 .ssh_command("sudo dmidecode --oem-string 1") 3647 .unwrap() 3648 .trim(), 3649 s1 3650 ); 3651 3652 assert_eq!( 3653 guest 3654 .ssh_command("sudo dmidecode --oem-string 2") 3655 .unwrap() 3656 .trim(), 3657 s2 3658 ); 3659 }); 3660 3661 let _ = child.kill(); 3662 let output = child.wait_with_output().unwrap(); 3663 3664 handle_child_output(r, &output); 3665 } 3666 3667 #[test] 3668 fn test_virtio_fs() { 3669 _test_virtio_fs(&prepare_virtiofsd, false, None) 3670 } 3671 3672 #[test] 3673 fn test_virtio_fs_hotplug() { 3674 _test_virtio_fs(&prepare_virtiofsd, true, None) 3675 } 3676 3677 #[test] 3678 #[cfg(not(feature = "mshv"))] 3679 fn test_virtio_fs_multi_segment_hotplug() { 3680 _test_virtio_fs(&prepare_virtiofsd, true, Some(15)) 3681 } 3682 3683 #[test] 3684 #[cfg(not(feature = "mshv"))] 3685 fn test_virtio_fs_multi_segment() { 3686 _test_virtio_fs(&prepare_virtiofsd, false, Some(15)) 3687 } 3688 3689 #[test] 3690 fn test_virtio_pmem_persist_writes() { 3691 test_virtio_pmem(false, false) 3692 } 3693 3694 #[test] 3695 fn test_virtio_pmem_discard_writes() { 3696 test_virtio_pmem(true, false) 3697 } 3698 3699 #[test] 3700 fn test_virtio_pmem_with_size() { 3701 test_virtio_pmem(true, true) 3702 } 3703 3704 #[test] 3705 fn test_boot_from_virtio_pmem() { 3706 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3707 let guest = Guest::new(Box::new(focal)); 3708 3709 let kernel_path = direct_kernel_boot_path(); 3710 3711 let mut child = GuestCommand::new(&guest) 3712 .args(["--cpus", "boot=1"]) 3713 .args(["--memory", "size=512M"]) 3714 .args(["--kernel", kernel_path.to_str().unwrap()]) 3715 .args([ 3716 "--disk", 3717 format!( 3718 "path={}", 3719 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3720 ) 3721 .as_str(), 3722 ]) 3723 .default_net() 3724 .args([ 3725 "--pmem", 3726 format!( 3727 "file={},size={}", 3728 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3729 fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) 3730 .unwrap() 3731 .len() 3732 ) 3733 .as_str(), 3734 ]) 3735 .args([ 3736 "--cmdline", 3737 DIRECT_KERNEL_BOOT_CMDLINE 3738 .replace("vda1", "pmem0p1") 3739 .as_str(), 3740 ]) 3741 .capture_output() 3742 .spawn() 3743 .unwrap(); 3744 3745 let r = std::panic::catch_unwind(|| { 3746 guest.wait_vm_boot(None).unwrap(); 3747 3748 // Simple checks to validate the VM booted properly 3749 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3750 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3751 }); 3752 3753 let _ = child.kill(); 3754 let output = child.wait_with_output().unwrap(); 3755 3756 handle_child_output(r, &output); 3757 } 3758 3759 #[test] 3760 fn test_multiple_network_interfaces() { 3761 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3762 let guest = Guest::new(Box::new(focal)); 3763 3764 let kernel_path = direct_kernel_boot_path(); 3765 3766 let mut child = GuestCommand::new(&guest) 3767 .args(["--cpus", "boot=1"]) 3768 .args(["--memory", "size=512M"]) 3769 .args(["--kernel", kernel_path.to_str().unwrap()]) 3770 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3771 .default_disks() 3772 .args([ 3773 "--net", 3774 guest.default_net_string().as_str(), 3775 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 3776 "tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 3777 ]) 3778 .capture_output() 3779 .spawn() 3780 .unwrap(); 3781 3782 let r = std::panic::catch_unwind(|| { 3783 guest.wait_vm_boot(None).unwrap(); 3784 3785 let 
tap_count = exec_host_command_output("ip link | grep -c mytap1"); 3786 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 3787 3788 // 3 network interfaces + default localhost ==> 4 interfaces 3789 assert_eq!( 3790 guest 3791 .ssh_command("ip -o link | wc -l") 3792 .unwrap() 3793 .trim() 3794 .parse::<u32>() 3795 .unwrap_or_default(), 3796 4 3797 ); 3798 }); 3799 3800 let _ = child.kill(); 3801 let output = child.wait_with_output().unwrap(); 3802 3803 handle_child_output(r, &output); 3804 } 3805 3806 #[test] 3807 #[cfg(target_arch = "aarch64")] 3808 fn test_pmu_on() { 3809 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3810 let guest = Guest::new(Box::new(focal)); 3811 let mut child = GuestCommand::new(&guest) 3812 .args(["--cpus", "boot=1"]) 3813 .args(["--memory", "size=512M"]) 3814 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3815 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3816 .default_disks() 3817 .default_net() 3818 .capture_output() 3819 .spawn() 3820 .unwrap(); 3821 3822 let r = std::panic::catch_unwind(|| { 3823 guest.wait_vm_boot(None).unwrap(); 3824 3825 // Test that PMU exists. 3826 assert_eq!( 3827 guest 3828 .ssh_command(GREP_PMU_IRQ_CMD) 3829 .unwrap() 3830 .trim() 3831 .parse::<u32>() 3832 .unwrap_or_default(), 3833 1 3834 ); 3835 }); 3836 3837 let _ = child.kill(); 3838 let output = child.wait_with_output().unwrap(); 3839 3840 handle_child_output(r, &output); 3841 } 3842 3843 #[test] 3844 fn test_serial_off() { 3845 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3846 let guest = Guest::new(Box::new(focal)); 3847 let mut child = GuestCommand::new(&guest) 3848 .args(["--cpus", "boot=1"]) 3849 .args(["--memory", "size=512M"]) 3850 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3851 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3852 .default_disks() 3853 .default_net() 3854 .args(["--serial", "off"]) 3855 .capture_output() 3856 .spawn() 3857 .unwrap(); 3858 3859 let r = std::panic::catch_unwind(|| { 3860 guest.wait_vm_boot(None).unwrap(); 3861 3862 // Test that there is no ttyS0 3863 assert_eq!( 3864 guest 3865 .ssh_command(GREP_SERIAL_IRQ_CMD) 3866 .unwrap() 3867 .trim() 3868 .parse::<u32>() 3869 .unwrap_or(1), 3870 0 3871 ); 3872 }); 3873 3874 let _ = child.kill(); 3875 let output = child.wait_with_output().unwrap(); 3876 3877 handle_child_output(r, &output); 3878 } 3879 3880 #[test] 3881 fn test_serial_null() { 3882 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3883 let guest = Guest::new(Box::new(focal)); 3884 let mut cmd = GuestCommand::new(&guest); 3885 #[cfg(target_arch = "x86_64")] 3886 let console_str: &str = "console=ttyS0"; 3887 #[cfg(target_arch = "aarch64")] 3888 let console_str: &str = "console=ttyAMA0"; 3889 3890 cmd.args(["--cpus", "boot=1"]) 3891 .args(["--memory", "size=512M"]) 3892 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3893 .args([ 3894 "--cmdline", 3895 DIRECT_KERNEL_BOOT_CMDLINE 3896 .replace("console=hvc0 ", console_str) 3897 .as_str(), 3898 ]) 3899 .default_disks() 3900 .default_net() 3901 .args(["--serial", "null"]) 3902 .args(["--console", "off"]) 3903 .capture_output(); 3904 3905 let mut child = cmd.spawn().unwrap(); 3906 3907 let r = std::panic::catch_unwind(|| { 3908 guest.wait_vm_boot(None).unwrap(); 3909 3910 // Test that there is a ttyS0 3911 assert_eq!( 3912 guest 3913 .ssh_command(GREP_SERIAL_IRQ_CMD) 3914 .unwrap() 3915 .trim() 3916 .parse::<u32>() 3917 .unwrap_or_default(), 3918 1 3919 ); 3920 }); 
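// Note (added for clarity): with --serial null the guest still enumerates its
// serial device (ttyS0 / ttyAMA0), which is why exactly one serial interrupt line
// is expected above, but whatever the guest writes to it is discarded by the VMM.
// That is also why, once the VMM has exited, the captured stdout is checked further
// below to NOT contain CONSOLE_TEST_STRING.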
3921 3922 let _ = child.kill(); 3923 let output = child.wait_with_output().unwrap(); 3924 handle_child_output(r, &output); 3925 3926 let r = std::panic::catch_unwind(|| { 3927 assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 3928 }); 3929 3930 handle_child_output(r, &output); 3931 } 3932 3933 #[test] 3934 fn test_serial_tty() { 3935 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3936 let guest = Guest::new(Box::new(focal)); 3937 3938 let kernel_path = direct_kernel_boot_path(); 3939 3940 #[cfg(target_arch = "x86_64")] 3941 let console_str: &str = "console=ttyS0"; 3942 #[cfg(target_arch = "aarch64")] 3943 let console_str: &str = "console=ttyAMA0"; 3944 3945 let mut child = GuestCommand::new(&guest) 3946 .args(["--cpus", "boot=1"]) 3947 .args(["--memory", "size=512M"]) 3948 .args(["--kernel", kernel_path.to_str().unwrap()]) 3949 .args([ 3950 "--cmdline", 3951 DIRECT_KERNEL_BOOT_CMDLINE 3952 .replace("console=hvc0 ", console_str) 3953 .as_str(), 3954 ]) 3955 .default_disks() 3956 .default_net() 3957 .args(["--serial", "tty"]) 3958 .args(["--console", "off"]) 3959 .capture_output() 3960 .spawn() 3961 .unwrap(); 3962 3963 let r = std::panic::catch_unwind(|| { 3964 guest.wait_vm_boot(None).unwrap(); 3965 3966 // Test that there is a ttyS0 3967 assert_eq!( 3968 guest 3969 .ssh_command(GREP_SERIAL_IRQ_CMD) 3970 .unwrap() 3971 .trim() 3972 .parse::<u32>() 3973 .unwrap_or_default(), 3974 1 3975 ); 3976 }); 3977 3978 // This sleep is needed to wait for the login prompt 3979 thread::sleep(std::time::Duration::new(2, 0)); 3980 3981 let _ = child.kill(); 3982 let output = child.wait_with_output().unwrap(); 3983 handle_child_output(r, &output); 3984 3985 let r = std::panic::catch_unwind(|| { 3986 assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 3987 }); 3988 3989 handle_child_output(r, &output); 3990 } 3991 3992 #[test] 3993 fn test_serial_file() { 3994 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3995 let guest = Guest::new(Box::new(focal)); 3996 3997 let serial_path = guest.tmp_dir.as_path().join("serial-output"); 3998 #[cfg(target_arch = "x86_64")] 3999 let console_str: &str = "console=ttyS0"; 4000 #[cfg(target_arch = "aarch64")] 4001 let console_str: &str = "console=ttyAMA0"; 4002 4003 let mut child = GuestCommand::new(&guest) 4004 .args(["--cpus", "boot=1"]) 4005 .args(["--memory", "size=512M"]) 4006 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4007 .args([ 4008 "--cmdline", 4009 DIRECT_KERNEL_BOOT_CMDLINE 4010 .replace("console=hvc0 ", console_str) 4011 .as_str(), 4012 ]) 4013 .default_disks() 4014 .default_net() 4015 .args([ 4016 "--serial", 4017 format!("file={}", serial_path.to_str().unwrap()).as_str(), 4018 ]) 4019 .capture_output() 4020 .spawn() 4021 .unwrap(); 4022 4023 let r = std::panic::catch_unwind(|| { 4024 guest.wait_vm_boot(None).unwrap(); 4025 4026 // Test that there is a ttyS0 4027 assert_eq!( 4028 guest 4029 .ssh_command(GREP_SERIAL_IRQ_CMD) 4030 .unwrap() 4031 .trim() 4032 .parse::<u32>() 4033 .unwrap_or_default(), 4034 1 4035 ); 4036 4037 guest.ssh_command("sudo shutdown -h now").unwrap(); 4038 }); 4039 4040 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4041 let _ = child.kill(); 4042 let output = child.wait_with_output().unwrap(); 4043 handle_child_output(r, &output); 4044 4045 let r = std::panic::catch_unwind(|| { 4046 // Check that the cloud-hypervisor binary actually terminated 4047 assert!(output.status.success()); 4048 4049 // Do this 
check after shutdown of the VM as an easy way to ensure 4050 // all writes are flushed to disk 4051 let mut f = std::fs::File::open(serial_path).unwrap(); 4052 let mut buf = String::new(); 4053 f.read_to_string(&mut buf).unwrap(); 4054 assert!(buf.contains(CONSOLE_TEST_STRING)); 4055 }); 4056 4057 handle_child_output(r, &output); 4058 } 4059 4060 #[test] 4061 fn test_pty_interaction() { 4062 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4063 let guest = Guest::new(Box::new(focal)); 4064 let api_socket = temp_api_path(&guest.tmp_dir); 4065 let serial_option = if cfg!(target_arch = "x86_64") { 4066 " console=ttyS0" 4067 } else { 4068 " console=ttyAMA0" 4069 }; 4070 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4071 4072 let mut child = GuestCommand::new(&guest) 4073 .args(["--cpus", "boot=1"]) 4074 .args(["--memory", "size=512M"]) 4075 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4076 .args(["--cmdline", &cmdline]) 4077 .default_disks() 4078 .default_net() 4079 .args(["--serial", "null"]) 4080 .args(["--console", "pty"]) 4081 .args(["--api-socket", &api_socket]) 4082 .spawn() 4083 .unwrap(); 4084 4085 let r = std::panic::catch_unwind(|| { 4086 guest.wait_vm_boot(None).unwrap(); 4087 // Get pty fd for console 4088 let console_path = get_pty_path(&api_socket, "console"); 4089 _test_pty_interaction(console_path); 4090 4091 guest.ssh_command("sudo shutdown -h now").unwrap(); 4092 }); 4093 4094 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4095 let _ = child.kill(); 4096 let output = child.wait_with_output().unwrap(); 4097 handle_child_output(r, &output); 4098 4099 let r = std::panic::catch_unwind(|| { 4100 // Check that the cloud-hypervisor binary actually terminated 4101 assert!(output.status.success()) 4102 }); 4103 handle_child_output(r, &output); 4104 } 4105 4106 #[test] 4107 fn test_serial_socket_interaction() { 4108 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4109 let guest = Guest::new(Box::new(focal)); 4110 let serial_socket = guest.tmp_dir.as_path().join("serial.socket"); 4111 let serial_socket_pty = guest.tmp_dir.as_path().join("serial.pty"); 4112 let serial_option = if cfg!(target_arch = "x86_64") { 4113 " console=ttyS0" 4114 } else { 4115 " console=ttyAMA0" 4116 }; 4117 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4118 4119 let mut child = GuestCommand::new(&guest) 4120 .args(["--cpus", "boot=1"]) 4121 .args(["--memory", "size=512M"]) 4122 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4123 .args(["--cmdline", &cmdline]) 4124 .default_disks() 4125 .default_net() 4126 .args(["--console", "null"]) 4127 .args([ 4128 "--serial", 4129 format!("socket={}", serial_socket.to_str().unwrap()).as_str(), 4130 ]) 4131 .spawn() 4132 .unwrap(); 4133 4134 let _ = std::panic::catch_unwind(|| { 4135 guest.wait_vm_boot(None).unwrap(); 4136 }); 4137 4138 let mut socat_command = Command::new("socat"); 4139 let socat_args = [ 4140 &format!("pty,link={},raw", serial_socket_pty.display()), 4141 &format!("UNIX-CONNECT:{}", serial_socket.display()), 4142 ]; 4143 socat_command.args(socat_args); 4144 4145 let mut socat_child = socat_command.spawn().unwrap(); 4146 thread::sleep(std::time::Duration::new(1, 0)); 4147 4148 let _ = std::panic::catch_unwind(|| { 4149 _test_pty_interaction(serial_socket_pty); 4150 }); 4151 4152 let _ = socat_child.kill(); 4153 4154 let r = std::panic::catch_unwind(|| { 4155 guest.ssh_command("sudo shutdown -h now").unwrap(); 4156 }); 4157 4158 
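// The teardown sequence that follows (and that most tests in this file share)
// could be summed up by a small helper along these lines; this is an
// illustrative sketch only and is not called anywhere:
#[allow(unused)]
fn _graceful_teardown(child: &mut Child, timeout_secs: u64) {
    // Give the VMM a chance to exit on its own after the guest shut down ...
    let _ = child.wait_timeout(std::time::Duration::from_secs(timeout_secs));
    // ... then make sure the process is gone before collecting its output.
    let _ = child.kill();
}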
let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4159 let _ = child.kill(); 4160 let output = child.wait_with_output().unwrap(); 4161 handle_child_output(r, &output); 4162 4163 let r = std::panic::catch_unwind(|| { 4164 // Check that the cloud-hypervisor binary actually terminated 4165 if !output.status.success() { 4166 panic!( 4167 "Cloud Hypervisor process failed to terminate gracefully: {:?}", 4168 output.status 4169 ); 4170 } 4171 }); 4172 handle_child_output(r, &output); 4173 } 4174 4175 #[test] 4176 fn test_virtio_console() { 4177 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4178 let guest = Guest::new(Box::new(focal)); 4179 4180 let kernel_path = direct_kernel_boot_path(); 4181 4182 let mut child = GuestCommand::new(&guest) 4183 .args(["--cpus", "boot=1"]) 4184 .args(["--memory", "size=512M"]) 4185 .args(["--kernel", kernel_path.to_str().unwrap()]) 4186 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4187 .default_disks() 4188 .default_net() 4189 .args(["--console", "tty"]) 4190 .args(["--serial", "null"]) 4191 .capture_output() 4192 .spawn() 4193 .unwrap(); 4194 4195 let text = String::from("On a branch floating down river a cricket, singing."); 4196 let cmd = format!("echo {text} | sudo tee /dev/hvc0"); 4197 4198 let r = std::panic::catch_unwind(|| { 4199 guest.wait_vm_boot(None).unwrap(); 4200 4201 assert!(guest 4202 .does_device_vendor_pair_match("0x1043", "0x1af4") 4203 .unwrap_or_default()); 4204 4205 guest.ssh_command(&cmd).unwrap(); 4206 }); 4207 4208 let _ = child.kill(); 4209 let output = child.wait_with_output().unwrap(); 4210 handle_child_output(r, &output); 4211 4212 let r = std::panic::catch_unwind(|| { 4213 assert!(String::from_utf8_lossy(&output.stdout).contains(&text)); 4214 }); 4215 4216 handle_child_output(r, &output); 4217 } 4218 4219 #[test] 4220 fn test_console_file() { 4221 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4222 let guest = Guest::new(Box::new(focal)); 4223 4224 let console_path = guest.tmp_dir.as_path().join("console-output"); 4225 let mut child = GuestCommand::new(&guest) 4226 .args(["--cpus", "boot=1"]) 4227 .args(["--memory", "size=512M"]) 4228 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4229 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4230 .default_disks() 4231 .default_net() 4232 .args([ 4233 "--console", 4234 format!("file={}", console_path.to_str().unwrap()).as_str(), 4235 ]) 4236 .capture_output() 4237 .spawn() 4238 .unwrap(); 4239 4240 guest.wait_vm_boot(None).unwrap(); 4241 4242 guest.ssh_command("sudo shutdown -h now").unwrap(); 4243 4244 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4245 let _ = child.kill(); 4246 let output = child.wait_with_output().unwrap(); 4247 4248 let r = std::panic::catch_unwind(|| { 4249 // Check that the cloud-hypervisor binary actually terminated 4250 assert!(output.status.success()); 4251 4252 // Do this check after shutdown of the VM as an easy way to ensure 4253 // all writes are flushed to disk 4254 let mut f = std::fs::File::open(console_path).unwrap(); 4255 let mut buf = String::new(); 4256 f.read_to_string(&mut buf).unwrap(); 4257 4258 if !buf.contains(CONSOLE_TEST_STRING) { 4259 eprintln!( 4260 "\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ====" 4261 ); 4262 } 4263 assert!(buf.contains(CONSOLE_TEST_STRING)); 4264 }); 4265 4266 handle_child_output(r, &output); 4267 } 4268 4269 #[test] 4270 #[cfg(target_arch = "x86_64")] 4271 #[cfg(not(feature = "mshv"))] 4272 // The VFIO 
integration test starts cloud-hypervisor guest with 3 TAP 4273 // backed networking interfaces, bound through a simple bridge on the host. 4274 // So if the nested cloud-hypervisor succeeds in getting a directly 4275 // assigned interface from its cloud-hypervisor host, we should be able to 4276 // ssh into it, and verify that it's running with the right kernel command 4277 // line (We tag the command line from cloud-hypervisor for that purpose). 4278 // The third device is added to validate that hotplug works correctly since 4279 // it is being added to the L2 VM through hotplugging mechanism. 4280 // Also, we pass-through a virtio-blk device to the L2 VM to test the 32-bit 4281 // vfio device support 4282 fn test_vfio() { 4283 setup_vfio_network_interfaces(); 4284 4285 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 4286 let guest = Guest::new_from_ip_range(Box::new(jammy), "172.18", 0); 4287 4288 let mut workload_path = dirs::home_dir().unwrap(); 4289 workload_path.push("workloads"); 4290 4291 let kernel_path = direct_kernel_boot_path(); 4292 4293 let mut vfio_path = workload_path.clone(); 4294 vfio_path.push("vfio"); 4295 4296 let mut cloud_init_vfio_base_path = vfio_path.clone(); 4297 cloud_init_vfio_base_path.push("cloudinit.img"); 4298 4299 // We copy our cloudinit into the vfio mount point, for the nested 4300 // cloud-hypervisor guest to use. 4301 rate_limited_copy( 4302 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 4303 &cloud_init_vfio_base_path, 4304 ) 4305 .expect("copying of cloud-init disk failed"); 4306 4307 let mut vfio_disk_path = workload_path.clone(); 4308 vfio_disk_path.push("vfio.img"); 4309 4310 // Create the vfio disk image 4311 let output = Command::new("mkfs.ext4") 4312 .arg("-d") 4313 .arg(vfio_path.to_str().unwrap()) 4314 .arg(vfio_disk_path.to_str().unwrap()) 4315 .arg("2g") 4316 .output() 4317 .unwrap(); 4318 if !output.status.success() { 4319 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 4320 panic!("mkfs.ext4 command generated an error"); 4321 } 4322 4323 let mut blk_file_path = workload_path; 4324 blk_file_path.push("blk.img"); 4325 4326 let vfio_tap0 = "vfio-tap0"; 4327 let vfio_tap1 = "vfio-tap1"; 4328 let vfio_tap2 = "vfio-tap2"; 4329 let vfio_tap3 = "vfio-tap3"; 4330 4331 let mut child = GuestCommand::new(&guest) 4332 .args(["--cpus", "boot=4"]) 4333 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 4334 .args(["--kernel", kernel_path.to_str().unwrap()]) 4335 .args([ 4336 "--disk", 4337 format!( 4338 "path={}", 4339 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 4340 ) 4341 .as_str(), 4342 format!( 4343 "path={}", 4344 guest.disk_config.disk(DiskType::CloudInit).unwrap() 4345 ) 4346 .as_str(), 4347 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 4348 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 4349 ]) 4350 .args([ 4351 "--cmdline", 4352 format!( 4353 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 4354 ) 4355 .as_str(), 4356 ]) 4357 .args([ 4358 "--net", 4359 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 4360 format!( 4361 "tap={},mac={},iommu=on", 4362 vfio_tap1, guest.network.l2_guest_mac1 4363 ) 4364 .as_str(), 4365 format!( 4366 "tap={},mac={},iommu=on", 4367 vfio_tap2, guest.network.l2_guest_mac2 4368 ) 4369 .as_str(), 4370 format!( 4371 "tap={},mac={},iommu=on", 4372 vfio_tap3, guest.network.l2_guest_mac3 4373 ) 4374 .as_str(), 4375 ]) 4376 .capture_output() 4377 .spawn() 4378 .unwrap(); 
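// Note on the L1 kernel command line above: `kvm-intel.nested=1` enables
// nested virtualization so that this L1 guest can itself run
// cloud-hypervisor, and `vfio_iommu_type1.allow_unsafe_interrupts` lets VFIO
// assign devices inside the guest even though interrupt remapping is not
// available there.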
4379 4380 thread::sleep(std::time::Duration::new(30, 0)); 4381 4382 let r = std::panic::catch_unwind(|| { 4383 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 4384 thread::sleep(std::time::Duration::new(120, 0)); 4385 4386 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 4387 // added to its kernel command line. 4388 // Let's ssh into it and verify that it's there. If it is it means 4389 // we're in the right guest (The L2 one) because the QEMU L1 guest 4390 // does not have this command line tag. 4391 assert!(check_matched_lines_count( 4392 guest.ssh_command_l2_1("cat /proc/cmdline").unwrap().trim(), 4393 vec!["VFIOTAG"], 4394 1 4395 )); 4396 4397 // Let's also verify from the second virtio-net device passed to 4398 // the L2 VM. 4399 assert!(check_matched_lines_count( 4400 guest.ssh_command_l2_2("cat /proc/cmdline").unwrap().trim(), 4401 vec!["VFIOTAG"], 4402 1 4403 )); 4404 4405 // Check the amount of PCI devices appearing in L2 VM. 4406 assert!(check_lines_count( 4407 guest 4408 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4409 .unwrap() 4410 .trim(), 4411 8 4412 )); 4413 4414 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 4415 assert!(check_matched_lines_count( 4416 guest.ssh_command_l2_1("lsblk").unwrap().trim(), 4417 vec!["vdc", "16M"], 4418 1 4419 )); 4420 4421 // Hotplug an extra virtio-net device through L2 VM. 4422 guest 4423 .ssh_command_l1( 4424 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 4425 ) 4426 .unwrap(); 4427 guest 4428 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 4429 .unwrap(); 4430 let vfio_hotplug_output = guest 4431 .ssh_command_l1( 4432 "sudo /mnt/ch-remote \ 4433 --api-socket=/tmp/ch_api.sock \ 4434 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 4435 ) 4436 .unwrap(); 4437 assert!(check_matched_lines_count( 4438 vfio_hotplug_output.trim(), 4439 vec!["{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}"], 4440 1 4441 )); 4442 4443 thread::sleep(std::time::Duration::new(10, 0)); 4444 4445 // Let's also verify from the third virtio-net device passed to 4446 // the L2 VM. This third device has been hotplugged through the L2 4447 // VM, so this is our way to validate hotplug works for VFIO PCI. 4448 assert!(check_matched_lines_count( 4449 guest.ssh_command_l2_3("cat /proc/cmdline").unwrap().trim(), 4450 vec!["VFIOTAG"], 4451 1 4452 )); 4453 4454 // Check the amount of PCI devices appearing in L2 VM. 4455 // There should be one more device than before, raising the count 4456 // up to 9 PCI devices. 4457 assert!(check_lines_count( 4458 guest 4459 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4460 .unwrap() 4461 .trim(), 4462 9 4463 )); 4464 4465 // Let's now verify that we can correctly remove the virtio-net 4466 // device through the "remove-device" command responsible for 4467 // unplugging VFIO devices. 4468 guest 4469 .ssh_command_l1( 4470 "sudo /mnt/ch-remote \ 4471 --api-socket=/tmp/ch_api.sock \ 4472 remove-device vfio123", 4473 ) 4474 .unwrap(); 4475 thread::sleep(std::time::Duration::new(10, 0)); 4476 4477 // Check the amount of PCI devices appearing in L2 VM is back down 4478 // to 8 devices. 4479 assert!(check_lines_count( 4480 guest 4481 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4482 .unwrap() 4483 .trim(), 4484 8 4485 )); 4486 4487 // Perform memory hotplug in L2 and validate the memory is showing 4488 // up as expected. 
In order to check, we will use the virtio-net 4489 // device already passed through L2 as a VFIO device, this will 4490 // verify that VFIO devices are functional with memory hotplug. 4491 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 4492 guest 4493 .ssh_command_l2_1( 4494 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 4495 ) 4496 .unwrap(); 4497 guest 4498 .ssh_command_l1( 4499 "sudo /mnt/ch-remote \ 4500 --api-socket=/tmp/ch_api.sock \ 4501 resize --memory=1073741824", 4502 ) 4503 .unwrap(); 4504 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 4505 }); 4506 4507 let _ = child.kill(); 4508 let output = child.wait_with_output().unwrap(); 4509 4510 cleanup_vfio_network_interfaces(); 4511 4512 handle_child_output(r, &output); 4513 } 4514 4515 #[test] 4516 fn test_direct_kernel_boot_noacpi() { 4517 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4518 let guest = Guest::new(Box::new(focal)); 4519 4520 let kernel_path = direct_kernel_boot_path(); 4521 4522 let mut child = GuestCommand::new(&guest) 4523 .args(["--cpus", "boot=1"]) 4524 .args(["--memory", "size=512M"]) 4525 .args(["--kernel", kernel_path.to_str().unwrap()]) 4526 .args([ 4527 "--cmdline", 4528 format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(), 4529 ]) 4530 .default_disks() 4531 .default_net() 4532 .capture_output() 4533 .spawn() 4534 .unwrap(); 4535 4536 let r = std::panic::catch_unwind(|| { 4537 guest.wait_vm_boot(None).unwrap(); 4538 4539 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 4540 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4541 }); 4542 4543 let _ = child.kill(); 4544 let output = child.wait_with_output().unwrap(); 4545 4546 handle_child_output(r, &output); 4547 } 4548 4549 #[test] 4550 fn test_virtio_vsock() { 4551 _test_virtio_vsock(false) 4552 } 4553 4554 #[test] 4555 fn test_virtio_vsock_hotplug() { 4556 _test_virtio_vsock(true); 4557 } 4558 4559 #[test] 4560 fn test_api_http_shutdown() { 4561 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4562 let guest = Guest::new(Box::new(focal)); 4563 4564 _test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest) 4565 } 4566 4567 #[test] 4568 fn test_api_http_delete() { 4569 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4570 let guest = Guest::new(Box::new(focal)); 4571 4572 _test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest); 4573 } 4574 4575 #[test] 4576 fn test_api_http_pause_resume() { 4577 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4578 let guest = Guest::new(Box::new(focal)); 4579 4580 _test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest) 4581 } 4582 4583 #[test] 4584 fn test_api_http_create_boot() { 4585 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4586 let guest = Guest::new(Box::new(focal)); 4587 4588 _test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest) 4589 } 4590 4591 #[test] 4592 fn test_virtio_iommu() { 4593 _test_virtio_iommu(cfg!(target_arch = "x86_64")) 4594 } 4595 4596 #[test] 4597 // We cannot force the software running in the guest to reprogram the BAR 4598 // with some different addresses, but we have a reliable way of testing it 4599 // with a standard Linux kernel. 4600 // By removing a device from the PCI tree, and then rescanning the tree, 4601 // Linux consistently chooses to reorganize the PCI device BARs to other 4602 // locations in the guest address space. 
4603 // This test creates a dedicated PCI network device to be checked as being 4604 // properly probed first, then removing it, and adding it again by doing a 4605 // rescan. 4606 fn test_pci_bar_reprogramming() { 4607 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4608 let guest = Guest::new(Box::new(focal)); 4609 4610 #[cfg(target_arch = "x86_64")] 4611 let kernel_path = direct_kernel_boot_path(); 4612 #[cfg(target_arch = "aarch64")] 4613 let kernel_path = edk2_path(); 4614 4615 let mut child = GuestCommand::new(&guest) 4616 .args(["--cpus", "boot=1"]) 4617 .args(["--memory", "size=512M"]) 4618 .args(["--kernel", kernel_path.to_str().unwrap()]) 4619 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4620 .default_disks() 4621 .args([ 4622 "--net", 4623 guest.default_net_string().as_str(), 4624 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 4625 ]) 4626 .capture_output() 4627 .spawn() 4628 .unwrap(); 4629 4630 let r = std::panic::catch_unwind(|| { 4631 guest.wait_vm_boot(None).unwrap(); 4632 4633 // 2 network interfaces + default localhost ==> 3 interfaces 4634 assert_eq!( 4635 guest 4636 .ssh_command("ip -o link | wc -l") 4637 .unwrap() 4638 .trim() 4639 .parse::<u32>() 4640 .unwrap_or_default(), 4641 3 4642 ); 4643 4644 let init_bar_addr = guest 4645 .ssh_command( 4646 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource", 4647 ) 4648 .unwrap(); 4649 4650 // Remove the PCI device 4651 guest 4652 .ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove") 4653 .unwrap(); 4654 4655 // Only 1 network interface left + default localhost ==> 2 interfaces 4656 assert_eq!( 4657 guest 4658 .ssh_command("ip -o link | wc -l") 4659 .unwrap() 4660 .trim() 4661 .parse::<u32>() 4662 .unwrap_or_default(), 4663 2 4664 ); 4665 4666 // Remove the PCI device 4667 guest 4668 .ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan") 4669 .unwrap(); 4670 4671 // Back to 2 network interface + default localhost ==> 3 interfaces 4672 assert_eq!( 4673 guest 4674 .ssh_command("ip -o link | wc -l") 4675 .unwrap() 4676 .trim() 4677 .parse::<u32>() 4678 .unwrap_or_default(), 4679 3 4680 ); 4681 4682 let new_bar_addr = guest 4683 .ssh_command( 4684 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource", 4685 ) 4686 .unwrap(); 4687 4688 // Let's compare the BAR addresses for our virtio-net device. 4689 // They should be different as we expect the BAR reprogramming 4690 // to have happened. 
4691 assert_ne!(init_bar_addr, new_bar_addr); 4692 }); 4693 4694 let _ = child.kill(); 4695 let output = child.wait_with_output().unwrap(); 4696 4697 handle_child_output(r, &output); 4698 } 4699 4700 #[test] 4701 fn test_memory_mergeable_off() { 4702 test_memory_mergeable(false) 4703 } 4704 4705 #[test] 4706 #[cfg(target_arch = "x86_64")] 4707 fn test_cpu_hotplug() { 4708 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4709 let guest = Guest::new(Box::new(focal)); 4710 let api_socket = temp_api_path(&guest.tmp_dir); 4711 4712 let kernel_path = direct_kernel_boot_path(); 4713 4714 let mut child = GuestCommand::new(&guest) 4715 .args(["--cpus", "boot=2,max=4"]) 4716 .args(["--memory", "size=512M"]) 4717 .args(["--kernel", kernel_path.to_str().unwrap()]) 4718 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4719 .default_disks() 4720 .default_net() 4721 .args(["--api-socket", &api_socket]) 4722 .capture_output() 4723 .spawn() 4724 .unwrap(); 4725 4726 let r = std::panic::catch_unwind(|| { 4727 guest.wait_vm_boot(None).unwrap(); 4728 4729 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4730 4731 // Resize the VM 4732 let desired_vcpus = 4; 4733 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4734 4735 guest 4736 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4737 .unwrap(); 4738 guest 4739 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4740 .unwrap(); 4741 thread::sleep(std::time::Duration::new(10, 0)); 4742 assert_eq!( 4743 guest.get_cpu_count().unwrap_or_default(), 4744 u32::from(desired_vcpus) 4745 ); 4746 4747 guest.reboot_linux(0, None); 4748 4749 assert_eq!( 4750 guest.get_cpu_count().unwrap_or_default(), 4751 u32::from(desired_vcpus) 4752 ); 4753 4754 // Resize the VM 4755 let desired_vcpus = 2; 4756 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4757 4758 thread::sleep(std::time::Duration::new(10, 0)); 4759 assert_eq!( 4760 guest.get_cpu_count().unwrap_or_default(), 4761 u32::from(desired_vcpus) 4762 ); 4763 4764 // Resize the VM back up to 4 4765 let desired_vcpus = 4; 4766 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4767 4768 guest 4769 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4770 .unwrap(); 4771 guest 4772 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4773 .unwrap(); 4774 thread::sleep(std::time::Duration::new(10, 0)); 4775 assert_eq!( 4776 guest.get_cpu_count().unwrap_or_default(), 4777 u32::from(desired_vcpus) 4778 ); 4779 }); 4780 4781 let _ = child.kill(); 4782 let output = child.wait_with_output().unwrap(); 4783 4784 handle_child_output(r, &output); 4785 } 4786 4787 #[test] 4788 fn test_memory_hotplug() { 4789 #[cfg(target_arch = "aarch64")] 4790 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 4791 #[cfg(target_arch = "x86_64")] 4792 let focal_image = FOCAL_IMAGE_NAME.to_string(); 4793 let focal = UbuntuDiskConfig::new(focal_image); 4794 let guest = Guest::new(Box::new(focal)); 4795 let api_socket = temp_api_path(&guest.tmp_dir); 4796 4797 #[cfg(target_arch = "aarch64")] 4798 let kernel_path = edk2_path(); 4799 #[cfg(target_arch = "x86_64")] 4800 let kernel_path = direct_kernel_boot_path(); 4801 4802 let mut child = GuestCommand::new(&guest) 4803 .args(["--cpus", "boot=2,max=4"]) 4804 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4805 .args(["--kernel", kernel_path.to_str().unwrap()]) 4806 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4807 .default_disks() 4808 .default_net() 
4809 .args(["--balloon", "size=0"]) 4810 .args(["--api-socket", &api_socket]) 4811 .capture_output() 4812 .spawn() 4813 .unwrap(); 4814 4815 let r = std::panic::catch_unwind(|| { 4816 guest.wait_vm_boot(None).unwrap(); 4817 4818 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4819 4820 guest.enable_memory_hotplug(); 4821 4822 // Add RAM to the VM 4823 let desired_ram = 1024 << 20; 4824 resize_command(&api_socket, None, Some(desired_ram), None, None); 4825 4826 thread::sleep(std::time::Duration::new(10, 0)); 4827 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4828 4829 // Use balloon to remove RAM from the VM 4830 let desired_balloon = 512 << 20; 4831 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4832 4833 thread::sleep(std::time::Duration::new(10, 0)); 4834 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4835 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4836 4837 guest.reboot_linux(0, None); 4838 4839 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4840 4841 // Use balloon add RAM to the VM 4842 let desired_balloon = 0; 4843 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4844 4845 thread::sleep(std::time::Duration::new(10, 0)); 4846 4847 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4848 4849 guest.enable_memory_hotplug(); 4850 4851 // Add RAM to the VM 4852 let desired_ram = 2048 << 20; 4853 resize_command(&api_socket, None, Some(desired_ram), None, None); 4854 4855 thread::sleep(std::time::Duration::new(10, 0)); 4856 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4857 4858 // Remove RAM to the VM (only applies after reboot) 4859 let desired_ram = 1024 << 20; 4860 resize_command(&api_socket, None, Some(desired_ram), None, None); 4861 4862 guest.reboot_linux(1, None); 4863 4864 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4865 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4866 }); 4867 4868 let _ = child.kill(); 4869 let output = child.wait_with_output().unwrap(); 4870 4871 handle_child_output(r, &output); 4872 } 4873 4874 #[test] 4875 #[cfg(not(feature = "mshv"))] 4876 fn test_virtio_mem() { 4877 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4878 let guest = Guest::new(Box::new(focal)); 4879 let api_socket = temp_api_path(&guest.tmp_dir); 4880 4881 let kernel_path = direct_kernel_boot_path(); 4882 4883 let mut child = GuestCommand::new(&guest) 4884 .args(["--cpus", "boot=2,max=4"]) 4885 .args([ 4886 "--memory", 4887 "size=512M,hotplug_method=virtio-mem,hotplug_size=8192M", 4888 ]) 4889 .args(["--kernel", kernel_path.to_str().unwrap()]) 4890 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4891 .default_disks() 4892 .default_net() 4893 .args(["--api-socket", &api_socket]) 4894 .capture_output() 4895 .spawn() 4896 .unwrap(); 4897 4898 let r = std::panic::catch_unwind(|| { 4899 guest.wait_vm_boot(None).unwrap(); 4900 4901 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4902 4903 guest.enable_memory_hotplug(); 4904 4905 // Add RAM to the VM 4906 let desired_ram = 1024 << 20; 4907 resize_command(&api_socket, None, Some(desired_ram), None, None); 4908 4909 thread::sleep(std::time::Duration::new(10, 0)); 4910 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4911 4912 // Add RAM to the VM 4913 let desired_ram = 2048 << 20; 4914 resize_command(&api_socket, None, Some(desired_ram), None, None); 4915 4916 
thread::sleep(std::time::Duration::new(10, 0)); 4917 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4918 4919 // Remove RAM from the VM 4920 let desired_ram = 1024 << 20; 4921 resize_command(&api_socket, None, Some(desired_ram), None, None); 4922 4923 thread::sleep(std::time::Duration::new(10, 0)); 4924 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4925 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4926 4927 guest.reboot_linux(0, None); 4928 4929 // Check the amount of memory after reboot is 1GiB 4930 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4931 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4932 4933 // Check we can still resize to 512MiB 4934 let desired_ram = 512 << 20; 4935 resize_command(&api_socket, None, Some(desired_ram), None, None); 4936 thread::sleep(std::time::Duration::new(10, 0)); 4937 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4938 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4939 }); 4940 4941 let _ = child.kill(); 4942 let output = child.wait_with_output().unwrap(); 4943 4944 handle_child_output(r, &output); 4945 } 4946 4947 #[test] 4948 #[cfg(target_arch = "x86_64")] 4949 #[cfg(not(feature = "mshv"))] 4950 // Test both vCPU and memory resizing together 4951 fn test_resize() { 4952 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4953 let guest = Guest::new(Box::new(focal)); 4954 let api_socket = temp_api_path(&guest.tmp_dir); 4955 4956 let kernel_path = direct_kernel_boot_path(); 4957 4958 let mut child = GuestCommand::new(&guest) 4959 .args(["--cpus", "boot=2,max=4"]) 4960 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4961 .args(["--kernel", kernel_path.to_str().unwrap()]) 4962 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4963 .default_disks() 4964 .default_net() 4965 .args(["--api-socket", &api_socket]) 4966 .capture_output() 4967 .spawn() 4968 .unwrap(); 4969 4970 let r = std::panic::catch_unwind(|| { 4971 guest.wait_vm_boot(None).unwrap(); 4972 4973 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4974 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4975 4976 guest.enable_memory_hotplug(); 4977 4978 // Resize the VM 4979 let desired_vcpus = 4; 4980 let desired_ram = 1024 << 20; 4981 resize_command( 4982 &api_socket, 4983 Some(desired_vcpus), 4984 Some(desired_ram), 4985 None, 4986 None, 4987 ); 4988 4989 guest 4990 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4991 .unwrap(); 4992 guest 4993 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4994 .unwrap(); 4995 thread::sleep(std::time::Duration::new(10, 0)); 4996 assert_eq!( 4997 guest.get_cpu_count().unwrap_or_default(), 4998 u32::from(desired_vcpus) 4999 ); 5000 5001 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5002 }); 5003 5004 let _ = child.kill(); 5005 let output = child.wait_with_output().unwrap(); 5006 5007 handle_child_output(r, &output); 5008 } 5009 5010 #[test] 5011 fn test_memory_overhead() { 5012 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5013 let guest = Guest::new(Box::new(focal)); 5014 5015 let kernel_path = direct_kernel_boot_path(); 5016 5017 let guest_memory_size_kb = 512 * 1024; 5018 5019 let mut child = GuestCommand::new(&guest) 5020 .args(["--cpus", "boot=1"]) 5021 .args(["--memory", format!("size={guest_memory_size_kb}K").as_str()]) 5022 .args(["--kernel", kernel_path.to_str().unwrap()]) 5023 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE]) 5024 .default_net() 5025 .default_disks() 5026 .capture_output() 5027 .spawn() 5028 .unwrap(); 5029 5030 guest.wait_vm_boot(None).unwrap(); 5031 5032 let r = std::panic::catch_unwind(|| { 5033 let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb); 5034 eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}"); 5035 assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB); 5036 }); 5037 5038 let _ = child.kill(); 5039 let output = child.wait_with_output().unwrap(); 5040 5041 handle_child_output(r, &output); 5042 } 5043 5044 #[test] 5045 fn test_disk_hotplug() { 5046 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5047 let guest = Guest::new(Box::new(focal)); 5048 5049 #[cfg(target_arch = "x86_64")] 5050 let kernel_path = direct_kernel_boot_path(); 5051 #[cfg(target_arch = "aarch64")] 5052 let kernel_path = edk2_path(); 5053 5054 let api_socket = temp_api_path(&guest.tmp_dir); 5055 5056 let mut child = GuestCommand::new(&guest) 5057 .args(["--api-socket", &api_socket]) 5058 .args(["--cpus", "boot=1"]) 5059 .args(["--memory", "size=512M"]) 5060 .args(["--kernel", kernel_path.to_str().unwrap()]) 5061 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5062 .default_disks() 5063 .default_net() 5064 .capture_output() 5065 .spawn() 5066 .unwrap(); 5067 5068 let r = std::panic::catch_unwind(|| { 5069 guest.wait_vm_boot(None).unwrap(); 5070 5071 // Check /dev/vdc is not there 5072 assert_eq!( 5073 guest 5074 .ssh_command("lsblk | grep -c vdc.*16M || true") 5075 .unwrap() 5076 .trim() 5077 .parse::<u32>() 5078 .unwrap_or(1), 5079 0 5080 ); 5081 5082 // Now let's add the extra disk. 5083 let mut blk_file_path = dirs::home_dir().unwrap(); 5084 blk_file_path.push("workloads"); 5085 blk_file_path.push("blk.img"); 5086 let (cmd_success, cmd_output) = remote_command_w_output( 5087 &api_socket, 5088 "add-disk", 5089 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5090 ); 5091 assert!(cmd_success); 5092 assert!(String::from_utf8_lossy(&cmd_output) 5093 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5094 5095 thread::sleep(std::time::Duration::new(10, 0)); 5096 5097 // Check that /dev/vdc exists and the block size is 16M. 5098 assert_eq!( 5099 guest 5100 .ssh_command("lsblk | grep vdc | grep -c 16M") 5101 .unwrap() 5102 .trim() 5103 .parse::<u32>() 5104 .unwrap_or_default(), 5105 1 5106 ); 5107 // And check the block device can be read. 5108 guest 5109 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5110 .unwrap(); 5111 5112 // Let's remove it the extra disk. 5113 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5114 thread::sleep(std::time::Duration::new(5, 0)); 5115 // And check /dev/vdc is not there 5116 assert_eq!( 5117 guest 5118 .ssh_command("lsblk | grep -c vdc.*16M || true") 5119 .unwrap() 5120 .trim() 5121 .parse::<u32>() 5122 .unwrap_or(1), 5123 0 5124 ); 5125 5126 // And add it back to validate unplug did work correctly. 5127 let (cmd_success, cmd_output) = remote_command_w_output( 5128 &api_socket, 5129 "add-disk", 5130 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5131 ); 5132 assert!(cmd_success); 5133 assert!(String::from_utf8_lossy(&cmd_output) 5134 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5135 5136 thread::sleep(std::time::Duration::new(10, 0)); 5137 5138 // Check that /dev/vdc exists and the block size is 16M. 
5139 assert_eq!( 5140 guest 5141 .ssh_command("lsblk | grep vdc | grep -c 16M") 5142 .unwrap() 5143 .trim() 5144 .parse::<u32>() 5145 .unwrap_or_default(), 5146 1 5147 ); 5148 // And check the block device can be read. 5149 guest 5150 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5151 .unwrap(); 5152 5153 // Reboot the VM. 5154 guest.reboot_linux(0, None); 5155 5156 // Check still there after reboot 5157 assert_eq!( 5158 guest 5159 .ssh_command("lsblk | grep vdc | grep -c 16M") 5160 .unwrap() 5161 .trim() 5162 .parse::<u32>() 5163 .unwrap_or_default(), 5164 1 5165 ); 5166 5167 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5168 5169 thread::sleep(std::time::Duration::new(20, 0)); 5170 5171 // Check device has gone away 5172 assert_eq!( 5173 guest 5174 .ssh_command("lsblk | grep -c vdc.*16M || true") 5175 .unwrap() 5176 .trim() 5177 .parse::<u32>() 5178 .unwrap_or(1), 5179 0 5180 ); 5181 5182 guest.reboot_linux(1, None); 5183 5184 // Check device still absent 5185 assert_eq!( 5186 guest 5187 .ssh_command("lsblk | grep -c vdc.*16M || true") 5188 .unwrap() 5189 .trim() 5190 .parse::<u32>() 5191 .unwrap_or(1), 5192 0 5193 ); 5194 }); 5195 5196 let _ = child.kill(); 5197 let output = child.wait_with_output().unwrap(); 5198 5199 handle_child_output(r, &output); 5200 } 5201 5202 fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String { 5203 const LOOP_CONFIGURE: u64 = 0x4c0a; 5204 const LOOP_CTL_GET_FREE: u64 = 0x4c82; 5205 const LOOP_CTL_PATH: &str = "/dev/loop-control"; 5206 const LOOP_DEVICE_PREFIX: &str = "/dev/loop"; 5207 5208 #[repr(C)] 5209 struct LoopInfo64 { 5210 lo_device: u64, 5211 lo_inode: u64, 5212 lo_rdevice: u64, 5213 lo_offset: u64, 5214 lo_sizelimit: u64, 5215 lo_number: u32, 5216 lo_encrypt_type: u32, 5217 lo_encrypt_key_size: u32, 5218 lo_flags: u32, 5219 lo_file_name: [u8; 64], 5220 lo_crypt_name: [u8; 64], 5221 lo_encrypt_key: [u8; 32], 5222 lo_init: [u64; 2], 5223 } 5224 5225 impl Default for LoopInfo64 { 5226 fn default() -> Self { 5227 LoopInfo64 { 5228 lo_device: 0, 5229 lo_inode: 0, 5230 lo_rdevice: 0, 5231 lo_offset: 0, 5232 lo_sizelimit: 0, 5233 lo_number: 0, 5234 lo_encrypt_type: 0, 5235 lo_encrypt_key_size: 0, 5236 lo_flags: 0, 5237 lo_file_name: [0; 64], 5238 lo_crypt_name: [0; 64], 5239 lo_encrypt_key: [0; 32], 5240 lo_init: [0; 2], 5241 } 5242 } 5243 } 5244 5245 #[derive(Default)] 5246 #[repr(C)] 5247 struct LoopConfig { 5248 fd: u32, 5249 block_size: u32, 5250 info: LoopInfo64, 5251 _reserved: [u64; 8], 5252 } 5253 5254 // Open loop-control device 5255 let loop_ctl_file = OpenOptions::new() 5256 .read(true) 5257 .write(true) 5258 .open(LOOP_CTL_PATH) 5259 .unwrap(); 5260 5261 // Request a free loop device 5262 let loop_device_number = 5263 unsafe { libc::ioctl(loop_ctl_file.as_raw_fd(), LOOP_CTL_GET_FREE as _) }; 5264 5265 if loop_device_number < 0 { 5266 panic!("Couldn't find a free loop device"); 5267 } 5268 5269 // Create loop device path 5270 let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}"); 5271 5272 // Open loop device 5273 let loop_device_file = OpenOptions::new() 5274 .read(true) 5275 .write(true) 5276 .open(&loop_device_path) 5277 .unwrap(); 5278 5279 // Open backing file 5280 let backing_file = OpenOptions::new() 5281 .read(true) 5282 .write(true) 5283 .open(backing_file_path) 5284 .unwrap(); 5285 5286 let loop_config = LoopConfig { 5287 fd: backing_file.as_raw_fd() as u32, 5288 block_size, 5289 ..Default::default() 5290 }; 5291 5292 
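// LOOP_CONFIGURE attaches the backing file and applies the requested logical
// block size in a single ioctl; it is roughly the programmatic equivalent of
// `losetup --sector-size <size> --find --show <backing_file>`. The ioctl can
// fail transiently right after LOOP_CTL_GET_FREE handed the device out, so
// the configuration below is retried a few times before giving up.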
for i in 0..num_retries { 5293 let ret = unsafe { 5294 libc::ioctl( 5295 loop_device_file.as_raw_fd(), 5296 LOOP_CONFIGURE as _, 5297 &loop_config, 5298 ) 5299 }; 5300 if ret != 0 { 5301 if i < num_retries - 1 { 5302 println!( 5303 "Iteration {}: Failed to configure the loop device {}: {}", 5304 i, 5305 loop_device_path, 5306 std::io::Error::last_os_error() 5307 ); 5308 } else { 5309 panic!( 5310 "Failed {} times trying to configure the loop device {}: {}", 5311 num_retries, 5312 loop_device_path, 5313 std::io::Error::last_os_error() 5314 ); 5315 } 5316 } else { 5317 break; 5318 } 5319 5320 // Wait for a bit before retrying 5321 thread::sleep(std::time::Duration::new(5, 0)); 5322 } 5323 5324 loop_device_path 5325 } 5326 5327 #[test] 5328 fn test_virtio_block_topology() { 5329 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5330 let guest = Guest::new(Box::new(focal)); 5331 5332 let kernel_path = direct_kernel_boot_path(); 5333 let test_disk_path = guest.tmp_dir.as_path().join("test.img"); 5334 5335 let output = exec_host_command_output( 5336 format!( 5337 "qemu-img create -f raw {} 16M", 5338 test_disk_path.to_str().unwrap() 5339 ) 5340 .as_str(), 5341 ); 5342 if !output.status.success() { 5343 let stdout = String::from_utf8_lossy(&output.stdout); 5344 let stderr = String::from_utf8_lossy(&output.stderr); 5345 panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}"); 5346 } 5347 5348 let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5); 5349 5350 let mut child = GuestCommand::new(&guest) 5351 .args(["--cpus", "boot=1"]) 5352 .args(["--memory", "size=512M"]) 5353 .args(["--kernel", kernel_path.to_str().unwrap()]) 5354 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5355 .args([ 5356 "--disk", 5357 format!( 5358 "path={}", 5359 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5360 ) 5361 .as_str(), 5362 format!( 5363 "path={}", 5364 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5365 ) 5366 .as_str(), 5367 format!("path={}", &loop_dev).as_str(), 5368 ]) 5369 .default_net() 5370 .capture_output() 5371 .spawn() 5372 .unwrap(); 5373 5374 let r = std::panic::catch_unwind(|| { 5375 guest.wait_vm_boot(None).unwrap(); 5376 5377 // MIN-IO column 5378 assert_eq!( 5379 guest 5380 .ssh_command("lsblk -t| grep vdc | awk '{print $3}'") 5381 .unwrap() 5382 .trim() 5383 .parse::<u32>() 5384 .unwrap_or_default(), 5385 4096 5386 ); 5387 // PHY-SEC column 5388 assert_eq!( 5389 guest 5390 .ssh_command("lsblk -t| grep vdc | awk '{print $5}'") 5391 .unwrap() 5392 .trim() 5393 .parse::<u32>() 5394 .unwrap_or_default(), 5395 4096 5396 ); 5397 // LOG-SEC column 5398 assert_eq!( 5399 guest 5400 .ssh_command("lsblk -t| grep vdc | awk '{print $6}'") 5401 .unwrap() 5402 .trim() 5403 .parse::<u32>() 5404 .unwrap_or_default(), 5405 4096 5406 ); 5407 }); 5408 5409 let _ = child.kill(); 5410 let output = child.wait_with_output().unwrap(); 5411 5412 handle_child_output(r, &output); 5413 5414 Command::new("losetup") 5415 .args(["-d", &loop_dev]) 5416 .output() 5417 .expect("loop device not found"); 5418 } 5419 5420 #[test] 5421 fn test_virtio_balloon_deflate_on_oom() { 5422 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5423 let guest = Guest::new(Box::new(focal)); 5424 5425 let kernel_path = direct_kernel_boot_path(); 5426 5427 let api_socket = temp_api_path(&guest.tmp_dir); 5428 5429 //Let's start a 4G guest with balloon occupied 2G memory 5430 let mut child = GuestCommand::new(&guest) 5431 .args(["--api-socket", &api_socket]) 5432 
.args(["--cpus", "boot=1"]) 5433 .args(["--memory", "size=4G"]) 5434 .args(["--kernel", kernel_path.to_str().unwrap()]) 5435 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5436 .args(["--balloon", "size=2G,deflate_on_oom=on"]) 5437 .default_disks() 5438 .default_net() 5439 .capture_output() 5440 .spawn() 5441 .unwrap(); 5442 5443 let r = std::panic::catch_unwind(|| { 5444 guest.wait_vm_boot(None).unwrap(); 5445 5446 // Wait for balloon memory's initialization and check its size. 5447 // The virtio-balloon driver might take a few seconds to report the 5448 // balloon effective size back to the VMM. 5449 thread::sleep(std::time::Duration::new(20, 0)); 5450 5451 let orig_balloon = balloon_size(&api_socket); 5452 println!("The original balloon memory size is {orig_balloon} bytes"); 5453 assert!(orig_balloon == 2147483648); 5454 5455 // Two steps to verify if the 'deflate_on_oom' parameter works. 5456 // 1st: run a command to trigger an OOM in the guest. 5457 guest 5458 .ssh_command("echo f | sudo tee /proc/sysrq-trigger") 5459 .unwrap(); 5460 5461 // Give some time for the OOM to happen in the guest and be reported 5462 // back to the host. 5463 thread::sleep(std::time::Duration::new(20, 0)); 5464 5465 // 2nd: check balloon_mem's value to verify balloon has been automatically deflated 5466 let deflated_balloon = balloon_size(&api_socket); 5467 println!("After deflating, balloon memory size is {deflated_balloon} bytes"); 5468 // Verify the balloon size deflated 5469 assert!(deflated_balloon < 2147483648); 5470 }); 5471 5472 let _ = child.kill(); 5473 let output = child.wait_with_output().unwrap(); 5474 5475 handle_child_output(r, &output); 5476 } 5477 5478 #[test] 5479 #[cfg(not(feature = "mshv"))] 5480 fn test_virtio_balloon_free_page_reporting() { 5481 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5482 let guest = Guest::new(Box::new(focal)); 5483 5484 //Let's start a 4G guest with balloon occupied 2G memory 5485 let mut child = GuestCommand::new(&guest) 5486 .args(["--cpus", "boot=1"]) 5487 .args(["--memory", "size=4G"]) 5488 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 5489 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5490 .args(["--balloon", "size=0,free_page_reporting=on"]) 5491 .default_disks() 5492 .default_net() 5493 .capture_output() 5494 .spawn() 5495 .unwrap(); 5496 5497 let pid = child.id(); 5498 let r = std::panic::catch_unwind(|| { 5499 guest.wait_vm_boot(None).unwrap(); 5500 5501 // Check the initial RSS is less than 1GiB 5502 let rss = process_rss_kib(pid); 5503 println!("RSS {rss} < 1048576"); 5504 assert!(rss < 1048576); 5505 5506 // Spawn a command inside the guest to consume 2GiB of RAM for 60 5507 // seconds 5508 let guest_ip = guest.network.guest_ip.clone(); 5509 thread::spawn(move || { 5510 ssh_command_ip( 5511 "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60", 5512 &guest_ip, 5513 DEFAULT_SSH_RETRIES, 5514 DEFAULT_SSH_TIMEOUT, 5515 ) 5516 .unwrap(); 5517 }); 5518 5519 // Wait for 50 seconds to make sure the stress command is consuming 5520 // the expected amount of memory. 5521 thread::sleep(std::time::Duration::new(50, 0)); 5522 let rss = process_rss_kib(pid); 5523 println!("RSS {rss} >= 2097152"); 5524 assert!(rss >= 2097152); 5525 5526 // Wait for an extra minute to make sure the stress command has 5527 // completed and that the guest reported the free pages to the VMM 5528 // through the virtio-balloon device. We expect the RSS to be under 5529 // 2GiB. 
5530 thread::sleep(std::time::Duration::new(60, 0)); 5531 let rss = process_rss_kib(pid); 5532 println!("RSS {rss} < 2097152"); 5533 assert!(rss < 2097152); 5534 }); 5535 5536 let _ = child.kill(); 5537 let output = child.wait_with_output().unwrap(); 5538 5539 handle_child_output(r, &output); 5540 } 5541 5542 #[test] 5543 fn test_pmem_hotplug() { 5544 _test_pmem_hotplug(None) 5545 } 5546 5547 #[test] 5548 fn test_pmem_multi_segment_hotplug() { 5549 _test_pmem_hotplug(Some(15)) 5550 } 5551 5552 fn _test_pmem_hotplug(pci_segment: Option<u16>) { 5553 #[cfg(target_arch = "aarch64")] 5554 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 5555 #[cfg(target_arch = "x86_64")] 5556 let focal_image = FOCAL_IMAGE_NAME.to_string(); 5557 let focal = UbuntuDiskConfig::new(focal_image); 5558 let guest = Guest::new(Box::new(focal)); 5559 5560 #[cfg(target_arch = "x86_64")] 5561 let kernel_path = direct_kernel_boot_path(); 5562 #[cfg(target_arch = "aarch64")] 5563 let kernel_path = edk2_path(); 5564 5565 let api_socket = temp_api_path(&guest.tmp_dir); 5566 5567 let mut cmd = GuestCommand::new(&guest); 5568 5569 cmd.args(["--api-socket", &api_socket]) 5570 .args(["--cpus", "boot=1"]) 5571 .args(["--memory", "size=512M"]) 5572 .args(["--kernel", kernel_path.to_str().unwrap()]) 5573 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5574 .default_disks() 5575 .default_net() 5576 .capture_output(); 5577 5578 if pci_segment.is_some() { 5579 cmd.args([ 5580 "--platform", 5581 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5582 ]); 5583 } 5584 5585 let mut child = cmd.spawn().unwrap(); 5586 5587 let r = std::panic::catch_unwind(|| { 5588 guest.wait_vm_boot(None).unwrap(); 5589 5590 // Check /dev/pmem0 is not there 5591 assert_eq!( 5592 guest 5593 .ssh_command("lsblk | grep -c pmem0 || true") 5594 .unwrap() 5595 .trim() 5596 .parse::<u32>() 5597 .unwrap_or(1), 5598 0 5599 ); 5600 5601 let pmem_temp_file = TempFile::new().unwrap(); 5602 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 5603 let (cmd_success, cmd_output) = remote_command_w_output( 5604 &api_socket, 5605 "add-pmem", 5606 Some(&format!( 5607 "file={},id=test0{}", 5608 pmem_temp_file.as_path().to_str().unwrap(), 5609 if let Some(pci_segment) = pci_segment { 5610 format!(",pci_segment={pci_segment}") 5611 } else { 5612 "".to_owned() 5613 } 5614 )), 5615 ); 5616 assert!(cmd_success); 5617 if let Some(pci_segment) = pci_segment { 5618 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5619 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5620 ))); 5621 } else { 5622 assert!(String::from_utf8_lossy(&cmd_output) 5623 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5624 } 5625 5626 // Check that /dev/pmem0 exists and the block size is 128M 5627 assert_eq!( 5628 guest 5629 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5630 .unwrap() 5631 .trim() 5632 .parse::<u32>() 5633 .unwrap_or_default(), 5634 1 5635 ); 5636 5637 guest.reboot_linux(0, None); 5638 5639 // Check still there after reboot 5640 assert_eq!( 5641 guest 5642 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5643 .unwrap() 5644 .trim() 5645 .parse::<u32>() 5646 .unwrap_or_default(), 5647 1 5648 ); 5649 5650 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5651 5652 thread::sleep(std::time::Duration::new(20, 0)); 5653 5654 // Check device has gone away 5655 assert_eq!( 5656 guest 5657 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5658 .unwrap() 5659 .trim() 5660 .parse::<u32>() 5661 .unwrap_or(1), 5662 0 5663 ); 
5664 5665 guest.reboot_linux(1, None); 5666 5667 // Check still absent after reboot 5668 assert_eq!( 5669 guest 5670 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5671 .unwrap() 5672 .trim() 5673 .parse::<u32>() 5674 .unwrap_or(1), 5675 0 5676 ); 5677 }); 5678 5679 let _ = child.kill(); 5680 let output = child.wait_with_output().unwrap(); 5681 5682 handle_child_output(r, &output); 5683 } 5684 5685 #[test] 5686 fn test_net_hotplug() { 5687 _test_net_hotplug(None) 5688 } 5689 5690 #[test] 5691 fn test_net_multi_segment_hotplug() { 5692 _test_net_hotplug(Some(15)) 5693 } 5694 5695 fn _test_net_hotplug(pci_segment: Option<u16>) { 5696 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5697 let guest = Guest::new(Box::new(focal)); 5698 5699 #[cfg(target_arch = "x86_64")] 5700 let kernel_path = direct_kernel_boot_path(); 5701 #[cfg(target_arch = "aarch64")] 5702 let kernel_path = edk2_path(); 5703 5704 let api_socket = temp_api_path(&guest.tmp_dir); 5705 5706 // Boot without network 5707 let mut cmd = GuestCommand::new(&guest); 5708 5709 cmd.args(["--api-socket", &api_socket]) 5710 .args(["--cpus", "boot=1"]) 5711 .args(["--memory", "size=512M"]) 5712 .args(["--kernel", kernel_path.to_str().unwrap()]) 5713 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5714 .default_disks() 5715 .capture_output(); 5716 5717 if pci_segment.is_some() { 5718 cmd.args([ 5719 "--platform", 5720 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5721 ]); 5722 } 5723 5724 let mut child = cmd.spawn().unwrap(); 5725 5726 thread::sleep(std::time::Duration::new(20, 0)); 5727 5728 let r = std::panic::catch_unwind(|| { 5729 // Add network 5730 let (cmd_success, cmd_output) = remote_command_w_output( 5731 &api_socket, 5732 "add-net", 5733 Some( 5734 format!( 5735 "{}{},id=test0", 5736 guest.default_net_string(), 5737 if let Some(pci_segment) = pci_segment { 5738 format!(",pci_segment={pci_segment}") 5739 } else { 5740 "".to_owned() 5741 } 5742 ) 5743 .as_str(), 5744 ), 5745 ); 5746 assert!(cmd_success); 5747 5748 if let Some(pci_segment) = pci_segment { 5749 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5750 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5751 ))); 5752 } else { 5753 assert!(String::from_utf8_lossy(&cmd_output) 5754 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}")); 5755 } 5756 5757 thread::sleep(std::time::Duration::new(5, 0)); 5758 5759 // 1 network interfaces + default localhost ==> 2 interfaces 5760 assert_eq!( 5761 guest 5762 .ssh_command("ip -o link | wc -l") 5763 .unwrap() 5764 .trim() 5765 .parse::<u32>() 5766 .unwrap_or_default(), 5767 2 5768 ); 5769 5770 // Remove network 5771 assert!(remote_command(&api_socket, "remove-device", Some("test0"),)); 5772 thread::sleep(std::time::Duration::new(5, 0)); 5773 5774 let (cmd_success, cmd_output) = remote_command_w_output( 5775 &api_socket, 5776 "add-net", 5777 Some( 5778 format!( 5779 "{}{},id=test1", 5780 guest.default_net_string(), 5781 if let Some(pci_segment) = pci_segment { 5782 format!(",pci_segment={pci_segment}") 5783 } else { 5784 "".to_owned() 5785 } 5786 ) 5787 .as_str(), 5788 ), 5789 ); 5790 assert!(cmd_success); 5791 5792 if let Some(pci_segment) = pci_segment { 5793 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5794 "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5795 ))); 5796 } else { 5797 assert!(String::from_utf8_lossy(&cmd_output) 5798 .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}")); 5799 } 5800 5801 
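// Note that `test1` is expected at the very same BDF that `test0` previously
// occupied, which implicitly verifies that the hot-unplug above fully
// released the slot.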
thread::sleep(std::time::Duration::new(5, 0)); 5802 5803 // 1 network interfaces + default localhost ==> 2 interfaces 5804 assert_eq!( 5805 guest 5806 .ssh_command("ip -o link | wc -l") 5807 .unwrap() 5808 .trim() 5809 .parse::<u32>() 5810 .unwrap_or_default(), 5811 2 5812 ); 5813 5814 guest.reboot_linux(0, None); 5815 5816 // Check still there after reboot 5817 // 1 network interfaces + default localhost ==> 2 interfaces 5818 assert_eq!( 5819 guest 5820 .ssh_command("ip -o link | wc -l") 5821 .unwrap() 5822 .trim() 5823 .parse::<u32>() 5824 .unwrap_or_default(), 5825 2 5826 ); 5827 }); 5828 5829 let _ = child.kill(); 5830 let output = child.wait_with_output().unwrap(); 5831 5832 handle_child_output(r, &output); 5833 } 5834 5835 #[test] 5836 fn test_initramfs() { 5837 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5838 let guest = Guest::new(Box::new(focal)); 5839 let mut workload_path = dirs::home_dir().unwrap(); 5840 workload_path.push("workloads"); 5841 5842 #[cfg(target_arch = "x86_64")] 5843 let mut kernels = vec![direct_kernel_boot_path()]; 5844 #[cfg(target_arch = "aarch64")] 5845 let kernels = vec![direct_kernel_boot_path()]; 5846 5847 #[cfg(target_arch = "x86_64")] 5848 { 5849 let mut pvh_kernel_path = workload_path.clone(); 5850 pvh_kernel_path.push("vmlinux"); 5851 kernels.push(pvh_kernel_path); 5852 } 5853 5854 let mut initramfs_path = workload_path; 5855 initramfs_path.push("alpine_initramfs.img"); 5856 5857 let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg"); 5858 let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}"); 5859 5860 kernels.iter().for_each(|k_path| { 5861 let mut child = GuestCommand::new(&guest) 5862 .args(["--kernel", k_path.to_str().unwrap()]) 5863 .args(["--initramfs", initramfs_path.to_str().unwrap()]) 5864 .args(["--cmdline", &cmdline]) 5865 .capture_output() 5866 .spawn() 5867 .unwrap(); 5868 5869 thread::sleep(std::time::Duration::new(20, 0)); 5870 5871 let _ = child.kill(); 5872 let output = child.wait_with_output().unwrap(); 5873 5874 let r = std::panic::catch_unwind(|| { 5875 let s = String::from_utf8_lossy(&output.stdout); 5876 5877 assert_ne!(s.lines().position(|line| line == test_string), None); 5878 }); 5879 5880 handle_child_output(r, &output); 5881 }); 5882 } 5883 5884 // One thing to note about this test. The virtio-net device is heavily used 5885 // through each ssh command. There's no need to perform a dedicated test to 5886 // verify the migration went well for virtio-net. 
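// For reference, the flow exercised by the snapshot/restore tests below maps
// onto the following ch-remote invocations (socket and directory paths are
// placeholders):
//   ch-remote --api-socket=/tmp/ch-api.sock pause
//   ch-remote --api-socket=/tmp/ch-api.sock snapshot file:///tmp/snapshot
// with the guest later restored from the same file:// URL into a fresh
// cloud-hypervisor instance.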
5887 #[test] 5888 #[cfg(not(feature = "mshv"))] 5889 fn test_snapshot_restore_hotplug_virtiomem() { 5890 _test_snapshot_restore(true); 5891 } 5892 5893 #[test] 5894 fn test_snapshot_restore_basic() { 5895 _test_snapshot_restore(false); 5896 } 5897 5898 fn _test_snapshot_restore(use_hotplug: bool) { 5899 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5900 let guest = Guest::new(Box::new(focal)); 5901 let kernel_path = direct_kernel_boot_path(); 5902 5903 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 5904 5905 let net_id = "net123"; 5906 let net_params = format!( 5907 "id={},tap=,mac={},ip={},mask=255.255.255.0", 5908 net_id, guest.network.guest_mac, guest.network.host_ip 5909 ); 5910 let mut mem_params = "size=4G"; 5911 5912 if use_hotplug { 5913 mem_params = "size=4G,hotplug_method=virtio-mem,hotplug_size=32G" 5914 } 5915 5916 let cloudinit_params = format!( 5917 "path={},iommu=on", 5918 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5919 ); 5920 5921 let socket = temp_vsock_path(&guest.tmp_dir); 5922 let event_path = temp_event_monitor_path(&guest.tmp_dir); 5923 5924 let mut child = GuestCommand::new(&guest) 5925 .args(["--api-socket", &api_socket_source]) 5926 .args(["--event-monitor", format!("path={event_path}").as_str()]) 5927 .args(["--cpus", "boot=4"]) 5928 .args(["--memory", mem_params]) 5929 .args(["--balloon", "size=0"]) 5930 .args(["--kernel", kernel_path.to_str().unwrap()]) 5931 .args([ 5932 "--disk", 5933 format!( 5934 "path={}", 5935 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5936 ) 5937 .as_str(), 5938 cloudinit_params.as_str(), 5939 ]) 5940 .args(["--net", net_params.as_str()]) 5941 .args(["--vsock", format!("cid=3,socket={socket}").as_str()]) 5942 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5943 .capture_output() 5944 .spawn() 5945 .unwrap(); 5946 5947 let console_text = String::from("On a branch floating down river a cricket, singing."); 5948 // Create the snapshot directory 5949 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 5950 5951 let r = std::panic::catch_unwind(|| { 5952 guest.wait_vm_boot(None).unwrap(); 5953 5954 // Check the number of vCPUs 5955 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 5956 // Check the guest RAM 5957 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 5958 if use_hotplug { 5959 // Increase guest RAM with virtio-mem 5960 resize_command( 5961 &api_socket_source, 5962 None, 5963 Some(6 << 30), 5964 None, 5965 Some(&event_path), 5966 ); 5967 thread::sleep(std::time::Duration::new(5, 0)); 5968 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 5969 // Use balloon to remove RAM from the VM 5970 resize_command( 5971 &api_socket_source, 5972 None, 5973 None, 5974 Some(1 << 30), 5975 Some(&event_path), 5976 ); 5977 thread::sleep(std::time::Duration::new(5, 0)); 5978 let total_memory = guest.get_total_memory().unwrap_or_default(); 5979 assert!(total_memory > 4_800_000); 5980 assert!(total_memory < 5_760_000); 5981 } 5982 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 5983 guest.check_devices_common(Some(&socket), Some(&console_text), None); 5984 5985 // x86_64: We check that removing and adding back the virtio-net device 5986 // does not break the snapshot/restore support for virtio-pci. 5987 // This is an important thing to test as the hotplug will 5988 // trigger a PCI BAR reprogramming, which is a good way of 5989 // checking if the stored resources are correctly restored. 
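            // In other words, re-plugging the NIC can leave it with a
            // different BAR address than it had at boot, so the snapshot has
            // to record the resources as currently programmed and the restore
            // path has to re-apply exactly those values.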
5990 // Unplug the virtio-net device 5991 // AArch64: Device hotplug is currently not supported, skipping here. 5992 #[cfg(target_arch = "x86_64")] 5993 { 5994 assert!(remote_command( 5995 &api_socket_source, 5996 "remove-device", 5997 Some(net_id), 5998 )); 5999 thread::sleep(std::time::Duration::new(10, 0)); 6000 let latest_events = [&MetaEvent { 6001 event: "device-removed".to_string(), 6002 device_id: Some(net_id.to_string()), 6003 }]; 6004 // See: #5938 6005 thread::sleep(std::time::Duration::new(1, 0)); 6006 assert!(check_latest_events_exact(&latest_events, &event_path)); 6007 6008 // Plug the virtio-net device again 6009 assert!(remote_command( 6010 &api_socket_source, 6011 "add-net", 6012 Some(net_params.as_str()), 6013 )); 6014 thread::sleep(std::time::Duration::new(10, 0)); 6015 } 6016 6017 // Pause the VM 6018 assert!(remote_command(&api_socket_source, "pause", None)); 6019 let latest_events = [ 6020 &MetaEvent { 6021 event: "pausing".to_string(), 6022 device_id: None, 6023 }, 6024 &MetaEvent { 6025 event: "paused".to_string(), 6026 device_id: None, 6027 }, 6028 ]; 6029 // See: #5938 6030 thread::sleep(std::time::Duration::new(1, 0)); 6031 assert!(check_latest_events_exact(&latest_events, &event_path)); 6032 6033 // Take a snapshot from the VM 6034 assert!(remote_command( 6035 &api_socket_source, 6036 "snapshot", 6037 Some(format!("file://{snapshot_dir}").as_str()), 6038 )); 6039 6040 // Wait to make sure the snapshot is completed 6041 thread::sleep(std::time::Duration::new(10, 0)); 6042 6043 let latest_events = [ 6044 &MetaEvent { 6045 event: "snapshotting".to_string(), 6046 device_id: None, 6047 }, 6048 &MetaEvent { 6049 event: "snapshotted".to_string(), 6050 device_id: None, 6051 }, 6052 ]; 6053 // See: #5938 6054 thread::sleep(std::time::Duration::new(1, 0)); 6055 assert!(check_latest_events_exact(&latest_events, &event_path)); 6056 }); 6057 6058 // Shutdown the source VM and check console output 6059 let _ = child.kill(); 6060 let output = child.wait_with_output().unwrap(); 6061 handle_child_output(r, &output); 6062 6063 let r = std::panic::catch_unwind(|| { 6064 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 6065 }); 6066 6067 handle_child_output(r, &output); 6068 6069 // Remove the vsock socket file. 
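        // (The restored VM re-creates the vsock backend at the same path, so
        // any stale socket file left behind by the source VM is removed first.)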
6070 Command::new("rm") 6071 .arg("-f") 6072 .arg(socket.as_str()) 6073 .output() 6074 .unwrap(); 6075 6076 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 6077 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 6078 6079 // Restore the VM from the snapshot 6080 let mut child = GuestCommand::new(&guest) 6081 .args(["--api-socket", &api_socket_restored]) 6082 .args([ 6083 "--event-monitor", 6084 format!("path={event_path_restored}").as_str(), 6085 ]) 6086 .args([ 6087 "--restore", 6088 format!("source_url=file://{snapshot_dir}").as_str(), 6089 ]) 6090 .capture_output() 6091 .spawn() 6092 .unwrap(); 6093 6094 // Wait for the VM to be restored 6095 thread::sleep(std::time::Duration::new(10, 0)); 6096 let expected_events = [ 6097 &MetaEvent { 6098 event: "starting".to_string(), 6099 device_id: None, 6100 }, 6101 &MetaEvent { 6102 event: "activated".to_string(), 6103 device_id: Some("__console".to_string()), 6104 }, 6105 &MetaEvent { 6106 event: "activated".to_string(), 6107 device_id: Some("__rng".to_string()), 6108 }, 6109 &MetaEvent { 6110 event: "restoring".to_string(), 6111 device_id: None, 6112 }, 6113 ]; 6114 assert!(check_sequential_events( 6115 &expected_events, 6116 &event_path_restored 6117 )); 6118 let latest_events = [&MetaEvent { 6119 event: "restored".to_string(), 6120 device_id: None, 6121 }]; 6122 assert!(check_latest_events_exact( 6123 &latest_events, 6124 &event_path_restored 6125 )); 6126 6127 let r = std::panic::catch_unwind(|| { 6128 // Resume the VM 6129 assert!(remote_command(&api_socket_restored, "resume", None)); 6130 // There is no way that we can ensure the 'write()' to the 6131 // event file is completed when the 'resume' request is 6132 // returned successfully, because the 'write()' was done 6133 // asynchronously from a different thread of Cloud 6134 // Hypervisor (e.g. the event-monitor thread). 
6135 thread::sleep(std::time::Duration::new(1, 0)); 6136 let latest_events = [ 6137 &MetaEvent { 6138 event: "resuming".to_string(), 6139 device_id: None, 6140 }, 6141 &MetaEvent { 6142 event: "resumed".to_string(), 6143 device_id: None, 6144 }, 6145 ]; 6146 assert!(check_latest_events_exact( 6147 &latest_events, 6148 &event_path_restored 6149 )); 6150 6151 // Perform same checks to validate VM has been properly restored 6152 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 6153 let total_memory = guest.get_total_memory().unwrap_or_default(); 6154 if !use_hotplug { 6155 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 6156 } else { 6157 assert!(total_memory > 4_800_000); 6158 assert!(total_memory < 5_760_000); 6159 // Deflate balloon to restore entire RAM to the VM 6160 resize_command(&api_socket_restored, None, None, Some(0), None); 6161 thread::sleep(std::time::Duration::new(5, 0)); 6162 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 6163 // Decrease guest RAM with virtio-mem 6164 resize_command(&api_socket_restored, None, Some(5 << 30), None, None); 6165 thread::sleep(std::time::Duration::new(5, 0)); 6166 let total_memory = guest.get_total_memory().unwrap_or_default(); 6167 assert!(total_memory > 4_800_000); 6168 assert!(total_memory < 5_760_000); 6169 } 6170 6171 guest.check_devices_common(Some(&socket), Some(&console_text), None); 6172 }); 6173 // Shutdown the target VM and check console output 6174 let _ = child.kill(); 6175 let output = child.wait_with_output().unwrap(); 6176 handle_child_output(r, &output); 6177 6178 let r = std::panic::catch_unwind(|| { 6179 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 6180 }); 6181 6182 handle_child_output(r, &output); 6183 } 6184 6185 #[test] 6186 fn test_counters() { 6187 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6188 let guest = Guest::new(Box::new(focal)); 6189 let api_socket = temp_api_path(&guest.tmp_dir); 6190 6191 let mut cmd = GuestCommand::new(&guest); 6192 cmd.args(["--cpus", "boot=1"]) 6193 .args(["--memory", "size=512M"]) 6194 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 6195 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6196 .default_disks() 6197 .args(["--net", guest.default_net_string().as_str()]) 6198 .args(["--api-socket", &api_socket]) 6199 .capture_output(); 6200 6201 let mut child = cmd.spawn().unwrap(); 6202 6203 let r = std::panic::catch_unwind(|| { 6204 guest.wait_vm_boot(None).unwrap(); 6205 6206 let orig_counters = get_counters(&api_socket); 6207 guest 6208 .ssh_command("dd if=/dev/zero of=test count=8 bs=1M") 6209 .unwrap(); 6210 6211 let new_counters = get_counters(&api_socket); 6212 6213 // Check that all the counters have increased 6214 assert!(new_counters > orig_counters); 6215 }); 6216 6217 let _ = child.kill(); 6218 let output = child.wait_with_output().unwrap(); 6219 6220 handle_child_output(r, &output); 6221 } 6222 6223 #[test] 6224 #[cfg(feature = "guest_debug")] 6225 fn test_coredump() { 6226 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6227 let guest = Guest::new(Box::new(focal)); 6228 let api_socket = temp_api_path(&guest.tmp_dir); 6229 6230 let mut cmd = GuestCommand::new(&guest); 6231 cmd.args(["--cpus", "boot=4"]) 6232 .args(["--memory", "size=4G"]) 6233 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6234 .default_disks() 6235 .args(["--net", guest.default_net_string().as_str()]) 6236 .args(["--api-socket", &api_socket]) 6237 
            .capture_output();

        let mut child = cmd.spawn().unwrap();
        let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir);

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            assert!(remote_command(&api_socket, "pause", None));

            assert!(remote_command(
                &api_socket,
                "coredump",
                Some(format!("file://{vmcore_file}").as_str()),
            ));

            // The number of CORE notes should equal the number of vCPUs.
            let readelf_core_num_cmd =
                format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l");
            let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd);
            assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4");

            // The number of QEMU notes should also equal the number of vCPUs.
            let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l");
            let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd);
            assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4");
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }

    #[test]
    #[cfg(feature = "guest_debug")]
    fn test_coredump_no_pause() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);

        let mut cmd = GuestCommand::new(&guest);
        cmd.args(["--cpus", "boot=4"])
            .args(["--memory", "size=4G"])
            .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
            .default_disks()
            .args(["--net", guest.default_net_string().as_str()])
            .args(["--api-socket", &api_socket])
            .capture_output();

        let mut child = cmd.spawn().unwrap();
        let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir);

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            assert!(remote_command(
                &api_socket,
                "coredump",
                Some(format!("file://{vmcore_file}").as_str()),
            ));

            assert_eq!(vm_state(&api_socket), "Running");
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }

    #[test]
    fn test_watchdog() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);

        let kernel_path = direct_kernel_boot_path();
        let event_path = temp_event_monitor_path(&guest.tmp_dir);

        let mut cmd = GuestCommand::new(&guest);
        cmd.args(["--cpus", "boot=1"])
            .args(["--memory", "size=512M"])
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", guest.default_net_string().as_str()])
            .args(["--watchdog"])
            .args(["--api-socket", &api_socket])
            .args(["--event-monitor", format!("path={event_path}").as_str()])
            .capture_output();

        let mut child = cmd.spawn().unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            let mut expected_reboot_count = 1;

            // Enable the watchdog with a 15s timeout
            enable_guest_watchdog(&guest, 15);

            // Reboot and check that systemd has activated the watchdog
            guest.ssh_command("sudo reboot").unwrap();
            guest.wait_vm_boot(None).unwrap();
expected_reboot_count += 1; 6343 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6344 assert_eq!( 6345 guest 6346 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 6347 .unwrap() 6348 .trim() 6349 .parse::<u32>() 6350 .unwrap_or_default(), 6351 2 6352 ); 6353 6354 // Allow some normal time to elapse to check we don't get spurious reboots 6355 thread::sleep(std::time::Duration::new(40, 0)); 6356 // Check no reboot 6357 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6358 6359 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 6360 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 6361 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 6362 guest.wait_vm_boot(Some(50)).unwrap(); 6363 // Check a reboot is triggered by the watchdog 6364 expected_reboot_count += 1; 6365 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6366 6367 #[cfg(target_arch = "x86_64")] 6368 { 6369 // Now pause the VM and remain offline for 30s 6370 assert!(remote_command(&api_socket, "pause", None)); 6371 let latest_events = [ 6372 &MetaEvent { 6373 event: "pausing".to_string(), 6374 device_id: None, 6375 }, 6376 &MetaEvent { 6377 event: "paused".to_string(), 6378 device_id: None, 6379 }, 6380 ]; 6381 assert!(check_latest_events_exact(&latest_events, &event_path)); 6382 assert!(remote_command(&api_socket, "resume", None)); 6383 6384 // Check no reboot 6385 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6386 } 6387 }); 6388 6389 let _ = child.kill(); 6390 let output = child.wait_with_output().unwrap(); 6391 6392 handle_child_output(r, &output); 6393 } 6394 6395 #[test] 6396 fn test_pvpanic() { 6397 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 6398 let guest = Guest::new(Box::new(jammy)); 6399 let api_socket = temp_api_path(&guest.tmp_dir); 6400 let event_path = temp_event_monitor_path(&guest.tmp_dir); 6401 6402 let kernel_path = direct_kernel_boot_path(); 6403 6404 let mut cmd = GuestCommand::new(&guest); 6405 cmd.args(["--cpus", "boot=1"]) 6406 .args(["--memory", "size=512M"]) 6407 .args(["--kernel", kernel_path.to_str().unwrap()]) 6408 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6409 .default_disks() 6410 .args(["--net", guest.default_net_string().as_str()]) 6411 .args(["--pvpanic"]) 6412 .args(["--api-socket", &api_socket]) 6413 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6414 .capture_output(); 6415 6416 let mut child = cmd.spawn().unwrap(); 6417 6418 let r = std::panic::catch_unwind(|| { 6419 guest.wait_vm_boot(None).unwrap(); 6420 6421 // Trigger guest a panic 6422 make_guest_panic(&guest); 6423 6424 // Wait a while for guest 6425 thread::sleep(std::time::Duration::new(10, 0)); 6426 6427 let expected_sequential_events = [&MetaEvent { 6428 event: "panic".to_string(), 6429 device_id: None, 6430 }]; 6431 assert!(check_latest_events_exact( 6432 &expected_sequential_events, 6433 &event_path 6434 )); 6435 }); 6436 6437 let _ = child.kill(); 6438 let output = child.wait_with_output().unwrap(); 6439 6440 handle_child_output(r, &output); 6441 } 6442 6443 #[test] 6444 fn test_tap_from_fd() { 6445 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6446 let guest = Guest::new(Box::new(focal)); 6447 let kernel_path = direct_kernel_boot_path(); 6448 6449 // Create a TAP interface with multi-queue enabled 6450 let num_queue_pairs: 
usize = 2; 6451 6452 use std::str::FromStr; 6453 let taps = net_util::open_tap( 6454 Some("chtap0"), 6455 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 6456 None, 6457 &mut None, 6458 None, 6459 num_queue_pairs, 6460 Some(libc::O_RDWR | libc::O_NONBLOCK), 6461 ) 6462 .unwrap(); 6463 6464 let mut child = GuestCommand::new(&guest) 6465 .args(["--cpus", &format!("boot={num_queue_pairs}")]) 6466 .args(["--memory", "size=512M"]) 6467 .args(["--kernel", kernel_path.to_str().unwrap()]) 6468 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6469 .default_disks() 6470 .args([ 6471 "--net", 6472 &format!( 6473 "fd=[{},{}],mac={},num_queues={}", 6474 taps[0].as_raw_fd(), 6475 taps[1].as_raw_fd(), 6476 guest.network.guest_mac, 6477 num_queue_pairs * 2 6478 ), 6479 ]) 6480 .capture_output() 6481 .spawn() 6482 .unwrap(); 6483 6484 let r = std::panic::catch_unwind(|| { 6485 guest.wait_vm_boot(None).unwrap(); 6486 6487 assert_eq!( 6488 guest 6489 .ssh_command("ip -o link | wc -l") 6490 .unwrap() 6491 .trim() 6492 .parse::<u32>() 6493 .unwrap_or_default(), 6494 2 6495 ); 6496 6497 guest.reboot_linux(0, None); 6498 6499 assert_eq!( 6500 guest 6501 .ssh_command("ip -o link | wc -l") 6502 .unwrap() 6503 .trim() 6504 .parse::<u32>() 6505 .unwrap_or_default(), 6506 2 6507 ); 6508 }); 6509 6510 let _ = child.kill(); 6511 let output = child.wait_with_output().unwrap(); 6512 6513 handle_child_output(r, &output); 6514 } 6515 6516 // By design, a guest VM won't be able to connect to the host 6517 // machine when using a macvtap network interface (while it can 6518 // communicate externally). As a workaround, this integration 6519 // test creates two macvtap interfaces in 'bridge' mode on the 6520 // same physical net interface, one for the guest and one for 6521 // the host. With additional setup on the IP address and the 6522 // routing table, it enables the communications between the 6523 // guest VM and the host machine. 
6524 // Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail 6525 fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) { 6526 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6527 let guest = Guest::new(Box::new(focal)); 6528 let api_socket = temp_api_path(&guest.tmp_dir); 6529 6530 #[cfg(target_arch = "x86_64")] 6531 let kernel_path = direct_kernel_boot_path(); 6532 #[cfg(target_arch = "aarch64")] 6533 let kernel_path = edk2_path(); 6534 6535 let phy_net = "eth0"; 6536 6537 // Create a macvtap interface for the guest VM to use 6538 assert!(exec_host_command_status(&format!( 6539 "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mod bridge" 6540 )) 6541 .success()); 6542 assert!(exec_host_command_status(&format!( 6543 "sudo ip link set {} address {} up", 6544 guest_macvtap_name, guest.network.guest_mac 6545 )) 6546 .success()); 6547 assert!( 6548 exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success() 6549 ); 6550 6551 let tap_index = 6552 fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap(); 6553 let tap_device = format!("/dev/tap{}", tap_index.trim()); 6554 6555 assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success()); 6556 6557 let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap(); 6558 let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6559 assert!(tap_fd1 > 0); 6560 let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6561 assert!(tap_fd2 > 0); 6562 6563 // Create a macvtap on the same physical net interface for 6564 // the host machine to use 6565 assert!(exec_host_command_status(&format!( 6566 "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mod bridge" 6567 )) 6568 .success()); 6569 // Use default mask "255.255.255.0" 6570 assert!(exec_host_command_status(&format!( 6571 "sudo ip address add {}/24 dev {}", 6572 guest.network.host_ip, host_macvtap_name 6573 )) 6574 .success()); 6575 assert!( 6576 exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up")) 6577 .success() 6578 ); 6579 6580 let mut guest_command = GuestCommand::new(&guest); 6581 guest_command 6582 .args(["--cpus", "boot=2"]) 6583 .args(["--memory", "size=512M"]) 6584 .args(["--kernel", kernel_path.to_str().unwrap()]) 6585 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6586 .default_disks() 6587 .args(["--api-socket", &api_socket]); 6588 6589 let net_params = format!( 6590 "fd=[{},{}],mac={},num_queues=4", 6591 tap_fd1, tap_fd2, guest.network.guest_mac 6592 ); 6593 6594 if !hotplug { 6595 guest_command.args(["--net", &net_params]); 6596 } 6597 6598 let mut child = guest_command.capture_output().spawn().unwrap(); 6599 6600 if hotplug { 6601 // Give some time to the VMM process to listen to the API 6602 // socket. This is the only requirement to avoid the following 6603 // call to ch-remote from failing. 
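        // A sketch of a more defensive variant (not used here) that waits for
        // the API socket file to show up instead of relying on a fixed delay;
        // it only uses std and the `api_socket` path already defined above:
        //
        //     let api_path = std::path::Path::new(&api_socket);
        //     for _ in 0..100 {
        //         if api_path.exists() {
        //             break;
        //         }
        //         thread::sleep(std::time::Duration::from_millis(100));
        //     }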
6604 thread::sleep(std::time::Duration::new(10, 0)); 6605 // Hotplug the virtio-net device 6606 let (cmd_success, cmd_output) = 6607 remote_command_w_output(&api_socket, "add-net", Some(&net_params)); 6608 assert!(cmd_success); 6609 #[cfg(target_arch = "x86_64")] 6610 assert!(String::from_utf8_lossy(&cmd_output) 6611 .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}")); 6612 #[cfg(target_arch = "aarch64")] 6613 assert!(String::from_utf8_lossy(&cmd_output) 6614 .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}")); 6615 } 6616 6617 // The functional connectivity provided by the virtio-net device 6618 // gets tested through wait_vm_boot() as it expects to receive a 6619 // HTTP request, and through the SSH command as well. 6620 let r = std::panic::catch_unwind(|| { 6621 guest.wait_vm_boot(None).unwrap(); 6622 6623 assert_eq!( 6624 guest 6625 .ssh_command("ip -o link | wc -l") 6626 .unwrap() 6627 .trim() 6628 .parse::<u32>() 6629 .unwrap_or_default(), 6630 2 6631 ); 6632 6633 guest.reboot_linux(0, None); 6634 6635 assert_eq!( 6636 guest 6637 .ssh_command("ip -o link | wc -l") 6638 .unwrap() 6639 .trim() 6640 .parse::<u32>() 6641 .unwrap_or_default(), 6642 2 6643 ); 6644 }); 6645 6646 let _ = child.kill(); 6647 6648 exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}")); 6649 exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}")); 6650 6651 let output = child.wait_with_output().unwrap(); 6652 6653 handle_child_output(r, &output); 6654 } 6655 6656 #[test] 6657 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6658 fn test_macvtap() { 6659 _test_macvtap(false, "guestmacvtap0", "hostmacvtap0") 6660 } 6661 6662 #[test] 6663 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6664 fn test_macvtap_hotplug() { 6665 _test_macvtap(true, "guestmacvtap1", "hostmacvtap1") 6666 } 6667 6668 #[test] 6669 #[cfg(not(feature = "mshv"))] 6670 fn test_ovs_dpdk() { 6671 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6672 let guest1 = Guest::new(Box::new(focal1)); 6673 6674 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6675 let guest2 = Guest::new(Box::new(focal2)); 6676 let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir)); 6677 6678 let (mut child1, mut child2) = 6679 setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false); 6680 6681 // Create the snapshot directory 6682 let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir); 6683 6684 let r = std::panic::catch_unwind(|| { 6685 // Remove one of the two ports from the OVS bridge 6686 assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success()); 6687 6688 // Spawn a new netcat listener in the first VM 6689 let guest_ip = guest1.network.guest_ip.clone(); 6690 thread::spawn(move || { 6691 ssh_command_ip( 6692 "nc -l 12345", 6693 &guest_ip, 6694 DEFAULT_SSH_RETRIES, 6695 DEFAULT_SSH_TIMEOUT, 6696 ) 6697 .unwrap(); 6698 }); 6699 6700 // Wait for the server to be listening 6701 thread::sleep(std::time::Duration::new(5, 0)); 6702 6703 // Check the connection fails this time 6704 assert!(guest2.ssh_command("nc -vz 172.100.0.1 12345").is_err()); 6705 6706 // Add the OVS port back 6707 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 6708 6709 // And finally check the connection is functional again 6710 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6711 6712 // Pause the VM 
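            // (Taking a snapshot requires the VM to be in the paused state.)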
6713 assert!(remote_command(&api_socket_source, "pause", None)); 6714 6715 // Take a snapshot from the VM 6716 assert!(remote_command( 6717 &api_socket_source, 6718 "snapshot", 6719 Some(format!("file://{snapshot_dir}").as_str()), 6720 )); 6721 6722 // Wait to make sure the snapshot is completed 6723 thread::sleep(std::time::Duration::new(10, 0)); 6724 }); 6725 6726 // Shutdown the source VM 6727 let _ = child2.kill(); 6728 let output = child2.wait_with_output().unwrap(); 6729 handle_child_output(r, &output); 6730 6731 // Remove the vhost-user socket file. 6732 Command::new("rm") 6733 .arg("-f") 6734 .arg("/tmp/dpdkvhostclient2") 6735 .output() 6736 .unwrap(); 6737 6738 let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir)); 6739 // Restore the VM from the snapshot 6740 let mut child2 = GuestCommand::new(&guest2) 6741 .args(["--api-socket", &api_socket_restored]) 6742 .args([ 6743 "--restore", 6744 format!("source_url=file://{snapshot_dir}").as_str(), 6745 ]) 6746 .capture_output() 6747 .spawn() 6748 .unwrap(); 6749 6750 // Wait for the VM to be restored 6751 thread::sleep(std::time::Duration::new(10, 0)); 6752 6753 let r = std::panic::catch_unwind(|| { 6754 // Resume the VM 6755 assert!(remote_command(&api_socket_restored, "resume", None)); 6756 6757 // Spawn a new netcat listener in the first VM 6758 let guest_ip = guest1.network.guest_ip.clone(); 6759 thread::spawn(move || { 6760 ssh_command_ip( 6761 "nc -l 12345", 6762 &guest_ip, 6763 DEFAULT_SSH_RETRIES, 6764 DEFAULT_SSH_TIMEOUT, 6765 ) 6766 .unwrap(); 6767 }); 6768 6769 // Wait for the server to be listening 6770 thread::sleep(std::time::Duration::new(5, 0)); 6771 6772 // And check the connection is still functional after restore 6773 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6774 }); 6775 6776 let _ = child1.kill(); 6777 let _ = child2.kill(); 6778 6779 let output = child1.wait_with_output().unwrap(); 6780 child2.wait().unwrap(); 6781 6782 cleanup_ovs_dpdk(); 6783 6784 handle_child_output(r, &output); 6785 } 6786 6787 fn setup_spdk_nvme(nvme_dir: &std::path::Path) { 6788 cleanup_spdk_nvme(); 6789 6790 assert!(exec_host_command_status(&format!( 6791 "mkdir -p {}", 6792 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6793 )) 6794 .success()); 6795 assert!(exec_host_command_status(&format!( 6796 "truncate {} -s 128M", 6797 nvme_dir.join("test-disk.raw").to_str().unwrap() 6798 )) 6799 .success()); 6800 assert!(exec_host_command_status(&format!( 6801 "mkfs.ext4 {}", 6802 nvme_dir.join("test-disk.raw").to_str().unwrap() 6803 )) 6804 .success()); 6805 6806 // Start the SPDK nvmf_tgt daemon to present NVMe device as a VFIO user device 6807 Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt") 6808 .args(["-i", "0", "-m", "0x1"]) 6809 .spawn() 6810 .unwrap(); 6811 thread::sleep(std::time::Duration::new(2, 0)); 6812 6813 assert!(exec_host_command_with_retries( 6814 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER", 6815 3, 6816 std::time::Duration::new(5, 0), 6817 )); 6818 assert!(exec_host_command_status(&format!( 6819 "/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512", 6820 nvme_dir.join("test-disk.raw").to_str().unwrap() 6821 )) 6822 .success()); 6823 assert!(exec_host_command_status( 6824 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test" 6825 ) 6826 .success()); 6827 assert!(exec_host_command_status( 6828 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode test" 6829 ) 6830 .success()); 6831 
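        // The VFIOUSER listener below is what actually exposes the subsystem
        // as a vfio-user device: it creates the control socket (`cntrl`) under
        // the nvme-vfio-user directory, which test_vfio_user() later hands to
        // `add-user-device` through its `socket=` parameter.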
assert!(exec_host_command_status(&format!( 6832 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0", 6833 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6834 )) 6835 .success()); 6836 } 6837 6838 fn cleanup_spdk_nvme() { 6839 exec_host_command_status("pkill -f nvmf_tgt"); 6840 } 6841 6842 #[test] 6843 fn test_vfio_user() { 6844 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 6845 let jammy = UbuntuDiskConfig::new(jammy_image); 6846 let guest = Guest::new(Box::new(jammy)); 6847 6848 let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user"); 6849 setup_spdk_nvme(spdk_nvme_dir.as_path()); 6850 6851 let api_socket = temp_api_path(&guest.tmp_dir); 6852 let mut child = GuestCommand::new(&guest) 6853 .args(["--api-socket", &api_socket]) 6854 .args(["--cpus", "boot=1"]) 6855 .args(["--memory", "size=512M,shared=on,hugepages=on"]) 6856 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6857 .args(["--serial", "tty", "--console", "off"]) 6858 .default_disks() 6859 .default_net() 6860 .capture_output() 6861 .spawn() 6862 .unwrap(); 6863 6864 let r = std::panic::catch_unwind(|| { 6865 guest.wait_vm_boot(None).unwrap(); 6866 6867 // Hotplug the SPDK-NVMe device to the VM 6868 let (cmd_success, cmd_output) = remote_command_w_output( 6869 &api_socket, 6870 "add-user-device", 6871 Some(&format!( 6872 "socket={},id=vfio_user0", 6873 spdk_nvme_dir 6874 .as_path() 6875 .join("nvme-vfio-user/cntrl") 6876 .to_str() 6877 .unwrap(), 6878 )), 6879 ); 6880 assert!(cmd_success); 6881 assert!(String::from_utf8_lossy(&cmd_output) 6882 .contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}")); 6883 6884 thread::sleep(std::time::Duration::new(10, 0)); 6885 6886 // Check both if /dev/nvme exists and if the block size is 128M. 6887 assert_eq!( 6888 guest 6889 .ssh_command("lsblk | grep nvme0n1 | grep -c 128M") 6890 .unwrap() 6891 .trim() 6892 .parse::<u32>() 6893 .unwrap_or_default(), 6894 1 6895 ); 6896 6897 // Check changes persist after reboot 6898 assert_eq!( 6899 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6900 "" 6901 ); 6902 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 6903 guest 6904 .ssh_command("echo test123 | sudo tee /mnt/test") 6905 .unwrap(); 6906 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 6907 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 6908 6909 guest.reboot_linux(0, None); 6910 assert_eq!( 6911 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6912 "" 6913 ); 6914 assert_eq!( 6915 guest.ssh_command("sudo cat /mnt/test").unwrap().trim(), 6916 "test123" 6917 ); 6918 }); 6919 6920 cleanup_spdk_nvme(); 6921 6922 let _ = child.kill(); 6923 let output = child.wait_with_output().unwrap(); 6924 6925 handle_child_output(r, &output); 6926 } 6927 6928 #[test] 6929 #[cfg(target_arch = "x86_64")] 6930 fn test_vdpa_block() { 6931 // Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded. 
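        // The block simulator is expected to have been set up on the host
        // beforehand (typically `modprobe vdpa_sim_blk` plus binding the
        // simulated devices to the vhost-vdpa bus), so that the
        // /dev/vhost-vdpa-0 and /dev/vhost-vdpa-1 nodes used below exist.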
6932 assert!(exec_host_command_status("lsmod | grep vdpa_sim_blk").success()); 6933 6934 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6935 let guest = Guest::new(Box::new(focal)); 6936 let api_socket = temp_api_path(&guest.tmp_dir); 6937 6938 let kernel_path = direct_kernel_boot_path(); 6939 6940 let mut child = GuestCommand::new(&guest) 6941 .args(["--cpus", "boot=2"]) 6942 .args(["--memory", "size=512M,hugepages=on"]) 6943 .args(["--kernel", kernel_path.to_str().unwrap()]) 6944 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6945 .default_disks() 6946 .default_net() 6947 .args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"]) 6948 .args(["--platform", "num_pci_segments=2,iommu_segments=1"]) 6949 .args(["--api-socket", &api_socket]) 6950 .capture_output() 6951 .spawn() 6952 .unwrap(); 6953 6954 let r = std::panic::catch_unwind(|| { 6955 guest.wait_vm_boot(None).unwrap(); 6956 6957 // Check both if /dev/vdc exists and if the block size is 128M. 6958 assert_eq!( 6959 guest 6960 .ssh_command("lsblk | grep vdc | grep -c 128M") 6961 .unwrap() 6962 .trim() 6963 .parse::<u32>() 6964 .unwrap_or_default(), 6965 1 6966 ); 6967 6968 // Check the content of the block device after we wrote to it. 6969 // The vpda-sim-blk should let us read what we previously wrote. 6970 guest 6971 .ssh_command("sudo bash -c 'echo foobar > /dev/vdc'") 6972 .unwrap(); 6973 assert_eq!( 6974 guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(), 6975 "foobar" 6976 ); 6977 6978 // Hotplug an extra vDPA block device behind the vIOMMU 6979 // Add a new vDPA device to the VM 6980 let (cmd_success, cmd_output) = remote_command_w_output( 6981 &api_socket, 6982 "add-vdpa", 6983 Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"), 6984 ); 6985 assert!(cmd_success); 6986 assert!(String::from_utf8_lossy(&cmd_output) 6987 .contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}")); 6988 6989 thread::sleep(std::time::Duration::new(10, 0)); 6990 6991 // Check IOMMU setup 6992 assert!(guest 6993 .does_device_vendor_pair_match("0x1057", "0x1af4") 6994 .unwrap_or_default()); 6995 assert_eq!( 6996 guest 6997 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 6998 .unwrap() 6999 .trim(), 7000 "0001:00:01.0" 7001 ); 7002 7003 // Check both if /dev/vdd exists and if the block size is 128M. 7004 assert_eq!( 7005 guest 7006 .ssh_command("lsblk | grep vdd | grep -c 128M") 7007 .unwrap() 7008 .trim() 7009 .parse::<u32>() 7010 .unwrap_or_default(), 7011 1 7012 ); 7013 7014 // Write some content to the block device we've just plugged. 7015 guest 7016 .ssh_command("sudo bash -c 'echo foobar > /dev/vdd'") 7017 .unwrap(); 7018 7019 // Check we can read the content back. 
7020 assert_eq!( 7021 guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(), 7022 "foobar" 7023 ); 7024 7025 // Unplug the device 7026 let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0")); 7027 assert!(cmd_success); 7028 thread::sleep(std::time::Duration::new(10, 0)); 7029 7030 // Check /dev/vdd doesn't exist anymore 7031 assert_eq!( 7032 guest 7033 .ssh_command("lsblk | grep -c vdd || true") 7034 .unwrap() 7035 .trim() 7036 .parse::<u32>() 7037 .unwrap_or(1), 7038 0 7039 ); 7040 }); 7041 7042 let _ = child.kill(); 7043 let output = child.wait_with_output().unwrap(); 7044 7045 handle_child_output(r, &output); 7046 } 7047 7048 #[test] 7049 #[cfg(target_arch = "x86_64")] 7050 #[ignore = "See #5756"] 7051 fn test_vdpa_net() { 7052 // Before trying to run the test, verify the vdpa_sim_net module is correctly loaded. 7053 if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() { 7054 return; 7055 } 7056 7057 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7058 let guest = Guest::new(Box::new(focal)); 7059 7060 let kernel_path = direct_kernel_boot_path(); 7061 7062 let mut child = GuestCommand::new(&guest) 7063 .args(["--cpus", "boot=2"]) 7064 .args(["--memory", "size=512M,hugepages=on"]) 7065 .args(["--kernel", kernel_path.to_str().unwrap()]) 7066 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7067 .default_disks() 7068 .default_net() 7069 .args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"]) 7070 .capture_output() 7071 .spawn() 7072 .unwrap(); 7073 7074 let r = std::panic::catch_unwind(|| { 7075 guest.wait_vm_boot(None).unwrap(); 7076 7077 // Check we can find network interface related to vDPA device 7078 assert_eq!( 7079 guest 7080 .ssh_command("ip -o link | grep -c ens6") 7081 .unwrap() 7082 .trim() 7083 .parse::<u32>() 7084 .unwrap_or(0), 7085 1 7086 ); 7087 7088 guest 7089 .ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6") 7090 .unwrap(); 7091 guest.ssh_command("sudo ip link set up dev ens6").unwrap(); 7092 7093 // Check there is no packet yet on both TX/RX of the network interface 7094 assert_eq!( 7095 guest 7096 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'") 7097 .unwrap() 7098 .trim() 7099 .parse::<u32>() 7100 .unwrap_or(0), 7101 2 7102 ); 7103 7104 // Send 6 packets with ping command 7105 guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap(); 7106 7107 // Check we can find 6 packets on both TX/RX of the network interface 7108 assert_eq!( 7109 guest 7110 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'") 7111 .unwrap() 7112 .trim() 7113 .parse::<u32>() 7114 .unwrap_or(0), 7115 2 7116 ); 7117 7118 // No need to check for hotplug as we already tested it through 7119 // test_vdpa_block() 7120 }); 7121 7122 let _ = child.kill(); 7123 let output = child.wait_with_output().unwrap(); 7124 7125 handle_child_output(r, &output); 7126 } 7127 7128 #[test] 7129 #[cfg(target_arch = "x86_64")] 7130 fn test_tpm() { 7131 let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 7132 let guest = Guest::new(Box::new(focal)); 7133 7134 let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir); 7135 7136 let mut guest_cmd = GuestCommand::new(&guest); 7137 guest_cmd 7138 .args(["--cpus", "boot=1"]) 7139 .args(["--memory", "size=512M"]) 7140 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 7141 .args(["--tpm", &format!("socket={swtpm_socket_path}")]) 7142 .capture_output() 7143 .default_disks() 7144 .default_net(); 7145 7146 // Start swtpm 
        let mut swtpm_child = swtpm_command.spawn().unwrap();
        thread::sleep(std::time::Duration::new(10, 0));
        let mut child = guest_cmd.spawn().unwrap();
        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();
            assert_eq!(
                guest.ssh_command("ls /dev/tpm0").unwrap().trim(),
                "/dev/tpm0"
            );
            guest.ssh_command("sudo tpm2_selftest -f").unwrap();
            guest
                .ssh_command("echo 'hello' > /tmp/checksum_test; ")
                .unwrap();
            guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap();
        });

        let _ = swtpm_child.kill();
        let _d_out = swtpm_child.wait_with_output().unwrap();

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }

    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_double_tty() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let mut cmd = GuestCommand::new(&guest);
        let api_socket = temp_api_path(&guest.tmp_dir);
        let tty_str: &str = "console=hvc0 earlyprintk=ttyS0 ";
        // Printed by the kernel once the hvc0 virtio console is enabled.
        let console_enabled_str: &str = "console [hvc0] enabled";
        // Printed by the kernel when the early boot console is handed over
        // and disabled.
        let bootconsole_disabled_str: &str = "bootconsole [earlyser0] disabled";

        let kernel_path = direct_kernel_boot_path();

        cmd.args(["--cpus", "boot=1"])
            .args(["--memory", "size=512M"])
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args([
                "--cmdline",
                DIRECT_KERNEL_BOOT_CMDLINE
                    .replace("console=hvc0 ", tty_str)
                    .as_str(),
            ])
            .capture_output()
            .default_disks()
            .default_net()
            .args(["--serial", "tty"])
            .args(["--console", "tty"])
            .args(["--api-socket", &api_socket]);

        let mut child = cmd.spawn().unwrap();

        let mut r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        if r.is_ok() {
            r = std::panic::catch_unwind(|| {
                let s = String::from_utf8_lossy(&output.stdout);
                assert!(s.contains(tty_str));
                assert!(s.contains(console_enabled_str));
                assert!(s.contains(bootconsole_disabled_str));
            });
        }

        handle_child_output(r, &output);
    }
}

mod dbus_api {
    use crate::*;

    // Start cloud-hypervisor with no VM parameters, running both the HTTP
    // and DBus APIs. Alternate calls to the external APIs (HTTP and DBus)
    // to create a VM, boot it, and verify that it can be shut down and then
    // booted again.
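    // Both servers front the same internal VMM API, so a request made over
    // D-Bus is visible to subsequent requests made over HTTP and vice versa;
    // that is what the alternating calls below are meant to demonstrate.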
7232 #[test] 7233 fn test_api_dbus_and_http_interleaved() { 7234 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7235 let guest = Guest::new(Box::new(focal)); 7236 let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir); 7237 let http_api = TargetApi::new_http_api(&guest.tmp_dir); 7238 7239 let mut child = GuestCommand::new(&guest) 7240 .args(dbus_api.guest_args()) 7241 .args(http_api.guest_args()) 7242 .capture_output() 7243 .spawn() 7244 .unwrap(); 7245 7246 thread::sleep(std::time::Duration::new(1, 0)); 7247 7248 // Verify API servers are running 7249 assert!(dbus_api.remote_command("ping", None)); 7250 assert!(http_api.remote_command("ping", None)); 7251 7252 // Create the VM first 7253 let cpu_count: u8 = 4; 7254 let request_body = guest.api_create_body( 7255 cpu_count, 7256 direct_kernel_boot_path().to_str().unwrap(), 7257 DIRECT_KERNEL_BOOT_CMDLINE, 7258 ); 7259 7260 let temp_config_path = guest.tmp_dir.as_path().join("config"); 7261 std::fs::write(&temp_config_path, request_body).unwrap(); 7262 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 7263 7264 let r = std::panic::catch_unwind(|| { 7265 // Create the VM 7266 assert!(dbus_api.remote_command("create", Some(create_config),)); 7267 7268 // Then boot it 7269 assert!(http_api.remote_command("boot", None)); 7270 guest.wait_vm_boot(None).unwrap(); 7271 7272 // Check that the VM booted as expected 7273 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7274 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7275 7276 // Sync and shutdown without powering off to prevent filesystem 7277 // corruption. 7278 guest.ssh_command("sync").unwrap(); 7279 guest.ssh_command("sudo shutdown -H now").unwrap(); 7280 7281 // Wait for the guest to be fully shutdown 7282 thread::sleep(std::time::Duration::new(20, 0)); 7283 7284 // Then shutdown the VM 7285 assert!(dbus_api.remote_command("shutdown", None)); 7286 7287 // Then boot it again 7288 assert!(http_api.remote_command("boot", None)); 7289 guest.wait_vm_boot(None).unwrap(); 7290 7291 // Check that the VM booted as expected 7292 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7293 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7294 }); 7295 7296 let _ = child.kill(); 7297 let output = child.wait_with_output().unwrap(); 7298 7299 handle_child_output(r, &output); 7300 } 7301 7302 #[test] 7303 fn test_api_dbus_create_boot() { 7304 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7305 let guest = Guest::new(Box::new(focal)); 7306 7307 _test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7308 } 7309 7310 #[test] 7311 fn test_api_dbus_shutdown() { 7312 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7313 let guest = Guest::new(Box::new(focal)); 7314 7315 _test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7316 } 7317 7318 #[test] 7319 fn test_api_dbus_delete() { 7320 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7321 let guest = Guest::new(Box::new(focal)); 7322 7323 _test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest); 7324 } 7325 7326 #[test] 7327 fn test_api_dbus_pause_resume() { 7328 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7329 let guest = Guest::new(Box::new(focal)); 7330 7331 _test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7332 } 7333 } 7334 7335 mod common_sequential { 7336 #[cfg(not(feature = "mshv"))] 7337 use crate::*; 7338 7339 #[test] 7340 
#[cfg(not(feature = "mshv"))] 7341 fn test_memory_mergeable_on() { 7342 test_memory_mergeable(true) 7343 } 7344 } 7345 7346 mod windows { 7347 use crate::*; 7348 use once_cell::sync::Lazy; 7349 7350 static NEXT_DISK_ID: Lazy<Mutex<u8>> = Lazy::new(|| Mutex::new(1)); 7351 7352 struct WindowsGuest { 7353 guest: Guest, 7354 auth: PasswordAuth, 7355 } 7356 7357 trait FsType { 7358 const FS_FAT: u8; 7359 const FS_NTFS: u8; 7360 } 7361 impl FsType for WindowsGuest { 7362 const FS_FAT: u8 = 0; 7363 const FS_NTFS: u8 = 1; 7364 } 7365 7366 impl WindowsGuest { 7367 fn new() -> Self { 7368 let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string()); 7369 let guest = Guest::new(Box::new(disk)); 7370 let auth = PasswordAuth { 7371 username: String::from("administrator"), 7372 password: String::from("Admin123"), 7373 }; 7374 7375 WindowsGuest { guest, auth } 7376 } 7377 7378 fn guest(&self) -> &Guest { 7379 &self.guest 7380 } 7381 7382 fn ssh_cmd(&self, cmd: &str) -> String { 7383 ssh_command_ip_with_auth( 7384 cmd, 7385 &self.auth, 7386 &self.guest.network.guest_ip, 7387 DEFAULT_SSH_RETRIES, 7388 DEFAULT_SSH_TIMEOUT, 7389 ) 7390 .unwrap() 7391 } 7392 7393 fn cpu_count(&self) -> u8 { 7394 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"") 7395 .trim() 7396 .parse::<u8>() 7397 .unwrap_or(0) 7398 } 7399 7400 fn ram_size(&self) -> usize { 7401 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"") 7402 .trim() 7403 .parse::<usize>() 7404 .unwrap_or(0) 7405 } 7406 7407 fn netdev_count(&self) -> u8 { 7408 self.ssh_cmd("powershell -Command \"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"") 7409 .trim() 7410 .parse::<u8>() 7411 .unwrap_or(0) 7412 } 7413 7414 fn disk_count(&self) -> u8 { 7415 self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"") 7416 .trim() 7417 .parse::<u8>() 7418 .unwrap_or(0) 7419 } 7420 7421 fn reboot(&self) { 7422 let _ = self.ssh_cmd("shutdown /r /t 0"); 7423 } 7424 7425 fn shutdown(&self) { 7426 let _ = self.ssh_cmd("shutdown /s /t 0"); 7427 } 7428 7429 fn run_dnsmasq(&self) -> std::process::Child { 7430 let listen_address = format!("--listen-address={}", self.guest.network.host_ip); 7431 let dhcp_host = format!( 7432 "--dhcp-host={},{}", 7433 self.guest.network.guest_mac, self.guest.network.guest_ip 7434 ); 7435 let dhcp_range = format!( 7436 "--dhcp-range=eth,{},{}", 7437 self.guest.network.guest_ip, self.guest.network.guest_ip 7438 ); 7439 7440 Command::new("dnsmasq") 7441 .arg("--no-daemon") 7442 .arg("--log-queries") 7443 .arg(listen_address.as_str()) 7444 .arg("--except-interface=lo") 7445 .arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet. 7446 .arg("--conf-file=/dev/null") 7447 .arg(dhcp_host.as_str()) 7448 .arg(dhcp_range.as_str()) 7449 .spawn() 7450 .unwrap() 7451 } 7452 7453 // TODO Cleanup image file explicitly after test, if there's some space issues. 
7454 fn disk_new(&self, fs: u8, sz: usize) -> String { 7455 let mut guard = NEXT_DISK_ID.lock().unwrap(); 7456 let id = *guard; 7457 *guard = id + 1; 7458 7459 let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw")); 7460 let _ = fs::remove_file(&img); 7461 7462 // Create an image file 7463 let out = Command::new("qemu-img") 7464 .args([ 7465 "create", 7466 "-f", 7467 "raw", 7468 img.to_str().unwrap(), 7469 format!("{sz}m").as_str(), 7470 ]) 7471 .output() 7472 .expect("qemu-img command failed") 7473 .stdout; 7474 println!("{out:?}"); 7475 7476 // Associate image to a loop device 7477 let out = Command::new("losetup") 7478 .args(["--show", "-f", img.to_str().unwrap()]) 7479 .output() 7480 .expect("failed to create loop device") 7481 .stdout; 7482 let _tmp = String::from_utf8_lossy(&out); 7483 let loop_dev = _tmp.trim(); 7484 println!("{out:?}"); 7485 7486 // Create a partition table 7487 // echo 'type=7' | sudo sfdisk "${LOOP}" 7488 let mut child = Command::new("sfdisk") 7489 .args([loop_dev]) 7490 .stdin(Stdio::piped()) 7491 .spawn() 7492 .unwrap(); 7493 let stdin = child.stdin.as_mut().expect("failed to open stdin"); 7494 stdin 7495 .write_all("type=7".as_bytes()) 7496 .expect("failed to write stdin"); 7497 let out = child.wait_with_output().expect("sfdisk failed").stdout; 7498 println!("{out:?}"); 7499 7500 // Disengage the loop device 7501 let out = Command::new("losetup") 7502 .args(["-d", loop_dev]) 7503 .output() 7504 .expect("loop device not found") 7505 .stdout; 7506 println!("{out:?}"); 7507 7508 // Re-associate loop device pointing to the partition only 7509 let out = Command::new("losetup") 7510 .args([ 7511 "--show", 7512 "--offset", 7513 (512 * 2048).to_string().as_str(), 7514 "-f", 7515 img.to_str().unwrap(), 7516 ]) 7517 .output() 7518 .expect("failed to create loop device") 7519 .stdout; 7520 let _tmp = String::from_utf8_lossy(&out); 7521 let loop_dev = _tmp.trim(); 7522 println!("{out:?}"); 7523 7524 // Create filesystem. 7525 let fs_cmd = match fs { 7526 WindowsGuest::FS_FAT => "mkfs.msdos", 7527 WindowsGuest::FS_NTFS => "mkfs.ntfs", 7528 _ => panic!("Unknown filesystem type '{fs}'"), 7529 }; 7530 let out = Command::new(fs_cmd) 7531 .args([&loop_dev]) 7532 .output() 7533 .unwrap_or_else(|_| panic!("{fs_cmd} failed")) 7534 .stdout; 7535 println!("{out:?}"); 7536 7537 // Disengage the loop device 7538 let out = Command::new("losetup") 7539 .args(["-d", loop_dev]) 7540 .output() 7541 .unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found")) 7542 .stdout; 7543 println!("{out:?}"); 7544 7545 img.to_str().unwrap().to_string() 7546 } 7547 7548 fn disks_set_rw(&self) { 7549 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\""); 7550 } 7551 7552 fn disks_online(&self) { 7553 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\""); 7554 } 7555 7556 fn disk_file_put(&self, fname: &str, data: &str) { 7557 let _ = self.ssh_cmd(&format!( 7558 "powershell -Command \"'{data}' | Set-Content -Path {fname}\"" 7559 )); 7560 } 7561 7562 fn disk_file_read(&self, fname: &str) -> String { 7563 self.ssh_cmd(&format!( 7564 "powershell -Command \"Get-Content -Path {fname}\"" 7565 )) 7566 } 7567 7568 fn wait_for_boot(&self) -> bool { 7569 let cmd = "dir /b c:\\ | find \"Windows\""; 7570 let tmo_max = 180; 7571 // The timeout increase by n*1+n*2+n*3+..., therefore the initial 7572 // interval must be small. 
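            // Worked example: with tmo_int = 2 and tmo_max = 180 the loop
            // below settles on ret = 13, since 2 * (1 + 2 + ... + 12) = 156 is
            // still below 180 while 2 * (1 + 2 + ... + 13) = 182 is not.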
7573 let tmo_int = 2; 7574 let out = ssh_command_ip_with_auth( 7575 cmd, 7576 &self.auth, 7577 &self.guest.network.guest_ip, 7578 { 7579 let mut ret = 1; 7580 let mut tmo_acc = 0; 7581 loop { 7582 tmo_acc += tmo_int * ret; 7583 if tmo_acc >= tmo_max { 7584 break; 7585 } 7586 ret += 1; 7587 } 7588 ret 7589 }, 7590 tmo_int, 7591 ) 7592 .unwrap(); 7593 7594 if "Windows" == out.trim() { 7595 return true; 7596 } 7597 7598 false 7599 } 7600 } 7601 7602 fn vcpu_threads_count(pid: u32) -> u8 { 7603 // ps -T -p 12345 | grep vcpu | wc -l 7604 let out = Command::new("ps") 7605 .args(["-T", "-p", format!("{pid}").as_str()]) 7606 .output() 7607 .expect("ps command failed") 7608 .stdout; 7609 return String::from_utf8_lossy(&out).matches("vcpu").count() as u8; 7610 } 7611 7612 fn netdev_ctrl_threads_count(pid: u32) -> u8 { 7613 // ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l 7614 let out = Command::new("ps") 7615 .args(["-T", "-p", format!("{pid}").as_str()]) 7616 .output() 7617 .expect("ps command failed") 7618 .stdout; 7619 let mut n = 0; 7620 String::from_utf8_lossy(&out) 7621 .split_whitespace() 7622 .for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl 7623 n 7624 } 7625 7626 fn disk_ctrl_threads_count(pid: u32) -> u8 { 7627 // ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l 7628 let out = Command::new("ps") 7629 .args(["-T", "-p", format!("{pid}").as_str()]) 7630 .output() 7631 .expect("ps command failed") 7632 .stdout; 7633 let mut n = 0; 7634 String::from_utf8_lossy(&out) 7635 .split_whitespace() 7636 .for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // _disk0_q0, don't care about multiple queues as they're related to the same hdd 7637 n 7638 } 7639 7640 #[test] 7641 fn test_windows_guest() { 7642 let windows_guest = WindowsGuest::new(); 7643 7644 let mut child = GuestCommand::new(windows_guest.guest()) 7645 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7646 .args(["--memory", "size=4G"]) 7647 .args(["--kernel", edk2_path().to_str().unwrap()]) 7648 .args(["--serial", "tty"]) 7649 .args(["--console", "off"]) 7650 .default_disks() 7651 .default_net() 7652 .capture_output() 7653 .spawn() 7654 .unwrap(); 7655 7656 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7657 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7658 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7659 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7660 7661 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7662 7663 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7664 7665 let r = std::panic::catch_unwind(|| { 7666 // Wait to make sure Windows boots up 7667 assert!(windows_guest.wait_for_boot()); 7668 7669 windows_guest.shutdown(); 7670 }); 7671 7672 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7673 let _ = child.kill(); 7674 let output = child.wait_with_output().unwrap(); 7675 7676 let _ = child_dnsmasq.kill(); 7677 let _ = child_dnsmasq.wait(); 7678 7679 handle_child_output(r, &output); 7680 } 7681 7682 #[test] 7683 fn test_windows_guest_multiple_queues() { 7684 let windows_guest = WindowsGuest::new(); 7685 7686 let mut ovmf_path = dirs::home_dir().unwrap(); 7687 ovmf_path.push("workloads"); 7688 ovmf_path.push(OVMF_NAME); 7689 7690 let mut child = GuestCommand::new(windows_guest.guest()) 7691 .args(["--cpus", "boot=4,kvm_hyperv=on"]) 7692 .args(["--memory", "size=4G"]) 7693 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7694 .args(["--serial", "tty"]) 7695 .args(["--console", 
"off"]) 7696 .args([ 7697 "--disk", 7698 format!( 7699 "path={},num_queues=4", 7700 windows_guest 7701 .guest() 7702 .disk_config 7703 .disk(DiskType::OperatingSystem) 7704 .unwrap() 7705 ) 7706 .as_str(), 7707 ]) 7708 .args([ 7709 "--net", 7710 format!( 7711 "tap=,mac={},ip={},mask=255.255.255.0,num_queues=8", 7712 windows_guest.guest().network.guest_mac, 7713 windows_guest.guest().network.host_ip 7714 ) 7715 .as_str(), 7716 ]) 7717 .capture_output() 7718 .spawn() 7719 .unwrap(); 7720 7721 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7722 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7723 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7724 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7725 7726 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7727 7728 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7729 7730 let r = std::panic::catch_unwind(|| { 7731 // Wait to make sure Windows boots up 7732 assert!(windows_guest.wait_for_boot()); 7733 7734 windows_guest.shutdown(); 7735 }); 7736 7737 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7738 let _ = child.kill(); 7739 let output = child.wait_with_output().unwrap(); 7740 7741 let _ = child_dnsmasq.kill(); 7742 let _ = child_dnsmasq.wait(); 7743 7744 handle_child_output(r, &output); 7745 } 7746 7747 #[test] 7748 #[cfg(not(feature = "mshv"))] 7749 #[ignore = "See #4327"] 7750 fn test_windows_guest_snapshot_restore() { 7751 let windows_guest = WindowsGuest::new(); 7752 7753 let mut ovmf_path = dirs::home_dir().unwrap(); 7754 ovmf_path.push("workloads"); 7755 ovmf_path.push(OVMF_NAME); 7756 7757 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7758 let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir)); 7759 7760 let mut child = GuestCommand::new(windows_guest.guest()) 7761 .args(["--api-socket", &api_socket_source]) 7762 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 7763 .args(["--memory", "size=4G"]) 7764 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7765 .args(["--serial", "tty"]) 7766 .args(["--console", "off"]) 7767 .default_disks() 7768 .default_net() 7769 .capture_output() 7770 .spawn() 7771 .unwrap(); 7772 7773 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 7774 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7775 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 7776 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 7777 7778 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 7779 7780 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7781 7782 // Wait to make sure Windows boots up 7783 assert!(windows_guest.wait_for_boot()); 7784 7785 let snapshot_dir = temp_snapshot_dir_path(&tmp_dir); 7786 7787 // Pause the VM 7788 assert!(remote_command(&api_socket_source, "pause", None)); 7789 7790 // Take a snapshot from the VM 7791 assert!(remote_command( 7792 &api_socket_source, 7793 "snapshot", 7794 Some(format!("file://{snapshot_dir}").as_str()), 7795 )); 7796 7797 // Wait to make sure the snapshot is completed 7798 thread::sleep(std::time::Duration::new(30, 0)); 7799 7800 let _ = child.kill(); 7801 child.wait().unwrap(); 7802 7803 let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir)); 7804 7805 // Restore the VM from the snapshot 7806 let mut child = GuestCommand::new(windows_guest.guest()) 7807 .args(["--api-socket", &api_socket_restored]) 7808 .args([ 7809 "--restore", 7810 format!("source_url=file://{snapshot_dir}").as_str(), 7811 ]) 7812 
.capture_output() 7813 .spawn() 7814 .unwrap(); 7815 7816 // Wait for the VM to be restored 7817 thread::sleep(std::time::Duration::new(20, 0)); 7818 7819 let r = std::panic::catch_unwind(|| { 7820 // Resume the VM 7821 assert!(remote_command(&api_socket_restored, "resume", None)); 7822 7823 windows_guest.shutdown(); 7824 }); 7825 7826 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 7827 let _ = child.kill(); 7828 let output = child.wait_with_output().unwrap(); 7829 7830 let _ = child_dnsmasq.kill(); 7831 let _ = child_dnsmasq.wait(); 7832 7833 handle_child_output(r, &output); 7834 } 7835 7836 #[test] 7837 #[cfg(not(feature = "mshv"))] 7838 #[cfg(not(target_arch = "aarch64"))] 7839 fn test_windows_guest_cpu_hotplug() { 7840 let windows_guest = WindowsGuest::new(); 7841 7842 let mut ovmf_path = dirs::home_dir().unwrap(); 7843 ovmf_path.push("workloads"); 7844 ovmf_path.push(OVMF_NAME); 7845 7846 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 7847 let api_socket = temp_api_path(&tmp_dir); 7848 7849 let mut child = GuestCommand::new(windows_guest.guest()) 7850 .args(["--api-socket", &api_socket]) 7851 .args(["--cpus", "boot=2,max=8,kvm_hyperv=on"]) 7852 .args(["--memory", "size=4G"]) 7853 .args(["--kernel", ovmf_path.to_str().unwrap()]) 7854 .args(["--serial", "tty"]) 7855 .args(["--console", "off"]) 7856 .default_disks() 7857 .default_net() 7858 .capture_output() 7859 .spawn() 7860 .unwrap(); 7861 7862 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 7863 7864 let r = std::panic::catch_unwind(|| { 7865 // Wait to make sure Windows boots up 7866 assert!(windows_guest.wait_for_boot()); 7867 7868 let vcpu_num = 2; 7869 // Check the initial number of CPUs the guest sees 7870 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7871 // Check the initial number of vcpu threads in the CH process 7872 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7873 7874 let vcpu_num = 6; 7875 // Hotplug some CPUs 7876 resize_command(&api_socket, Some(vcpu_num), None, None, None); 7877 // Wait to make sure CPUs are added 7878 thread::sleep(std::time::Duration::new(10, 0)); 7879 // Check the guest sees the correct number 7880 assert_eq!(windows_guest.cpu_count(), vcpu_num); 7881 // Check the CH process has the correct number of vcpu threads 7882 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 7883 7884 let vcpu_num = 4; 7885 // Remove some CPUs. Note that Windows doesn't support hot-remove. 
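            // Since Windows cannot hot-remove vCPUs, the assertions for the
            // reduced count are only performed after the reboot below.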
            resize_command(&api_socket, Some(vcpu_num), None, None, None);
            // Wait to make sure CPUs are removed
            thread::sleep(std::time::Duration::new(10, 0));
            // Reboot to let Windows catch up
            windows_guest.reboot();
            // Wait to make sure Windows completely rebooted
            thread::sleep(std::time::Duration::new(60, 0));
            // Check the guest sees the correct number
            assert_eq!(windows_guest.cpu_count(), vcpu_num);
            // Check the CH process has the correct number of vcpu threads
            assert_eq!(vcpu_threads_count(child.id()), vcpu_num);

            windows_guest.shutdown();
        });

        let _ = child.wait_timeout(std::time::Duration::from_secs(60));
        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        let _ = child_dnsmasq.kill();
        let _ = child_dnsmasq.wait();

        handle_child_output(r, &output);
    }

    #[test]
    #[cfg(not(feature = "mshv"))]
    #[cfg(not(target_arch = "aarch64"))]
    fn test_windows_guest_ram_hotplug() {
        let windows_guest = WindowsGuest::new();

        let mut ovmf_path = dirs::home_dir().unwrap();
        ovmf_path.push("workloads");
        ovmf_path.push(OVMF_NAME);

        let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
        let api_socket = temp_api_path(&tmp_dir);

        let mut child = GuestCommand::new(windows_guest.guest())
            .args(["--api-socket", &api_socket])
            .args(["--cpus", "boot=2,kvm_hyperv=on"])
            .args(["--memory", "size=2G,hotplug_size=5G"])
            .args(["--kernel", ovmf_path.to_str().unwrap()])
            .args(["--serial", "tty"])
            .args(["--console", "off"])
            .default_disks()
            .default_net()
            .capture_output()
            .spawn()
            .unwrap();

        let mut child_dnsmasq = windows_guest.run_dnsmasq();

        let r = std::panic::catch_unwind(|| {
            // Wait to make sure Windows boots up
            assert!(windows_guest.wait_for_boot());

            let ram_size = 2 * 1024 * 1024 * 1024;
            // Check the initial amount of RAM the guest sees
            let current_ram_size = windows_guest.ram_size();
            // This size seems to be reserved by the system and thus the
            // reported amount differs by this constant value.
            let reserved_ram_size = ram_size - current_ram_size;
            // Verify that no more than a 4 MiB constant diff is wasted
            // by the reserved RAM.
            assert!(reserved_ram_size < 4 * 1024 * 1024);

            let ram_size = 4 * 1024 * 1024 * 1024;
            // Hotplug some RAM
            resize_command(&api_socket, None, Some(ram_size), None, None);
            // Wait to make sure RAM has been added
            thread::sleep(std::time::Duration::new(10, 0));
            // Check the guest sees the correct amount
            assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size);

            let ram_size = 3 * 1024 * 1024 * 1024;
            // Unplug some RAM. Note that hot-remove most likely won't work.
            resize_command(&api_socket, None, Some(ram_size), None, None);
            // Wait to make sure the resize request has been processed
            thread::sleep(std::time::Duration::new(10, 0));
            // Reboot to let Windows catch up
            windows_guest.reboot();
            // Wait to make sure the guest completely rebooted
            thread::sleep(std::time::Duration::new(60, 0));
            // Check the guest sees the correct amount
            assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size);

            windows_guest.shutdown();
        });

        let _ = child.wait_timeout(std::time::Duration::from_secs(60));
        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        let _ = child_dnsmasq.kill();
        let _ = child_dnsmasq.wait();

        handle_child_output(r, &output);
    }

    #[test]
    #[cfg(not(feature = "mshv"))]
    fn test_windows_guest_netdev_hotplug() {
        let windows_guest = WindowsGuest::new();

        let mut ovmf_path = dirs::home_dir().unwrap();
        ovmf_path.push("workloads");
        ovmf_path.push(OVMF_NAME);

        let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
        let api_socket = temp_api_path(&tmp_dir);

        let mut child = GuestCommand::new(windows_guest.guest())
            .args(["--api-socket", &api_socket])
            .args(["--cpus", "boot=2,kvm_hyperv=on"])
            .args(["--memory", "size=4G"])
            .args(["--kernel", ovmf_path.to_str().unwrap()])
            .args(["--serial", "tty"])
            .args(["--console", "off"])
            .default_disks()
            .default_net()
            .capture_output()
            .spawn()
            .unwrap();

        let mut child_dnsmasq = windows_guest.run_dnsmasq();

        let r = std::panic::catch_unwind(|| {
            // Wait to make sure Windows boots up
            assert!(windows_guest.wait_for_boot());

            // Initially present network device
            let netdev_num = 1;
            assert_eq!(windows_guest.netdev_count(), netdev_num);
            assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);

            // Hotplug network device
            let (cmd_success, cmd_output) = remote_command_w_output(
                &api_socket,
                "add-net",
                Some(windows_guest.guest().default_net_string().as_str()),
            );
            assert!(cmd_success);
            assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\""));
            thread::sleep(std::time::Duration::new(5, 0));
            // Verify the device is on the system
            let netdev_num = 2;
            assert_eq!(windows_guest.netdev_count(), netdev_num);
            assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);

            // Remove network device
            let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2"));
            assert!(cmd_success);
            thread::sleep(std::time::Duration::new(5, 0));
            // Verify the device has been removed
            let netdev_num = 1;
            assert_eq!(windows_guest.netdev_count(), netdev_num);
            assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);

            windows_guest.shutdown();
        });

        let _ = child.wait_timeout(std::time::Duration::from_secs(60));
        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        let _ = child_dnsmasq.kill();
        let _ = child_dnsmasq.wait();

        handle_child_output(r, &output);
    }

    #[test]
    #[ignore = "See #6037"]
    #[cfg(not(feature = "mshv"))]
    #[cfg(not(target_arch = "aarch64"))]
    fn test_windows_guest_disk_hotplug() {
        let windows_guest = WindowsGuest::new();

        let mut ovmf_path = dirs::home_dir().unwrap();
        ovmf_path.push("workloads");
8067 ovmf_path.push(OVMF_NAME); 8068 8069 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8070 let api_socket = temp_api_path(&tmp_dir); 8071 8072 let mut child = GuestCommand::new(windows_guest.guest()) 8073 .args(["--api-socket", &api_socket]) 8074 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8075 .args(["--memory", "size=4G"]) 8076 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8077 .args(["--serial", "tty"]) 8078 .args(["--console", "off"]) 8079 .default_disks() 8080 .default_net() 8081 .capture_output() 8082 .spawn() 8083 .unwrap(); 8084 8085 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8086 8087 let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100); 8088 8089 let r = std::panic::catch_unwind(|| { 8090 // Wait to make sure Windows boots up 8091 assert!(windows_guest.wait_for_boot()); 8092 8093 // Initially present disk device 8094 let disk_num = 1; 8095 assert_eq!(windows_guest.disk_count(), disk_num); 8096 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8097 8098 // Hotplug disk device 8099 let (cmd_success, cmd_output) = remote_command_w_output( 8100 &api_socket, 8101 "add-disk", 8102 Some(format!("path={disk},readonly=off").as_str()), 8103 ); 8104 assert!(cmd_success); 8105 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\"")); 8106 thread::sleep(std::time::Duration::new(5, 0)); 8107 // Online disk device 8108 windows_guest.disks_set_rw(); 8109 windows_guest.disks_online(); 8110 // Verify the device is on the system 8111 let disk_num = 2; 8112 assert_eq!(windows_guest.disk_count(), disk_num); 8113 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8114 8115 let data = "hello"; 8116 let fname = "d:\\world"; 8117 windows_guest.disk_file_put(fname, data); 8118 8119 // Unmount disk device 8120 let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2")); 8121 assert!(cmd_success); 8122 thread::sleep(std::time::Duration::new(5, 0)); 8123 // Verify the device has been removed 8124 let disk_num = 1; 8125 assert_eq!(windows_guest.disk_count(), disk_num); 8126 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8127 8128 // Remount and check the file exists with the expected contents 8129 let (cmd_success, _cmd_output) = remote_command_w_output( 8130 &api_socket, 8131 "add-disk", 8132 Some(format!("path={disk},readonly=off").as_str()), 8133 ); 8134 assert!(cmd_success); 8135 thread::sleep(std::time::Duration::new(5, 0)); 8136 let out = windows_guest.disk_file_read(fname); 8137 assert_eq!(data, out.trim()); 8138 8139 // Intentionally no unmount, it'll happen at shutdown. 
8140 8141 windows_guest.shutdown(); 8142 }); 8143 8144 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8145 let _ = child.kill(); 8146 let output = child.wait_with_output().unwrap(); 8147 8148 let _ = child_dnsmasq.kill(); 8149 let _ = child_dnsmasq.wait(); 8150 8151 handle_child_output(r, &output); 8152 } 8153 8154 #[test] 8155 #[ignore = "See #6037"] 8156 #[cfg(not(feature = "mshv"))] 8157 #[cfg(not(target_arch = "aarch64"))] 8158 fn test_windows_guest_disk_hotplug_multi() { 8159 let windows_guest = WindowsGuest::new(); 8160 8161 let mut ovmf_path = dirs::home_dir().unwrap(); 8162 ovmf_path.push("workloads"); 8163 ovmf_path.push(OVMF_NAME); 8164 8165 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8166 let api_socket = temp_api_path(&tmp_dir); 8167 8168 let mut child = GuestCommand::new(windows_guest.guest()) 8169 .args(["--api-socket", &api_socket]) 8170 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8171 .args(["--memory", "size=2G"]) 8172 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8173 .args(["--serial", "tty"]) 8174 .args(["--console", "off"]) 8175 .default_disks() 8176 .default_net() 8177 .capture_output() 8178 .spawn() 8179 .unwrap(); 8180 8181 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8182 8183 // Predefined data to used at various test stages 8184 let disk_test_data: [[String; 4]; 2] = [ 8185 [ 8186 "_disk2".to_string(), 8187 windows_guest.disk_new(WindowsGuest::FS_FAT, 123), 8188 "d:\\world".to_string(), 8189 "hello".to_string(), 8190 ], 8191 [ 8192 "_disk3".to_string(), 8193 windows_guest.disk_new(WindowsGuest::FS_NTFS, 333), 8194 "e:\\hello".to_string(), 8195 "world".to_string(), 8196 ], 8197 ]; 8198 8199 let r = std::panic::catch_unwind(|| { 8200 // Wait to make sure Windows boots up 8201 assert!(windows_guest.wait_for_boot()); 8202 8203 // Initially present disk device 8204 let disk_num = 1; 8205 assert_eq!(windows_guest.disk_count(), disk_num); 8206 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8207 8208 for it in &disk_test_data { 8209 let disk_id = it[0].as_str(); 8210 let disk = it[1].as_str(); 8211 // Hotplug disk device 8212 let (cmd_success, cmd_output) = remote_command_w_output( 8213 &api_socket, 8214 "add-disk", 8215 Some(format!("path={disk},readonly=off").as_str()), 8216 ); 8217 assert!(cmd_success); 8218 assert!(String::from_utf8_lossy(&cmd_output) 8219 .contains(format!("\"id\":\"{disk_id}\"").as_str())); 8220 thread::sleep(std::time::Duration::new(5, 0)); 8221 // Online disk devices 8222 windows_guest.disks_set_rw(); 8223 windows_guest.disks_online(); 8224 } 8225 // Verify the devices are on the system 8226 let disk_num = (disk_test_data.len() + 1) as u8; 8227 assert_eq!(windows_guest.disk_count(), disk_num); 8228 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8229 8230 // Put test data 8231 for it in &disk_test_data { 8232 let fname = it[2].as_str(); 8233 let data = it[3].as_str(); 8234 windows_guest.disk_file_put(fname, data); 8235 } 8236 8237 // Unmount disk devices 8238 for it in &disk_test_data { 8239 let disk_id = it[0].as_str(); 8240 let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id)); 8241 assert!(cmd_success); 8242 thread::sleep(std::time::Duration::new(5, 0)); 8243 } 8244 8245 // Verify the devices have been removed 8246 let disk_num = 1; 8247 assert_eq!(windows_guest.disk_count(), disk_num); 8248 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8249 8250 // Remount 8251 for it in &disk_test_data { 8252 let disk = it[1].as_str(); 8253 let (cmd_success, 
_cmd_output) = remote_command_w_output( 8254 &api_socket, 8255 "add-disk", 8256 Some(format!("path={disk},readonly=off").as_str()), 8257 ); 8258 assert!(cmd_success); 8259 thread::sleep(std::time::Duration::new(5, 0)); 8260 } 8261 8262 // Check the files exists with the expected contents 8263 for it in &disk_test_data { 8264 let fname = it[2].as_str(); 8265 let data = it[3].as_str(); 8266 let out = windows_guest.disk_file_read(fname); 8267 assert_eq!(data, out.trim()); 8268 } 8269 8270 // Intentionally no unmount, it'll happen at shutdown. 8271 8272 windows_guest.shutdown(); 8273 }); 8274 8275 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8276 let _ = child.kill(); 8277 let output = child.wait_with_output().unwrap(); 8278 8279 let _ = child_dnsmasq.kill(); 8280 let _ = child_dnsmasq.wait(); 8281 8282 handle_child_output(r, &output); 8283 } 8284 8285 #[test] 8286 #[cfg(not(feature = "mshv"))] 8287 #[cfg(not(target_arch = "aarch64"))] 8288 fn test_windows_guest_netdev_multi() { 8289 let windows_guest = WindowsGuest::new(); 8290 8291 let mut ovmf_path = dirs::home_dir().unwrap(); 8292 ovmf_path.push("workloads"); 8293 ovmf_path.push(OVMF_NAME); 8294 8295 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8296 let api_socket = temp_api_path(&tmp_dir); 8297 8298 let mut child = GuestCommand::new(windows_guest.guest()) 8299 .args(["--api-socket", &api_socket]) 8300 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8301 .args(["--memory", "size=4G"]) 8302 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8303 .args(["--serial", "tty"]) 8304 .args(["--console", "off"]) 8305 .default_disks() 8306 // The multi net dev config is borrowed from test_multiple_network_interfaces 8307 .args([ 8308 "--net", 8309 windows_guest.guest().default_net_string().as_str(), 8310 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 8311 "tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 8312 ]) 8313 .capture_output() 8314 .spawn() 8315 .unwrap(); 8316 8317 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8318 8319 let r = std::panic::catch_unwind(|| { 8320 // Wait to make sure Windows boots up 8321 assert!(windows_guest.wait_for_boot()); 8322 8323 let netdev_num = 3; 8324 assert_eq!(windows_guest.netdev_count(), netdev_num); 8325 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8326 8327 let tap_count = exec_host_command_output("ip link | grep -c mytap42"); 8328 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 8329 8330 windows_guest.shutdown(); 8331 }); 8332 8333 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8334 let _ = child.kill(); 8335 let output = child.wait_with_output().unwrap(); 8336 8337 let _ = child_dnsmasq.kill(); 8338 let _ = child_dnsmasq.wait(); 8339 8340 handle_child_output(r, &output); 8341 } 8342 } 8343 8344 #[cfg(target_arch = "x86_64")] 8345 mod sgx { 8346 use crate::*; 8347 8348 #[test] 8349 fn test_sgx() { 8350 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 8351 let jammy = UbuntuDiskConfig::new(jammy_image); 8352 let guest = Guest::new(Box::new(jammy)); 8353 8354 let mut child = GuestCommand::new(&guest) 8355 .args(["--cpus", "boot=1"]) 8356 .args(["--memory", "size=512M"]) 8357 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8358 .default_disks() 8359 .default_net() 8360 .args(["--sgx-epc", "id=epc0,size=64M"]) 8361 .capture_output() 8362 .spawn() 8363 .unwrap(); 8364 8365 let r = std::panic::catch_unwind(|| { 8366 guest.wait_vm_boot(None).unwrap(); 8367 8368 // Check 
if SGX is correctly detected in the guest. 8369 guest.check_sgx_support().unwrap(); 8370 8371 // Validate the SGX EPC section is 64MiB. 8372 assert_eq!( 8373 guest 8374 .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2") 8375 .unwrap() 8376 .trim(), 8377 "0x0000000004000000" 8378 ); 8379 }); 8380 8381 let _ = child.kill(); 8382 let output = child.wait_with_output().unwrap(); 8383 8384 handle_child_output(r, &output); 8385 } 8386 } 8387 8388 #[cfg(target_arch = "x86_64")] 8389 mod vfio { 8390 use crate::*; 8391 8392 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) { 8393 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8394 let guest = Guest::new(Box::new(jammy)); 8395 let api_socket = temp_api_path(&guest.tmp_dir); 8396 8397 let mut child = GuestCommand::new(&guest) 8398 .args(["--cpus", "boot=4"]) 8399 .args([ 8400 "--memory", 8401 format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(), 8402 ]) 8403 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8404 .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"]) 8405 .args(["--api-socket", &api_socket]) 8406 .default_disks() 8407 .default_net() 8408 .capture_output() 8409 .spawn() 8410 .unwrap(); 8411 8412 let r = std::panic::catch_unwind(|| { 8413 guest.wait_vm_boot(None).unwrap(); 8414 8415 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8416 8417 guest.enable_memory_hotplug(); 8418 8419 // Add RAM to the VM 8420 let desired_ram = 6 << 30; 8421 resize_command(&api_socket, None, Some(desired_ram), None, None); 8422 thread::sleep(std::time::Duration::new(30, 0)); 8423 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8424 8425 // Check the VFIO device works when RAM is increased to 6GiB 8426 guest.check_nvidia_gpu(); 8427 }); 8428 8429 let _ = child.kill(); 8430 let output = child.wait_with_output().unwrap(); 8431 8432 handle_child_output(r, &output); 8433 } 8434 8435 #[test] 8436 fn test_nvidia_card_memory_hotplug_acpi() { 8437 test_nvidia_card_memory_hotplug("acpi") 8438 } 8439 8440 #[test] 8441 fn test_nvidia_card_memory_hotplug_virtio_mem() { 8442 test_nvidia_card_memory_hotplug("virtio-mem") 8443 } 8444 8445 #[test] 8446 fn test_nvidia_card_pci_hotplug() { 8447 let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string()); 8448 let guest = Guest::new(Box::new(jammy)); 8449 let api_socket = temp_api_path(&guest.tmp_dir); 8450 8451 let mut child = GuestCommand::new(&guest) 8452 .args(["--cpus", "boot=4"]) 8453 .args(["--memory", "size=4G"]) 8454 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8455 .args(["--api-socket", &api_socket]) 8456 .default_disks() 8457 .default_net() 8458 .capture_output() 8459 .spawn() 8460 .unwrap(); 8461 8462 let r = std::panic::catch_unwind(|| { 8463 guest.wait_vm_boot(None).unwrap(); 8464 8465 // Hotplug the card to the VM 8466 let (cmd_success, cmd_output) = remote_command_w_output( 8467 &api_socket, 8468 "add-device", 8469 Some("id=vfio0,path=/sys/bus/pci/devices/0000:31:00.0/"), 8470 ); 8471 assert!(cmd_success); 8472 assert!(String::from_utf8_lossy(&cmd_output) 8473 .contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}")); 8474 8475 thread::sleep(std::time::Duration::new(10, 0)); 8476 8477 // Check the VFIO device works after hotplug 8478 guest.check_nvidia_gpu(); 8479 }); 8480 8481 let _ = child.kill(); 8482 let output = child.wait_with_output().unwrap(); 8483 8484 handle_child_output(r, &output); 8485 } 8486 8487 #[test] 8488 fn 
test_nvidia_card_reboot() {
        let jammy = UbuntuDiskConfig::new(JAMMY_NVIDIA_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(jammy));
        let api_socket = temp_api_path(&guest.tmp_dir);

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", "boot=4"])
            .args(["--memory", "size=4G"])
            .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
            .args(["--device", "path=/sys/bus/pci/devices/0000:31:00.0/"])
            .args(["--api-socket", &api_socket])
            .default_disks()
            .default_net()
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Check the VFIO device works after boot
            guest.check_nvidia_gpu();

            guest.reboot_linux(0, None);

            // Check the VFIO device works after reboot
            guest.check_nvidia_gpu();
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();

        handle_child_output(r, &output);
    }
}

mod live_migration {
    use crate::*;

    fn start_live_migration(
        migration_socket: &str,
        src_api_socket: &str,
        dest_api_socket: &str,
        local: bool,
    ) -> bool {
        // Start to receive the migration on the destination VM
        let mut receive_migration = Command::new(clh_command("ch-remote"))
            .args([
                &format!("--api-socket={dest_api_socket}"),
                "receive-migration",
                &format! {"unix:{migration_socket}"},
            ])
            .stderr(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()
            .unwrap();
        // Give it '1s' to make sure the 'migration_socket' file is properly created
        thread::sleep(std::time::Duration::new(1, 0));
        // Start to send the migration from the source VM

        let mut args = [
            format!("--api-socket={}", &src_api_socket),
            "send-migration".to_string(),
            format!
{"unix:{migration_socket}"}, 8552 ] 8553 .to_vec(); 8554 8555 if local { 8556 args.insert(2, "--local".to_string()); 8557 } 8558 8559 let mut send_migration = Command::new(clh_command("ch-remote")) 8560 .args(&args) 8561 .stderr(Stdio::piped()) 8562 .stdout(Stdio::piped()) 8563 .spawn() 8564 .unwrap(); 8565 8566 // The 'send-migration' command should be executed successfully within the given timeout 8567 let send_success = if let Some(status) = send_migration 8568 .wait_timeout(std::time::Duration::from_secs(30)) 8569 .unwrap() 8570 { 8571 status.success() 8572 } else { 8573 false 8574 }; 8575 8576 if !send_success { 8577 let _ = send_migration.kill(); 8578 let output = send_migration.wait_with_output().unwrap(); 8579 eprintln!( 8580 "\n\n==== Start 'send_migration' output ==== \ 8581 \n\n---stdout---\n{}\n\n---stderr---\n{} \ 8582 \n\n==== End 'send_migration' output ====\n\n", 8583 String::from_utf8_lossy(&output.stdout), 8584 String::from_utf8_lossy(&output.stderr) 8585 ); 8586 } 8587 8588 // The 'receive-migration' command should be executed successfully within the given timeout 8589 let receive_success = if let Some(status) = receive_migration 8590 .wait_timeout(std::time::Duration::from_secs(30)) 8591 .unwrap() 8592 { 8593 status.success() 8594 } else { 8595 false 8596 }; 8597 8598 if !receive_success { 8599 let _ = receive_migration.kill(); 8600 let output = receive_migration.wait_with_output().unwrap(); 8601 eprintln!( 8602 "\n\n==== Start 'receive_migration' output ==== \ 8603 \n\n---stdout---\n{}\n\n---stderr---\n{} \ 8604 \n\n==== End 'receive_migration' output ====\n\n", 8605 String::from_utf8_lossy(&output.stdout), 8606 String::from_utf8_lossy(&output.stderr) 8607 ); 8608 } 8609 8610 send_success && receive_success 8611 } 8612 8613 fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! { 8614 let mut src_vm = src_vm; 8615 let mut dest_vm = dest_vm; 8616 8617 let _ = src_vm.kill(); 8618 let src_output = src_vm.wait_with_output().unwrap(); 8619 eprintln!( 8620 "\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====", 8621 String::from_utf8_lossy(&src_output.stdout) 8622 ); 8623 eprintln!( 8624 "\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====", 8625 String::from_utf8_lossy(&src_output.stderr) 8626 ); 8627 let _ = dest_vm.kill(); 8628 let dest_output = dest_vm.wait_with_output().unwrap(); 8629 eprintln!( 8630 "\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====", 8631 String::from_utf8_lossy(&dest_output.stdout) 8632 ); 8633 eprintln!( 8634 "\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====", 8635 String::from_utf8_lossy(&dest_output.stderr) 8636 ); 8637 8638 if let Some(ovs_vm) = ovs_vm { 8639 let mut ovs_vm = ovs_vm; 8640 let _ = ovs_vm.kill(); 8641 let ovs_output = ovs_vm.wait_with_output().unwrap(); 8642 eprintln!( 8643 "\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====", 8644 String::from_utf8_lossy(&ovs_output.stdout) 8645 ); 8646 eprintln!( 8647 "\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====", 8648 String::from_utf8_lossy(&ovs_output.stderr) 8649 ); 8650 8651 cleanup_ovs_dpdk(); 8652 } 8653 8654 panic!("Test failed: {message}") 8655 } 8656 8657 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 8658 // same host. It ensures the following behaviors: 8659 // 1. 
The source VM is up and functional (including that various virtio devices are working properly);
    // 2. The 'send-migration' and 'receive-migration' commands finished successfully;
    // 3. The source VM terminated gracefully after live migration;
    // 4. The destination VM is functional (including that various virtio devices are working properly) after
    // live migration;
    // Note: This test does not use vsock as we can't create two identical vsock devices on the same host.
    fn _test_live_migration(upgrade_test: bool, local: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let kernel_path = direct_kernel_boot_path();
        let console_text = String::from("On a branch floating down river a cricket, singing.");
        let net_id = "net123";
        let net_params = format!(
            "id={},tap=,mac={},ip={},mask=255.255.255.0",
            net_id, guest.network.guest_mac, guest.network.host_ip
        );

        let memory_param: &[&str] = if local {
            &["--memory", "size=4G,shared=on"]
        } else {
            &["--memory", "size=4G"]
        };

        let boot_vcpus = 2;
        let max_vcpus = 4;

        let pmem_temp_file = TempFile::new().unwrap();
        pmem_temp_file.as_file().set_len(128 << 20).unwrap();
        std::process::Command::new("mkfs.ext4")
            .arg(pmem_temp_file.as_path())
            .output()
            .expect("Expect creating disk image to succeed");
        let pmem_path = String::from("/dev/pmem0");

        // Start the source VM
        let src_vm_path = if !upgrade_test {
            clh_command("cloud-hypervisor")
        } else {
            cloud_hypervisor_release_path()
        };
        let src_api_socket = temp_api_path(&guest.tmp_dir);
        let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
        src_vm_cmd
            .args([
                "--cpus",
                format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
            ])
            .args(memory_param)
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .args(["--api-socket", &src_api_socket])
            .args([
                "--pmem",
                format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
            ]);
        let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();

        // Start the destination VM
        let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
        dest_api_socket.push_str(".dest");
        let mut dest_child = GuestCommand::new(&guest)
            .args(["--api-socket", &dest_api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Make sure the source VM is functional
            // Check the number of vCPUs
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);

            // Check the guest RAM
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

            // Check the guest virtio-devices, e.g. block, rng, console, and net
            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));

            // x86_64: Following what's done in the `test_snapshot_restore`, we need
            // to make sure that removing and adding back the virtio-net device does
            // not break the live-migration support for virtio-pci.
            #[cfg(target_arch = "x86_64")]
            {
                assert!(remote_command(
                    &src_api_socket,
                    "remove-device",
                    Some(net_id),
                ));
                thread::sleep(std::time::Duration::new(10, 0));

                // Plug the virtio-net device again
                assert!(remote_command(
                    &src_api_socket,
                    "add-net",
                    Some(net_params.as_str()),
                ));
                thread::sleep(std::time::Duration::new(10, 0));
            }

            // Start the live-migration
            let migration_socket = String::from(
                guest
                    .tmp_dir
                    .as_path()
                    .join("live-migration.sock")
                    .to_str()
                    .unwrap(),
            );

            assert!(
                start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
                "Unsuccessful command: 'send-migration' or 'receive-migration'."
            );
        });

        // Check and report any errors that occurred during the live-migration
        if r.is_err() {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "Error occurred during live-migration",
            );
        }

        // Check the source VM has been terminated successfully (give it '3s' to settle)
        thread::sleep(std::time::Duration::new(3, 0));
        if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "source VM was not terminated successfully.",
            );
        };

        // Post live-migration check to make sure the destination VM is functional
        let r = std::panic::catch_unwind(|| {
            // Perform same checks to validate VM has been properly migrated
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
        });

        // Clean-up the destination VM and make sure it terminated correctly
        let _ = dest_child.kill();
        let dest_output = dest_child.wait_with_output().unwrap();
        handle_child_output(r, &dest_output);

        // Check the destination VM has the expected 'console_text' in its output
        let r = std::panic::catch_unwind(|| {
            assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
        });
        handle_child_output(r, &dest_output);
    }

    fn _test_live_migration_balloon(upgrade_test: bool, local: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let kernel_path = direct_kernel_boot_path();
        let console_text = String::from("On a branch floating down river a cricket, singing.");
        let net_id = "net123";
        let net_params = format!(
            "id={},tap=,mac={},ip={},mask=255.255.255.0",
            net_id, guest.network.guest_mac, guest.network.host_ip
        );

        let memory_param: &[&str] = if local {
            &[
                "--memory",
                "size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on",
                "--balloon",
                "size=0",
            ]
        } else {
            &[
                "--memory",
                "size=4G,hotplug_method=virtio-mem,hotplug_size=8G",
                "--balloon",
                "size=0",
            ]
        };

        let boot_vcpus = 2;
        let max_vcpus = 4;

        let pmem_temp_file = TempFile::new().unwrap();
        pmem_temp_file.as_file().set_len(128 << 20).unwrap();
        std::process::Command::new("mkfs.ext4")
            .arg(pmem_temp_file.as_path())
            .output()
            .expect("Expect creating disk image to succeed");
        let pmem_path = String::from("/dev/pmem0");

        // Start the source VM
        let
src_vm_path = if !upgrade_test { 8859 clh_command("cloud-hypervisor") 8860 } else { 8861 cloud_hypervisor_release_path() 8862 }; 8863 let src_api_socket = temp_api_path(&guest.tmp_dir); 8864 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 8865 src_vm_cmd 8866 .args([ 8867 "--cpus", 8868 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 8869 ]) 8870 .args(memory_param) 8871 .args(["--kernel", kernel_path.to_str().unwrap()]) 8872 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 8873 .default_disks() 8874 .args(["--net", net_params.as_str()]) 8875 .args(["--api-socket", &src_api_socket]) 8876 .args([ 8877 "--pmem", 8878 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 8879 ]); 8880 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 8881 8882 // Start the destination VM 8883 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 8884 dest_api_socket.push_str(".dest"); 8885 let mut dest_child = GuestCommand::new(&guest) 8886 .args(["--api-socket", &dest_api_socket]) 8887 .capture_output() 8888 .spawn() 8889 .unwrap(); 8890 8891 let r = std::panic::catch_unwind(|| { 8892 guest.wait_vm_boot(None).unwrap(); 8893 8894 // Make sure the source VM is functaionl 8895 // Check the number of vCPUs 8896 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8897 8898 // Check the guest RAM 8899 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8900 // Increase the guest RAM 8901 resize_command(&src_api_socket, None, Some(6 << 30), None, None); 8902 thread::sleep(std::time::Duration::new(5, 0)); 8903 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8904 // Use balloon to remove RAM from the VM 8905 resize_command(&src_api_socket, None, None, Some(1 << 30), None); 8906 thread::sleep(std::time::Duration::new(5, 0)); 8907 let total_memory = guest.get_total_memory().unwrap_or_default(); 8908 assert!(total_memory > 4_800_000); 8909 assert!(total_memory < 5_760_000); 8910 8911 // Check the guest virtio-devices, e.g. block, rng, console, and net 8912 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8913 8914 // x86_64: Following what's done in the `test_snapshot_restore`, we need 8915 // to make sure that removing and adding back the virtio-net device does 8916 // not break the live-migration support for virtio-pci. 8917 #[cfg(target_arch = "x86_64")] 8918 { 8919 assert!(remote_command( 8920 &src_api_socket, 8921 "remove-device", 8922 Some(net_id), 8923 )); 8924 thread::sleep(std::time::Duration::new(10, 0)); 8925 8926 // Plug the virtio-net device again 8927 assert!(remote_command( 8928 &src_api_socket, 8929 "add-net", 8930 Some(net_params.as_str()), 8931 )); 8932 thread::sleep(std::time::Duration::new(10, 0)); 8933 } 8934 8935 // Start the live-migration 8936 let migration_socket = String::from( 8937 guest 8938 .tmp_dir 8939 .as_path() 8940 .join("live-migration.sock") 8941 .to_str() 8942 .unwrap(), 8943 ); 8944 8945 assert!( 8946 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 8947 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
8948 ); 8949 }); 8950 8951 // Check and report any errors occurred during the live-migration 8952 if r.is_err() { 8953 print_and_panic( 8954 src_child, 8955 dest_child, 8956 None, 8957 "Error occurred during live-migration", 8958 ); 8959 } 8960 8961 // Check the source vm has been terminated successful (give it '3s' to settle) 8962 thread::sleep(std::time::Duration::new(3, 0)); 8963 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 8964 print_and_panic( 8965 src_child, 8966 dest_child, 8967 None, 8968 "source VM was not terminated successfully.", 8969 ); 8970 }; 8971 8972 // Post live-migration check to make sure the destination VM is funcational 8973 let r = std::panic::catch_unwind(|| { 8974 // Perform same checks to validate VM has been properly migrated 8975 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 8976 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 8977 8978 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 8979 8980 // Perform checks on guest RAM using balloon 8981 let total_memory = guest.get_total_memory().unwrap_or_default(); 8982 assert!(total_memory > 4_800_000); 8983 assert!(total_memory < 5_760_000); 8984 // Deflate balloon to restore entire RAM to the VM 8985 resize_command(&dest_api_socket, None, None, Some(0), None); 8986 thread::sleep(std::time::Duration::new(5, 0)); 8987 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 8988 // Decrease guest RAM with virtio-mem 8989 resize_command(&dest_api_socket, None, Some(5 << 30), None, None); 8990 thread::sleep(std::time::Duration::new(5, 0)); 8991 let total_memory = guest.get_total_memory().unwrap_or_default(); 8992 assert!(total_memory > 4_800_000); 8993 assert!(total_memory < 5_760_000); 8994 }); 8995 8996 // Clean-up the destination VM and make sure it terminated correctly 8997 let _ = dest_child.kill(); 8998 let dest_output = dest_child.wait_with_output().unwrap(); 8999 handle_child_output(r, &dest_output); 9000 9001 // Check the destination VM has the expected 'concole_text' from its output 9002 let r = std::panic::catch_unwind(|| { 9003 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9004 }); 9005 handle_child_output(r, &dest_output); 9006 } 9007 9008 fn _test_live_migration_numa(upgrade_test: bool, local: bool) { 9009 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9010 let guest = Guest::new(Box::new(focal)); 9011 let kernel_path = direct_kernel_boot_path(); 9012 let console_text = String::from("On a branch floating down river a cricket, singing."); 9013 let net_id = "net123"; 9014 let net_params = format!( 9015 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9016 net_id, guest.network.guest_mac, guest.network.host_ip 9017 ); 9018 9019 let memory_param: &[&str] = if local { 9020 &[ 9021 "--memory", 9022 "size=0,hotplug_method=virtio-mem,shared=on", 9023 "--memory-zone", 9024 "id=mem0,size=1G,hotplug_size=4G,shared=on", 9025 "id=mem1,size=1G,hotplug_size=4G,shared=on", 9026 "id=mem2,size=2G,hotplug_size=4G,shared=on", 9027 "--numa", 9028 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9029 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9030 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9031 ] 9032 } else { 9033 &[ 9034 "--memory", 9035 "size=0,hotplug_method=virtio-mem", 9036 "--memory-zone", 9037 "id=mem0,size=1G,hotplug_size=4G", 9038 "id=mem1,size=1G,hotplug_size=4G", 9039 
"id=mem2,size=2G,hotplug_size=4G", 9040 "--numa", 9041 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9042 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9043 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9044 ] 9045 }; 9046 9047 let boot_vcpus = 6; 9048 let max_vcpus = 12; 9049 9050 let pmem_temp_file = TempFile::new().unwrap(); 9051 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9052 std::process::Command::new("mkfs.ext4") 9053 .arg(pmem_temp_file.as_path()) 9054 .output() 9055 .expect("Expect creating disk image to succeed"); 9056 let pmem_path = String::from("/dev/pmem0"); 9057 9058 // Start the source VM 9059 let src_vm_path = if !upgrade_test { 9060 clh_command("cloud-hypervisor") 9061 } else { 9062 cloud_hypervisor_release_path() 9063 }; 9064 let src_api_socket = temp_api_path(&guest.tmp_dir); 9065 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9066 src_vm_cmd 9067 .args([ 9068 "--cpus", 9069 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9070 ]) 9071 .args(memory_param) 9072 .args(["--kernel", kernel_path.to_str().unwrap()]) 9073 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9074 .default_disks() 9075 .args(["--net", net_params.as_str()]) 9076 .args(["--api-socket", &src_api_socket]) 9077 .args([ 9078 "--pmem", 9079 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9080 ]); 9081 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9082 9083 // Start the destination VM 9084 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9085 dest_api_socket.push_str(".dest"); 9086 let mut dest_child = GuestCommand::new(&guest) 9087 .args(["--api-socket", &dest_api_socket]) 9088 .capture_output() 9089 .spawn() 9090 .unwrap(); 9091 9092 let r = std::panic::catch_unwind(|| { 9093 guest.wait_vm_boot(None).unwrap(); 9094 9095 // Make sure the source VM is functaionl 9096 // Check the number of vCPUs 9097 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9098 9099 // Check the guest RAM 9100 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 9101 9102 // Check the guest virtio-devices, e.g. block, rng, console, and net 9103 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9104 9105 // Check the NUMA parameters are applied correctly and resize 9106 // each zone to test the case where we migrate a VM with the 9107 // virtio-mem regions being used. 9108 { 9109 guest.check_numa_common( 9110 Some(&[960_000, 960_000, 1_920_000]), 9111 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9112 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9113 ); 9114 9115 // AArch64 currently does not support hotplug, and therefore we only 9116 // test hotplug-related function on x86_64 here. 9117 #[cfg(target_arch = "x86_64")] 9118 { 9119 guest.enable_memory_hotplug(); 9120 9121 // Resize every memory zone and check each associated NUMA node 9122 // has been assigned the right amount of memory. 
9123 resize_zone_command(&src_api_socket, "mem0", "2G"); 9124 resize_zone_command(&src_api_socket, "mem1", "2G"); 9125 resize_zone_command(&src_api_socket, "mem2", "3G"); 9126 thread::sleep(std::time::Duration::new(5, 0)); 9127 9128 guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None); 9129 } 9130 } 9131 9132 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9133 // to make sure that removing and adding back the virtio-net device does 9134 // not break the live-migration support for virtio-pci. 9135 #[cfg(target_arch = "x86_64")] 9136 { 9137 assert!(remote_command( 9138 &src_api_socket, 9139 "remove-device", 9140 Some(net_id), 9141 )); 9142 thread::sleep(std::time::Duration::new(10, 0)); 9143 9144 // Plug the virtio-net device again 9145 assert!(remote_command( 9146 &src_api_socket, 9147 "add-net", 9148 Some(net_params.as_str()), 9149 )); 9150 thread::sleep(std::time::Duration::new(10, 0)); 9151 } 9152 9153 // Start the live-migration 9154 let migration_socket = String::from( 9155 guest 9156 .tmp_dir 9157 .as_path() 9158 .join("live-migration.sock") 9159 .to_str() 9160 .unwrap(), 9161 ); 9162 9163 assert!( 9164 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9165 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9166 ); 9167 }); 9168 9169 // Check and report any errors occurred during the live-migration 9170 if r.is_err() { 9171 print_and_panic( 9172 src_child, 9173 dest_child, 9174 None, 9175 "Error occurred during live-migration", 9176 ); 9177 } 9178 9179 // Check the source vm has been terminated successful (give it '3s' to settle) 9180 thread::sleep(std::time::Duration::new(3, 0)); 9181 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9182 print_and_panic( 9183 src_child, 9184 dest_child, 9185 None, 9186 "source VM was not terminated successfully.", 9187 ); 9188 }; 9189 9190 // Post live-migration check to make sure the destination VM is funcational 9191 let r = std::panic::catch_unwind(|| { 9192 // Perform same checks to validate VM has been properly migrated 9193 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9194 #[cfg(target_arch = "x86_64")] 9195 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 9196 #[cfg(target_arch = "aarch64")] 9197 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9198 9199 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9200 9201 // Perform NUMA related checks 9202 { 9203 #[cfg(target_arch = "aarch64")] 9204 { 9205 guest.check_numa_common( 9206 Some(&[960_000, 960_000, 1_920_000]), 9207 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9208 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9209 ); 9210 } 9211 9212 // AArch64 currently does not support hotplug, and therefore we only 9213 // test hotplug-related function on x86_64 here. 9214 #[cfg(target_arch = "x86_64")] 9215 { 9216 guest.check_numa_common( 9217 Some(&[1_920_000, 1_920_000, 2_880_000]), 9218 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9219 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9220 ); 9221 9222 guest.enable_memory_hotplug(); 9223 9224 // Resize every memory zone and check each associated NUMA node 9225 // has been assigned the right amount of memory. 
9226 resize_zone_command(&dest_api_socket, "mem0", "4G"); 9227 resize_zone_command(&dest_api_socket, "mem1", "4G"); 9228 resize_zone_command(&dest_api_socket, "mem2", "4G"); 9229 // Resize to the maximum amount of CPUs and check each NUMA 9230 // node has been assigned the right CPUs set. 9231 resize_command(&dest_api_socket, Some(max_vcpus), None, None, None); 9232 thread::sleep(std::time::Duration::new(5, 0)); 9233 9234 guest.check_numa_common( 9235 Some(&[3_840_000, 3_840_000, 3_840_000]), 9236 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 9237 None, 9238 ); 9239 } 9240 } 9241 }); 9242 9243 // Clean-up the destination VM and make sure it terminated correctly 9244 let _ = dest_child.kill(); 9245 let dest_output = dest_child.wait_with_output().unwrap(); 9246 handle_child_output(r, &dest_output); 9247 9248 // Check the destination VM has the expected 'concole_text' from its output 9249 let r = std::panic::catch_unwind(|| { 9250 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9251 }); 9252 handle_child_output(r, &dest_output); 9253 } 9254 9255 fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) { 9256 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9257 let guest = Guest::new(Box::new(focal)); 9258 let kernel_path = direct_kernel_boot_path(); 9259 let console_text = String::from("On a branch floating down river a cricket, singing."); 9260 let net_id = "net123"; 9261 let net_params = format!( 9262 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9263 net_id, guest.network.guest_mac, guest.network.host_ip 9264 ); 9265 9266 let memory_param: &[&str] = if local { 9267 &["--memory", "size=4G,shared=on"] 9268 } else { 9269 &["--memory", "size=4G"] 9270 }; 9271 9272 let boot_vcpus = 2; 9273 let max_vcpus = 4; 9274 9275 let pmem_temp_file = TempFile::new().unwrap(); 9276 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9277 std::process::Command::new("mkfs.ext4") 9278 .arg(pmem_temp_file.as_path()) 9279 .output() 9280 .expect("Expect creating disk image to succeed"); 9281 let pmem_path = String::from("/dev/pmem0"); 9282 9283 // Start the source VM 9284 let src_vm_path = if !upgrade_test { 9285 clh_command("cloud-hypervisor") 9286 } else { 9287 cloud_hypervisor_release_path() 9288 }; 9289 let src_api_socket = temp_api_path(&guest.tmp_dir); 9290 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9291 src_vm_cmd 9292 .args([ 9293 "--cpus", 9294 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9295 ]) 9296 .args(memory_param) 9297 .args(["--kernel", kernel_path.to_str().unwrap()]) 9298 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9299 .default_disks() 9300 .args(["--net", net_params.as_str()]) 9301 .args(["--api-socket", &src_api_socket]) 9302 .args([ 9303 "--pmem", 9304 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9305 ]) 9306 .args(["--watchdog"]); 9307 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9308 9309 // Start the destination VM 9310 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9311 dest_api_socket.push_str(".dest"); 9312 let mut dest_child = GuestCommand::new(&guest) 9313 .args(["--api-socket", &dest_api_socket]) 9314 .capture_output() 9315 .spawn() 9316 .unwrap(); 9317 9318 let r = std::panic::catch_unwind(|| { 9319 guest.wait_vm_boot(None).unwrap(); 9320 9321 // Make sure the source VM is functaionl 9322 // Check the number of vCPUs 9323 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9324 // 
Check the guest RAM 9325 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9326 // Check the guest virtio-devices, e.g. block, rng, console, and net 9327 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9328 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9329 // to make sure that removing and adding back the virtio-net device does 9330 // not break the live-migration support for virtio-pci. 9331 #[cfg(target_arch = "x86_64")] 9332 { 9333 assert!(remote_command( 9334 &src_api_socket, 9335 "remove-device", 9336 Some(net_id), 9337 )); 9338 thread::sleep(std::time::Duration::new(10, 0)); 9339 9340 // Plug the virtio-net device again 9341 assert!(remote_command( 9342 &src_api_socket, 9343 "add-net", 9344 Some(net_params.as_str()), 9345 )); 9346 thread::sleep(std::time::Duration::new(10, 0)); 9347 } 9348 9349 // Enable watchdog and ensure its functional 9350 let mut expected_reboot_count = 1; 9351 // Enable the watchdog with a 15s timeout 9352 enable_guest_watchdog(&guest, 15); 9353 // Reboot and check that systemd has activated the watchdog 9354 guest.ssh_command("sudo reboot").unwrap(); 9355 guest.wait_vm_boot(None).unwrap(); 9356 expected_reboot_count += 1; 9357 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9358 assert_eq!( 9359 guest 9360 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 9361 .unwrap() 9362 .trim() 9363 .parse::<u32>() 9364 .unwrap_or_default(), 9365 2 9366 ); 9367 // Allow some normal time to elapse to check we don't get spurious reboots 9368 thread::sleep(std::time::Duration::new(40, 0)); 9369 // Check no reboot 9370 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9371 9372 // Start the live-migration 9373 let migration_socket = String::from( 9374 guest 9375 .tmp_dir 9376 .as_path() 9377 .join("live-migration.sock") 9378 .to_str() 9379 .unwrap(), 9380 ); 9381 9382 assert!( 9383 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9384 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9385 ); 9386 }); 9387 9388 // Check and report any errors occurred during the live-migration 9389 if r.is_err() { 9390 print_and_panic( 9391 src_child, 9392 dest_child, 9393 None, 9394 "Error occurred during live-migration", 9395 ); 9396 } 9397 9398 // Check the source vm has been terminated successful (give it '3s' to settle) 9399 thread::sleep(std::time::Duration::new(3, 0)); 9400 if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) { 9401 print_and_panic( 9402 src_child, 9403 dest_child, 9404 None, 9405 "source VM was not terminated successfully.", 9406 ); 9407 }; 9408 9409 // Post live-migration check to make sure the destination VM is funcational 9410 let r = std::panic::catch_unwind(|| { 9411 // Perform same checks to validate VM has been properly migrated 9412 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9413 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9414 9415 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9416 9417 // Perform checks on watchdog 9418 let mut expected_reboot_count = 2; 9419 9420 // Allow some normal time to elapse to check we don't get spurious reboots 9421 thread::sleep(std::time::Duration::new(40, 0)); 9422 // Check no reboot 9423 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9424 9425 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 
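            // 'echo s > /proc/sysrq-trigger' forces an emergency sync and 'echo c'
            // triggers a kernel crash; once the guest has crashed it stops servicing
            // the watchdog, which is what should cause the reboot checked below.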
9426 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 9427 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 9428 guest.wait_vm_boot(Some(50)).unwrap(); 9429 // Check a reboot is triggered by the watchdog 9430 expected_reboot_count += 1; 9431 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9432 9433 #[cfg(target_arch = "x86_64")] 9434 { 9435 // Now pause the VM and remain offline for 30s 9436 assert!(remote_command(&dest_api_socket, "pause", None)); 9437 thread::sleep(std::time::Duration::new(30, 0)); 9438 assert!(remote_command(&dest_api_socket, "resume", None)); 9439 9440 // Check no reboot 9441 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9442 } 9443 }); 9444 9445 // Clean-up the destination VM and make sure it terminated correctly 9446 let _ = dest_child.kill(); 9447 let dest_output = dest_child.wait_with_output().unwrap(); 9448 handle_child_output(r, &dest_output); 9449 9450 // Check the destination VM has the expected 'concole_text' from its output 9451 let r = std::panic::catch_unwind(|| { 9452 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9453 }); 9454 handle_child_output(r, &dest_output); 9455 } 9456 9457 fn _test_live_migration_ovs_dpdk(upgrade_test: bool, local: bool) { 9458 let ovs_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9459 let ovs_guest = Guest::new(Box::new(ovs_focal)); 9460 9461 let migration_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9462 let migration_guest = Guest::new(Box::new(migration_focal)); 9463 let src_api_socket = temp_api_path(&migration_guest.tmp_dir); 9464 9465 // Start two VMs that are connected through ovs-dpdk and one of the VMs is the source VM for live-migration 9466 let (mut ovs_child, mut src_child) = 9467 setup_ovs_dpdk_guests(&ovs_guest, &migration_guest, &src_api_socket, upgrade_test); 9468 9469 // Start the destination VM 9470 let mut dest_api_socket = temp_api_path(&migration_guest.tmp_dir); 9471 dest_api_socket.push_str(".dest"); 9472 let mut dest_child = GuestCommand::new(&migration_guest) 9473 .args(["--api-socket", &dest_api_socket]) 9474 .capture_output() 9475 .spawn() 9476 .unwrap(); 9477 9478 let r = std::panic::catch_unwind(|| { 9479 // Give it '1s' to make sure the 'dest_api_socket' file is properly created 9480 thread::sleep(std::time::Duration::new(1, 0)); 9481 9482 // Start the live-migration 9483 let migration_socket = String::from( 9484 migration_guest 9485 .tmp_dir 9486 .as_path() 9487 .join("live-migration.sock") 9488 .to_str() 9489 .unwrap(), 9490 ); 9491 9492 assert!( 9493 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9494 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
            );
        });

        // Check and report any errors that occurred during the live-migration
        if r.is_err() {
            print_and_panic(
                src_child,
                dest_child,
                Some(ovs_child),
                "Error occurred during live-migration",
            );
        }

        // Check the source VM has terminated successfully (give it '3s' to settle)
        thread::sleep(std::time::Duration::new(3, 0));
        if !src_child.try_wait().unwrap().map_or(false, |s| s.success()) {
            print_and_panic(
                src_child,
                dest_child,
                Some(ovs_child),
                "source VM was not terminated successfully.",
            );
        };

        // Post live-migration check to make sure the destination VM is functional
        let r = std::panic::catch_unwind(|| {
            // Perform the same checks to validate the VM has been properly migrated
            // Spawn a new netcat listener in the OVS VM
            let guest_ip = ovs_guest.network.guest_ip.clone();
            thread::spawn(move || {
                ssh_command_ip(
                    "nc -l 12345",
                    &guest_ip,
                    DEFAULT_SSH_RETRIES,
                    DEFAULT_SSH_TIMEOUT,
                )
                .unwrap();
            });

            // Wait for the server to be listening
            thread::sleep(std::time::Duration::new(5, 0));

            // And check the connection is still functional after live-migration
            migration_guest
                .ssh_command("nc -vz 172.100.0.1 12345")
                .unwrap();
        });

        // Clean up the destination VM and OVS VM, and make sure they terminated correctly
        let _ = dest_child.kill();
        let _ = ovs_child.kill();
        let dest_output = dest_child.wait_with_output().unwrap();
        let ovs_output = ovs_child.wait_with_output().unwrap();

        cleanup_ovs_dpdk();

        handle_child_output(r, &dest_output);
        handle_child_output(Ok(()), &ovs_output);
    }

    mod live_migration_parallel {
        use super::*;

        #[test]
        fn test_live_migration_basic() {
            _test_live_migration(false, false)
        }

        #[test]
        fn test_live_migration_local() {
            _test_live_migration(false, true)
        }

        #[test]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_numa() {
            _test_live_migration_numa(false, false)
        }

        #[test]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_numa_local() {
            _test_live_migration_numa(false, true)
        }

        #[test]
        fn test_live_migration_watchdog() {
            _test_live_migration_watchdog(false, false)
        }

        #[test]
        fn test_live_migration_watchdog_local() {
            _test_live_migration_watchdog(false, true)
        }

        #[test]
        fn test_live_migration_balloon() {
            _test_live_migration_balloon(false, false)
        }

        #[test]
        fn test_live_migration_balloon_local() {
            _test_live_migration_balloon(false, true)
        }

        #[test]
        #[ignore = "See #6134"]
        fn test_live_upgrade_basic() {
            _test_live_migration(true, false)
        }

        #[test]
        #[ignore = "See #6134"]
        fn test_live_upgrade_local() {
            _test_live_migration(true, true)
        }

        #[test]
        #[ignore = "See #6134"]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_numa() {
            _test_live_migration_numa(true, false)
        }

        #[test]
        #[ignore = "See #6134"]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_numa_local() {
            _test_live_migration_numa(true, true)
        }

        #[test]
        #[ignore = "See #6134"]
        fn test_live_upgrade_watchdog() {
            _test_live_migration_watchdog(true, false)
        }

        #[test]
        #[ignore = "See #6134"]
"See #6134"] 9633 fn test_live_upgrade_watchdog_local() { 9634 _test_live_migration_watchdog(true, true) 9635 } 9636 9637 #[test] 9638 #[ignore = "See #6134"] 9639 fn test_live_upgrade_balloon() { 9640 _test_live_migration_balloon(true, false) 9641 } 9642 9643 #[test] 9644 #[ignore = "See #6134"] 9645 fn test_live_upgrade_balloon_local() { 9646 _test_live_migration_balloon(true, true) 9647 } 9648 } 9649 9650 mod live_migration_sequential { 9651 #[cfg(target_arch = "x86_64")] 9652 #[cfg(not(feature = "mshv"))] 9653 use super::*; 9654 9655 // Require to run ovs-dpdk tests sequentially because they rely on the same ovs-dpdk setup 9656 #[test] 9657 #[ignore = "See #5532"] 9658 #[cfg(target_arch = "x86_64")] 9659 #[cfg(not(feature = "mshv"))] 9660 fn test_live_migration_ovs_dpdk() { 9661 _test_live_migration_ovs_dpdk(false, false); 9662 } 9663 9664 #[test] 9665 #[cfg(target_arch = "x86_64")] 9666 #[cfg(not(feature = "mshv"))] 9667 fn test_live_migration_ovs_dpdk_local() { 9668 _test_live_migration_ovs_dpdk(false, true); 9669 } 9670 9671 #[test] 9672 #[ignore = "See #5532"] 9673 #[cfg(target_arch = "x86_64")] 9674 #[cfg(not(feature = "mshv"))] 9675 fn test_live_upgrade_ovs_dpdk() { 9676 _test_live_migration_ovs_dpdk(true, false); 9677 } 9678 9679 #[test] 9680 #[ignore = "See #5532"] 9681 #[cfg(target_arch = "x86_64")] 9682 #[cfg(not(feature = "mshv"))] 9683 fn test_live_upgrade_ovs_dpdk_local() { 9684 _test_live_migration_ovs_dpdk(true, true); 9685 } 9686 } 9687 } 9688 9689 #[cfg(target_arch = "aarch64")] 9690 mod aarch64_acpi { 9691 use crate::*; 9692 9693 #[test] 9694 fn test_simple_launch_acpi() { 9695 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9696 9697 vec![Box::new(focal)].drain(..).for_each(|disk_config| { 9698 let guest = Guest::new(disk_config); 9699 9700 let mut child = GuestCommand::new(&guest) 9701 .args(["--cpus", "boot=1"]) 9702 .args(["--memory", "size=512M"]) 9703 .args(["--kernel", edk2_path().to_str().unwrap()]) 9704 .default_disks() 9705 .default_net() 9706 .args(["--serial", "tty", "--console", "off"]) 9707 .capture_output() 9708 .spawn() 9709 .unwrap(); 9710 9711 let r = std::panic::catch_unwind(|| { 9712 guest.wait_vm_boot(Some(120)).unwrap(); 9713 9714 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 9715 assert!(guest.get_total_memory().unwrap_or_default() > 400_000); 9716 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 9717 }); 9718 9719 let _ = child.kill(); 9720 let output = child.wait_with_output().unwrap(); 9721 9722 handle_child_output(r, &output); 9723 }); 9724 } 9725 9726 #[test] 9727 fn test_guest_numa_nodes_acpi() { 9728 _test_guest_numa_nodes(true); 9729 } 9730 9731 #[test] 9732 fn test_cpu_topology_421_acpi() { 9733 test_cpu_topology(4, 2, 1, true); 9734 } 9735 9736 #[test] 9737 fn test_cpu_topology_142_acpi() { 9738 test_cpu_topology(1, 4, 2, true); 9739 } 9740 9741 #[test] 9742 fn test_cpu_topology_262_acpi() { 9743 test_cpu_topology(2, 6, 2, true); 9744 } 9745 9746 #[test] 9747 fn test_power_button_acpi() { 9748 _test_power_button(true); 9749 } 9750 9751 #[test] 9752 fn test_virtio_iommu() { 9753 _test_virtio_iommu(true) 9754 } 9755 } 9756 9757 mod rate_limiter { 9758 use super::*; 9759 9760 // Check if the 'measured' rate is within the expected 'difference' (in percentage) 9761 // compared to given 'limit' rate. 
    fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool {
        let upper_limit = limit * (1_f64 + difference);
        let lower_limit = limit * (1_f64 - difference);

        if measured > lower_limit && measured < upper_limit {
            return true;
        }

        eprintln!(
            "\n\n==== Start 'check_rate_limit' failed ==== \
            \n\nmeasured={measured}, lower_limit={lower_limit}, upper_limit={upper_limit} \
            \n\n==== End 'check_rate_limit' failed ====\n\n"
        );

        false
    }

    fn _test_rate_limiter_net(rx: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));

        let test_timeout = 10;
        let num_queues = 2;
        let queue_size = 256;
        let bw_size = 10485760_u64; // bytes
        let bw_refill_time = 100; // ms
        // The limiter refills 'bw_size' bytes every 'bw_refill_time' ms, so the expected
        // throughput cap is bw_size * 8 * 1000 / bw_refill_time bits per second.
        let limit_bps = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64;

        let net_params = format!(
            "tap=,mac={},ip={},mask=255.255.255.0,num_queues={},queue_size={},bw_size={},bw_refill_time={}",
            guest.network.guest_mac,
            guest.network.host_ip,
            num_queues,
            queue_size,
            bw_size,
            bw_refill_time,
        );

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={}", num_queues / 2)])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();
            let measured_bps =
                measure_virtio_net_throughput(test_timeout, num_queues / 2, &guest, rx, true)
                    .unwrap();
            assert!(check_rate_limit(measured_bps, limit_bps, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_net_rx() {
        _test_rate_limiter_net(true);
    }

    #[test]
    fn test_rate_limiter_net_tx() {
        _test_rate_limiter_net(false);
    }

    fn _test_rate_limiter_block(bandwidth: bool, num_queues: u32) {
        let test_timeout = 10;
        let fio_ops = FioOps::RandRW;

        let bw_size = if bandwidth {
            10485760_u64 // bytes
        } else {
            100_u64 // I/O
        };
        let bw_refill_time = 100; // ms
        // Expected cap: 'bw_size' (bytes or I/O operations) refilled every
        // 'bw_refill_time' ms, i.e. bw_size * 1000 / bw_refill_time per second.
        let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);
        let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();
        let blk_rate_limiter_test_img =
            String::from(test_img_dir.as_path().join("blk.img").to_str().unwrap());

        // Create the test block image
        assert!(exec_host_command_output(&format!(
            "dd if=/dev/zero of={blk_rate_limiter_test_img} bs=1M count=1024"
        ))
        .status
        .success());

        let test_blk_params = if bandwidth {
            format!(
                "path={blk_rate_limiter_test_img},num_queues={num_queues},bw_size={bw_size},bw_refill_time={bw_refill_time}"
            )
        } else {
            format!(
                "path={blk_rate_limiter_test_img},num_queues={num_queues},ops_size={bw_size},ops_refill_time={bw_refill_time}"
            )
        };

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={num_queues}")])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .args([
                "--disk",
                format!(
                    "path={}",
                    guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
                )
                .as_str(),
                format!(
                    "path={}",
                    guest.disk_config.disk(DiskType::CloudInit).unwrap()
                )
                .as_str(),
                test_blk_params.as_str(),
            ])
            .default_net()
            .args(["--api-socket", &api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            let fio_command = format!(
                "sudo fio --filename=/dev/vdc --name=test --output-format=json \
                --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \
                --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}"
            );
            let output = guest.ssh_command(&fio_command).unwrap();

            // Parse fio output
            let measured_rate = if bandwidth {
                parse_fio_output(&output, &fio_ops, num_queues).unwrap()
            } else {
                parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap()
            };
            assert!(check_rate_limit(measured_rate, limit_rate, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    fn _test_rate_limiter_group_block(bandwidth: bool, num_queues: u32, num_disks: u32) {
        let test_timeout = 10;
        let fio_ops = FioOps::RandRW;

        let bw_size = if bandwidth {
            10485760_u64 // bytes
        } else {
            100_u64 // I/O
        };
        let bw_refill_time = 100; // ms
        let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);
        let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();

        let rate_limit_group_arg = if bandwidth {
            format!("id=group0,bw_size={bw_size},bw_refill_time={bw_refill_time}")
        } else {
            format!("id=group0,ops_size={bw_size},ops_refill_time={bw_refill_time}")
        };

        let mut disk_args = vec![
            "--disk".to_string(),
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
            ),
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            ),
        ];

        for i in 0..num_disks {
            let test_img_path = String::from(
                test_img_dir
                    .as_path()
                    .join(format!("blk{}.img", i))
                    .to_str()
                    .unwrap(),
            );

            assert!(exec_host_command_output(&format!(
                "dd if=/dev/zero of={test_img_path} bs=1M count=1024"
            ))
            .status
            .success());

            disk_args.push(format!(
                "path={test_img_path},num_queues={num_queues},rate_limit_group=group0"
            ));
        }

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={}", num_queues * num_disks)])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .args(["--rate-limit-group", &rate_limit_group_arg])
            .args(disk_args)
            .default_net()
            .args(["--api-socket", &api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            let mut fio_command = format!(
                "sudo fio --name=global --output-format=json \
                --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \
                --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}"
            );

            // Generate additional argument for each disk:
            // --name=job0 --filename=/dev/vdc \
            // --name=job1 --filename=/dev/vdd \
            // --name=job2 --filename=/dev/vde \
            // ...
            for i in 0..num_disks {
                let c: char = 'c';
                let arg = format!(
                    " --name=job{i} --filename=/dev/vd{}",
                    char::from_u32((c as u32) + i).unwrap()
                );
                fio_command += &arg;
            }
            let output = guest.ssh_command(&fio_command).unwrap();

            // Parse fio output
            let measured_rate = if bandwidth {
                parse_fio_output(&output, &fio_ops, num_queues * num_disks).unwrap()
            } else {
                parse_fio_output_iops(&output, &fio_ops, num_queues * num_disks).unwrap()
            };
            assert!(check_rate_limit(measured_rate, limit_rate, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_block_bandwidth() {
        _test_rate_limiter_block(true, 1);
        _test_rate_limiter_block(true, 2)
    }

    #[test]
    fn test_rate_limiter_group_block_bandwidth() {
        _test_rate_limiter_group_block(true, 1, 1);
        _test_rate_limiter_group_block(true, 2, 1);
        _test_rate_limiter_group_block(true, 1, 2);
        _test_rate_limiter_group_block(true, 2, 2);
    }

    #[test]
    fn test_rate_limiter_block_iops() {
        _test_rate_limiter_block(false, 1);
        _test_rate_limiter_block(false, 2);
    }

    #[test]
    fn test_rate_limiter_group_block_iops() {
        _test_rate_limiter_group_block(false, 1, 1);
        _test_rate_limiter_group_block(false, 2, 1);
        _test_rate_limiter_group_block(false, 1, 2);
        _test_rate_limiter_group_block(false, 2, 2);
    }
}