1 // Copyright © 2020 Intel Corporation 2 // 3 // SPDX-License-Identifier: Apache-2.0 4 // 5 #![allow(clippy::undocumented_unsafe_blocks)] 6 // When enabling the `mshv` feature, we skip quite some tests and 7 // hence have known dead-code. This annotation silences dead-code 8 // related warnings for our quality workflow to pass. 9 #![allow(dead_code)] 10 11 extern crate test_infra; 12 13 use std::collections::HashMap; 14 use std::io::{BufRead, Read, Seek, Write}; 15 use std::net::TcpListener; 16 use std::os::unix::io::AsRawFd; 17 use std::path::PathBuf; 18 use std::process::{Child, Command, Stdio}; 19 use std::string::String; 20 use std::sync::mpsc::Receiver; 21 use std::sync::{mpsc, Mutex}; 22 use std::time::Duration; 23 use std::{fs, io, thread}; 24 25 use net_util::MacAddr; 26 use test_infra::*; 27 use vmm_sys_util::tempdir::TempDir; 28 use vmm_sys_util::tempfile::TempFile; 29 use wait_timeout::ChildExt; 30 31 // Constant taken from the VMM crate. 32 const MAX_NUM_PCI_SEGMENTS: u16 = 96; 33 34 #[cfg(target_arch = "x86_64")] 35 mod x86_64 { 36 pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw"; 37 pub const JAMMY_VFIO_IMAGE_NAME: &str = 38 "jammy-server-cloudimg-amd64-custom-vfio-20241012-0.raw"; 39 pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2"; 40 pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str = 41 "focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2"; 42 pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd"; 43 pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx"; 44 pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20241017-0.raw"; 45 pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw"; 46 pub const OVMF_NAME: &str = "CLOUDHV.fd"; 47 pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true"; 48 } 49 50 #[cfg(target_arch = "x86_64")] 51 use x86_64::*; 52 53 #[cfg(target_arch = "aarch64")] 54 mod aarch64 { 55 pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw"; 56 pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str = 57 "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw"; 58 pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2"; 59 pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str = 60 "focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2"; 61 pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd"; 62 pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx"; 63 pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw"; 64 pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw"; 65 pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd"; 66 pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true"; 67 pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true"; 68 } 69 70 #[cfg(target_arch = "aarch64")] 71 use aarch64::*; 72 73 const DIRECT_KERNEL_BOOT_CMDLINE: &str = 74 "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1"; 75 76 const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server"; 77 78 // This enum exists to make it more convenient to 79 // implement test for both D-Bus and REST APIs. 80 enum TargetApi { 81 // API socket 82 HttpApi(String), 83 // well known service name, object path 84 DBusApi(String, String), 85 } 86 87 impl TargetApi { 88 fn new_http_api(tmp_dir: &TempDir) -> Self { 89 Self::HttpApi(temp_api_path(tmp_dir)) 90 } 91 92 fn new_dbus_api(tmp_dir: &TempDir) -> Self { 93 // `tmp_dir` is in the form of "/tmp/chXXXXXX" 94 // and we take the `chXXXXXX` part as a unique identifier for the guest 95 let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap(); 96 97 Self::DBusApi( 98 format!("org.cloudhypervisor.{id}"), 99 format!("/org/cloudhypervisor/{id}"), 100 ) 101 } 102 103 fn guest_args(&self) -> Vec<String> { 104 match self { 105 TargetApi::HttpApi(api_socket) => { 106 vec![format!("--api-socket={}", api_socket.as_str())] 107 } 108 TargetApi::DBusApi(service_name, object_path) => { 109 vec![ 110 format!("--dbus-service-name={}", service_name.as_str()), 111 format!("--dbus-object-path={}", object_path.as_str()), 112 ] 113 } 114 } 115 } 116 117 fn remote_args(&self) -> Vec<String> { 118 // `guest_args` and `remote_args` are consistent with each other 119 self.guest_args() 120 } 121 122 fn remote_command(&self, command: &str, arg: Option<&str>) -> bool { 123 let mut cmd = Command::new(clh_command("ch-remote")); 124 cmd.args(self.remote_args()); 125 cmd.arg(command); 126 127 if let Some(arg) = arg { 128 cmd.arg(arg); 129 } 130 131 let output = cmd.output().unwrap(); 132 if output.status.success() { 133 true 134 } else { 135 eprintln!("Error running ch-remote command: {:?}", &cmd); 136 let stderr = String::from_utf8_lossy(&output.stderr); 137 eprintln!("stderr: {stderr}"); 138 false 139 } 140 } 141 } 142 143 // Start cloud-hypervisor with no VM parameters, only the API server running. 144 // From the API: Create a VM, boot it and check that it looks as expected. 145 fn _test_api_create_boot(target_api: TargetApi, guest: Guest) { 146 let mut child = GuestCommand::new(&guest) 147 .args(target_api.guest_args()) 148 .capture_output() 149 .spawn() 150 .unwrap(); 151 152 thread::sleep(std::time::Duration::new(1, 0)); 153 154 // Verify API server is running 155 assert!(target_api.remote_command("ping", None)); 156 157 // Create the VM first 158 let cpu_count: u8 = 4; 159 let request_body = guest.api_create_body( 160 cpu_count, 161 direct_kernel_boot_path().to_str().unwrap(), 162 DIRECT_KERNEL_BOOT_CMDLINE, 163 ); 164 165 let temp_config_path = guest.tmp_dir.as_path().join("config"); 166 std::fs::write(&temp_config_path, request_body).unwrap(); 167 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 168 169 assert!(target_api.remote_command("create", Some(create_config),)); 170 171 // Then boot it 172 assert!(target_api.remote_command("boot", None)); 173 thread::sleep(std::time::Duration::new(20, 0)); 174 175 let r = std::panic::catch_unwind(|| { 176 // Check that the VM booted as expected 177 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 178 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 179 }); 180 181 kill_child(&mut child); 182 let output = child.wait_with_output().unwrap(); 183 184 handle_child_output(r, &output); 185 } 186 187 // Start cloud-hypervisor with no VM parameters, only the API server running. 188 // From the API: Create a VM, boot it and check it can be shutdown and then 189 // booted again 190 fn _test_api_shutdown(target_api: TargetApi, guest: Guest) { 191 let mut child = GuestCommand::new(&guest) 192 .args(target_api.guest_args()) 193 .capture_output() 194 .spawn() 195 .unwrap(); 196 197 thread::sleep(std::time::Duration::new(1, 0)); 198 199 // Verify API server is running 200 assert!(target_api.remote_command("ping", None)); 201 202 // Create the VM first 203 let cpu_count: u8 = 4; 204 let request_body = guest.api_create_body( 205 cpu_count, 206 direct_kernel_boot_path().to_str().unwrap(), 207 DIRECT_KERNEL_BOOT_CMDLINE, 208 ); 209 210 let temp_config_path = guest.tmp_dir.as_path().join("config"); 211 std::fs::write(&temp_config_path, request_body).unwrap(); 212 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 213 214 let r = std::panic::catch_unwind(|| { 215 assert!(target_api.remote_command("create", Some(create_config))); 216 217 // Then boot it 218 assert!(target_api.remote_command("boot", None)); 219 220 guest.wait_vm_boot(None).unwrap(); 221 222 // Check that the VM booted as expected 223 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 224 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 225 226 // Sync and shutdown without powering off to prevent filesystem 227 // corruption. 228 guest.ssh_command("sync").unwrap(); 229 guest.ssh_command("sudo shutdown -H now").unwrap(); 230 231 // Wait for the guest to be fully shutdown 232 thread::sleep(std::time::Duration::new(20, 0)); 233 234 // Then shut it down 235 assert!(target_api.remote_command("shutdown", None)); 236 237 // Then boot it again 238 assert!(target_api.remote_command("boot", None)); 239 240 guest.wait_vm_boot(None).unwrap(); 241 242 // Check that the VM booted as expected 243 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 244 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 245 }); 246 247 kill_child(&mut child); 248 let output = child.wait_with_output().unwrap(); 249 250 handle_child_output(r, &output); 251 } 252 253 // Start cloud-hypervisor with no VM parameters, only the API server running. 254 // From the API: Create a VM, boot it and check it can be deleted and then recreated 255 // booted again. 256 fn _test_api_delete(target_api: TargetApi, guest: Guest) { 257 let mut child = GuestCommand::new(&guest) 258 .args(target_api.guest_args()) 259 .capture_output() 260 .spawn() 261 .unwrap(); 262 263 thread::sleep(std::time::Duration::new(1, 0)); 264 265 // Verify API server is running 266 assert!(target_api.remote_command("ping", None)); 267 268 // Create the VM first 269 let cpu_count: u8 = 4; 270 let request_body = guest.api_create_body( 271 cpu_count, 272 direct_kernel_boot_path().to_str().unwrap(), 273 DIRECT_KERNEL_BOOT_CMDLINE, 274 ); 275 let temp_config_path = guest.tmp_dir.as_path().join("config"); 276 std::fs::write(&temp_config_path, request_body).unwrap(); 277 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 278 279 let r = std::panic::catch_unwind(|| { 280 assert!(target_api.remote_command("create", Some(create_config))); 281 282 // Then boot it 283 assert!(target_api.remote_command("boot", None)); 284 285 guest.wait_vm_boot(None).unwrap(); 286 287 // Check that the VM booted as expected 288 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 289 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 290 291 // Sync and shutdown without powering off to prevent filesystem 292 // corruption. 293 guest.ssh_command("sync").unwrap(); 294 guest.ssh_command("sudo shutdown -H now").unwrap(); 295 296 // Wait for the guest to be fully shutdown 297 thread::sleep(std::time::Duration::new(20, 0)); 298 299 // Then delete it 300 assert!(target_api.remote_command("delete", None)); 301 302 assert!(target_api.remote_command("create", Some(create_config))); 303 304 // Then boot it again 305 assert!(target_api.remote_command("boot", None)); 306 307 guest.wait_vm_boot(None).unwrap(); 308 309 // Check that the VM booted as expected 310 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 311 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 312 }); 313 314 kill_child(&mut child); 315 let output = child.wait_with_output().unwrap(); 316 317 handle_child_output(r, &output); 318 } 319 320 // Start cloud-hypervisor with no VM parameters, only the API server running. 321 // From the API: Create a VM, boot it and check that it looks as expected. 322 // Then we pause the VM, check that it's no longer available. 323 // Finally we resume the VM and check that it's available. 324 fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) { 325 let mut child = GuestCommand::new(&guest) 326 .args(target_api.guest_args()) 327 .capture_output() 328 .spawn() 329 .unwrap(); 330 331 thread::sleep(std::time::Duration::new(1, 0)); 332 333 // Verify API server is running 334 assert!(target_api.remote_command("ping", None)); 335 336 // Create the VM first 337 let cpu_count: u8 = 4; 338 let request_body = guest.api_create_body( 339 cpu_count, 340 direct_kernel_boot_path().to_str().unwrap(), 341 DIRECT_KERNEL_BOOT_CMDLINE, 342 ); 343 344 let temp_config_path = guest.tmp_dir.as_path().join("config"); 345 std::fs::write(&temp_config_path, request_body).unwrap(); 346 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 347 348 assert!(target_api.remote_command("create", Some(create_config))); 349 350 // Then boot it 351 assert!(target_api.remote_command("boot", None)); 352 thread::sleep(std::time::Duration::new(20, 0)); 353 354 let r = std::panic::catch_unwind(|| { 355 // Check that the VM booted as expected 356 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 357 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 358 359 // We now pause the VM 360 assert!(target_api.remote_command("pause", None)); 361 362 // Check pausing again fails 363 assert!(!target_api.remote_command("pause", None)); 364 365 thread::sleep(std::time::Duration::new(2, 0)); 366 367 // SSH into the VM should fail 368 ssh_command_ip( 369 "grep -c processor /proc/cpuinfo", 370 &guest.network.guest_ip, 371 2, 372 5, 373 ) 374 .unwrap_err(); 375 376 // Resume the VM 377 assert!(target_api.remote_command("resume", None)); 378 379 // Check resuming again fails 380 assert!(!target_api.remote_command("resume", None)); 381 382 thread::sleep(std::time::Duration::new(2, 0)); 383 384 // Now we should be able to SSH back in and get the right number of CPUs 385 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 386 }); 387 388 kill_child(&mut child); 389 let output = child.wait_with_output().unwrap(); 390 391 handle_child_output(r, &output); 392 } 393 394 fn _test_pty_interaction(pty_path: PathBuf) { 395 let mut cf = std::fs::OpenOptions::new() 396 .write(true) 397 .read(true) 398 .open(pty_path) 399 .unwrap(); 400 401 // Some dumb sleeps but we don't want to write 402 // before the console is up and we don't want 403 // to try and write the next line before the 404 // login process is ready. 405 thread::sleep(std::time::Duration::new(5, 0)); 406 assert_eq!(cf.write(b"cloud\n").unwrap(), 6); 407 thread::sleep(std::time::Duration::new(2, 0)); 408 assert_eq!(cf.write(b"cloud123\n").unwrap(), 9); 409 thread::sleep(std::time::Duration::new(2, 0)); 410 assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22); 411 thread::sleep(std::time::Duration::new(2, 0)); 412 413 // read pty and ensure they have a login shell 414 // some fairly hacky workarounds to avoid looping 415 // forever in case the channel is blocked getting output 416 let ptyc = pty_read(cf); 417 let mut empty = 0; 418 let mut prev = String::new(); 419 loop { 420 thread::sleep(std::time::Duration::new(2, 0)); 421 match ptyc.try_recv() { 422 Ok(line) => { 423 empty = 0; 424 prev = prev + &line; 425 if prev.contains("test_pty_console") { 426 break; 427 } 428 } 429 Err(mpsc::TryRecvError::Empty) => { 430 empty += 1; 431 assert!(empty <= 5, "No login on pty"); 432 } 433 _ => { 434 panic!("No login on pty") 435 } 436 } 437 } 438 } 439 440 fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) { 441 let mut workload_path = dirs::home_dir().unwrap(); 442 workload_path.push("workloads"); 443 444 let mut virtiofsd_path = workload_path; 445 virtiofsd_path.push("virtiofsd"); 446 let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap()); 447 448 let virtiofsd_socket_path = 449 String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap()); 450 451 // Start the daemon 452 let child = Command::new(virtiofsd_path.as_str()) 453 .args(["--shared-dir", shared_dir]) 454 .args(["--socket-path", virtiofsd_socket_path.as_str()]) 455 .args(["--cache", "never"]) 456 .spawn() 457 .unwrap(); 458 459 thread::sleep(std::time::Duration::new(10, 0)); 460 461 (child, virtiofsd_socket_path) 462 } 463 464 fn prepare_vubd( 465 tmp_dir: &TempDir, 466 blk_img: &str, 467 num_queues: usize, 468 rdonly: bool, 469 direct: bool, 470 ) -> (std::process::Child, String) { 471 let mut workload_path = dirs::home_dir().unwrap(); 472 workload_path.push("workloads"); 473 474 let mut blk_file_path = workload_path; 475 blk_file_path.push(blk_img); 476 let blk_file_path = String::from(blk_file_path.to_str().unwrap()); 477 478 let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap()); 479 480 // Start the daemon 481 let child = Command::new(clh_command("vhost_user_block")) 482 .args([ 483 "--block-backend", 484 format!( 485 "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}" 486 ) 487 .as_str(), 488 ]) 489 .spawn() 490 .unwrap(); 491 492 thread::sleep(std::time::Duration::new(10, 0)); 493 494 (child, vubd_socket_path) 495 } 496 497 fn temp_vsock_path(tmp_dir: &TempDir) -> String { 498 String::from(tmp_dir.as_path().join("vsock").to_str().unwrap()) 499 } 500 501 fn temp_api_path(tmp_dir: &TempDir) -> String { 502 String::from( 503 tmp_dir 504 .as_path() 505 .join("cloud-hypervisor.sock") 506 .to_str() 507 .unwrap(), 508 ) 509 } 510 511 fn temp_event_monitor_path(tmp_dir: &TempDir) -> String { 512 String::from(tmp_dir.as_path().join("event.json").to_str().unwrap()) 513 } 514 515 // Creates the directory and returns the path. 516 fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String { 517 let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap()); 518 std::fs::create_dir(&snapshot_dir).unwrap(); 519 snapshot_dir 520 } 521 522 fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String { 523 let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap()); 524 vmcore_file 525 } 526 527 // Creates the path for direct kernel boot and return the path. 528 // For x86_64, this function returns the vmlinux kernel path. 529 // For AArch64, this function returns the PE kernel path. 530 fn direct_kernel_boot_path() -> PathBuf { 531 let mut workload_path = dirs::home_dir().unwrap(); 532 workload_path.push("workloads"); 533 534 let mut kernel_path = workload_path; 535 #[cfg(target_arch = "x86_64")] 536 kernel_path.push("vmlinux"); 537 #[cfg(target_arch = "aarch64")] 538 kernel_path.push("Image"); 539 540 kernel_path 541 } 542 543 fn edk2_path() -> PathBuf { 544 let mut workload_path = dirs::home_dir().unwrap(); 545 workload_path.push("workloads"); 546 let mut edk2_path = workload_path; 547 edk2_path.push(OVMF_NAME); 548 549 edk2_path 550 } 551 552 fn cloud_hypervisor_release_path() -> String { 553 let mut workload_path = dirs::home_dir().unwrap(); 554 workload_path.push("workloads"); 555 556 let mut ch_release_path = workload_path; 557 #[cfg(target_arch = "x86_64")] 558 ch_release_path.push("cloud-hypervisor-static"); 559 #[cfg(target_arch = "aarch64")] 560 ch_release_path.push("cloud-hypervisor-static-aarch64"); 561 562 ch_release_path.into_os_string().into_string().unwrap() 563 } 564 565 fn prepare_vhost_user_net_daemon( 566 tmp_dir: &TempDir, 567 ip: &str, 568 tap: Option<&str>, 569 mtu: Option<u16>, 570 num_queues: usize, 571 client_mode: bool, 572 ) -> (std::process::Command, String) { 573 let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap()); 574 575 // Start the daemon 576 let mut net_params = format!( 577 "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}" 578 ); 579 580 if let Some(tap) = tap { 581 net_params.push_str(format!(",tap={tap}").as_str()); 582 } 583 584 if let Some(mtu) = mtu { 585 net_params.push_str(format!(",mtu={mtu}").as_str()); 586 } 587 588 let mut command = Command::new(clh_command("vhost_user_net")); 589 command.args(["--net-backend", net_params.as_str()]); 590 591 (command, vunet_socket_path) 592 } 593 594 fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) { 595 let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap()); 596 let swtpm_socket_path = String::from( 597 tmp_dir 598 .as_path() 599 .join("swtpm") 600 .join("swtpm.sock") 601 .to_str() 602 .unwrap(), 603 ); 604 std::fs::create_dir(&swtpm_tpm_dir).unwrap(); 605 606 let mut swtpm_command = Command::new("swtpm"); 607 let swtpm_args = [ 608 "socket", 609 "--tpmstate", 610 &format!("dir={swtpm_tpm_dir}"), 611 "--ctrl", 612 &format!("type=unixio,path={swtpm_socket_path}"), 613 "--flags", 614 "startup-clear", 615 "--tpm2", 616 ]; 617 swtpm_command.args(swtpm_args); 618 619 (swtpm_command, swtpm_socket_path) 620 } 621 622 fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool { 623 let mut cmd = Command::new(clh_command("ch-remote")); 624 cmd.args([&format!("--api-socket={api_socket}"), command]); 625 626 if let Some(arg) = arg { 627 cmd.arg(arg); 628 } 629 let output = cmd.output().unwrap(); 630 if output.status.success() { 631 true 632 } else { 633 eprintln!("Error running ch-remote command: {:?}", &cmd); 634 let stderr = String::from_utf8_lossy(&output.stderr); 635 eprintln!("stderr: {stderr}"); 636 false 637 } 638 } 639 640 fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) { 641 let mut cmd = Command::new(clh_command("ch-remote")); 642 cmd.args([&format!("--api-socket={api_socket}"), command]); 643 644 if let Some(arg) = arg { 645 cmd.arg(arg); 646 } 647 648 let output = cmd.output().expect("Failed to launch ch-remote"); 649 650 (output.status.success(), output.stdout) 651 } 652 653 fn resize_command( 654 api_socket: &str, 655 desired_vcpus: Option<u8>, 656 desired_ram: Option<usize>, 657 desired_balloon: Option<usize>, 658 event_file: Option<&str>, 659 ) -> bool { 660 let mut cmd = Command::new(clh_command("ch-remote")); 661 cmd.args([&format!("--api-socket={api_socket}"), "resize"]); 662 663 if let Some(desired_vcpus) = desired_vcpus { 664 cmd.arg(format!("--cpus={desired_vcpus}")); 665 } 666 667 if let Some(desired_ram) = desired_ram { 668 cmd.arg(format!("--memory={desired_ram}")); 669 } 670 671 if let Some(desired_balloon) = desired_balloon { 672 cmd.arg(format!("--balloon={desired_balloon}")); 673 } 674 675 let ret = cmd.status().expect("Failed to launch ch-remote").success(); 676 677 if let Some(event_path) = event_file { 678 let latest_events = [ 679 &MetaEvent { 680 event: "resizing".to_string(), 681 device_id: None, 682 }, 683 &MetaEvent { 684 event: "resized".to_string(), 685 device_id: None, 686 }, 687 ]; 688 // See: #5938 689 thread::sleep(std::time::Duration::new(1, 0)); 690 assert!(check_latest_events_exact(&latest_events, event_path)); 691 } 692 693 ret 694 } 695 696 fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool { 697 let mut cmd = Command::new(clh_command("ch-remote")); 698 cmd.args([ 699 &format!("--api-socket={api_socket}"), 700 "resize-zone", 701 &format!("--id={id}"), 702 &format!("--size={desired_size}"), 703 ]); 704 705 cmd.status().expect("Failed to launch ch-remote").success() 706 } 707 708 // setup OVS-DPDK bridge and ports 709 fn setup_ovs_dpdk() { 710 // setup OVS-DPDK 711 assert!(exec_host_command_status("service openvswitch-switch start").success()); 712 assert!(exec_host_command_status("ovs-vsctl init").success()); 713 assert!( 714 exec_host_command_status("ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true") 715 .success() 716 ); 717 assert!(exec_host_command_status("service openvswitch-switch restart").success()); 718 719 // Create OVS-DPDK bridge and ports 720 assert!(exec_host_command_status( 721 "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev", 722 ) 723 .success()); 724 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 725 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success()); 726 assert!(exec_host_command_status("ip link set up dev ovsbr0").success()); 727 assert!(exec_host_command_status("service openvswitch-switch restart").success()); 728 } 729 fn cleanup_ovs_dpdk() { 730 assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success()); 731 exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2"); 732 } 733 // Setup two guests and ensure they are connected through ovs-dpdk 734 fn setup_ovs_dpdk_guests( 735 guest1: &Guest, 736 guest2: &Guest, 737 api_socket: &str, 738 release_binary: bool, 739 ) -> (Child, Child) { 740 setup_ovs_dpdk(); 741 742 let clh_path = if !release_binary { 743 clh_command("cloud-hypervisor") 744 } else { 745 cloud_hypervisor_release_path() 746 }; 747 748 let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path) 749 .args(["--cpus", "boot=2"]) 750 .args(["--memory", "size=0,shared=on"]) 751 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"]) 752 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 753 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 754 .default_disks() 755 .args(["--net", guest1.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"]) 756 .capture_output() 757 .spawn() 758 .unwrap(); 759 760 #[cfg(target_arch = "x86_64")] 761 let guest_net_iface = "ens5"; 762 #[cfg(target_arch = "aarch64")] 763 let guest_net_iface = "enp0s5"; 764 765 let r = std::panic::catch_unwind(|| { 766 guest1.wait_vm_boot(None).unwrap(); 767 768 guest1 769 .ssh_command(&format!( 770 "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}" 771 )) 772 .unwrap(); 773 guest1 774 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 775 .unwrap(); 776 777 let guest_ip = guest1.network.guest_ip.clone(); 778 thread::spawn(move || { 779 ssh_command_ip( 780 "nc -l 12345", 781 &guest_ip, 782 DEFAULT_SSH_RETRIES, 783 DEFAULT_SSH_TIMEOUT, 784 ) 785 .unwrap(); 786 }); 787 }); 788 if r.is_err() { 789 cleanup_ovs_dpdk(); 790 791 let _ = child1.kill(); 792 let output = child1.wait_with_output().unwrap(); 793 handle_child_output(r, &output); 794 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 795 } 796 797 let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path) 798 .args(["--api-socket", api_socket]) 799 .args(["--cpus", "boot=2"]) 800 .args(["--memory", "size=0,shared=on"]) 801 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"]) 802 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 803 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 804 .default_disks() 805 .args(["--net", guest2.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"]) 806 .capture_output() 807 .spawn() 808 .unwrap(); 809 810 let r = std::panic::catch_unwind(|| { 811 guest2.wait_vm_boot(None).unwrap(); 812 813 guest2 814 .ssh_command(&format!( 815 "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}" 816 )) 817 .unwrap(); 818 guest2 819 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 820 .unwrap(); 821 822 // Check the connection works properly between the two VMs 823 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 824 }); 825 if r.is_err() { 826 cleanup_ovs_dpdk(); 827 828 let _ = child1.kill(); 829 let _ = child2.kill(); 830 let output = child2.wait_with_output().unwrap(); 831 handle_child_output(r, &output); 832 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 833 } 834 835 (child1, child2) 836 } 837 838 enum FwType { 839 Ovmf, 840 RustHypervisorFirmware, 841 } 842 843 fn fw_path(_fw_type: FwType) -> String { 844 let mut workload_path = dirs::home_dir().unwrap(); 845 workload_path.push("workloads"); 846 847 let mut fw_path = workload_path; 848 #[cfg(target_arch = "aarch64")] 849 fw_path.push("CLOUDHV_EFI.fd"); 850 #[cfg(target_arch = "x86_64")] 851 { 852 match _fw_type { 853 FwType::Ovmf => fw_path.push(OVMF_NAME), 854 FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"), 855 } 856 } 857 858 fw_path.to_str().unwrap().to_string() 859 } 860 861 #[derive(Debug)] 862 struct MetaEvent { 863 event: String, 864 device_id: Option<String>, 865 } 866 867 impl MetaEvent { 868 pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool { 869 let mut matched = false; 870 if v["event"].as_str().unwrap() == self.event { 871 if let Some(device_id) = &self.device_id { 872 if v["properties"]["id"].as_str().unwrap() == device_id { 873 matched = true 874 } 875 } else { 876 matched = true; 877 } 878 } 879 matched 880 } 881 } 882 883 // Parse the event_monitor file based on the format that each event 884 // is followed by a double newline 885 fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> { 886 let content = fs::read(event_file).unwrap(); 887 let mut ret = Vec::new(); 888 for entry in String::from_utf8_lossy(&content) 889 .trim() 890 .split("\n\n") 891 .collect::<Vec<&str>>() 892 { 893 ret.push(serde_json::from_str(entry).unwrap()); 894 } 895 896 ret 897 } 898 899 // Return true if all events from the input 'expected_events' are matched sequentially 900 // with events from the 'event_file' 901 fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool { 902 let json_events = parse_event_file(event_file); 903 let len = expected_events.len(); 904 let mut idx = 0; 905 for e in &json_events { 906 if idx == len { 907 break; 908 } 909 if expected_events[idx].match_with_json_event(e) { 910 idx += 1; 911 } 912 } 913 914 let ret = idx == len; 915 916 if !ret { 917 eprintln!( 918 "\n\n==== Start 'check_sequential_events' failed ==== \ 919 \n\nexpected_events={:?}\nactual_events={:?} \ 920 \n\n==== End 'check_sequential_events' failed ====", 921 expected_events, json_events, 922 ); 923 } 924 925 ret 926 } 927 928 // Return true if all events from the input 'expected_events' are matched exactly 929 // with events from the 'event_file' 930 fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool { 931 let json_events = parse_event_file(event_file); 932 assert!(expected_events.len() <= json_events.len()); 933 let json_events = &json_events[..expected_events.len()]; 934 935 for (idx, e) in json_events.iter().enumerate() { 936 if !expected_events[idx].match_with_json_event(e) { 937 eprintln!( 938 "\n\n==== Start 'check_sequential_events_exact' failed ==== \ 939 \n\nexpected_events={:?}\nactual_events={:?} \ 940 \n\n==== End 'check_sequential_events_exact' failed ====", 941 expected_events, json_events, 942 ); 943 944 return false; 945 } 946 } 947 948 true 949 } 950 951 // Return true if events from the input 'latest_events' are matched exactly 952 // with the most recent events from the 'event_file' 953 fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool { 954 let json_events = parse_event_file(event_file); 955 assert!(latest_events.len() <= json_events.len()); 956 let json_events = &json_events[(json_events.len() - latest_events.len())..]; 957 958 for (idx, e) in json_events.iter().enumerate() { 959 if !latest_events[idx].match_with_json_event(e) { 960 eprintln!( 961 "\n\n==== Start 'check_latest_events_exact' failed ==== \ 962 \n\nexpected_events={:?}\nactual_events={:?} \ 963 \n\n==== End 'check_latest_events_exact' failed ====", 964 latest_events, json_events, 965 ); 966 967 return false; 968 } 969 } 970 971 true 972 } 973 974 fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) { 975 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 976 let guest = Guest::new(Box::new(focal)); 977 let total_vcpus = threads_per_core * cores_per_package * packages; 978 let direct_kernel_boot_path = direct_kernel_boot_path(); 979 let mut kernel_path = direct_kernel_boot_path.to_str().unwrap(); 980 let fw_path = fw_path(FwType::RustHypervisorFirmware); 981 if use_fw { 982 kernel_path = fw_path.as_str(); 983 } 984 985 let mut child = GuestCommand::new(&guest) 986 .args([ 987 "--cpus", 988 &format!( 989 "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}" 990 ), 991 ]) 992 .args(["--memory", "size=512M"]) 993 .args(["--kernel", kernel_path]) 994 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 995 .default_disks() 996 .default_net() 997 .capture_output() 998 .spawn() 999 .unwrap(); 1000 1001 let r = std::panic::catch_unwind(|| { 1002 guest.wait_vm_boot(None).unwrap(); 1003 assert_eq!( 1004 guest.get_cpu_count().unwrap_or_default(), 1005 u32::from(total_vcpus) 1006 ); 1007 assert_eq!( 1008 guest 1009 .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1010 .unwrap() 1011 .trim() 1012 .parse::<u8>() 1013 .unwrap_or(0), 1014 threads_per_core 1015 ); 1016 1017 assert_eq!( 1018 guest 1019 .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1020 .unwrap() 1021 .trim() 1022 .parse::<u8>() 1023 .unwrap_or(0), 1024 cores_per_package 1025 ); 1026 1027 assert_eq!( 1028 guest 1029 .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1030 .unwrap() 1031 .trim() 1032 .parse::<u8>() 1033 .unwrap_or(0), 1034 packages 1035 ); 1036 1037 #[cfg(target_arch = "x86_64")] 1038 { 1039 let mut cpu_id = 0; 1040 for package_id in 0..packages { 1041 for core_id in 0..cores_per_package { 1042 for _ in 0..threads_per_core { 1043 assert_eq!( 1044 guest 1045 .ssh_command(&format!("cat /sys/devices/system/cpu/cpu{cpu_id}/topology/physical_package_id")) 1046 .unwrap() 1047 .trim() 1048 .parse::<u8>() 1049 .unwrap_or(0), 1050 package_id 1051 ); 1052 1053 assert_eq!( 1054 guest 1055 .ssh_command(&format!( 1056 "cat /sys/devices/system/cpu/cpu{cpu_id}/topology/core_id" 1057 )) 1058 .unwrap() 1059 .trim() 1060 .parse::<u8>() 1061 .unwrap_or(0), 1062 core_id 1063 ); 1064 1065 cpu_id += 1; 1066 } 1067 } 1068 } 1069 } 1070 }); 1071 1072 kill_child(&mut child); 1073 let output = child.wait_with_output().unwrap(); 1074 1075 handle_child_output(r, &output); 1076 } 1077 1078 #[allow(unused_variables)] 1079 fn _test_guest_numa_nodes(acpi: bool) { 1080 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1081 let guest = Guest::new(Box::new(focal)); 1082 let api_socket = temp_api_path(&guest.tmp_dir); 1083 #[cfg(target_arch = "x86_64")] 1084 let kernel_path = direct_kernel_boot_path(); 1085 #[cfg(target_arch = "aarch64")] 1086 let kernel_path = if acpi { 1087 edk2_path() 1088 } else { 1089 direct_kernel_boot_path() 1090 }; 1091 1092 let mut child = GuestCommand::new(&guest) 1093 .args(["--cpus", "boot=6,max=12"]) 1094 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 1095 .args([ 1096 "--memory-zone", 1097 "id=mem0,size=1G,hotplug_size=3G", 1098 "id=mem1,size=2G,hotplug_size=3G", 1099 "id=mem2,size=3G,hotplug_size=3G", 1100 ]) 1101 .args([ 1102 "--numa", 1103 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 1104 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 1105 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 1106 ]) 1107 .args(["--kernel", kernel_path.to_str().unwrap()]) 1108 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1109 .args(["--api-socket", &api_socket]) 1110 .capture_output() 1111 .default_disks() 1112 .default_net() 1113 .spawn() 1114 .unwrap(); 1115 1116 let r = std::panic::catch_unwind(|| { 1117 guest.wait_vm_boot(None).unwrap(); 1118 1119 guest.check_numa_common( 1120 Some(&[960_000, 1_920_000, 2_880_000]), 1121 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 1122 Some(&["10 15 20", "20 10 25", "25 30 10"]), 1123 ); 1124 1125 // AArch64 currently does not support hotplug, and therefore we only 1126 // test hotplug-related function on x86_64 here. 1127 #[cfg(target_arch = "x86_64")] 1128 { 1129 guest.enable_memory_hotplug(); 1130 1131 // Resize every memory zone and check each associated NUMA node 1132 // has been assigned the right amount of memory. 1133 resize_zone_command(&api_socket, "mem0", "4G"); 1134 resize_zone_command(&api_socket, "mem1", "4G"); 1135 resize_zone_command(&api_socket, "mem2", "4G"); 1136 // Resize to the maximum amount of CPUs and check each NUMA 1137 // node has been assigned the right CPUs set. 1138 resize_command(&api_socket, Some(12), None, None, None); 1139 thread::sleep(std::time::Duration::new(5, 0)); 1140 1141 guest.check_numa_common( 1142 Some(&[3_840_000, 3_840_000, 3_840_000]), 1143 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 1144 None, 1145 ); 1146 } 1147 }); 1148 1149 kill_child(&mut child); 1150 let output = child.wait_with_output().unwrap(); 1151 1152 handle_child_output(r, &output); 1153 } 1154 1155 #[allow(unused_variables)] 1156 fn _test_power_button(acpi: bool) { 1157 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1158 let guest = Guest::new(Box::new(focal)); 1159 let mut cmd = GuestCommand::new(&guest); 1160 let api_socket = temp_api_path(&guest.tmp_dir); 1161 1162 #[cfg(target_arch = "x86_64")] 1163 let kernel_path = direct_kernel_boot_path(); 1164 #[cfg(target_arch = "aarch64")] 1165 let kernel_path = if acpi { 1166 edk2_path() 1167 } else { 1168 direct_kernel_boot_path() 1169 }; 1170 1171 cmd.args(["--cpus", "boot=1"]) 1172 .args(["--memory", "size=512M"]) 1173 .args(["--kernel", kernel_path.to_str().unwrap()]) 1174 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1175 .capture_output() 1176 .default_disks() 1177 .default_net() 1178 .args(["--api-socket", &api_socket]); 1179 1180 let child = cmd.spawn().unwrap(); 1181 1182 let r = std::panic::catch_unwind(|| { 1183 guest.wait_vm_boot(None).unwrap(); 1184 assert!(remote_command(&api_socket, "power-button", None)); 1185 }); 1186 1187 let output = child.wait_with_output().unwrap(); 1188 assert!(output.status.success()); 1189 handle_child_output(r, &output); 1190 } 1191 1192 type PrepareNetDaemon = dyn Fn( 1193 &TempDir, 1194 &str, 1195 Option<&str>, 1196 Option<u16>, 1197 usize, 1198 bool, 1199 ) -> (std::process::Command, String); 1200 1201 fn test_vhost_user_net( 1202 tap: Option<&str>, 1203 num_queues: usize, 1204 prepare_daemon: &PrepareNetDaemon, 1205 generate_host_mac: bool, 1206 client_mode_daemon: bool, 1207 ) { 1208 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1209 let guest = Guest::new(Box::new(focal)); 1210 let api_socket = temp_api_path(&guest.tmp_dir); 1211 1212 let kernel_path = direct_kernel_boot_path(); 1213 1214 let host_mac = if generate_host_mac { 1215 Some(MacAddr::local_random()) 1216 } else { 1217 None 1218 }; 1219 1220 let mtu = Some(3000); 1221 1222 let (mut daemon_command, vunet_socket_path) = prepare_daemon( 1223 &guest.tmp_dir, 1224 &guest.network.host_ip, 1225 tap, 1226 mtu, 1227 num_queues, 1228 client_mode_daemon, 1229 ); 1230 1231 let net_params = format!( 1232 "vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000", 1233 guest.network.guest_mac, 1234 vunet_socket_path, 1235 num_queues, 1236 if let Some(host_mac) = host_mac { 1237 format!(",host_mac={host_mac}") 1238 } else { 1239 "".to_owned() 1240 }, 1241 if client_mode_daemon { 1242 "server" 1243 } else { 1244 "client" 1245 }, 1246 ); 1247 1248 let mut ch_command = GuestCommand::new(&guest); 1249 ch_command 1250 .args(["--cpus", format!("boot={}", num_queues / 2).as_str()]) 1251 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1252 .args(["--kernel", kernel_path.to_str().unwrap()]) 1253 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1254 .default_disks() 1255 .args(["--net", net_params.as_str()]) 1256 .args(["--api-socket", &api_socket]) 1257 .capture_output(); 1258 1259 let mut daemon_child: std::process::Child; 1260 let mut child: std::process::Child; 1261 1262 if client_mode_daemon { 1263 child = ch_command.spawn().unwrap(); 1264 // Make sure the VMM is waiting for the backend to connect 1265 thread::sleep(std::time::Duration::new(10, 0)); 1266 daemon_child = daemon_command.spawn().unwrap(); 1267 } else { 1268 daemon_child = daemon_command.spawn().unwrap(); 1269 // Make sure the backend is waiting for the VMM to connect 1270 thread::sleep(std::time::Duration::new(10, 0)); 1271 child = ch_command.spawn().unwrap(); 1272 } 1273 1274 let r = std::panic::catch_unwind(|| { 1275 guest.wait_vm_boot(None).unwrap(); 1276 1277 if let Some(tap_name) = tap { 1278 let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}")); 1279 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 1280 } 1281 1282 if let Some(host_mac) = tap { 1283 let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}")); 1284 assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1"); 1285 } 1286 1287 #[cfg(target_arch = "aarch64")] 1288 let iface = "enp0s4"; 1289 #[cfg(target_arch = "x86_64")] 1290 let iface = "ens4"; 1291 1292 assert_eq!( 1293 guest 1294 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str()) 1295 .unwrap() 1296 .trim(), 1297 "3000" 1298 ); 1299 1300 // 1 network interface + default localhost ==> 2 interfaces 1301 // It's important to note that this test is fully exercising the 1302 // vhost-user-net implementation and the associated backend since 1303 // it does not define any --net network interface. That means all 1304 // the ssh communication in that test happens through the network 1305 // interface backed by vhost-user-net. 1306 assert_eq!( 1307 guest 1308 .ssh_command("ip -o link | wc -l") 1309 .unwrap() 1310 .trim() 1311 .parse::<u32>() 1312 .unwrap_or_default(), 1313 2 1314 ); 1315 1316 // The following pci devices will appear on guest with PCI-MSI 1317 // interrupt vectors assigned. 1318 // 1 virtio-console with 3 vectors: config, Rx, Tx 1319 // 1 virtio-blk with 2 vectors: config, Request 1320 // 1 virtio-blk with 2 vectors: config, Request 1321 // 1 virtio-rng with 2 vectors: config, Request 1322 // Since virtio-net has 2 queue pairs, its vectors is as follows: 1323 // 1 virtio-net with 5 vectors: config, Rx (2), Tx (2) 1324 // Based on the above, the total vectors should 14. 1325 #[cfg(target_arch = "x86_64")] 1326 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 1327 #[cfg(target_arch = "aarch64")] 1328 let grep_cmd = "grep -c ITS-PCI-MSIX /proc/interrupts"; 1329 assert_eq!( 1330 guest 1331 .ssh_command(grep_cmd) 1332 .unwrap() 1333 .trim() 1334 .parse::<u32>() 1335 .unwrap_or_default(), 1336 10 + (num_queues as u32) 1337 ); 1338 1339 // ACPI feature is needed. 1340 #[cfg(target_arch = "x86_64")] 1341 { 1342 guest.enable_memory_hotplug(); 1343 1344 // Add RAM to the VM 1345 let desired_ram = 1024 << 20; 1346 resize_command(&api_socket, None, Some(desired_ram), None, None); 1347 1348 thread::sleep(std::time::Duration::new(10, 0)); 1349 1350 // Here by simply checking the size (through ssh), we validate 1351 // the connection is still working, which means vhost-user-net 1352 // keeps working after the resize. 1353 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1354 } 1355 }); 1356 1357 kill_child(&mut child); 1358 let output = child.wait_with_output().unwrap(); 1359 1360 thread::sleep(std::time::Duration::new(5, 0)); 1361 let _ = daemon_child.kill(); 1362 let _ = daemon_child.wait(); 1363 1364 handle_child_output(r, &output); 1365 } 1366 1367 type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String); 1368 1369 fn test_vhost_user_blk( 1370 num_queues: usize, 1371 readonly: bool, 1372 direct: bool, 1373 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>, 1374 ) { 1375 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1376 let guest = Guest::new(Box::new(focal)); 1377 let api_socket = temp_api_path(&guest.tmp_dir); 1378 1379 let kernel_path = direct_kernel_boot_path(); 1380 1381 let (blk_params, daemon_child) = { 1382 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap(); 1383 // Start the daemon 1384 let (daemon_child, vubd_socket_path) = 1385 prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct); 1386 1387 ( 1388 format!( 1389 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128", 1390 ), 1391 Some(daemon_child), 1392 ) 1393 }; 1394 1395 let mut child = GuestCommand::new(&guest) 1396 .args(["--cpus", format!("boot={num_queues}").as_str()]) 1397 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1398 .args(["--kernel", kernel_path.to_str().unwrap()]) 1399 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1400 .args([ 1401 "--disk", 1402 format!( 1403 "path={}", 1404 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 1405 ) 1406 .as_str(), 1407 format!( 1408 "path={}", 1409 guest.disk_config.disk(DiskType::CloudInit).unwrap() 1410 ) 1411 .as_str(), 1412 blk_params.as_str(), 1413 ]) 1414 .default_net() 1415 .args(["--api-socket", &api_socket]) 1416 .capture_output() 1417 .spawn() 1418 .unwrap(); 1419 1420 let r = std::panic::catch_unwind(|| { 1421 guest.wait_vm_boot(None).unwrap(); 1422 1423 // Check both if /dev/vdc exists and if the block size is 16M. 1424 assert_eq!( 1425 guest 1426 .ssh_command("lsblk | grep vdc | grep -c 16M") 1427 .unwrap() 1428 .trim() 1429 .parse::<u32>() 1430 .unwrap_or_default(), 1431 1 1432 ); 1433 1434 // Check if this block is RO or RW. 1435 assert_eq!( 1436 guest 1437 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 1438 .unwrap() 1439 .trim() 1440 .parse::<u32>() 1441 .unwrap_or_default(), 1442 readonly as u32 1443 ); 1444 1445 // Check if the number of queues in /sys/block/vdc/mq matches the 1446 // expected num_queues. 1447 assert_eq!( 1448 guest 1449 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 1450 .unwrap() 1451 .trim() 1452 .parse::<u32>() 1453 .unwrap_or_default(), 1454 num_queues as u32 1455 ); 1456 1457 // Mount the device 1458 let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" }; 1459 guest.ssh_command("mkdir mount_image").unwrap(); 1460 guest 1461 .ssh_command( 1462 format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(), 1463 ) 1464 .unwrap(); 1465 1466 // Check the content of the block device. The file "foo" should 1467 // contain "bar". 1468 assert_eq!( 1469 guest.ssh_command("cat mount_image/foo").unwrap().trim(), 1470 "bar" 1471 ); 1472 1473 // ACPI feature is needed. 1474 #[cfg(target_arch = "x86_64")] 1475 { 1476 guest.enable_memory_hotplug(); 1477 1478 // Add RAM to the VM 1479 let desired_ram = 1024 << 20; 1480 resize_command(&api_socket, None, Some(desired_ram), None, None); 1481 1482 thread::sleep(std::time::Duration::new(10, 0)); 1483 1484 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1485 1486 // Check again the content of the block device after the resize 1487 // has been performed. 1488 assert_eq!( 1489 guest.ssh_command("cat mount_image/foo").unwrap().trim(), 1490 "bar" 1491 ); 1492 } 1493 1494 // Unmount the device 1495 guest.ssh_command("sudo umount /dev/vdc").unwrap(); 1496 guest.ssh_command("rm -r mount_image").unwrap(); 1497 }); 1498 1499 kill_child(&mut child); 1500 let output = child.wait_with_output().unwrap(); 1501 1502 if let Some(mut daemon_child) = daemon_child { 1503 thread::sleep(std::time::Duration::new(5, 0)); 1504 let _ = daemon_child.kill(); 1505 let _ = daemon_child.wait(); 1506 } 1507 1508 handle_child_output(r, &output); 1509 } 1510 1511 fn test_boot_from_vhost_user_blk( 1512 num_queues: usize, 1513 readonly: bool, 1514 direct: bool, 1515 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>, 1516 ) { 1517 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1518 let guest = Guest::new(Box::new(focal)); 1519 1520 let kernel_path = direct_kernel_boot_path(); 1521 1522 let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap(); 1523 1524 let (blk_boot_params, daemon_child) = { 1525 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap(); 1526 // Start the daemon 1527 let (daemon_child, vubd_socket_path) = prepare_daemon( 1528 &guest.tmp_dir, 1529 disk_path.as_str(), 1530 num_queues, 1531 readonly, 1532 direct, 1533 ); 1534 1535 ( 1536 format!( 1537 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128", 1538 ), 1539 Some(daemon_child), 1540 ) 1541 }; 1542 1543 let mut child = GuestCommand::new(&guest) 1544 .args(["--cpus", format!("boot={num_queues}").as_str()]) 1545 .args(["--memory", "size=512M,shared=on"]) 1546 .args(["--kernel", kernel_path.to_str().unwrap()]) 1547 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1548 .args([ 1549 "--disk", 1550 blk_boot_params.as_str(), 1551 format!( 1552 "path={}", 1553 guest.disk_config.disk(DiskType::CloudInit).unwrap() 1554 ) 1555 .as_str(), 1556 ]) 1557 .default_net() 1558 .capture_output() 1559 .spawn() 1560 .unwrap(); 1561 1562 let r = std::panic::catch_unwind(|| { 1563 guest.wait_vm_boot(None).unwrap(); 1564 1565 // Just check the VM booted correctly. 1566 assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32); 1567 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 1568 }); 1569 kill_child(&mut child); 1570 let output = child.wait_with_output().unwrap(); 1571 1572 if let Some(mut daemon_child) = daemon_child { 1573 thread::sleep(std::time::Duration::new(5, 0)); 1574 let _ = daemon_child.kill(); 1575 let _ = daemon_child.wait(); 1576 } 1577 1578 handle_child_output(r, &output); 1579 } 1580 1581 fn _test_virtio_fs( 1582 prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String), 1583 hotplug: bool, 1584 pci_segment: Option<u16>, 1585 ) { 1586 #[cfg(target_arch = "aarch64")] 1587 let focal_image = if hotplug { 1588 FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string() 1589 } else { 1590 FOCAL_IMAGE_NAME.to_string() 1591 }; 1592 #[cfg(target_arch = "x86_64")] 1593 let focal_image = FOCAL_IMAGE_NAME.to_string(); 1594 let focal = UbuntuDiskConfig::new(focal_image); 1595 let guest = Guest::new(Box::new(focal)); 1596 let api_socket = temp_api_path(&guest.tmp_dir); 1597 1598 let mut workload_path = dirs::home_dir().unwrap(); 1599 workload_path.push("workloads"); 1600 1601 let mut shared_dir = workload_path; 1602 shared_dir.push("shared_dir"); 1603 1604 #[cfg(target_arch = "x86_64")] 1605 let kernel_path = direct_kernel_boot_path(); 1606 #[cfg(target_arch = "aarch64")] 1607 let kernel_path = if hotplug { 1608 edk2_path() 1609 } else { 1610 direct_kernel_boot_path() 1611 }; 1612 1613 let (mut daemon_child, virtiofsd_socket_path) = 1614 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap()); 1615 1616 let mut guest_command = GuestCommand::new(&guest); 1617 guest_command 1618 .args(["--cpus", "boot=1"]) 1619 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1620 .args(["--kernel", kernel_path.to_str().unwrap()]) 1621 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1622 .default_disks() 1623 .default_net() 1624 .args(["--api-socket", &api_socket]); 1625 if pci_segment.is_some() { 1626 guest_command.args([ 1627 "--platform", 1628 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 1629 ]); 1630 } 1631 1632 let fs_params = format!( 1633 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}", 1634 virtiofsd_socket_path, 1635 if let Some(pci_segment) = pci_segment { 1636 format!(",pci_segment={pci_segment}") 1637 } else { 1638 "".to_owned() 1639 } 1640 ); 1641 1642 if !hotplug { 1643 guest_command.args(["--fs", fs_params.as_str()]); 1644 } 1645 1646 let mut child = guest_command.capture_output().spawn().unwrap(); 1647 1648 let r = std::panic::catch_unwind(|| { 1649 guest.wait_vm_boot(None).unwrap(); 1650 1651 if hotplug { 1652 // Add fs to the VM 1653 let (cmd_success, cmd_output) = 1654 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params)); 1655 assert!(cmd_success); 1656 1657 if let Some(pci_segment) = pci_segment { 1658 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 1659 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 1660 ))); 1661 } else { 1662 assert!(String::from_utf8_lossy(&cmd_output) 1663 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}")); 1664 } 1665 1666 thread::sleep(std::time::Duration::new(10, 0)); 1667 } 1668 1669 // Mount shared directory through virtio_fs filesystem 1670 guest 1671 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/") 1672 .unwrap(); 1673 1674 // Check file1 exists and its content is "foo" 1675 assert_eq!( 1676 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1677 "foo" 1678 ); 1679 // Check file2 does not exist 1680 guest 1681 .ssh_command("[ ! -f 'mount_dir/file2' ] || true") 1682 .unwrap(); 1683 1684 // Check file3 exists and its content is "bar" 1685 assert_eq!( 1686 guest.ssh_command("cat mount_dir/file3").unwrap().trim(), 1687 "bar" 1688 ); 1689 1690 // ACPI feature is needed. 1691 #[cfg(target_arch = "x86_64")] 1692 { 1693 guest.enable_memory_hotplug(); 1694 1695 // Add RAM to the VM 1696 let desired_ram = 1024 << 20; 1697 resize_command(&api_socket, None, Some(desired_ram), None, None); 1698 1699 thread::sleep(std::time::Duration::new(30, 0)); 1700 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1701 1702 // After the resize, check again that file1 exists and its 1703 // content is "foo". 1704 assert_eq!( 1705 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1706 "foo" 1707 ); 1708 } 1709 1710 if hotplug { 1711 // Remove from VM 1712 guest.ssh_command("sudo umount mount_dir").unwrap(); 1713 assert!(remote_command(&api_socket, "remove-device", Some("myfs0"))); 1714 } 1715 }); 1716 1717 let (r, hotplug_daemon_child) = if r.is_ok() && hotplug { 1718 thread::sleep(std::time::Duration::new(10, 0)); 1719 let (daemon_child, virtiofsd_socket_path) = 1720 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap()); 1721 1722 let r = std::panic::catch_unwind(|| { 1723 thread::sleep(std::time::Duration::new(10, 0)); 1724 let fs_params = format!( 1725 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}", 1726 virtiofsd_socket_path, 1727 if let Some(pci_segment) = pci_segment { 1728 format!(",pci_segment={pci_segment}") 1729 } else { 1730 "".to_owned() 1731 } 1732 ); 1733 1734 // Add back and check it works 1735 let (cmd_success, cmd_output) = 1736 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params)); 1737 assert!(cmd_success); 1738 if let Some(pci_segment) = pci_segment { 1739 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 1740 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 1741 ))); 1742 } else { 1743 assert!(String::from_utf8_lossy(&cmd_output) 1744 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}")); 1745 } 1746 1747 thread::sleep(std::time::Duration::new(10, 0)); 1748 // Mount shared directory through virtio_fs filesystem 1749 guest 1750 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/") 1751 .unwrap(); 1752 1753 // Check file1 exists and its content is "foo" 1754 assert_eq!( 1755 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1756 "foo" 1757 ); 1758 }); 1759 1760 (r, Some(daemon_child)) 1761 } else { 1762 (r, None) 1763 }; 1764 1765 kill_child(&mut child); 1766 let output = child.wait_with_output().unwrap(); 1767 1768 let _ = daemon_child.kill(); 1769 let _ = daemon_child.wait(); 1770 1771 if let Some(mut daemon_child) = hotplug_daemon_child { 1772 let _ = daemon_child.kill(); 1773 let _ = daemon_child.wait(); 1774 } 1775 1776 handle_child_output(r, &output); 1777 } 1778 1779 fn test_virtio_pmem(discard_writes: bool, specify_size: bool) { 1780 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1781 let guest = Guest::new(Box::new(focal)); 1782 1783 let kernel_path = direct_kernel_boot_path(); 1784 1785 let pmem_temp_file = TempFile::new().unwrap(); 1786 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 1787 1788 std::process::Command::new("mkfs.ext4") 1789 .arg(pmem_temp_file.as_path()) 1790 .output() 1791 .expect("Expect creating disk image to succeed"); 1792 1793 let mut child = GuestCommand::new(&guest) 1794 .args(["--cpus", "boot=1"]) 1795 .args(["--memory", "size=512M"]) 1796 .args(["--kernel", kernel_path.to_str().unwrap()]) 1797 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1798 .default_disks() 1799 .default_net() 1800 .args([ 1801 "--pmem", 1802 format!( 1803 "file={}{}{}", 1804 pmem_temp_file.as_path().to_str().unwrap(), 1805 if specify_size { ",size=128M" } else { "" }, 1806 if discard_writes { 1807 ",discard_writes=on" 1808 } else { 1809 "" 1810 } 1811 ) 1812 .as_str(), 1813 ]) 1814 .capture_output() 1815 .spawn() 1816 .unwrap(); 1817 1818 let r = std::panic::catch_unwind(|| { 1819 guest.wait_vm_boot(None).unwrap(); 1820 1821 // Check for the presence of /dev/pmem0 1822 assert_eq!( 1823 guest.ssh_command("ls /dev/pmem0").unwrap().trim(), 1824 "/dev/pmem0" 1825 ); 1826 1827 // Check changes persist after reboot 1828 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1829 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 1830 guest 1831 .ssh_command("echo test123 | sudo tee /mnt/test") 1832 .unwrap(); 1833 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 1834 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 1835 1836 guest.reboot_linux(0, None); 1837 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1838 assert_eq!( 1839 guest 1840 .ssh_command("sudo cat /mnt/test || true") 1841 .unwrap() 1842 .trim(), 1843 if discard_writes { "" } else { "test123" } 1844 ); 1845 }); 1846 1847 kill_child(&mut child); 1848 let output = child.wait_with_output().unwrap(); 1849 1850 handle_child_output(r, &output); 1851 } 1852 1853 fn get_fd_count(pid: u32) -> usize { 1854 fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count() 1855 } 1856 1857 fn _test_virtio_vsock(hotplug: bool) { 1858 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1859 let guest = Guest::new(Box::new(focal)); 1860 1861 #[cfg(target_arch = "x86_64")] 1862 let kernel_path = direct_kernel_boot_path(); 1863 #[cfg(target_arch = "aarch64")] 1864 let kernel_path = if hotplug { 1865 edk2_path() 1866 } else { 1867 direct_kernel_boot_path() 1868 }; 1869 1870 let socket = temp_vsock_path(&guest.tmp_dir); 1871 let api_socket = temp_api_path(&guest.tmp_dir); 1872 1873 let mut cmd = GuestCommand::new(&guest); 1874 cmd.args(["--api-socket", &api_socket]); 1875 cmd.args(["--cpus", "boot=1"]); 1876 cmd.args(["--memory", "size=512M"]); 1877 cmd.args(["--kernel", kernel_path.to_str().unwrap()]); 1878 cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]); 1879 cmd.default_disks(); 1880 cmd.default_net(); 1881 1882 if !hotplug { 1883 cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]); 1884 } 1885 1886 let mut child = cmd.capture_output().spawn().unwrap(); 1887 1888 let r = std::panic::catch_unwind(|| { 1889 guest.wait_vm_boot(None).unwrap(); 1890 1891 if hotplug { 1892 let (cmd_success, cmd_output) = remote_command_w_output( 1893 &api_socket, 1894 "add-vsock", 1895 Some(format!("cid=3,socket={socket},id=test0").as_str()), 1896 ); 1897 assert!(cmd_success); 1898 assert!(String::from_utf8_lossy(&cmd_output) 1899 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 1900 thread::sleep(std::time::Duration::new(10, 0)); 1901 // Check adding a second one fails 1902 assert!(!remote_command( 1903 &api_socket, 1904 "add-vsock", 1905 Some("cid=1234,socket=/tmp/fail") 1906 )); 1907 } 1908 1909 // Validate vsock works as expected. 1910 guest.check_vsock(socket.as_str()); 1911 guest.reboot_linux(0, None); 1912 // Validate vsock still works after a reboot. 1913 guest.check_vsock(socket.as_str()); 1914 1915 if hotplug { 1916 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 1917 } 1918 }); 1919 1920 kill_child(&mut child); 1921 let output = child.wait_with_output().unwrap(); 1922 1923 handle_child_output(r, &output); 1924 } 1925 1926 fn get_ksm_pages_shared() -> u32 { 1927 fs::read_to_string("/sys/kernel/mm/ksm/pages_shared") 1928 .unwrap() 1929 .trim() 1930 .parse::<u32>() 1931 .unwrap() 1932 } 1933 1934 fn test_memory_mergeable(mergeable: bool) { 1935 let memory_param = if mergeable { 1936 "mergeable=on" 1937 } else { 1938 "mergeable=off" 1939 }; 1940 1941 // We are assuming the rest of the system in our CI is not using mergeable memory 1942 let ksm_ps_init = get_ksm_pages_shared(); 1943 assert!(ksm_ps_init == 0); 1944 1945 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1946 let guest1 = Guest::new(Box::new(focal1)); 1947 let mut child1 = GuestCommand::new(&guest1) 1948 .args(["--cpus", "boot=1"]) 1949 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1950 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1951 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1952 .default_disks() 1953 .args(["--net", guest1.default_net_string().as_str()]) 1954 .args(["--serial", "tty", "--console", "off"]) 1955 .capture_output() 1956 .spawn() 1957 .unwrap(); 1958 1959 let r = std::panic::catch_unwind(|| { 1960 guest1.wait_vm_boot(None).unwrap(); 1961 }); 1962 if r.is_err() { 1963 kill_child(&mut child1); 1964 let output = child1.wait_with_output().unwrap(); 1965 handle_child_output(r, &output); 1966 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 1967 } 1968 1969 let ksm_ps_guest1 = get_ksm_pages_shared(); 1970 1971 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1972 let guest2 = Guest::new(Box::new(focal2)); 1973 let mut child2 = GuestCommand::new(&guest2) 1974 .args(["--cpus", "boot=1"]) 1975 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1976 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1977 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1978 .default_disks() 1979 .args(["--net", guest2.default_net_string().as_str()]) 1980 .args(["--serial", "tty", "--console", "off"]) 1981 .capture_output() 1982 .spawn() 1983 .unwrap(); 1984 1985 let r = std::panic::catch_unwind(|| { 1986 guest2.wait_vm_boot(None).unwrap(); 1987 let ksm_ps_guest2 = get_ksm_pages_shared(); 1988 1989 if mergeable { 1990 println!( 1991 "ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'" 1992 ); 1993 // We are expecting the number of shared pages to increase as the number of VM increases 1994 assert!(ksm_ps_guest1 < ksm_ps_guest2); 1995 } else { 1996 assert!(ksm_ps_guest1 == 0); 1997 assert!(ksm_ps_guest2 == 0); 1998 } 1999 }); 2000 2001 kill_child(&mut child1); 2002 kill_child(&mut child2); 2003 2004 let output = child1.wait_with_output().unwrap(); 2005 child2.wait().unwrap(); 2006 2007 handle_child_output(r, &output); 2008 } 2009 2010 fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> { 2011 let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap(); 2012 let reader = io::BufReader::new(smaps); 2013 2014 let mut skip_map: bool = false; 2015 let mut region_name: String = "".to_string(); 2016 let mut region_maps = HashMap::new(); 2017 for line in reader.lines() { 2018 let l = line.unwrap(); 2019 2020 if l.contains('-') { 2021 let values: Vec<&str> = l.split_whitespace().collect(); 2022 region_name = values.last().unwrap().trim().to_string(); 2023 if region_name == "0" { 2024 region_name = "anonymous".to_string() 2025 } 2026 } 2027 2028 // Each section begins with something that looks like: 2029 // Size: 2184 kB 2030 if l.starts_with("Size:") { 2031 let values: Vec<&str> = l.split_whitespace().collect(); 2032 let map_size = values[1].parse::<u32>().unwrap(); 2033 // We skip the assigned guest RAM map, its RSS is only 2034 // dependent on the guest actual memory usage. 2035 // Everything else can be added to the VMM overhead. 2036 skip_map = map_size >= guest_memory_size; 2037 continue; 2038 } 2039 2040 // If this is a map we're taking into account, then we only 2041 // count the RSS. The sum of all counted RSS is the VMM overhead. 2042 if !skip_map && l.starts_with("Rss:") { 2043 let values: Vec<&str> = l.split_whitespace().collect(); 2044 let value = values[1].trim().parse::<u32>().unwrap(); 2045 *region_maps.entry(region_name.clone()).or_insert(0) += value; 2046 } 2047 } 2048 2049 region_maps 2050 } 2051 2052 fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 { 2053 let mut total = 0; 2054 2055 for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) { 2056 eprintln!("{region_name}: {value}"); 2057 total += value; 2058 } 2059 2060 total 2061 } 2062 2063 fn process_rss_kib(pid: u32) -> usize { 2064 let command = format!("ps -q {pid} -o rss="); 2065 let rss = exec_host_command_output(&command); 2066 String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap() 2067 } 2068 2069 // 10MB is our maximum accepted overhead. 2070 const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024; 2071 2072 #[derive(PartialEq, Eq, PartialOrd)] 2073 struct Counters { 2074 rx_bytes: u64, 2075 rx_frames: u64, 2076 tx_bytes: u64, 2077 tx_frames: u64, 2078 read_bytes: u64, 2079 write_bytes: u64, 2080 read_ops: u64, 2081 write_ops: u64, 2082 } 2083 2084 fn get_counters(api_socket: &str) -> Counters { 2085 // Get counters 2086 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None); 2087 assert!(cmd_success); 2088 2089 let counters: HashMap<&str, HashMap<&str, u64>> = 2090 serde_json::from_slice(&cmd_output).unwrap_or_default(); 2091 2092 let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap(); 2093 let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap(); 2094 let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap(); 2095 let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap(); 2096 2097 let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap(); 2098 let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap(); 2099 let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap(); 2100 let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap(); 2101 2102 Counters { 2103 rx_bytes, 2104 rx_frames, 2105 tx_bytes, 2106 tx_frames, 2107 read_bytes, 2108 write_bytes, 2109 read_ops, 2110 write_ops, 2111 } 2112 } 2113 2114 fn pty_read(mut pty: std::fs::File) -> Receiver<String> { 2115 let (tx, rx) = mpsc::channel::<String>(); 2116 thread::spawn(move || loop { 2117 thread::sleep(std::time::Duration::new(1, 0)); 2118 let mut buf = [0; 512]; 2119 match pty.read(&mut buf) { 2120 Ok(_bytes) => { 2121 let output = std::str::from_utf8(&buf).unwrap().to_string(); 2122 match tx.send(output) { 2123 Ok(_) => (), 2124 Err(_) => break, 2125 } 2126 } 2127 Err(_) => break, 2128 } 2129 }); 2130 rx 2131 } 2132 2133 fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf { 2134 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2135 assert!(cmd_success); 2136 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2137 assert_eq!("Pty", info["config"][pty_type]["mode"]); 2138 PathBuf::from( 2139 info["config"][pty_type]["file"] 2140 .as_str() 2141 .expect("Missing pty path"), 2142 ) 2143 } 2144 2145 // VFIO test network setup. 2146 // We reserve a different IP class for it: 172.18.0.0/24. 2147 #[cfg(target_arch = "x86_64")] 2148 fn setup_vfio_network_interfaces() { 2149 // 'vfio-br0' 2150 assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success()); 2151 assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success()); 2152 assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success()); 2153 // 'vfio-tap0' 2154 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success()); 2155 assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success()); 2156 assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success()); 2157 // 'vfio-tap1' 2158 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success()); 2159 assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success()); 2160 assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success()); 2161 // 'vfio-tap2' 2162 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success()); 2163 assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success()); 2164 assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success()); 2165 // 'vfio-tap3' 2166 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success()); 2167 assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success()); 2168 assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success()); 2169 } 2170 2171 // Tear VFIO test network down 2172 #[cfg(target_arch = "x86_64")] 2173 fn cleanup_vfio_network_interfaces() { 2174 assert!(exec_host_command_status("sudo ip link del vfio-br0").success()); 2175 assert!(exec_host_command_status("sudo ip link del vfio-tap0").success()); 2176 assert!(exec_host_command_status("sudo ip link del vfio-tap1").success()); 2177 assert!(exec_host_command_status("sudo ip link del vfio-tap2").success()); 2178 assert!(exec_host_command_status("sudo ip link del vfio-tap3").success()); 2179 } 2180 2181 fn balloon_size(api_socket: &str) -> u64 { 2182 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2183 assert!(cmd_success); 2184 2185 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2186 let total_mem = &info["config"]["memory"]["size"] 2187 .to_string() 2188 .parse::<u64>() 2189 .unwrap(); 2190 let actual_mem = &info["memory_actual_size"] 2191 .to_string() 2192 .parse::<u64>() 2193 .unwrap(); 2194 total_mem - actual_mem 2195 } 2196 2197 fn vm_state(api_socket: &str) -> String { 2198 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2199 assert!(cmd_success); 2200 2201 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2202 let state = &info["state"].as_str().unwrap(); 2203 2204 state.to_string() 2205 } 2206 2207 // This test validates that it can find the virtio-iommu device at first. 2208 // It also verifies that both disks and the network card are attached to 2209 // the virtual IOMMU by looking at /sys/kernel/iommu_groups directory. 2210 // The last interesting part of this test is that it exercises the network 2211 // interface attached to the virtual IOMMU since this is the one used to 2212 // send all commands through SSH. 2213 fn _test_virtio_iommu(acpi: bool) { 2214 // Virtio-iommu support is ready in recent kernel (v5.14). But the kernel in 2215 // Focal image is still old. 2216 // So if ACPI is enabled on AArch64, we use a modified Focal image in which 2217 // the kernel binary has been updated. 2218 #[cfg(target_arch = "aarch64")] 2219 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 2220 #[cfg(target_arch = "x86_64")] 2221 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2222 let focal = UbuntuDiskConfig::new(focal_image); 2223 let guest = Guest::new(Box::new(focal)); 2224 2225 #[cfg(target_arch = "x86_64")] 2226 let kernel_path = direct_kernel_boot_path(); 2227 #[cfg(target_arch = "aarch64")] 2228 let kernel_path = if acpi { 2229 edk2_path() 2230 } else { 2231 direct_kernel_boot_path() 2232 }; 2233 2234 let mut child = GuestCommand::new(&guest) 2235 .args(["--cpus", "boot=1"]) 2236 .args(["--memory", "size=512M"]) 2237 .args(["--kernel", kernel_path.to_str().unwrap()]) 2238 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2239 .args([ 2240 "--disk", 2241 format!( 2242 "path={},iommu=on", 2243 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2244 ) 2245 .as_str(), 2246 format!( 2247 "path={},iommu=on", 2248 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2249 ) 2250 .as_str(), 2251 ]) 2252 .args(["--net", guest.default_net_string_w_iommu().as_str()]) 2253 .capture_output() 2254 .spawn() 2255 .unwrap(); 2256 2257 let r = std::panic::catch_unwind(|| { 2258 guest.wait_vm_boot(None).unwrap(); 2259 2260 // Verify the virtio-iommu device is present. 2261 assert!(guest 2262 .does_device_vendor_pair_match("0x1057", "0x1af4") 2263 .unwrap_or_default()); 2264 2265 // On AArch64, if the guest system boots from FDT, the behavior of IOMMU is a bit 2266 // different with ACPI. 2267 // All devices on the PCI bus will be attached to the virtual IOMMU, except the 2268 // virtio-iommu device itself. So these devices will all be added to IOMMU groups, 2269 // and appear under folder '/sys/kernel/iommu_groups/'. 2270 // The result is, in the case of FDT, IOMMU group '0' contains "0000:00:01.0" 2271 // which is the console. The first disk "0000:00:02.0" is in group '1'. 2272 // While on ACPI, console device is not attached to IOMMU. So the IOMMU group '0' 2273 // contains "0000:00:02.0" which is the first disk. 2274 // 2275 // Verify the iommu group of the first disk. 2276 let iommu_group = !acpi as i32; 2277 assert_eq!( 2278 guest 2279 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2280 .unwrap() 2281 .trim(), 2282 "0000:00:02.0" 2283 ); 2284 2285 // Verify the iommu group of the second disk. 2286 let iommu_group = if acpi { 1 } else { 2 }; 2287 assert_eq!( 2288 guest 2289 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2290 .unwrap() 2291 .trim(), 2292 "0000:00:03.0" 2293 ); 2294 2295 // Verify the iommu group of the network card. 2296 let iommu_group = if acpi { 2 } else { 3 }; 2297 assert_eq!( 2298 guest 2299 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2300 .unwrap() 2301 .trim(), 2302 "0000:00:04.0" 2303 ); 2304 }); 2305 2306 kill_child(&mut child); 2307 let output = child.wait_with_output().unwrap(); 2308 2309 handle_child_output(r, &output); 2310 } 2311 2312 fn get_reboot_count(guest: &Guest) -> u32 { 2313 guest 2314 .ssh_command("sudo last | grep -c reboot") 2315 .unwrap() 2316 .trim() 2317 .parse::<u32>() 2318 .unwrap_or_default() 2319 } 2320 2321 fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) { 2322 // Check for PCI device 2323 assert!(guest 2324 .does_device_vendor_pair_match("0x1063", "0x1af4") 2325 .unwrap_or_default()); 2326 2327 // Enable systemd watchdog 2328 guest 2329 .ssh_command(&format!( 2330 "echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf" 2331 )) 2332 .unwrap(); 2333 2334 guest.ssh_command("sudo systemctl daemon-reexec").unwrap(); 2335 } 2336 2337 fn make_guest_panic(guest: &Guest) { 2338 // Check for pvpanic device 2339 assert!(guest 2340 .does_device_vendor_pair_match("0x0011", "0x1b36") 2341 .unwrap_or_default()); 2342 2343 // Trigger guest a panic 2344 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 2345 } 2346 2347 mod common_parallel { 2348 use std::fs::OpenOptions; 2349 use std::io::SeekFrom; 2350 2351 use crate::*; 2352 2353 #[test] 2354 #[cfg(target_arch = "x86_64")] 2355 fn test_focal_hypervisor_fw() { 2356 test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME) 2357 } 2358 2359 #[test] 2360 #[cfg(target_arch = "x86_64")] 2361 fn test_focal_ovmf() { 2362 test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME) 2363 } 2364 2365 #[cfg(target_arch = "x86_64")] 2366 fn test_simple_launch(fw_path: String, disk_path: &str) { 2367 let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string())); 2368 let guest = Guest::new(disk_config); 2369 let event_path = temp_event_monitor_path(&guest.tmp_dir); 2370 2371 let mut child = GuestCommand::new(&guest) 2372 .args(["--cpus", "boot=1"]) 2373 .args(["--memory", "size=512M"]) 2374 .args(["--kernel", fw_path.as_str()]) 2375 .default_disks() 2376 .default_net() 2377 .args(["--serial", "tty", "--console", "off"]) 2378 .args(["--event-monitor", format!("path={event_path}").as_str()]) 2379 .capture_output() 2380 .spawn() 2381 .unwrap(); 2382 2383 let r = std::panic::catch_unwind(|| { 2384 guest.wait_vm_boot(Some(120)).unwrap(); 2385 2386 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2387 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2388 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 2389 2390 let expected_sequential_events = [ 2391 &MetaEvent { 2392 event: "starting".to_string(), 2393 device_id: None, 2394 }, 2395 &MetaEvent { 2396 event: "booting".to_string(), 2397 device_id: None, 2398 }, 2399 &MetaEvent { 2400 event: "booted".to_string(), 2401 device_id: None, 2402 }, 2403 &MetaEvent { 2404 event: "activated".to_string(), 2405 device_id: Some("_disk0".to_string()), 2406 }, 2407 &MetaEvent { 2408 event: "reset".to_string(), 2409 device_id: Some("_disk0".to_string()), 2410 }, 2411 ]; 2412 assert!(check_sequential_events( 2413 &expected_sequential_events, 2414 &event_path 2415 )); 2416 2417 // It's been observed on the Bionic image that udev and snapd 2418 // services can cause some delay in the VM's shutdown. Disabling 2419 // them improves the reliability of this test. 2420 let _ = guest.ssh_command("sudo systemctl disable udev"); 2421 let _ = guest.ssh_command("sudo systemctl stop udev"); 2422 let _ = guest.ssh_command("sudo systemctl disable snapd"); 2423 let _ = guest.ssh_command("sudo systemctl stop snapd"); 2424 2425 guest.ssh_command("sudo poweroff").unwrap(); 2426 thread::sleep(std::time::Duration::new(20, 0)); 2427 let latest_events = [ 2428 &MetaEvent { 2429 event: "shutdown".to_string(), 2430 device_id: None, 2431 }, 2432 &MetaEvent { 2433 event: "deleted".to_string(), 2434 device_id: None, 2435 }, 2436 &MetaEvent { 2437 event: "shutdown".to_string(), 2438 device_id: None, 2439 }, 2440 ]; 2441 assert!(check_latest_events_exact(&latest_events, &event_path)); 2442 }); 2443 2444 kill_child(&mut child); 2445 let output = child.wait_with_output().unwrap(); 2446 2447 handle_child_output(r, &output); 2448 } 2449 2450 #[test] 2451 fn test_multi_cpu() { 2452 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 2453 let jammy = UbuntuDiskConfig::new(jammy_image); 2454 let guest = Guest::new(Box::new(jammy)); 2455 2456 let mut cmd = GuestCommand::new(&guest); 2457 cmd.args(["--cpus", "boot=2,max=4"]) 2458 .args(["--memory", "size=512M"]) 2459 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2460 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2461 .capture_output() 2462 .default_disks() 2463 .default_net(); 2464 2465 let mut child = cmd.spawn().unwrap(); 2466 2467 let r = std::panic::catch_unwind(|| { 2468 guest.wait_vm_boot(Some(120)).unwrap(); 2469 2470 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 2471 2472 assert_eq!( 2473 guest 2474 .ssh_command( 2475 r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""# 2476 ) 2477 .unwrap() 2478 .trim(), 2479 "smp: Brought up 1 node, 2 CPUs" 2480 ); 2481 }); 2482 2483 kill_child(&mut child); 2484 let output = child.wait_with_output().unwrap(); 2485 2486 handle_child_output(r, &output); 2487 } 2488 2489 #[test] 2490 fn test_cpu_topology_421() { 2491 test_cpu_topology(4, 2, 1, false); 2492 } 2493 2494 #[test] 2495 fn test_cpu_topology_142() { 2496 test_cpu_topology(1, 4, 2, false); 2497 } 2498 2499 #[test] 2500 fn test_cpu_topology_262() { 2501 test_cpu_topology(2, 6, 2, false); 2502 } 2503 2504 #[test] 2505 #[cfg(target_arch = "x86_64")] 2506 #[cfg(not(feature = "mshv"))] 2507 fn test_cpu_physical_bits() { 2508 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2509 let guest = Guest::new(Box::new(focal)); 2510 let max_phys_bits: u8 = 36; 2511 let mut child = GuestCommand::new(&guest) 2512 .args(["--cpus", &format!("max_phys_bits={max_phys_bits}")]) 2513 .args(["--memory", "size=512M"]) 2514 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2515 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2516 .default_disks() 2517 .default_net() 2518 .capture_output() 2519 .spawn() 2520 .unwrap(); 2521 2522 let r = std::panic::catch_unwind(|| { 2523 guest.wait_vm_boot(None).unwrap(); 2524 2525 assert!( 2526 guest 2527 .ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"") 2528 .unwrap() 2529 .trim() 2530 .parse::<u8>() 2531 .unwrap_or(max_phys_bits + 1) <= max_phys_bits, 2532 ); 2533 }); 2534 2535 kill_child(&mut child); 2536 let output = child.wait_with_output().unwrap(); 2537 2538 handle_child_output(r, &output); 2539 } 2540 2541 #[test] 2542 fn test_cpu_affinity() { 2543 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2544 let guest = Guest::new(Box::new(focal)); 2545 2546 // We need the host to have at least 4 CPUs if we want to be able 2547 // to run this test. 2548 let host_cpus_count = exec_host_command_output("nproc"); 2549 assert!( 2550 String::from_utf8_lossy(&host_cpus_count.stdout) 2551 .trim() 2552 .parse::<u16>() 2553 .unwrap_or(0) 2554 >= 4 2555 ); 2556 2557 let mut child = GuestCommand::new(&guest) 2558 .args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"]) 2559 .args(["--memory", "size=512M"]) 2560 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2561 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2562 .default_disks() 2563 .default_net() 2564 .capture_output() 2565 .spawn() 2566 .unwrap(); 2567 2568 let r = std::panic::catch_unwind(|| { 2569 guest.wait_vm_boot(None).unwrap(); 2570 let pid = child.id(); 2571 let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2572 assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2"); 2573 let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2574 assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3"); 2575 }); 2576 2577 kill_child(&mut child); 2578 let output = child.wait_with_output().unwrap(); 2579 handle_child_output(r, &output); 2580 } 2581 2582 #[test] 2583 fn test_virtio_queue_affinity() { 2584 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2585 let guest = Guest::new(Box::new(focal)); 2586 2587 // We need the host to have at least 4 CPUs if we want to be able 2588 // to run this test. 2589 let host_cpus_count = exec_host_command_output("nproc"); 2590 assert!( 2591 String::from_utf8_lossy(&host_cpus_count.stdout) 2592 .trim() 2593 .parse::<u16>() 2594 .unwrap_or(0) 2595 >= 4 2596 ); 2597 2598 let mut child = GuestCommand::new(&guest) 2599 .args(["--cpus", "boot=4"]) 2600 .args(["--memory", "size=512M"]) 2601 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2602 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2603 .args([ 2604 "--disk", 2605 format!( 2606 "path={}", 2607 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2608 ) 2609 .as_str(), 2610 format!( 2611 "path={},num_queues=4,queue_affinity=[0@[0,2],1@[1,3],2@[1],3@[3]]", 2612 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2613 ) 2614 .as_str(), 2615 ]) 2616 .default_net() 2617 .capture_output() 2618 .spawn() 2619 .unwrap(); 2620 2621 let r = std::panic::catch_unwind(|| { 2622 guest.wait_vm_boot(None).unwrap(); 2623 let pid = child.id(); 2624 let taskset_q0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2625 assert_eq!(String::from_utf8_lossy(&taskset_q0.stdout).trim(), "0,2"); 2626 let taskset_q1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2627 assert_eq!(String::from_utf8_lossy(&taskset_q1.stdout).trim(), "1,3"); 2628 let taskset_q2 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q2 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2629 assert_eq!(String::from_utf8_lossy(&taskset_q2.stdout).trim(), "1"); 2630 let taskset_q3 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q3 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2631 assert_eq!(String::from_utf8_lossy(&taskset_q3.stdout).trim(), "3"); 2632 }); 2633 2634 kill_child(&mut child); 2635 let output = child.wait_with_output().unwrap(); 2636 handle_child_output(r, &output); 2637 } 2638 2639 #[test] 2640 #[cfg(not(feature = "mshv"))] 2641 fn test_large_vm() { 2642 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2643 let guest = Guest::new(Box::new(focal)); 2644 let mut cmd = GuestCommand::new(&guest); 2645 cmd.args(["--cpus", "boot=48"]) 2646 .args(["--memory", "size=5120M"]) 2647 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2648 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2649 .args(["--serial", "tty"]) 2650 .args(["--console", "off"]) 2651 .capture_output() 2652 .default_disks() 2653 .default_net(); 2654 2655 let mut child = cmd.spawn().unwrap(); 2656 2657 guest.wait_vm_boot(None).unwrap(); 2658 2659 let r = std::panic::catch_unwind(|| { 2660 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48); 2661 assert_eq!( 2662 guest 2663 .ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"") 2664 .unwrap() 2665 .trim(), 2666 "0-47" 2667 ); 2668 2669 assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000); 2670 }); 2671 2672 kill_child(&mut child); 2673 let output = child.wait_with_output().unwrap(); 2674 2675 handle_child_output(r, &output); 2676 } 2677 2678 #[test] 2679 #[cfg(not(feature = "mshv"))] 2680 fn test_huge_memory() { 2681 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2682 let guest = Guest::new(Box::new(focal)); 2683 let mut cmd = GuestCommand::new(&guest); 2684 cmd.args(["--cpus", "boot=1"]) 2685 .args(["--memory", "size=128G"]) 2686 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2687 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2688 .capture_output() 2689 .default_disks() 2690 .default_net(); 2691 2692 let mut child = cmd.spawn().unwrap(); 2693 2694 guest.wait_vm_boot(Some(120)).unwrap(); 2695 2696 let r = std::panic::catch_unwind(|| { 2697 assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000); 2698 }); 2699 2700 kill_child(&mut child); 2701 let output = child.wait_with_output().unwrap(); 2702 2703 handle_child_output(r, &output); 2704 } 2705 2706 #[test] 2707 fn test_power_button() { 2708 _test_power_button(false); 2709 } 2710 2711 #[test] 2712 #[cfg(not(feature = "mshv"))] 2713 fn test_user_defined_memory_regions() { 2714 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2715 let guest = Guest::new(Box::new(focal)); 2716 let api_socket = temp_api_path(&guest.tmp_dir); 2717 2718 let kernel_path = direct_kernel_boot_path(); 2719 2720 let mut child = GuestCommand::new(&guest) 2721 .args(["--cpus", "boot=1"]) 2722 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 2723 .args([ 2724 "--memory-zone", 2725 "id=mem0,size=1G,hotplug_size=2G", 2726 "id=mem1,size=1G,shared=on", 2727 "id=mem2,size=1G,host_numa_node=0,hotplug_size=2G", 2728 ]) 2729 .args(["--kernel", kernel_path.to_str().unwrap()]) 2730 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2731 .args(["--api-socket", &api_socket]) 2732 .capture_output() 2733 .default_disks() 2734 .default_net() 2735 .spawn() 2736 .unwrap(); 2737 2738 let r = std::panic::catch_unwind(|| { 2739 guest.wait_vm_boot(None).unwrap(); 2740 2741 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 2742 2743 guest.enable_memory_hotplug(); 2744 2745 resize_zone_command(&api_socket, "mem0", "3G"); 2746 thread::sleep(std::time::Duration::new(5, 0)); 2747 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2748 resize_zone_command(&api_socket, "mem2", "3G"); 2749 thread::sleep(std::time::Duration::new(5, 0)); 2750 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 2751 resize_zone_command(&api_socket, "mem0", "2G"); 2752 thread::sleep(std::time::Duration::new(5, 0)); 2753 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 2754 resize_zone_command(&api_socket, "mem2", "2G"); 2755 thread::sleep(std::time::Duration::new(5, 0)); 2756 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2757 2758 guest.reboot_linux(0, None); 2759 2760 // Check the amount of RAM after reboot 2761 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2762 assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000); 2763 2764 // Check if we can still resize down to the initial 'boot'size 2765 resize_zone_command(&api_socket, "mem0", "1G"); 2766 thread::sleep(std::time::Duration::new(5, 0)); 2767 assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000); 2768 resize_zone_command(&api_socket, "mem2", "1G"); 2769 thread::sleep(std::time::Duration::new(5, 0)); 2770 assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000); 2771 }); 2772 2773 kill_child(&mut child); 2774 let output = child.wait_with_output().unwrap(); 2775 2776 handle_child_output(r, &output); 2777 } 2778 2779 #[test] 2780 #[cfg(not(feature = "mshv"))] 2781 fn test_guest_numa_nodes() { 2782 _test_guest_numa_nodes(false); 2783 } 2784 2785 #[test] 2786 #[cfg(target_arch = "x86_64")] 2787 fn test_iommu_segments() { 2788 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2789 let focal = UbuntuDiskConfig::new(focal_image); 2790 let guest = Guest::new(Box::new(focal)); 2791 2792 // Prepare another disk file for the virtio-disk device 2793 let test_disk_path = String::from( 2794 guest 2795 .tmp_dir 2796 .as_path() 2797 .join("test-disk.raw") 2798 .to_str() 2799 .unwrap(), 2800 ); 2801 assert!( 2802 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2803 ); 2804 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2805 2806 let api_socket = temp_api_path(&guest.tmp_dir); 2807 let mut cmd = GuestCommand::new(&guest); 2808 2809 cmd.args(["--cpus", "boot=1"]) 2810 .args(["--api-socket", &api_socket]) 2811 .args(["--memory", "size=512M"]) 2812 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2813 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2814 .args([ 2815 "--platform", 2816 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"), 2817 ]) 2818 .default_disks() 2819 .capture_output() 2820 .default_net(); 2821 2822 let mut child = cmd.spawn().unwrap(); 2823 2824 guest.wait_vm_boot(None).unwrap(); 2825 2826 let r = std::panic::catch_unwind(|| { 2827 let (cmd_success, cmd_output) = remote_command_w_output( 2828 &api_socket, 2829 "add-disk", 2830 Some( 2831 format!( 2832 "path={},id=test0,pci_segment=1,iommu=on", 2833 test_disk_path.as_str() 2834 ) 2835 .as_str(), 2836 ), 2837 ); 2838 assert!(cmd_success); 2839 assert!(String::from_utf8_lossy(&cmd_output) 2840 .contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}")); 2841 2842 // Check IOMMU setup 2843 assert!(guest 2844 .does_device_vendor_pair_match("0x1057", "0x1af4") 2845 .unwrap_or_default()); 2846 assert_eq!( 2847 guest 2848 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 2849 .unwrap() 2850 .trim(), 2851 "0001:00:01.0" 2852 ); 2853 }); 2854 2855 kill_child(&mut child); 2856 let output = child.wait_with_output().unwrap(); 2857 2858 handle_child_output(r, &output); 2859 } 2860 2861 #[test] 2862 fn test_pci_msi() { 2863 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2864 let guest = Guest::new(Box::new(focal)); 2865 let mut cmd = GuestCommand::new(&guest); 2866 cmd.args(["--cpus", "boot=1"]) 2867 .args(["--memory", "size=512M"]) 2868 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2869 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2870 .capture_output() 2871 .default_disks() 2872 .default_net(); 2873 2874 let mut child = cmd.spawn().unwrap(); 2875 2876 guest.wait_vm_boot(None).unwrap(); 2877 2878 #[cfg(target_arch = "x86_64")] 2879 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 2880 #[cfg(target_arch = "aarch64")] 2881 let grep_cmd = "grep -c ITS-PCI-MSIX /proc/interrupts"; 2882 2883 let r = std::panic::catch_unwind(|| { 2884 assert_eq!( 2885 guest 2886 .ssh_command(grep_cmd) 2887 .unwrap() 2888 .trim() 2889 .parse::<u32>() 2890 .unwrap_or_default(), 2891 12 2892 ); 2893 }); 2894 2895 kill_child(&mut child); 2896 let output = child.wait_with_output().unwrap(); 2897 2898 handle_child_output(r, &output); 2899 } 2900 2901 #[test] 2902 fn test_virtio_net_ctrl_queue() { 2903 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2904 let guest = Guest::new(Box::new(focal)); 2905 let mut cmd = GuestCommand::new(&guest); 2906 cmd.args(["--cpus", "boot=1"]) 2907 .args(["--memory", "size=512M"]) 2908 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2909 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2910 .args(["--net", guest.default_net_string_w_mtu(3000).as_str()]) 2911 .capture_output() 2912 .default_disks(); 2913 2914 let mut child = cmd.spawn().unwrap(); 2915 2916 guest.wait_vm_boot(None).unwrap(); 2917 2918 #[cfg(target_arch = "aarch64")] 2919 let iface = "enp0s4"; 2920 #[cfg(target_arch = "x86_64")] 2921 let iface = "ens4"; 2922 2923 let r = std::panic::catch_unwind(|| { 2924 assert_eq!( 2925 guest 2926 .ssh_command( 2927 format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str() 2928 ) 2929 .unwrap() 2930 .trim(), 2931 "success" 2932 ); 2933 assert_eq!( 2934 guest 2935 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str()) 2936 .unwrap() 2937 .trim(), 2938 "3000" 2939 ); 2940 }); 2941 2942 kill_child(&mut child); 2943 let output = child.wait_with_output().unwrap(); 2944 2945 handle_child_output(r, &output); 2946 } 2947 2948 #[test] 2949 fn test_pci_multiple_segments() { 2950 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2951 let guest = Guest::new(Box::new(focal)); 2952 2953 // Prepare another disk file for the virtio-disk device 2954 let test_disk_path = String::from( 2955 guest 2956 .tmp_dir 2957 .as_path() 2958 .join("test-disk.raw") 2959 .to_str() 2960 .unwrap(), 2961 ); 2962 assert!( 2963 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2964 ); 2965 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2966 2967 let mut cmd = GuestCommand::new(&guest); 2968 cmd.args(["--cpus", "boot=1"]) 2969 .args(["--memory", "size=512M"]) 2970 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2971 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2972 .args([ 2973 "--platform", 2974 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 2975 ]) 2976 .args([ 2977 "--disk", 2978 format!( 2979 "path={}", 2980 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2981 ) 2982 .as_str(), 2983 format!( 2984 "path={}", 2985 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2986 ) 2987 .as_str(), 2988 format!("path={test_disk_path},pci_segment=15").as_str(), 2989 ]) 2990 .capture_output() 2991 .default_net(); 2992 2993 let mut child = cmd.spawn().unwrap(); 2994 2995 guest.wait_vm_boot(None).unwrap(); 2996 2997 let grep_cmd = "lspci | grep \"Host bridge\" | wc -l"; 2998 2999 let r = std::panic::catch_unwind(|| { 3000 // There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest. 3001 assert_eq!( 3002 guest 3003 .ssh_command(grep_cmd) 3004 .unwrap() 3005 .trim() 3006 .parse::<u16>() 3007 .unwrap_or_default(), 3008 MAX_NUM_PCI_SEGMENTS 3009 ); 3010 3011 // Check both if /dev/vdc exists and if the block size is 4M. 3012 assert_eq!( 3013 guest 3014 .ssh_command("lsblk | grep vdc | grep -c 4M") 3015 .unwrap() 3016 .trim() 3017 .parse::<u32>() 3018 .unwrap_or_default(), 3019 1 3020 ); 3021 3022 // Mount the device. 3023 guest.ssh_command("mkdir mount_image").unwrap(); 3024 guest 3025 .ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/") 3026 .unwrap(); 3027 // Grant all users with write permission. 3028 guest.ssh_command("sudo chmod a+w mount_image/").unwrap(); 3029 3030 // Write something to the device. 3031 guest 3032 .ssh_command("sudo echo \"bar\" >> mount_image/foo") 3033 .unwrap(); 3034 3035 // Check the content of the block device. The file "foo" should 3036 // contain "bar". 3037 assert_eq!( 3038 guest 3039 .ssh_command("sudo cat mount_image/foo") 3040 .unwrap() 3041 .trim(), 3042 "bar" 3043 ); 3044 }); 3045 3046 kill_child(&mut child); 3047 let output = child.wait_with_output().unwrap(); 3048 3049 handle_child_output(r, &output); 3050 } 3051 3052 #[test] 3053 fn test_pci_multiple_segments_numa_node() { 3054 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3055 let guest = Guest::new(Box::new(focal)); 3056 let api_socket = temp_api_path(&guest.tmp_dir); 3057 #[cfg(target_arch = "x86_64")] 3058 let kernel_path = direct_kernel_boot_path(); 3059 #[cfg(target_arch = "aarch64")] 3060 let kernel_path = edk2_path(); 3061 3062 // Prepare another disk file for the virtio-disk device 3063 let test_disk_path = String::from( 3064 guest 3065 .tmp_dir 3066 .as_path() 3067 .join("test-disk.raw") 3068 .to_str() 3069 .unwrap(), 3070 ); 3071 assert!( 3072 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 3073 ); 3074 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 3075 const TEST_DISK_NODE: u16 = 1; 3076 3077 let mut child = GuestCommand::new(&guest) 3078 .args(["--platform", "num_pci_segments=2"]) 3079 .args(["--cpus", "boot=2"]) 3080 .args(["--memory", "size=0"]) 3081 .args(["--memory-zone", "id=mem0,size=256M", "id=mem1,size=256M"]) 3082 .args([ 3083 "--numa", 3084 "guest_numa_id=0,cpus=[0],distances=[1@20],memory_zones=mem0,pci_segments=[0]", 3085 "guest_numa_id=1,cpus=[1],distances=[0@20],memory_zones=mem1,pci_segments=[1]", 3086 ]) 3087 .args(["--kernel", kernel_path.to_str().unwrap()]) 3088 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3089 .args(["--api-socket", &api_socket]) 3090 .capture_output() 3091 .args([ 3092 "--disk", 3093 format!( 3094 "path={}", 3095 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3096 ) 3097 .as_str(), 3098 format!( 3099 "path={}", 3100 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3101 ) 3102 .as_str(), 3103 format!("path={test_disk_path},pci_segment={TEST_DISK_NODE}").as_str(), 3104 ]) 3105 .default_net() 3106 .spawn() 3107 .unwrap(); 3108 3109 let cmd = "cat /sys/block/vdc/device/../numa_node"; 3110 3111 let r = std::panic::catch_unwind(|| { 3112 guest.wait_vm_boot(None).unwrap(); 3113 3114 assert_eq!( 3115 guest 3116 .ssh_command(cmd) 3117 .unwrap() 3118 .trim() 3119 .parse::<u16>() 3120 .unwrap_or_default(), 3121 TEST_DISK_NODE 3122 ); 3123 }); 3124 3125 kill_child(&mut child); 3126 let output = child.wait_with_output().unwrap(); 3127 3128 handle_child_output(r, &output); 3129 } 3130 3131 #[test] 3132 fn test_direct_kernel_boot() { 3133 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3134 let guest = Guest::new(Box::new(focal)); 3135 3136 let kernel_path = direct_kernel_boot_path(); 3137 3138 let mut child = GuestCommand::new(&guest) 3139 .args(["--cpus", "boot=1"]) 3140 .args(["--memory", "size=512M"]) 3141 .args(["--kernel", kernel_path.to_str().unwrap()]) 3142 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3143 .default_disks() 3144 .default_net() 3145 .capture_output() 3146 .spawn() 3147 .unwrap(); 3148 3149 let r = std::panic::catch_unwind(|| { 3150 guest.wait_vm_boot(None).unwrap(); 3151 3152 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3153 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3154 3155 let grep_cmd = if cfg!(target_arch = "x86_64") { 3156 "grep -c PCI-MSI /proc/interrupts" 3157 } else { 3158 "grep -c ITS-PCI-MSIX /proc/interrupts" 3159 }; 3160 assert_eq!( 3161 guest 3162 .ssh_command(grep_cmd) 3163 .unwrap() 3164 .trim() 3165 .parse::<u32>() 3166 .unwrap_or_default(), 3167 12 3168 ); 3169 }); 3170 3171 kill_child(&mut child); 3172 let output = child.wait_with_output().unwrap(); 3173 3174 handle_child_output(r, &output); 3175 } 3176 3177 #[test] 3178 #[cfg(target_arch = "x86_64")] 3179 fn test_direct_kernel_boot_bzimage() { 3180 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3181 let guest = Guest::new(Box::new(focal)); 3182 3183 let mut kernel_path = direct_kernel_boot_path(); 3184 // Replace the default kernel with the bzImage. 3185 kernel_path.pop(); 3186 kernel_path.push("bzImage"); 3187 3188 let mut child = GuestCommand::new(&guest) 3189 .args(["--cpus", "boot=1"]) 3190 .args(["--memory", "size=512M"]) 3191 .args(["--kernel", kernel_path.to_str().unwrap()]) 3192 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3193 .default_disks() 3194 .default_net() 3195 .capture_output() 3196 .spawn() 3197 .unwrap(); 3198 3199 let r = std::panic::catch_unwind(|| { 3200 guest.wait_vm_boot(None).unwrap(); 3201 3202 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3203 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3204 3205 let grep_cmd = if cfg!(target_arch = "x86_64") { 3206 "grep -c PCI-MSI /proc/interrupts" 3207 } else { 3208 "grep -c ITS-PCI-MSIX /proc/interrupts" 3209 }; 3210 assert_eq!( 3211 guest 3212 .ssh_command(grep_cmd) 3213 .unwrap() 3214 .trim() 3215 .parse::<u32>() 3216 .unwrap_or_default(), 3217 12 3218 ); 3219 }); 3220 3221 kill_child(&mut child); 3222 let output = child.wait_with_output().unwrap(); 3223 3224 handle_child_output(r, &output); 3225 } 3226 3227 fn _test_virtio_block(image_name: &str, disable_io_uring: bool, disable_aio: bool) { 3228 let focal = UbuntuDiskConfig::new(image_name.to_string()); 3229 let guest = Guest::new(Box::new(focal)); 3230 3231 let mut workload_path = dirs::home_dir().unwrap(); 3232 workload_path.push("workloads"); 3233 3234 let mut blk_file_path = workload_path; 3235 blk_file_path.push("blk.img"); 3236 3237 let kernel_path = direct_kernel_boot_path(); 3238 3239 let mut cloud_child = GuestCommand::new(&guest) 3240 .args(["--cpus", "boot=4"]) 3241 .args(["--memory", "size=512M,shared=on"]) 3242 .args(["--kernel", kernel_path.to_str().unwrap()]) 3243 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3244 .args([ 3245 "--disk", 3246 format!( 3247 "path={}", 3248 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3249 ) 3250 .as_str(), 3251 format!( 3252 "path={}", 3253 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3254 ) 3255 .as_str(), 3256 format!( 3257 "path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={},_disable_aio={}", 3258 blk_file_path.to_str().unwrap(), 3259 disable_io_uring, 3260 disable_aio, 3261 ) 3262 .as_str(), 3263 ]) 3264 .default_net() 3265 .capture_output() 3266 .spawn() 3267 .unwrap(); 3268 3269 let r = std::panic::catch_unwind(|| { 3270 guest.wait_vm_boot(None).unwrap(); 3271 3272 // Check both if /dev/vdc exists and if the block size is 16M. 3273 assert_eq!( 3274 guest 3275 .ssh_command("lsblk | grep vdc | grep -c 16M") 3276 .unwrap() 3277 .trim() 3278 .parse::<u32>() 3279 .unwrap_or_default(), 3280 1 3281 ); 3282 3283 // Check both if /dev/vdc exists and if this block is RO. 3284 assert_eq!( 3285 guest 3286 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 3287 .unwrap() 3288 .trim() 3289 .parse::<u32>() 3290 .unwrap_or_default(), 3291 1 3292 ); 3293 3294 // Check if the number of queues is 4. 3295 assert_eq!( 3296 guest 3297 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 3298 .unwrap() 3299 .trim() 3300 .parse::<u32>() 3301 .unwrap_or_default(), 3302 4 3303 ); 3304 }); 3305 3306 let _ = cloud_child.kill(); 3307 let output = cloud_child.wait_with_output().unwrap(); 3308 3309 handle_child_output(r, &output); 3310 } 3311 3312 #[test] 3313 fn test_virtio_block_io_uring() { 3314 _test_virtio_block(FOCAL_IMAGE_NAME, false, true) 3315 } 3316 3317 #[test] 3318 fn test_virtio_block_aio() { 3319 _test_virtio_block(FOCAL_IMAGE_NAME, true, false) 3320 } 3321 3322 #[test] 3323 fn test_virtio_block_sync() { 3324 _test_virtio_block(FOCAL_IMAGE_NAME, true, true) 3325 } 3326 3327 #[test] 3328 fn test_virtio_block_qcow2() { 3329 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false, false) 3330 } 3331 3332 #[test] 3333 fn test_virtio_block_qcow2_backing_file() { 3334 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE, false, false) 3335 } 3336 3337 #[test] 3338 fn test_virtio_block_vhd() { 3339 let mut workload_path = dirs::home_dir().unwrap(); 3340 workload_path.push("workloads"); 3341 3342 let mut raw_file_path = workload_path.clone(); 3343 let mut vhd_file_path = workload_path; 3344 raw_file_path.push(FOCAL_IMAGE_NAME); 3345 vhd_file_path.push(FOCAL_IMAGE_NAME_VHD); 3346 3347 // Generate VHD file from RAW file 3348 std::process::Command::new("qemu-img") 3349 .arg("convert") 3350 .arg("-p") 3351 .args(["-f", "raw"]) 3352 .args(["-O", "vpc"]) 3353 .args(["-o", "subformat=fixed"]) 3354 .arg(raw_file_path.to_str().unwrap()) 3355 .arg(vhd_file_path.to_str().unwrap()) 3356 .output() 3357 .expect("Expect generating VHD image from RAW image"); 3358 3359 _test_virtio_block(FOCAL_IMAGE_NAME_VHD, false, false) 3360 } 3361 3362 #[test] 3363 fn test_virtio_block_vhdx() { 3364 let mut workload_path = dirs::home_dir().unwrap(); 3365 workload_path.push("workloads"); 3366 3367 let mut raw_file_path = workload_path.clone(); 3368 let mut vhdx_file_path = workload_path; 3369 raw_file_path.push(FOCAL_IMAGE_NAME); 3370 vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX); 3371 3372 // Generate dynamic VHDX file from RAW file 3373 std::process::Command::new("qemu-img") 3374 .arg("convert") 3375 .arg("-p") 3376 .args(["-f", "raw"]) 3377 .args(["-O", "vhdx"]) 3378 .arg(raw_file_path.to_str().unwrap()) 3379 .arg(vhdx_file_path.to_str().unwrap()) 3380 .output() 3381 .expect("Expect generating dynamic VHDx image from RAW image"); 3382 3383 _test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false, false) 3384 } 3385 3386 #[test] 3387 fn test_virtio_block_dynamic_vhdx_expand() { 3388 const VIRTUAL_DISK_SIZE: u64 = 100 << 20; 3389 const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20; 3390 const FULL_VHDX_FILE_SIZE: u64 = 112 << 20; 3391 const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx"; 3392 3393 let mut workload_path = dirs::home_dir().unwrap(); 3394 workload_path.push("workloads"); 3395 3396 let mut vhdx_file_path = workload_path; 3397 vhdx_file_path.push(DYNAMIC_VHDX_NAME); 3398 let vhdx_path = vhdx_file_path.to_str().unwrap(); 3399 3400 // Generate a 100 MiB dynamic VHDX file 3401 std::process::Command::new("qemu-img") 3402 .arg("create") 3403 .args(["-f", "vhdx"]) 3404 .arg(vhdx_path) 3405 .arg(VIRTUAL_DISK_SIZE.to_string()) 3406 .output() 3407 .expect("Expect generating dynamic VHDx image from RAW image"); 3408 3409 // Check if the size matches with empty VHDx file size 3410 assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE); 3411 3412 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3413 let guest = Guest::new(Box::new(focal)); 3414 let kernel_path = direct_kernel_boot_path(); 3415 3416 let mut cloud_child = GuestCommand::new(&guest) 3417 .args(["--cpus", "boot=1"]) 3418 .args(["--memory", "size=512M"]) 3419 .args(["--kernel", kernel_path.to_str().unwrap()]) 3420 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3421 .args([ 3422 "--disk", 3423 format!( 3424 "path={}", 3425 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3426 ) 3427 .as_str(), 3428 format!( 3429 "path={}", 3430 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3431 ) 3432 .as_str(), 3433 format!("path={vhdx_path}").as_str(), 3434 ]) 3435 .default_net() 3436 .capture_output() 3437 .spawn() 3438 .unwrap(); 3439 3440 let r = std::panic::catch_unwind(|| { 3441 guest.wait_vm_boot(None).unwrap(); 3442 3443 // Check both if /dev/vdc exists and if the block size is 100 MiB. 3444 assert_eq!( 3445 guest 3446 .ssh_command("lsblk | grep vdc | grep -c 100M") 3447 .unwrap() 3448 .trim() 3449 .parse::<u32>() 3450 .unwrap_or_default(), 3451 1 3452 ); 3453 3454 // Write 100 MB of data to the VHDx disk 3455 guest 3456 .ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100") 3457 .unwrap(); 3458 }); 3459 3460 // Check if the size matches with expected expanded VHDx file size 3461 assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE); 3462 3463 kill_child(&mut cloud_child); 3464 let output = cloud_child.wait_with_output().unwrap(); 3465 3466 handle_child_output(r, &output); 3467 } 3468 3469 fn vhdx_image_size(disk_name: &str) -> u64 { 3470 std::fs::File::open(disk_name) 3471 .unwrap() 3472 .seek(SeekFrom::End(0)) 3473 .unwrap() 3474 } 3475 3476 #[test] 3477 fn test_virtio_block_direct_and_firmware() { 3478 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3479 let guest = Guest::new(Box::new(focal)); 3480 3481 // The OS disk must be copied to a location that is not backed by 3482 // tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails 3483 // with EINVAL because tmpfs doesn't support this flag. 3484 let mut workloads_path = dirs::home_dir().unwrap(); 3485 workloads_path.push("workloads"); 3486 let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap(); 3487 let mut os_path = os_dir.as_path().to_path_buf(); 3488 os_path.push("osdisk.img"); 3489 rate_limited_copy( 3490 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3491 os_path.as_path(), 3492 ) 3493 .expect("copying of OS disk failed"); 3494 3495 let mut child = GuestCommand::new(&guest) 3496 .args(["--cpus", "boot=1"]) 3497 .args(["--memory", "size=512M"]) 3498 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 3499 .args([ 3500 "--disk", 3501 format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(), 3502 format!( 3503 "path={}", 3504 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3505 ) 3506 .as_str(), 3507 ]) 3508 .default_net() 3509 .capture_output() 3510 .spawn() 3511 .unwrap(); 3512 3513 let r = std::panic::catch_unwind(|| { 3514 guest.wait_vm_boot(Some(120)).unwrap(); 3515 }); 3516 3517 kill_child(&mut child); 3518 let output = child.wait_with_output().unwrap(); 3519 3520 handle_child_output(r, &output); 3521 } 3522 3523 #[test] 3524 fn test_vhost_user_net_default() { 3525 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false) 3526 } 3527 3528 #[test] 3529 fn test_vhost_user_net_named_tap() { 3530 test_vhost_user_net( 3531 Some("mytap0"), 3532 2, 3533 &prepare_vhost_user_net_daemon, 3534 false, 3535 false, 3536 ) 3537 } 3538 3539 #[test] 3540 fn test_vhost_user_net_existing_tap() { 3541 test_vhost_user_net( 3542 Some("vunet-tap0"), 3543 2, 3544 &prepare_vhost_user_net_daemon, 3545 false, 3546 false, 3547 ) 3548 } 3549 3550 #[test] 3551 fn test_vhost_user_net_multiple_queues() { 3552 test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false) 3553 } 3554 3555 #[test] 3556 fn test_vhost_user_net_tap_multiple_queues() { 3557 test_vhost_user_net( 3558 Some("vunet-tap1"), 3559 4, 3560 &prepare_vhost_user_net_daemon, 3561 false, 3562 false, 3563 ) 3564 } 3565 3566 #[test] 3567 fn test_vhost_user_net_host_mac() { 3568 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false) 3569 } 3570 3571 #[test] 3572 fn test_vhost_user_net_client_mode() { 3573 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true) 3574 } 3575 3576 #[test] 3577 #[cfg(not(target_arch = "aarch64"))] 3578 fn test_vhost_user_blk_default() { 3579 test_vhost_user_blk(2, false, false, Some(&prepare_vubd)) 3580 } 3581 3582 #[test] 3583 #[cfg(not(target_arch = "aarch64"))] 3584 fn test_vhost_user_blk_readonly() { 3585 test_vhost_user_blk(1, true, false, Some(&prepare_vubd)) 3586 } 3587 3588 #[test] 3589 #[cfg(not(target_arch = "aarch64"))] 3590 fn test_vhost_user_blk_direct() { 3591 test_vhost_user_blk(1, false, true, Some(&prepare_vubd)) 3592 } 3593 3594 #[test] 3595 fn test_boot_from_vhost_user_blk_default() { 3596 test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd)) 3597 } 3598 3599 #[test] 3600 #[cfg(target_arch = "x86_64")] 3601 fn test_split_irqchip() { 3602 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3603 let guest = Guest::new(Box::new(focal)); 3604 3605 let mut child = GuestCommand::new(&guest) 3606 .args(["--cpus", "boot=1"]) 3607 .args(["--memory", "size=512M"]) 3608 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3609 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3610 .default_disks() 3611 .default_net() 3612 .capture_output() 3613 .spawn() 3614 .unwrap(); 3615 3616 let r = std::panic::catch_unwind(|| { 3617 guest.wait_vm_boot(None).unwrap(); 3618 3619 assert_eq!( 3620 guest 3621 .ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true") 3622 .unwrap() 3623 .trim() 3624 .parse::<u32>() 3625 .unwrap_or(1), 3626 0 3627 ); 3628 assert_eq!( 3629 guest 3630 .ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true") 3631 .unwrap() 3632 .trim() 3633 .parse::<u32>() 3634 .unwrap_or(1), 3635 0 3636 ); 3637 }); 3638 3639 kill_child(&mut child); 3640 let output = child.wait_with_output().unwrap(); 3641 3642 handle_child_output(r, &output); 3643 } 3644 3645 #[test] 3646 #[cfg(target_arch = "x86_64")] 3647 fn test_dmi_serial_number() { 3648 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3649 let guest = Guest::new(Box::new(focal)); 3650 3651 let mut child = GuestCommand::new(&guest) 3652 .args(["--cpus", "boot=1"]) 3653 .args(["--memory", "size=512M"]) 3654 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3655 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3656 .args(["--platform", "serial_number=a=b;c=d"]) 3657 .default_disks() 3658 .default_net() 3659 .capture_output() 3660 .spawn() 3661 .unwrap(); 3662 3663 let r = std::panic::catch_unwind(|| { 3664 guest.wait_vm_boot(None).unwrap(); 3665 3666 assert_eq!( 3667 guest 3668 .ssh_command("sudo cat /sys/class/dmi/id/product_serial") 3669 .unwrap() 3670 .trim(), 3671 "a=b;c=d" 3672 ); 3673 }); 3674 3675 kill_child(&mut child); 3676 let output = child.wait_with_output().unwrap(); 3677 3678 handle_child_output(r, &output); 3679 } 3680 3681 #[test] 3682 #[cfg(target_arch = "x86_64")] 3683 fn test_dmi_uuid() { 3684 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3685 let guest = Guest::new(Box::new(focal)); 3686 3687 let mut child = GuestCommand::new(&guest) 3688 .args(["--cpus", "boot=1"]) 3689 .args(["--memory", "size=512M"]) 3690 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3691 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3692 .args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"]) 3693 .default_disks() 3694 .default_net() 3695 .capture_output() 3696 .spawn() 3697 .unwrap(); 3698 3699 let r = std::panic::catch_unwind(|| { 3700 guest.wait_vm_boot(None).unwrap(); 3701 3702 assert_eq!( 3703 guest 3704 .ssh_command("sudo cat /sys/class/dmi/id/product_uuid") 3705 .unwrap() 3706 .trim(), 3707 "1e8aa28a-435d-4027-87f4-40dceff1fa0a" 3708 ); 3709 }); 3710 3711 kill_child(&mut child); 3712 let output = child.wait_with_output().unwrap(); 3713 3714 handle_child_output(r, &output); 3715 } 3716 3717 #[test] 3718 #[cfg(target_arch = "x86_64")] 3719 fn test_dmi_oem_strings() { 3720 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3721 let guest = Guest::new(Box::new(focal)); 3722 3723 let s1 = "io.systemd.credential:xx=yy"; 3724 let s2 = "This is a test string"; 3725 3726 let oem_strings = format!("oem_strings=[{s1},{s2}]"); 3727 3728 let mut child = GuestCommand::new(&guest) 3729 .args(["--cpus", "boot=1"]) 3730 .args(["--memory", "size=512M"]) 3731 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3732 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3733 .args(["--platform", &oem_strings]) 3734 .default_disks() 3735 .default_net() 3736 .capture_output() 3737 .spawn() 3738 .unwrap(); 3739 3740 let r = std::panic::catch_unwind(|| { 3741 guest.wait_vm_boot(None).unwrap(); 3742 3743 assert_eq!( 3744 guest 3745 .ssh_command("sudo dmidecode --oem-string count") 3746 .unwrap() 3747 .trim(), 3748 "2" 3749 ); 3750 3751 assert_eq!( 3752 guest 3753 .ssh_command("sudo dmidecode --oem-string 1") 3754 .unwrap() 3755 .trim(), 3756 s1 3757 ); 3758 3759 assert_eq!( 3760 guest 3761 .ssh_command("sudo dmidecode --oem-string 2") 3762 .unwrap() 3763 .trim(), 3764 s2 3765 ); 3766 }); 3767 3768 kill_child(&mut child); 3769 let output = child.wait_with_output().unwrap(); 3770 3771 handle_child_output(r, &output); 3772 } 3773 3774 #[test] 3775 fn test_virtio_fs() { 3776 _test_virtio_fs(&prepare_virtiofsd, false, None) 3777 } 3778 3779 #[test] 3780 fn test_virtio_fs_hotplug() { 3781 _test_virtio_fs(&prepare_virtiofsd, true, None) 3782 } 3783 3784 #[test] 3785 #[cfg(not(feature = "mshv"))] 3786 fn test_virtio_fs_multi_segment_hotplug() { 3787 _test_virtio_fs(&prepare_virtiofsd, true, Some(15)) 3788 } 3789 3790 #[test] 3791 #[cfg(not(feature = "mshv"))] 3792 fn test_virtio_fs_multi_segment() { 3793 _test_virtio_fs(&prepare_virtiofsd, false, Some(15)) 3794 } 3795 3796 #[test] 3797 fn test_virtio_pmem_persist_writes() { 3798 test_virtio_pmem(false, false) 3799 } 3800 3801 #[test] 3802 fn test_virtio_pmem_discard_writes() { 3803 test_virtio_pmem(true, false) 3804 } 3805 3806 #[test] 3807 fn test_virtio_pmem_with_size() { 3808 test_virtio_pmem(true, true) 3809 } 3810 3811 #[test] 3812 fn test_boot_from_virtio_pmem() { 3813 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3814 let guest = Guest::new(Box::new(focal)); 3815 3816 let kernel_path = direct_kernel_boot_path(); 3817 3818 let mut child = GuestCommand::new(&guest) 3819 .args(["--cpus", "boot=1"]) 3820 .args(["--memory", "size=512M"]) 3821 .args(["--kernel", kernel_path.to_str().unwrap()]) 3822 .args([ 3823 "--disk", 3824 format!( 3825 "path={}", 3826 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3827 ) 3828 .as_str(), 3829 ]) 3830 .default_net() 3831 .args([ 3832 "--pmem", 3833 format!( 3834 "file={},size={}", 3835 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3836 fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) 3837 .unwrap() 3838 .len() 3839 ) 3840 .as_str(), 3841 ]) 3842 .args([ 3843 "--cmdline", 3844 DIRECT_KERNEL_BOOT_CMDLINE 3845 .replace("vda1", "pmem0p1") 3846 .as_str(), 3847 ]) 3848 .capture_output() 3849 .spawn() 3850 .unwrap(); 3851 3852 let r = std::panic::catch_unwind(|| { 3853 guest.wait_vm_boot(None).unwrap(); 3854 3855 // Simple checks to validate the VM booted properly 3856 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3857 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3858 }); 3859 3860 kill_child(&mut child); 3861 let output = child.wait_with_output().unwrap(); 3862 3863 handle_child_output(r, &output); 3864 } 3865 3866 #[test] 3867 fn test_multiple_network_interfaces() { 3868 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3869 let guest = Guest::new(Box::new(focal)); 3870 3871 let kernel_path = direct_kernel_boot_path(); 3872 3873 let mut child = GuestCommand::new(&guest) 3874 .args(["--cpus", "boot=1"]) 3875 .args(["--memory", "size=512M"]) 3876 .args(["--kernel", kernel_path.to_str().unwrap()]) 3877 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3878 .default_disks() 3879 .args([ 3880 "--net", 3881 guest.default_net_string().as_str(), 3882 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 3883 "tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 3884 ]) 3885 .capture_output() 3886 .spawn() 3887 .unwrap(); 3888 3889 let r = std::panic::catch_unwind(|| { 3890 guest.wait_vm_boot(None).unwrap(); 3891 3892 let tap_count = exec_host_command_output("ip link | grep -c mytap1"); 3893 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 3894 3895 // 3 network interfaces + default localhost ==> 4 interfaces 3896 assert_eq!( 3897 guest 3898 .ssh_command("ip -o link | wc -l") 3899 .unwrap() 3900 .trim() 3901 .parse::<u32>() 3902 .unwrap_or_default(), 3903 4 3904 ); 3905 }); 3906 3907 kill_child(&mut child); 3908 let output = child.wait_with_output().unwrap(); 3909 3910 handle_child_output(r, &output); 3911 } 3912 3913 #[test] 3914 #[cfg(target_arch = "aarch64")] 3915 fn test_pmu_on() { 3916 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3917 let guest = Guest::new(Box::new(focal)); 3918 let mut child = GuestCommand::new(&guest) 3919 .args(["--cpus", "boot=1"]) 3920 .args(["--memory", "size=512M"]) 3921 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3922 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3923 .default_disks() 3924 .default_net() 3925 .capture_output() 3926 .spawn() 3927 .unwrap(); 3928 3929 let r = std::panic::catch_unwind(|| { 3930 guest.wait_vm_boot(None).unwrap(); 3931 3932 // Test that PMU exists. 3933 assert_eq!( 3934 guest 3935 .ssh_command(GREP_PMU_IRQ_CMD) 3936 .unwrap() 3937 .trim() 3938 .parse::<u32>() 3939 .unwrap_or_default(), 3940 1 3941 ); 3942 }); 3943 3944 kill_child(&mut child); 3945 let output = child.wait_with_output().unwrap(); 3946 3947 handle_child_output(r, &output); 3948 } 3949 3950 #[test] 3951 fn test_serial_off() { 3952 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3953 let guest = Guest::new(Box::new(focal)); 3954 let mut child = GuestCommand::new(&guest) 3955 .args(["--cpus", "boot=1"]) 3956 .args(["--memory", "size=512M"]) 3957 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3958 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3959 .default_disks() 3960 .default_net() 3961 .args(["--serial", "off"]) 3962 .capture_output() 3963 .spawn() 3964 .unwrap(); 3965 3966 let r = std::panic::catch_unwind(|| { 3967 guest.wait_vm_boot(None).unwrap(); 3968 3969 // Test that there is no ttyS0 3970 assert_eq!( 3971 guest 3972 .ssh_command(GREP_SERIAL_IRQ_CMD) 3973 .unwrap() 3974 .trim() 3975 .parse::<u32>() 3976 .unwrap_or(1), 3977 0 3978 ); 3979 }); 3980 3981 kill_child(&mut child); 3982 let output = child.wait_with_output().unwrap(); 3983 3984 handle_child_output(r, &output); 3985 } 3986 3987 #[test] 3988 fn test_serial_null() { 3989 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3990 let guest = Guest::new(Box::new(focal)); 3991 let mut cmd = GuestCommand::new(&guest); 3992 #[cfg(target_arch = "x86_64")] 3993 let console_str: &str = "console=ttyS0"; 3994 #[cfg(target_arch = "aarch64")] 3995 let console_str: &str = "console=ttyAMA0"; 3996 3997 cmd.args(["--cpus", "boot=1"]) 3998 .args(["--memory", "size=512M"]) 3999 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4000 .args([ 4001 "--cmdline", 4002 DIRECT_KERNEL_BOOT_CMDLINE 4003 .replace("console=hvc0 ", console_str) 4004 .as_str(), 4005 ]) 4006 .default_disks() 4007 .default_net() 4008 .args(["--serial", "null"]) 4009 .args(["--console", "off"]) 4010 .capture_output(); 4011 4012 let mut child = cmd.spawn().unwrap(); 4013 4014 let r = std::panic::catch_unwind(|| { 4015 guest.wait_vm_boot(None).unwrap(); 4016 4017 // Test that there is a ttyS0 4018 assert_eq!( 4019 guest 4020 .ssh_command(GREP_SERIAL_IRQ_CMD) 4021 .unwrap() 4022 .trim() 4023 .parse::<u32>() 4024 .unwrap_or_default(), 4025 1 4026 ); 4027 }); 4028 4029 kill_child(&mut child); 4030 let output = child.wait_with_output().unwrap(); 4031 handle_child_output(r, &output); 4032 4033 let r = std::panic::catch_unwind(|| { 4034 assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 4035 }); 4036 4037 handle_child_output(r, &output); 4038 } 4039 4040 #[test] 4041 fn test_serial_tty() { 4042 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4043 let guest = Guest::new(Box::new(focal)); 4044 4045 let kernel_path = direct_kernel_boot_path(); 4046 4047 #[cfg(target_arch = "x86_64")] 4048 let console_str: &str = "console=ttyS0"; 4049 #[cfg(target_arch = "aarch64")] 4050 let console_str: &str = "console=ttyAMA0"; 4051 4052 let mut child = GuestCommand::new(&guest) 4053 .args(["--cpus", "boot=1"]) 4054 .args(["--memory", "size=512M"]) 4055 .args(["--kernel", kernel_path.to_str().unwrap()]) 4056 .args([ 4057 "--cmdline", 4058 DIRECT_KERNEL_BOOT_CMDLINE 4059 .replace("console=hvc0 ", console_str) 4060 .as_str(), 4061 ]) 4062 .default_disks() 4063 .default_net() 4064 .args(["--serial", "tty"]) 4065 .args(["--console", "off"]) 4066 .capture_output() 4067 .spawn() 4068 .unwrap(); 4069 4070 let r = std::panic::catch_unwind(|| { 4071 guest.wait_vm_boot(None).unwrap(); 4072 4073 // Test that there is a ttyS0 4074 assert_eq!( 4075 guest 4076 .ssh_command(GREP_SERIAL_IRQ_CMD) 4077 .unwrap() 4078 .trim() 4079 .parse::<u32>() 4080 .unwrap_or_default(), 4081 1 4082 ); 4083 }); 4084 4085 // This sleep is needed to wait for the login prompt 4086 thread::sleep(std::time::Duration::new(2, 0)); 4087 4088 kill_child(&mut child); 4089 let output = child.wait_with_output().unwrap(); 4090 handle_child_output(r, &output); 4091 4092 let r = std::panic::catch_unwind(|| { 4093 assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 4094 }); 4095 4096 handle_child_output(r, &output); 4097 } 4098 4099 #[test] 4100 fn test_serial_file() { 4101 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4102 let guest = Guest::new(Box::new(focal)); 4103 4104 let serial_path = guest.tmp_dir.as_path().join("serial-output"); 4105 #[cfg(target_arch = "x86_64")] 4106 let console_str: &str = "console=ttyS0"; 4107 #[cfg(target_arch = "aarch64")] 4108 let console_str: &str = "console=ttyAMA0"; 4109 4110 let mut child = GuestCommand::new(&guest) 4111 .args(["--cpus", "boot=1"]) 4112 .args(["--memory", "size=512M"]) 4113 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4114 .args([ 4115 "--cmdline", 4116 DIRECT_KERNEL_BOOT_CMDLINE 4117 .replace("console=hvc0 ", console_str) 4118 .as_str(), 4119 ]) 4120 .default_disks() 4121 .default_net() 4122 .args([ 4123 "--serial", 4124 format!("file={}", serial_path.to_str().unwrap()).as_str(), 4125 ]) 4126 .capture_output() 4127 .spawn() 4128 .unwrap(); 4129 4130 let r = std::panic::catch_unwind(|| { 4131 guest.wait_vm_boot(None).unwrap(); 4132 4133 // Test that there is a ttyS0 4134 assert_eq!( 4135 guest 4136 .ssh_command(GREP_SERIAL_IRQ_CMD) 4137 .unwrap() 4138 .trim() 4139 .parse::<u32>() 4140 .unwrap_or_default(), 4141 1 4142 ); 4143 4144 guest.ssh_command("sudo shutdown -h now").unwrap(); 4145 }); 4146 4147 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4148 kill_child(&mut child); 4149 let output = child.wait_with_output().unwrap(); 4150 handle_child_output(r, &output); 4151 4152 let r = std::panic::catch_unwind(|| { 4153 // Check that the cloud-hypervisor binary actually terminated 4154 assert!(output.status.success()); 4155 4156 // Do this check after shutdown of the VM as an easy way to ensure 4157 // all writes are flushed to disk 4158 let mut f = std::fs::File::open(serial_path).unwrap(); 4159 let mut buf = String::new(); 4160 f.read_to_string(&mut buf).unwrap(); 4161 assert!(buf.contains(CONSOLE_TEST_STRING)); 4162 }); 4163 4164 handle_child_output(r, &output); 4165 } 4166 4167 #[test] 4168 fn test_pty_interaction() { 4169 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4170 let guest = Guest::new(Box::new(focal)); 4171 let api_socket = temp_api_path(&guest.tmp_dir); 4172 let serial_option = if cfg!(target_arch = "x86_64") { 4173 " console=ttyS0" 4174 } else { 4175 " console=ttyAMA0" 4176 }; 4177 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4178 4179 let mut child = GuestCommand::new(&guest) 4180 .args(["--cpus", "boot=1"]) 4181 .args(["--memory", "size=512M"]) 4182 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4183 .args(["--cmdline", &cmdline]) 4184 .default_disks() 4185 .default_net() 4186 .args(["--serial", "null"]) 4187 .args(["--console", "pty"]) 4188 .args(["--api-socket", &api_socket]) 4189 .spawn() 4190 .unwrap(); 4191 4192 let r = std::panic::catch_unwind(|| { 4193 guest.wait_vm_boot(None).unwrap(); 4194 // Get pty fd for console 4195 let console_path = get_pty_path(&api_socket, "console"); 4196 _test_pty_interaction(console_path); 4197 4198 guest.ssh_command("sudo shutdown -h now").unwrap(); 4199 }); 4200 4201 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4202 let _ = child.kill(); 4203 let output = child.wait_with_output().unwrap(); 4204 handle_child_output(r, &output); 4205 4206 let r = std::panic::catch_unwind(|| { 4207 // Check that the cloud-hypervisor binary actually terminated 4208 assert!(output.status.success()) 4209 }); 4210 handle_child_output(r, &output); 4211 } 4212 4213 #[test] 4214 fn test_serial_socket_interaction() { 4215 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4216 let guest = Guest::new(Box::new(focal)); 4217 let serial_socket = guest.tmp_dir.as_path().join("serial.socket"); 4218 let serial_socket_pty = guest.tmp_dir.as_path().join("serial.pty"); 4219 let serial_option = if cfg!(target_arch = "x86_64") { 4220 " console=ttyS0" 4221 } else { 4222 " console=ttyAMA0" 4223 }; 4224 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4225 4226 let mut child = GuestCommand::new(&guest) 4227 .args(["--cpus", "boot=1"]) 4228 .args(["--memory", "size=512M"]) 4229 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4230 .args(["--cmdline", &cmdline]) 4231 .default_disks() 4232 .default_net() 4233 .args(["--console", "null"]) 4234 .args([ 4235 "--serial", 4236 format!("socket={}", serial_socket.to_str().unwrap()).as_str(), 4237 ]) 4238 .spawn() 4239 .unwrap(); 4240 4241 let _ = std::panic::catch_unwind(|| { 4242 guest.wait_vm_boot(None).unwrap(); 4243 }); 4244 4245 let mut socat_command = Command::new("socat"); 4246 let socat_args = [ 4247 &format!("pty,link={},raw", serial_socket_pty.display()), 4248 &format!("UNIX-CONNECT:{}", serial_socket.display()), 4249 ]; 4250 socat_command.args(socat_args); 4251 4252 let mut socat_child = socat_command.spawn().unwrap(); 4253 thread::sleep(std::time::Duration::new(1, 0)); 4254 4255 let _ = std::panic::catch_unwind(|| { 4256 _test_pty_interaction(serial_socket_pty); 4257 }); 4258 4259 let _ = socat_child.kill(); 4260 let _ = socat_child.wait(); 4261 4262 let r = std::panic::catch_unwind(|| { 4263 guest.ssh_command("sudo shutdown -h now").unwrap(); 4264 }); 4265 4266 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4267 kill_child(&mut child); 4268 let output = child.wait_with_output().unwrap(); 4269 handle_child_output(r, &output); 4270 4271 let r = std::panic::catch_unwind(|| { 4272 // Check that the cloud-hypervisor binary actually terminated 4273 if !output.status.success() { 4274 panic!( 4275 "Cloud Hypervisor process failed to terminate gracefully: {:?}", 4276 output.status 4277 ); 4278 } 4279 }); 4280 handle_child_output(r, &output); 4281 } 4282 4283 #[test] 4284 fn test_virtio_console() { 4285 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4286 let guest = Guest::new(Box::new(focal)); 4287 4288 let kernel_path = direct_kernel_boot_path(); 4289 4290 let mut child = GuestCommand::new(&guest) 4291 .args(["--cpus", "boot=1"]) 4292 .args(["--memory", "size=512M"]) 4293 .args(["--kernel", kernel_path.to_str().unwrap()]) 4294 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4295 .default_disks() 4296 .default_net() 4297 .args(["--console", "tty"]) 4298 .args(["--serial", "null"]) 4299 .capture_output() 4300 .spawn() 4301 .unwrap(); 4302 4303 let text = String::from("On a branch floating down river a cricket, singing."); 4304 let cmd = format!("echo {text} | sudo tee /dev/hvc0"); 4305 4306 let r = std::panic::catch_unwind(|| { 4307 guest.wait_vm_boot(None).unwrap(); 4308 4309 assert!(guest 4310 .does_device_vendor_pair_match("0x1043", "0x1af4") 4311 .unwrap_or_default()); 4312 4313 guest.ssh_command(&cmd).unwrap(); 4314 }); 4315 4316 kill_child(&mut child); 4317 let output = child.wait_with_output().unwrap(); 4318 handle_child_output(r, &output); 4319 4320 let r = std::panic::catch_unwind(|| { 4321 assert!(String::from_utf8_lossy(&output.stdout).contains(&text)); 4322 }); 4323 4324 handle_child_output(r, &output); 4325 } 4326 4327 #[test] 4328 fn test_console_file() { 4329 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4330 let guest = Guest::new(Box::new(focal)); 4331 4332 let console_path = guest.tmp_dir.as_path().join("console-output"); 4333 let mut child = GuestCommand::new(&guest) 4334 .args(["--cpus", "boot=1"]) 4335 .args(["--memory", "size=512M"]) 4336 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4337 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4338 .default_disks() 4339 .default_net() 4340 .args([ 4341 "--console", 4342 format!("file={}", console_path.to_str().unwrap()).as_str(), 4343 ]) 4344 .capture_output() 4345 .spawn() 4346 .unwrap(); 4347 4348 guest.wait_vm_boot(None).unwrap(); 4349 4350 guest.ssh_command("sudo shutdown -h now").unwrap(); 4351 4352 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4353 kill_child(&mut child); 4354 let output = child.wait_with_output().unwrap(); 4355 4356 let r = std::panic::catch_unwind(|| { 4357 // Check that the cloud-hypervisor binary actually terminated 4358 assert!(output.status.success()); 4359 4360 // Do this check after shutdown of the VM as an easy way to ensure 4361 // all writes are flushed to disk 4362 let mut f = std::fs::File::open(console_path).unwrap(); 4363 let mut buf = String::new(); 4364 f.read_to_string(&mut buf).unwrap(); 4365 4366 if !buf.contains(CONSOLE_TEST_STRING) { 4367 eprintln!( 4368 "\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ====" 4369 ); 4370 } 4371 assert!(buf.contains(CONSOLE_TEST_STRING)); 4372 }); 4373 4374 handle_child_output(r, &output); 4375 } 4376 4377 #[test] 4378 #[cfg(target_arch = "x86_64")] 4379 #[cfg(not(feature = "mshv"))] 4380 // The VFIO integration test starts cloud-hypervisor guest with 3 TAP 4381 // backed networking interfaces, bound through a simple bridge on the host. 4382 // So if the nested cloud-hypervisor succeeds in getting a directly 4383 // assigned interface from its cloud-hypervisor host, we should be able to 4384 // ssh into it, and verify that it's running with the right kernel command 4385 // line (We tag the command line from cloud-hypervisor for that purpose). 4386 // The third device is added to validate that hotplug works correctly since 4387 // it is being added to the L2 VM through hotplugging mechanism. 4388 // Also, we pass-through a virtio-blk device to the L2 VM to test the 32-bit 4389 // vfio device support 4390 fn test_vfio() { 4391 setup_vfio_network_interfaces(); 4392 4393 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 4394 let guest = Guest::new_from_ip_range(Box::new(jammy), "172.18", 0); 4395 4396 let mut workload_path = dirs::home_dir().unwrap(); 4397 workload_path.push("workloads"); 4398 4399 let kernel_path = direct_kernel_boot_path(); 4400 4401 let mut vfio_path = workload_path.clone(); 4402 vfio_path.push("vfio"); 4403 4404 let mut cloud_init_vfio_base_path = vfio_path.clone(); 4405 cloud_init_vfio_base_path.push("cloudinit.img"); 4406 4407 // We copy our cloudinit into the vfio mount point, for the nested 4408 // cloud-hypervisor guest to use. 4409 rate_limited_copy( 4410 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 4411 &cloud_init_vfio_base_path, 4412 ) 4413 .expect("copying of cloud-init disk failed"); 4414 4415 let mut vfio_disk_path = workload_path.clone(); 4416 vfio_disk_path.push("vfio.img"); 4417 4418 // Create the vfio disk image 4419 let output = Command::new("mkfs.ext4") 4420 .arg("-d") 4421 .arg(vfio_path.to_str().unwrap()) 4422 .arg(vfio_disk_path.to_str().unwrap()) 4423 .arg("2g") 4424 .output() 4425 .unwrap(); 4426 if !output.status.success() { 4427 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 4428 panic!("mkfs.ext4 command generated an error"); 4429 } 4430 4431 let mut blk_file_path = workload_path; 4432 blk_file_path.push("blk.img"); 4433 4434 let vfio_tap0 = "vfio-tap0"; 4435 let vfio_tap1 = "vfio-tap1"; 4436 let vfio_tap2 = "vfio-tap2"; 4437 let vfio_tap3 = "vfio-tap3"; 4438 4439 let mut child = GuestCommand::new(&guest) 4440 .args(["--cpus", "boot=4"]) 4441 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 4442 .args(["--kernel", kernel_path.to_str().unwrap()]) 4443 .args([ 4444 "--disk", 4445 format!( 4446 "path={}", 4447 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 4448 ) 4449 .as_str(), 4450 format!( 4451 "path={}", 4452 guest.disk_config.disk(DiskType::CloudInit).unwrap() 4453 ) 4454 .as_str(), 4455 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 4456 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 4457 ]) 4458 .args([ 4459 "--cmdline", 4460 format!( 4461 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 4462 ) 4463 .as_str(), 4464 ]) 4465 .args([ 4466 "--net", 4467 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 4468 format!( 4469 "tap={},mac={},iommu=on", 4470 vfio_tap1, guest.network.l2_guest_mac1 4471 ) 4472 .as_str(), 4473 format!( 4474 "tap={},mac={},iommu=on", 4475 vfio_tap2, guest.network.l2_guest_mac2 4476 ) 4477 .as_str(), 4478 format!( 4479 "tap={},mac={},iommu=on", 4480 vfio_tap3, guest.network.l2_guest_mac3 4481 ) 4482 .as_str(), 4483 ]) 4484 .capture_output() 4485 .spawn() 4486 .unwrap(); 4487 4488 thread::sleep(std::time::Duration::new(30, 0)); 4489 4490 let r = std::panic::catch_unwind(|| { 4491 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 4492 thread::sleep(std::time::Duration::new(120, 0)); 4493 4494 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 4495 // added to its kernel command line. 4496 // Let's ssh into it and verify that it's there. If it is it means 4497 // we're in the right guest (The L2 one) because the QEMU L1 guest 4498 // does not have this command line tag. 4499 assert!(check_matched_lines_count( 4500 guest.ssh_command_l2_1("cat /proc/cmdline").unwrap().trim(), 4501 vec!["VFIOTAG"], 4502 1 4503 )); 4504 4505 // Let's also verify from the second virtio-net device passed to 4506 // the L2 VM. 4507 assert!(check_matched_lines_count( 4508 guest.ssh_command_l2_2("cat /proc/cmdline").unwrap().trim(), 4509 vec!["VFIOTAG"], 4510 1 4511 )); 4512 4513 // Check the amount of PCI devices appearing in L2 VM. 4514 assert!(check_lines_count( 4515 guest 4516 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4517 .unwrap() 4518 .trim(), 4519 8 4520 )); 4521 4522 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 4523 assert!(check_matched_lines_count( 4524 guest.ssh_command_l2_1("lsblk").unwrap().trim(), 4525 vec!["vdc", "16M"], 4526 1 4527 )); 4528 4529 // Hotplug an extra virtio-net device through L2 VM. 4530 guest 4531 .ssh_command_l1( 4532 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 4533 ) 4534 .unwrap(); 4535 guest 4536 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 4537 .unwrap(); 4538 let vfio_hotplug_output = guest 4539 .ssh_command_l1( 4540 "sudo /mnt/ch-remote \ 4541 --api-socket=/tmp/ch_api.sock \ 4542 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 4543 ) 4544 .unwrap(); 4545 assert!(check_matched_lines_count( 4546 vfio_hotplug_output.trim(), 4547 vec!["{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}"], 4548 1 4549 )); 4550 4551 thread::sleep(std::time::Duration::new(10, 0)); 4552 4553 // Let's also verify from the third virtio-net device passed to 4554 // the L2 VM. This third device has been hotplugged through the L2 4555 // VM, so this is our way to validate hotplug works for VFIO PCI. 4556 assert!(check_matched_lines_count( 4557 guest.ssh_command_l2_3("cat /proc/cmdline").unwrap().trim(), 4558 vec!["VFIOTAG"], 4559 1 4560 )); 4561 4562 // Check the amount of PCI devices appearing in L2 VM. 4563 // There should be one more device than before, raising the count 4564 // up to 9 PCI devices. 4565 assert!(check_lines_count( 4566 guest 4567 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4568 .unwrap() 4569 .trim(), 4570 9 4571 )); 4572 4573 // Let's now verify that we can correctly remove the virtio-net 4574 // device through the "remove-device" command responsible for 4575 // unplugging VFIO devices. 4576 guest 4577 .ssh_command_l1( 4578 "sudo /mnt/ch-remote \ 4579 --api-socket=/tmp/ch_api.sock \ 4580 remove-device vfio123", 4581 ) 4582 .unwrap(); 4583 thread::sleep(std::time::Duration::new(10, 0)); 4584 4585 // Check the amount of PCI devices appearing in L2 VM is back down 4586 // to 8 devices. 4587 assert!(check_lines_count( 4588 guest 4589 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4590 .unwrap() 4591 .trim(), 4592 8 4593 )); 4594 4595 // Perform memory hotplug in L2 and validate the memory is showing 4596 // up as expected. In order to check, we will use the virtio-net 4597 // device already passed through L2 as a VFIO device, this will 4598 // verify that VFIO devices are functional with memory hotplug. 4599 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 4600 guest 4601 .ssh_command_l2_1( 4602 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 4603 ) 4604 .unwrap(); 4605 guest 4606 .ssh_command_l1( 4607 "sudo /mnt/ch-remote \ 4608 --api-socket=/tmp/ch_api.sock \ 4609 resize --memory=1073741824", 4610 ) 4611 .unwrap(); 4612 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 4613 }); 4614 4615 kill_child(&mut child); 4616 let output = child.wait_with_output().unwrap(); 4617 4618 cleanup_vfio_network_interfaces(); 4619 4620 handle_child_output(r, &output); 4621 } 4622 4623 #[test] 4624 fn test_direct_kernel_boot_noacpi() { 4625 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4626 let guest = Guest::new(Box::new(focal)); 4627 4628 let kernel_path = direct_kernel_boot_path(); 4629 4630 let mut child = GuestCommand::new(&guest) 4631 .args(["--cpus", "boot=1"]) 4632 .args(["--memory", "size=512M"]) 4633 .args(["--kernel", kernel_path.to_str().unwrap()]) 4634 .args([ 4635 "--cmdline", 4636 format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(), 4637 ]) 4638 .default_disks() 4639 .default_net() 4640 .capture_output() 4641 .spawn() 4642 .unwrap(); 4643 4644 let r = std::panic::catch_unwind(|| { 4645 guest.wait_vm_boot(None).unwrap(); 4646 4647 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 4648 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4649 }); 4650 4651 kill_child(&mut child); 4652 let output = child.wait_with_output().unwrap(); 4653 4654 handle_child_output(r, &output); 4655 } 4656 4657 #[test] 4658 fn test_virtio_vsock() { 4659 _test_virtio_vsock(false) 4660 } 4661 4662 #[test] 4663 fn test_virtio_vsock_hotplug() { 4664 _test_virtio_vsock(true); 4665 } 4666 4667 #[test] 4668 fn test_api_http_shutdown() { 4669 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4670 let guest = Guest::new(Box::new(focal)); 4671 4672 _test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest) 4673 } 4674 4675 #[test] 4676 fn test_api_http_delete() { 4677 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4678 let guest = Guest::new(Box::new(focal)); 4679 4680 _test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest); 4681 } 4682 4683 #[test] 4684 fn test_api_http_pause_resume() { 4685 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4686 let guest = Guest::new(Box::new(focal)); 4687 4688 _test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest) 4689 } 4690 4691 #[test] 4692 fn test_api_http_create_boot() { 4693 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4694 let guest = Guest::new(Box::new(focal)); 4695 4696 _test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest) 4697 } 4698 4699 #[test] 4700 fn test_virtio_iommu() { 4701 _test_virtio_iommu(cfg!(target_arch = "x86_64")) 4702 } 4703 4704 #[test] 4705 // We cannot force the software running in the guest to reprogram the BAR 4706 // with some different addresses, but we have a reliable way of testing it 4707 // with a standard Linux kernel. 4708 // By removing a device from the PCI tree, and then rescanning the tree, 4709 // Linux consistently chooses to reorganize the PCI device BARs to other 4710 // locations in the guest address space. 4711 // This test creates a dedicated PCI network device to be checked as being 4712 // properly probed first, then removing it, and adding it again by doing a 4713 // rescan. 4714 fn test_pci_bar_reprogramming() { 4715 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4716 let guest = Guest::new(Box::new(focal)); 4717 4718 #[cfg(target_arch = "x86_64")] 4719 let kernel_path = direct_kernel_boot_path(); 4720 #[cfg(target_arch = "aarch64")] 4721 let kernel_path = edk2_path(); 4722 4723 let mut child = GuestCommand::new(&guest) 4724 .args(["--cpus", "boot=1"]) 4725 .args(["--memory", "size=512M"]) 4726 .args(["--kernel", kernel_path.to_str().unwrap()]) 4727 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4728 .default_disks() 4729 .args([ 4730 "--net", 4731 guest.default_net_string().as_str(), 4732 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 4733 ]) 4734 .capture_output() 4735 .spawn() 4736 .unwrap(); 4737 4738 let r = std::panic::catch_unwind(|| { 4739 guest.wait_vm_boot(None).unwrap(); 4740 4741 // 2 network interfaces + default localhost ==> 3 interfaces 4742 assert_eq!( 4743 guest 4744 .ssh_command("ip -o link | wc -l") 4745 .unwrap() 4746 .trim() 4747 .parse::<u32>() 4748 .unwrap_or_default(), 4749 3 4750 ); 4751 4752 let init_bar_addr = guest 4753 .ssh_command( 4754 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource", 4755 ) 4756 .unwrap(); 4757 4758 // Remove the PCI device 4759 guest 4760 .ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove") 4761 .unwrap(); 4762 4763 // Only 1 network interface left + default localhost ==> 2 interfaces 4764 assert_eq!( 4765 guest 4766 .ssh_command("ip -o link | wc -l") 4767 .unwrap() 4768 .trim() 4769 .parse::<u32>() 4770 .unwrap_or_default(), 4771 2 4772 ); 4773 4774 // Remove the PCI device 4775 guest 4776 .ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan") 4777 .unwrap(); 4778 4779 // Back to 2 network interface + default localhost ==> 3 interfaces 4780 assert_eq!( 4781 guest 4782 .ssh_command("ip -o link | wc -l") 4783 .unwrap() 4784 .trim() 4785 .parse::<u32>() 4786 .unwrap_or_default(), 4787 3 4788 ); 4789 4790 let new_bar_addr = guest 4791 .ssh_command( 4792 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource", 4793 ) 4794 .unwrap(); 4795 4796 // Let's compare the BAR addresses for our virtio-net device. 4797 // They should be different as we expect the BAR reprogramming 4798 // to have happened. 4799 assert_ne!(init_bar_addr, new_bar_addr); 4800 }); 4801 4802 kill_child(&mut child); 4803 let output = child.wait_with_output().unwrap(); 4804 4805 handle_child_output(r, &output); 4806 } 4807 4808 #[test] 4809 fn test_memory_mergeable_off() { 4810 test_memory_mergeable(false) 4811 } 4812 4813 #[test] 4814 #[cfg(target_arch = "x86_64")] 4815 fn test_cpu_hotplug() { 4816 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4817 let guest = Guest::new(Box::new(focal)); 4818 let api_socket = temp_api_path(&guest.tmp_dir); 4819 4820 let kernel_path = direct_kernel_boot_path(); 4821 4822 let mut child = GuestCommand::new(&guest) 4823 .args(["--cpus", "boot=2,max=4"]) 4824 .args(["--memory", "size=512M"]) 4825 .args(["--kernel", kernel_path.to_str().unwrap()]) 4826 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4827 .default_disks() 4828 .default_net() 4829 .args(["--api-socket", &api_socket]) 4830 .capture_output() 4831 .spawn() 4832 .unwrap(); 4833 4834 let r = std::panic::catch_unwind(|| { 4835 guest.wait_vm_boot(None).unwrap(); 4836 4837 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4838 4839 // Resize the VM 4840 let desired_vcpus = 4; 4841 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4842 4843 guest 4844 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4845 .unwrap(); 4846 guest 4847 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4848 .unwrap(); 4849 thread::sleep(std::time::Duration::new(10, 0)); 4850 assert_eq!( 4851 guest.get_cpu_count().unwrap_or_default(), 4852 u32::from(desired_vcpus) 4853 ); 4854 4855 guest.reboot_linux(0, None); 4856 4857 assert_eq!( 4858 guest.get_cpu_count().unwrap_or_default(), 4859 u32::from(desired_vcpus) 4860 ); 4861 4862 // Resize the VM 4863 let desired_vcpus = 2; 4864 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4865 4866 thread::sleep(std::time::Duration::new(10, 0)); 4867 assert_eq!( 4868 guest.get_cpu_count().unwrap_or_default(), 4869 u32::from(desired_vcpus) 4870 ); 4871 4872 // Resize the VM back up to 4 4873 let desired_vcpus = 4; 4874 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4875 4876 guest 4877 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4878 .unwrap(); 4879 guest 4880 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4881 .unwrap(); 4882 thread::sleep(std::time::Duration::new(10, 0)); 4883 assert_eq!( 4884 guest.get_cpu_count().unwrap_or_default(), 4885 u32::from(desired_vcpus) 4886 ); 4887 }); 4888 4889 kill_child(&mut child); 4890 let output = child.wait_with_output().unwrap(); 4891 4892 handle_child_output(r, &output); 4893 } 4894 4895 #[test] 4896 fn test_memory_hotplug() { 4897 #[cfg(target_arch = "aarch64")] 4898 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 4899 #[cfg(target_arch = "x86_64")] 4900 let focal_image = FOCAL_IMAGE_NAME.to_string(); 4901 let focal = UbuntuDiskConfig::new(focal_image); 4902 let guest = Guest::new(Box::new(focal)); 4903 let api_socket = temp_api_path(&guest.tmp_dir); 4904 4905 #[cfg(target_arch = "aarch64")] 4906 let kernel_path = edk2_path(); 4907 #[cfg(target_arch = "x86_64")] 4908 let kernel_path = direct_kernel_boot_path(); 4909 4910 let mut child = GuestCommand::new(&guest) 4911 .args(["--cpus", "boot=2,max=4"]) 4912 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4913 .args(["--kernel", kernel_path.to_str().unwrap()]) 4914 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4915 .default_disks() 4916 .default_net() 4917 .args(["--balloon", "size=0"]) 4918 .args(["--api-socket", &api_socket]) 4919 .capture_output() 4920 .spawn() 4921 .unwrap(); 4922 4923 let r = std::panic::catch_unwind(|| { 4924 guest.wait_vm_boot(None).unwrap(); 4925 4926 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4927 4928 guest.enable_memory_hotplug(); 4929 4930 // Add RAM to the VM 4931 let desired_ram = 1024 << 20; 4932 resize_command(&api_socket, None, Some(desired_ram), None, None); 4933 4934 thread::sleep(std::time::Duration::new(10, 0)); 4935 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4936 4937 // Use balloon to remove RAM from the VM 4938 let desired_balloon = 512 << 20; 4939 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4940 4941 thread::sleep(std::time::Duration::new(10, 0)); 4942 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4943 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4944 4945 guest.reboot_linux(0, None); 4946 4947 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4948 4949 // Use balloon add RAM to the VM 4950 let desired_balloon = 0; 4951 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4952 4953 thread::sleep(std::time::Duration::new(10, 0)); 4954 4955 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4956 4957 guest.enable_memory_hotplug(); 4958 4959 // Add RAM to the VM 4960 let desired_ram = 2048 << 20; 4961 resize_command(&api_socket, None, Some(desired_ram), None, None); 4962 4963 thread::sleep(std::time::Duration::new(10, 0)); 4964 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4965 4966 // Remove RAM to the VM (only applies after reboot) 4967 let desired_ram = 1024 << 20; 4968 resize_command(&api_socket, None, Some(desired_ram), None, None); 4969 4970 guest.reboot_linux(1, None); 4971 4972 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4973 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4974 }); 4975 4976 kill_child(&mut child); 4977 let output = child.wait_with_output().unwrap(); 4978 4979 handle_child_output(r, &output); 4980 } 4981 4982 #[test] 4983 #[cfg(not(feature = "mshv"))] 4984 fn test_virtio_mem() { 4985 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4986 let guest = Guest::new(Box::new(focal)); 4987 let api_socket = temp_api_path(&guest.tmp_dir); 4988 4989 let kernel_path = direct_kernel_boot_path(); 4990 4991 let mut child = GuestCommand::new(&guest) 4992 .args(["--cpus", "boot=2,max=4"]) 4993 .args([ 4994 "--memory", 4995 "size=512M,hotplug_method=virtio-mem,hotplug_size=8192M", 4996 ]) 4997 .args(["--kernel", kernel_path.to_str().unwrap()]) 4998 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4999 .default_disks() 5000 .default_net() 5001 .args(["--api-socket", &api_socket]) 5002 .capture_output() 5003 .spawn() 5004 .unwrap(); 5005 5006 let r = std::panic::catch_unwind(|| { 5007 guest.wait_vm_boot(None).unwrap(); 5008 5009 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5010 5011 guest.enable_memory_hotplug(); 5012 5013 // Add RAM to the VM 5014 let desired_ram = 1024 << 20; 5015 resize_command(&api_socket, None, Some(desired_ram), None, None); 5016 5017 thread::sleep(std::time::Duration::new(10, 0)); 5018 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5019 5020 // Add RAM to the VM 5021 let desired_ram = 2048 << 20; 5022 resize_command(&api_socket, None, Some(desired_ram), None, None); 5023 5024 thread::sleep(std::time::Duration::new(10, 0)); 5025 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 5026 5027 // Remove RAM from the VM 5028 let desired_ram = 1024 << 20; 5029 resize_command(&api_socket, None, Some(desired_ram), None, None); 5030 5031 thread::sleep(std::time::Duration::new(10, 0)); 5032 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5033 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 5034 5035 guest.reboot_linux(0, None); 5036 5037 // Check the amount of memory after reboot is 1GiB 5038 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5039 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 5040 5041 // Check we can still resize to 512MiB 5042 let desired_ram = 512 << 20; 5043 resize_command(&api_socket, None, Some(desired_ram), None, None); 5044 thread::sleep(std::time::Duration::new(10, 0)); 5045 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5046 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 5047 }); 5048 5049 kill_child(&mut child); 5050 let output = child.wait_with_output().unwrap(); 5051 5052 handle_child_output(r, &output); 5053 } 5054 5055 #[test] 5056 #[cfg(target_arch = "x86_64")] 5057 #[cfg(not(feature = "mshv"))] 5058 // Test both vCPU and memory resizing together 5059 fn test_resize() { 5060 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5061 let guest = Guest::new(Box::new(focal)); 5062 let api_socket = temp_api_path(&guest.tmp_dir); 5063 5064 let kernel_path = direct_kernel_boot_path(); 5065 5066 let mut child = GuestCommand::new(&guest) 5067 .args(["--cpus", "boot=2,max=4"]) 5068 .args(["--memory", "size=512M,hotplug_size=8192M"]) 5069 .args(["--kernel", kernel_path.to_str().unwrap()]) 5070 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5071 .default_disks() 5072 .default_net() 5073 .args(["--api-socket", &api_socket]) 5074 .capture_output() 5075 .spawn() 5076 .unwrap(); 5077 5078 let r = std::panic::catch_unwind(|| { 5079 guest.wait_vm_boot(None).unwrap(); 5080 5081 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 5082 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5083 5084 guest.enable_memory_hotplug(); 5085 5086 // Resize the VM 5087 let desired_vcpus = 4; 5088 let desired_ram = 1024 << 20; 5089 resize_command( 5090 &api_socket, 5091 Some(desired_vcpus), 5092 Some(desired_ram), 5093 None, 5094 None, 5095 ); 5096 5097 guest 5098 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 5099 .unwrap(); 5100 guest 5101 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 5102 .unwrap(); 5103 thread::sleep(std::time::Duration::new(10, 0)); 5104 assert_eq!( 5105 guest.get_cpu_count().unwrap_or_default(), 5106 u32::from(desired_vcpus) 5107 ); 5108 5109 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5110 }); 5111 5112 kill_child(&mut child); 5113 let output = child.wait_with_output().unwrap(); 5114 5115 handle_child_output(r, &output); 5116 } 5117 5118 #[test] 5119 fn test_memory_overhead() { 5120 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5121 let guest = Guest::new(Box::new(focal)); 5122 5123 let kernel_path = direct_kernel_boot_path(); 5124 5125 let guest_memory_size_kb = 512 * 1024; 5126 5127 let mut child = GuestCommand::new(&guest) 5128 .args(["--cpus", "boot=1"]) 5129 .args(["--memory", format!("size={guest_memory_size_kb}K").as_str()]) 5130 .args(["--kernel", kernel_path.to_str().unwrap()]) 5131 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5132 .default_net() 5133 .default_disks() 5134 .capture_output() 5135 .spawn() 5136 .unwrap(); 5137 5138 guest.wait_vm_boot(None).unwrap(); 5139 5140 let r = std::panic::catch_unwind(|| { 5141 let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb); 5142 eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}"); 5143 assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB); 5144 }); 5145 5146 kill_child(&mut child); 5147 let output = child.wait_with_output().unwrap(); 5148 5149 handle_child_output(r, &output); 5150 } 5151 5152 #[test] 5153 #[cfg(target_arch = "x86_64")] 5154 // This test runs a guest with Landlock enabled and hotplugs a new disk. As 5155 // the path for the hotplug disk is not pre-added to Landlock rules, this 5156 // the test will result in a failure. 5157 fn test_landlock() { 5158 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5159 let guest = Guest::new(Box::new(focal)); 5160 5161 #[cfg(target_arch = "x86_64")] 5162 let kernel_path = direct_kernel_boot_path(); 5163 #[cfg(target_arch = "aarch64")] 5164 let kernel_path = edk2_path(); 5165 5166 let api_socket = temp_api_path(&guest.tmp_dir); 5167 5168 let mut child = GuestCommand::new(&guest) 5169 .args(["--api-socket", &api_socket]) 5170 .args(["--cpus", "boot=1"]) 5171 .args(["--memory", "size=512M"]) 5172 .args(["--kernel", kernel_path.to_str().unwrap()]) 5173 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5174 .args(["--landlock"]) 5175 .default_disks() 5176 .default_net() 5177 .capture_output() 5178 .spawn() 5179 .unwrap(); 5180 5181 let r = std::panic::catch_unwind(|| { 5182 guest.wait_vm_boot(None).unwrap(); 5183 5184 // Check /dev/vdc is not there 5185 assert_eq!( 5186 guest 5187 .ssh_command("lsblk | grep -c vdc.*16M || true") 5188 .unwrap() 5189 .trim() 5190 .parse::<u32>() 5191 .unwrap_or(1), 5192 0 5193 ); 5194 5195 // Now let's add the extra disk. 5196 let mut blk_file_path = dirs::home_dir().unwrap(); 5197 blk_file_path.push("workloads"); 5198 blk_file_path.push("blk.img"); 5199 // As the path to the hotplug disk is not pre-added, this remote 5200 // command will fail. 5201 assert!(!remote_command( 5202 &api_socket, 5203 "add-disk", 5204 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5205 )); 5206 }); 5207 5208 let _ = child.kill(); 5209 let output = child.wait_with_output().unwrap(); 5210 5211 handle_child_output(r, &output); 5212 } 5213 5214 fn _test_disk_hotplug(landlock_enabled: bool) { 5215 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5216 let guest = Guest::new(Box::new(focal)); 5217 5218 #[cfg(target_arch = "x86_64")] 5219 let kernel_path = direct_kernel_boot_path(); 5220 #[cfg(target_arch = "aarch64")] 5221 let kernel_path = edk2_path(); 5222 5223 let api_socket = temp_api_path(&guest.tmp_dir); 5224 5225 let mut blk_file_path = dirs::home_dir().unwrap(); 5226 blk_file_path.push("workloads"); 5227 blk_file_path.push("blk.img"); 5228 5229 let mut cmd = GuestCommand::new(&guest); 5230 if landlock_enabled { 5231 cmd.args(["--landlock"]).args([ 5232 "--landlock-rules", 5233 format!("path={:?},access=rw", blk_file_path).as_str(), 5234 ]); 5235 } 5236 5237 cmd.args(["--api-socket", &api_socket]) 5238 .args(["--cpus", "boot=1"]) 5239 .args(["--memory", "size=512M"]) 5240 .args(["--kernel", kernel_path.to_str().unwrap()]) 5241 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5242 .default_disks() 5243 .default_net() 5244 .capture_output(); 5245 5246 let mut child = cmd.spawn().unwrap(); 5247 5248 let r = std::panic::catch_unwind(|| { 5249 guest.wait_vm_boot(None).unwrap(); 5250 5251 // Check /dev/vdc is not there 5252 assert_eq!( 5253 guest 5254 .ssh_command("lsblk | grep -c vdc.*16M || true") 5255 .unwrap() 5256 .trim() 5257 .parse::<u32>() 5258 .unwrap_or(1), 5259 0 5260 ); 5261 5262 // Now let's add the extra disk. 5263 let (cmd_success, cmd_output) = remote_command_w_output( 5264 &api_socket, 5265 "add-disk", 5266 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5267 ); 5268 assert!(cmd_success); 5269 assert!(String::from_utf8_lossy(&cmd_output) 5270 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5271 5272 thread::sleep(std::time::Duration::new(10, 0)); 5273 5274 // Check that /dev/vdc exists and the block size is 16M. 5275 assert_eq!( 5276 guest 5277 .ssh_command("lsblk | grep vdc | grep -c 16M") 5278 .unwrap() 5279 .trim() 5280 .parse::<u32>() 5281 .unwrap_or_default(), 5282 1 5283 ); 5284 // And check the block device can be read. 5285 guest 5286 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5287 .unwrap(); 5288 5289 // Let's remove it the extra disk. 5290 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5291 thread::sleep(std::time::Duration::new(5, 0)); 5292 // And check /dev/vdc is not there 5293 assert_eq!( 5294 guest 5295 .ssh_command("lsblk | grep -c vdc.*16M || true") 5296 .unwrap() 5297 .trim() 5298 .parse::<u32>() 5299 .unwrap_or(1), 5300 0 5301 ); 5302 5303 // And add it back to validate unplug did work correctly. 5304 let (cmd_success, cmd_output) = remote_command_w_output( 5305 &api_socket, 5306 "add-disk", 5307 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5308 ); 5309 assert!(cmd_success); 5310 assert!(String::from_utf8_lossy(&cmd_output) 5311 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5312 5313 thread::sleep(std::time::Duration::new(10, 0)); 5314 5315 // Check that /dev/vdc exists and the block size is 16M. 5316 assert_eq!( 5317 guest 5318 .ssh_command("lsblk | grep vdc | grep -c 16M") 5319 .unwrap() 5320 .trim() 5321 .parse::<u32>() 5322 .unwrap_or_default(), 5323 1 5324 ); 5325 // And check the block device can be read. 5326 guest 5327 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5328 .unwrap(); 5329 5330 // Reboot the VM. 5331 guest.reboot_linux(0, None); 5332 5333 // Check still there after reboot 5334 assert_eq!( 5335 guest 5336 .ssh_command("lsblk | grep vdc | grep -c 16M") 5337 .unwrap() 5338 .trim() 5339 .parse::<u32>() 5340 .unwrap_or_default(), 5341 1 5342 ); 5343 5344 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5345 5346 thread::sleep(std::time::Duration::new(20, 0)); 5347 5348 // Check device has gone away 5349 assert_eq!( 5350 guest 5351 .ssh_command("lsblk | grep -c vdc.*16M || true") 5352 .unwrap() 5353 .trim() 5354 .parse::<u32>() 5355 .unwrap_or(1), 5356 0 5357 ); 5358 5359 guest.reboot_linux(1, None); 5360 5361 // Check device still absent 5362 assert_eq!( 5363 guest 5364 .ssh_command("lsblk | grep -c vdc.*16M || true") 5365 .unwrap() 5366 .trim() 5367 .parse::<u32>() 5368 .unwrap_or(1), 5369 0 5370 ); 5371 }); 5372 5373 kill_child(&mut child); 5374 let output = child.wait_with_output().unwrap(); 5375 5376 handle_child_output(r, &output); 5377 } 5378 5379 #[test] 5380 fn test_disk_hotplug() { 5381 _test_disk_hotplug(false) 5382 } 5383 5384 #[test] 5385 #[cfg(target_arch = "x86_64")] 5386 fn test_disk_hotplug_with_landlock() { 5387 _test_disk_hotplug(true) 5388 } 5389 5390 fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String { 5391 const LOOP_CONFIGURE: u64 = 0x4c0a; 5392 const LOOP_CTL_GET_FREE: u64 = 0x4c82; 5393 const LOOP_CTL_PATH: &str = "/dev/loop-control"; 5394 const LOOP_DEVICE_PREFIX: &str = "/dev/loop"; 5395 5396 #[repr(C)] 5397 struct LoopInfo64 { 5398 lo_device: u64, 5399 lo_inode: u64, 5400 lo_rdevice: u64, 5401 lo_offset: u64, 5402 lo_sizelimit: u64, 5403 lo_number: u32, 5404 lo_encrypt_type: u32, 5405 lo_encrypt_key_size: u32, 5406 lo_flags: u32, 5407 lo_file_name: [u8; 64], 5408 lo_crypt_name: [u8; 64], 5409 lo_encrypt_key: [u8; 32], 5410 lo_init: [u64; 2], 5411 } 5412 5413 impl Default for LoopInfo64 { 5414 fn default() -> Self { 5415 LoopInfo64 { 5416 lo_device: 0, 5417 lo_inode: 0, 5418 lo_rdevice: 0, 5419 lo_offset: 0, 5420 lo_sizelimit: 0, 5421 lo_number: 0, 5422 lo_encrypt_type: 0, 5423 lo_encrypt_key_size: 0, 5424 lo_flags: 0, 5425 lo_file_name: [0; 64], 5426 lo_crypt_name: [0; 64], 5427 lo_encrypt_key: [0; 32], 5428 lo_init: [0; 2], 5429 } 5430 } 5431 } 5432 5433 #[derive(Default)] 5434 #[repr(C)] 5435 struct LoopConfig { 5436 fd: u32, 5437 block_size: u32, 5438 info: LoopInfo64, 5439 _reserved: [u64; 8], 5440 } 5441 5442 // Open loop-control device 5443 let loop_ctl_file = OpenOptions::new() 5444 .read(true) 5445 .write(true) 5446 .open(LOOP_CTL_PATH) 5447 .unwrap(); 5448 5449 // Request a free loop device 5450 let loop_device_number = 5451 unsafe { libc::ioctl(loop_ctl_file.as_raw_fd(), LOOP_CTL_GET_FREE as _) }; 5452 5453 if loop_device_number < 0 { 5454 panic!("Couldn't find a free loop device"); 5455 } 5456 5457 // Create loop device path 5458 let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}"); 5459 5460 // Open loop device 5461 let loop_device_file = OpenOptions::new() 5462 .read(true) 5463 .write(true) 5464 .open(&loop_device_path) 5465 .unwrap(); 5466 5467 // Open backing file 5468 let backing_file = OpenOptions::new() 5469 .read(true) 5470 .write(true) 5471 .open(backing_file_path) 5472 .unwrap(); 5473 5474 let loop_config = LoopConfig { 5475 fd: backing_file.as_raw_fd() as u32, 5476 block_size, 5477 ..Default::default() 5478 }; 5479 5480 for i in 0..num_retries { 5481 let ret = unsafe { 5482 libc::ioctl( 5483 loop_device_file.as_raw_fd(), 5484 LOOP_CONFIGURE as _, 5485 &loop_config, 5486 ) 5487 }; 5488 if ret != 0 { 5489 if i < num_retries - 1 { 5490 println!( 5491 "Iteration {}: Failed to configure the loop device {}: {}", 5492 i, 5493 loop_device_path, 5494 std::io::Error::last_os_error() 5495 ); 5496 } else { 5497 panic!( 5498 "Failed {} times trying to configure the loop device {}: {}", 5499 num_retries, 5500 loop_device_path, 5501 std::io::Error::last_os_error() 5502 ); 5503 } 5504 } else { 5505 break; 5506 } 5507 5508 // Wait for a bit before retrying 5509 thread::sleep(std::time::Duration::new(5, 0)); 5510 } 5511 5512 loop_device_path 5513 } 5514 5515 #[test] 5516 fn test_virtio_block_topology() { 5517 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5518 let guest = Guest::new(Box::new(focal)); 5519 5520 let kernel_path = direct_kernel_boot_path(); 5521 let test_disk_path = guest.tmp_dir.as_path().join("test.img"); 5522 5523 let output = exec_host_command_output( 5524 format!( 5525 "qemu-img create -f raw {} 16M", 5526 test_disk_path.to_str().unwrap() 5527 ) 5528 .as_str(), 5529 ); 5530 if !output.status.success() { 5531 let stdout = String::from_utf8_lossy(&output.stdout); 5532 let stderr = String::from_utf8_lossy(&output.stderr); 5533 panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}"); 5534 } 5535 5536 let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5); 5537 5538 let mut child = GuestCommand::new(&guest) 5539 .args(["--cpus", "boot=1"]) 5540 .args(["--memory", "size=512M"]) 5541 .args(["--kernel", kernel_path.to_str().unwrap()]) 5542 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5543 .args([ 5544 "--disk", 5545 format!( 5546 "path={}", 5547 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5548 ) 5549 .as_str(), 5550 format!( 5551 "path={}", 5552 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5553 ) 5554 .as_str(), 5555 format!("path={}", &loop_dev).as_str(), 5556 ]) 5557 .default_net() 5558 .capture_output() 5559 .spawn() 5560 .unwrap(); 5561 5562 let r = std::panic::catch_unwind(|| { 5563 guest.wait_vm_boot(None).unwrap(); 5564 5565 // MIN-IO column 5566 assert_eq!( 5567 guest 5568 .ssh_command("lsblk -t| grep vdc | awk '{print $3}'") 5569 .unwrap() 5570 .trim() 5571 .parse::<u32>() 5572 .unwrap_or_default(), 5573 4096 5574 ); 5575 // PHY-SEC column 5576 assert_eq!( 5577 guest 5578 .ssh_command("lsblk -t| grep vdc | awk '{print $5}'") 5579 .unwrap() 5580 .trim() 5581 .parse::<u32>() 5582 .unwrap_or_default(), 5583 4096 5584 ); 5585 // LOG-SEC column 5586 assert_eq!( 5587 guest 5588 .ssh_command("lsblk -t| grep vdc | awk '{print $6}'") 5589 .unwrap() 5590 .trim() 5591 .parse::<u32>() 5592 .unwrap_or_default(), 5593 4096 5594 ); 5595 }); 5596 5597 kill_child(&mut child); 5598 let output = child.wait_with_output().unwrap(); 5599 5600 handle_child_output(r, &output); 5601 5602 Command::new("losetup") 5603 .args(["-d", &loop_dev]) 5604 .output() 5605 .expect("loop device not found"); 5606 } 5607 5608 #[test] 5609 fn test_virtio_balloon_deflate_on_oom() { 5610 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5611 let guest = Guest::new(Box::new(focal)); 5612 5613 let kernel_path = direct_kernel_boot_path(); 5614 5615 let api_socket = temp_api_path(&guest.tmp_dir); 5616 5617 //Let's start a 4G guest with balloon occupied 2G memory 5618 let mut child = GuestCommand::new(&guest) 5619 .args(["--api-socket", &api_socket]) 5620 .args(["--cpus", "boot=1"]) 5621 .args(["--memory", "size=4G"]) 5622 .args(["--kernel", kernel_path.to_str().unwrap()]) 5623 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5624 .args(["--balloon", "size=2G,deflate_on_oom=on"]) 5625 .default_disks() 5626 .default_net() 5627 .capture_output() 5628 .spawn() 5629 .unwrap(); 5630 5631 let r = std::panic::catch_unwind(|| { 5632 guest.wait_vm_boot(None).unwrap(); 5633 5634 // Wait for balloon memory's initialization and check its size. 5635 // The virtio-balloon driver might take a few seconds to report the 5636 // balloon effective size back to the VMM. 5637 thread::sleep(std::time::Duration::new(20, 0)); 5638 5639 let orig_balloon = balloon_size(&api_socket); 5640 println!("The original balloon memory size is {orig_balloon} bytes"); 5641 assert!(orig_balloon == 2147483648); 5642 5643 // Two steps to verify if the 'deflate_on_oom' parameter works. 5644 // 1st: run a command to trigger an OOM in the guest. 5645 guest 5646 .ssh_command("echo f | sudo tee /proc/sysrq-trigger") 5647 .unwrap(); 5648 5649 // Give some time for the OOM to happen in the guest and be reported 5650 // back to the host. 5651 thread::sleep(std::time::Duration::new(20, 0)); 5652 5653 // 2nd: check balloon_mem's value to verify balloon has been automatically deflated 5654 let deflated_balloon = balloon_size(&api_socket); 5655 println!("After deflating, balloon memory size is {deflated_balloon} bytes"); 5656 // Verify the balloon size deflated 5657 assert!(deflated_balloon < 2147483648); 5658 }); 5659 5660 kill_child(&mut child); 5661 let output = child.wait_with_output().unwrap(); 5662 5663 handle_child_output(r, &output); 5664 } 5665 5666 #[test] 5667 #[cfg(not(feature = "mshv"))] 5668 fn test_virtio_balloon_free_page_reporting() { 5669 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5670 let guest = Guest::new(Box::new(focal)); 5671 5672 //Let's start a 4G guest with balloon occupied 2G memory 5673 let mut child = GuestCommand::new(&guest) 5674 .args(["--cpus", "boot=1"]) 5675 .args(["--memory", "size=4G"]) 5676 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 5677 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5678 .args(["--balloon", "size=0,free_page_reporting=on"]) 5679 .default_disks() 5680 .default_net() 5681 .capture_output() 5682 .spawn() 5683 .unwrap(); 5684 5685 let pid = child.id(); 5686 let r = std::panic::catch_unwind(|| { 5687 guest.wait_vm_boot(None).unwrap(); 5688 5689 // Check the initial RSS is less than 1GiB 5690 let rss = process_rss_kib(pid); 5691 println!("RSS {rss} < 1048576"); 5692 assert!(rss < 1048576); 5693 5694 // Spawn a command inside the guest to consume 2GiB of RAM for 60 5695 // seconds 5696 let guest_ip = guest.network.guest_ip.clone(); 5697 thread::spawn(move || { 5698 ssh_command_ip( 5699 "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60", 5700 &guest_ip, 5701 DEFAULT_SSH_RETRIES, 5702 DEFAULT_SSH_TIMEOUT, 5703 ) 5704 .unwrap(); 5705 }); 5706 5707 // Wait for 50 seconds to make sure the stress command is consuming 5708 // the expected amount of memory. 5709 thread::sleep(std::time::Duration::new(50, 0)); 5710 let rss = process_rss_kib(pid); 5711 println!("RSS {rss} >= 2097152"); 5712 assert!(rss >= 2097152); 5713 5714 // Wait for an extra minute to make sure the stress command has 5715 // completed and that the guest reported the free pages to the VMM 5716 // through the virtio-balloon device. We expect the RSS to be under 5717 // 2GiB. 5718 thread::sleep(std::time::Duration::new(60, 0)); 5719 let rss = process_rss_kib(pid); 5720 println!("RSS {rss} < 2097152"); 5721 assert!(rss < 2097152); 5722 }); 5723 5724 kill_child(&mut child); 5725 let output = child.wait_with_output().unwrap(); 5726 5727 handle_child_output(r, &output); 5728 } 5729 5730 #[test] 5731 fn test_pmem_hotplug() { 5732 _test_pmem_hotplug(None) 5733 } 5734 5735 #[test] 5736 fn test_pmem_multi_segment_hotplug() { 5737 _test_pmem_hotplug(Some(15)) 5738 } 5739 5740 fn _test_pmem_hotplug(pci_segment: Option<u16>) { 5741 #[cfg(target_arch = "aarch64")] 5742 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 5743 #[cfg(target_arch = "x86_64")] 5744 let focal_image = FOCAL_IMAGE_NAME.to_string(); 5745 let focal = UbuntuDiskConfig::new(focal_image); 5746 let guest = Guest::new(Box::new(focal)); 5747 5748 #[cfg(target_arch = "x86_64")] 5749 let kernel_path = direct_kernel_boot_path(); 5750 #[cfg(target_arch = "aarch64")] 5751 let kernel_path = edk2_path(); 5752 5753 let api_socket = temp_api_path(&guest.tmp_dir); 5754 5755 let mut cmd = GuestCommand::new(&guest); 5756 5757 cmd.args(["--api-socket", &api_socket]) 5758 .args(["--cpus", "boot=1"]) 5759 .args(["--memory", "size=512M"]) 5760 .args(["--kernel", kernel_path.to_str().unwrap()]) 5761 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5762 .default_disks() 5763 .default_net() 5764 .capture_output(); 5765 5766 if pci_segment.is_some() { 5767 cmd.args([ 5768 "--platform", 5769 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5770 ]); 5771 } 5772 5773 let mut child = cmd.spawn().unwrap(); 5774 5775 let r = std::panic::catch_unwind(|| { 5776 guest.wait_vm_boot(None).unwrap(); 5777 5778 // Check /dev/pmem0 is not there 5779 assert_eq!( 5780 guest 5781 .ssh_command("lsblk | grep -c pmem0 || true") 5782 .unwrap() 5783 .trim() 5784 .parse::<u32>() 5785 .unwrap_or(1), 5786 0 5787 ); 5788 5789 let pmem_temp_file = TempFile::new().unwrap(); 5790 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 5791 let (cmd_success, cmd_output) = remote_command_w_output( 5792 &api_socket, 5793 "add-pmem", 5794 Some(&format!( 5795 "file={},id=test0{}", 5796 pmem_temp_file.as_path().to_str().unwrap(), 5797 if let Some(pci_segment) = pci_segment { 5798 format!(",pci_segment={pci_segment}") 5799 } else { 5800 "".to_owned() 5801 } 5802 )), 5803 ); 5804 assert!(cmd_success); 5805 if let Some(pci_segment) = pci_segment { 5806 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5807 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5808 ))); 5809 } else { 5810 assert!(String::from_utf8_lossy(&cmd_output) 5811 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5812 } 5813 5814 // Check that /dev/pmem0 exists and the block size is 128M 5815 assert_eq!( 5816 guest 5817 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5818 .unwrap() 5819 .trim() 5820 .parse::<u32>() 5821 .unwrap_or_default(), 5822 1 5823 ); 5824 5825 guest.reboot_linux(0, None); 5826 5827 // Check still there after reboot 5828 assert_eq!( 5829 guest 5830 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5831 .unwrap() 5832 .trim() 5833 .parse::<u32>() 5834 .unwrap_or_default(), 5835 1 5836 ); 5837 5838 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5839 5840 thread::sleep(std::time::Duration::new(20, 0)); 5841 5842 // Check device has gone away 5843 assert_eq!( 5844 guest 5845 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5846 .unwrap() 5847 .trim() 5848 .parse::<u32>() 5849 .unwrap_or(1), 5850 0 5851 ); 5852 5853 guest.reboot_linux(1, None); 5854 5855 // Check still absent after reboot 5856 assert_eq!( 5857 guest 5858 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5859 .unwrap() 5860 .trim() 5861 .parse::<u32>() 5862 .unwrap_or(1), 5863 0 5864 ); 5865 }); 5866 5867 kill_child(&mut child); 5868 let output = child.wait_with_output().unwrap(); 5869 5870 handle_child_output(r, &output); 5871 } 5872 5873 #[test] 5874 fn test_net_hotplug() { 5875 _test_net_hotplug(None) 5876 } 5877 5878 #[test] 5879 fn test_net_multi_segment_hotplug() { 5880 _test_net_hotplug(Some(15)) 5881 } 5882 5883 fn _test_net_hotplug(pci_segment: Option<u16>) { 5884 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5885 let guest = Guest::new(Box::new(focal)); 5886 5887 #[cfg(target_arch = "x86_64")] 5888 let kernel_path = direct_kernel_boot_path(); 5889 #[cfg(target_arch = "aarch64")] 5890 let kernel_path = edk2_path(); 5891 5892 let api_socket = temp_api_path(&guest.tmp_dir); 5893 5894 // Boot without network 5895 let mut cmd = GuestCommand::new(&guest); 5896 5897 cmd.args(["--api-socket", &api_socket]) 5898 .args(["--cpus", "boot=1"]) 5899 .args(["--memory", "size=512M"]) 5900 .args(["--kernel", kernel_path.to_str().unwrap()]) 5901 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5902 .default_disks() 5903 .capture_output(); 5904 5905 if pci_segment.is_some() { 5906 cmd.args([ 5907 "--platform", 5908 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5909 ]); 5910 } 5911 5912 let mut child = cmd.spawn().unwrap(); 5913 5914 thread::sleep(std::time::Duration::new(20, 0)); 5915 5916 let r = std::panic::catch_unwind(|| { 5917 // Add network 5918 let (cmd_success, cmd_output) = remote_command_w_output( 5919 &api_socket, 5920 "add-net", 5921 Some( 5922 format!( 5923 "{}{},id=test0", 5924 guest.default_net_string(), 5925 if let Some(pci_segment) = pci_segment { 5926 format!(",pci_segment={pci_segment}") 5927 } else { 5928 "".to_owned() 5929 } 5930 ) 5931 .as_str(), 5932 ), 5933 ); 5934 assert!(cmd_success); 5935 5936 if let Some(pci_segment) = pci_segment { 5937 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5938 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5939 ))); 5940 } else { 5941 assert!(String::from_utf8_lossy(&cmd_output) 5942 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}")); 5943 } 5944 5945 thread::sleep(std::time::Duration::new(5, 0)); 5946 5947 // 1 network interfaces + default localhost ==> 2 interfaces 5948 assert_eq!( 5949 guest 5950 .ssh_command("ip -o link | wc -l") 5951 .unwrap() 5952 .trim() 5953 .parse::<u32>() 5954 .unwrap_or_default(), 5955 2 5956 ); 5957 5958 // Remove network 5959 assert!(remote_command(&api_socket, "remove-device", Some("test0"),)); 5960 thread::sleep(std::time::Duration::new(5, 0)); 5961 5962 let (cmd_success, cmd_output) = remote_command_w_output( 5963 &api_socket, 5964 "add-net", 5965 Some( 5966 format!( 5967 "{}{},id=test1", 5968 guest.default_net_string(), 5969 if let Some(pci_segment) = pci_segment { 5970 format!(",pci_segment={pci_segment}") 5971 } else { 5972 "".to_owned() 5973 } 5974 ) 5975 .as_str(), 5976 ), 5977 ); 5978 assert!(cmd_success); 5979 5980 if let Some(pci_segment) = pci_segment { 5981 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5982 "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5983 ))); 5984 } else { 5985 assert!(String::from_utf8_lossy(&cmd_output) 5986 .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}")); 5987 } 5988 5989 thread::sleep(std::time::Duration::new(5, 0)); 5990 5991 // 1 network interfaces + default localhost ==> 2 interfaces 5992 assert_eq!( 5993 guest 5994 .ssh_command("ip -o link | wc -l") 5995 .unwrap() 5996 .trim() 5997 .parse::<u32>() 5998 .unwrap_or_default(), 5999 2 6000 ); 6001 6002 guest.reboot_linux(0, None); 6003 6004 // Check still there after reboot 6005 // 1 network interfaces + default localhost ==> 2 interfaces 6006 assert_eq!( 6007 guest 6008 .ssh_command("ip -o link | wc -l") 6009 .unwrap() 6010 .trim() 6011 .parse::<u32>() 6012 .unwrap_or_default(), 6013 2 6014 ); 6015 }); 6016 6017 kill_child(&mut child); 6018 let output = child.wait_with_output().unwrap(); 6019 6020 handle_child_output(r, &output); 6021 } 6022 6023 #[test] 6024 fn test_initramfs() { 6025 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6026 let guest = Guest::new(Box::new(focal)); 6027 let mut workload_path = dirs::home_dir().unwrap(); 6028 workload_path.push("workloads"); 6029 6030 #[cfg(target_arch = "x86_64")] 6031 let mut kernels = vec![direct_kernel_boot_path()]; 6032 #[cfg(target_arch = "aarch64")] 6033 let kernels = [direct_kernel_boot_path()]; 6034 6035 #[cfg(target_arch = "x86_64")] 6036 { 6037 let mut pvh_kernel_path = workload_path.clone(); 6038 pvh_kernel_path.push("vmlinux"); 6039 kernels.push(pvh_kernel_path); 6040 } 6041 6042 let mut initramfs_path = workload_path; 6043 initramfs_path.push("alpine_initramfs.img"); 6044 6045 let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg"); 6046 let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}"); 6047 6048 kernels.iter().for_each(|k_path| { 6049 let mut child = GuestCommand::new(&guest) 6050 .args(["--kernel", k_path.to_str().unwrap()]) 6051 .args(["--initramfs", initramfs_path.to_str().unwrap()]) 6052 .args(["--cmdline", &cmdline]) 6053 .capture_output() 6054 .spawn() 6055 .unwrap(); 6056 6057 thread::sleep(std::time::Duration::new(20, 0)); 6058 6059 kill_child(&mut child); 6060 let output = child.wait_with_output().unwrap(); 6061 6062 let r = std::panic::catch_unwind(|| { 6063 let s = String::from_utf8_lossy(&output.stdout); 6064 6065 assert_ne!(s.lines().position(|line| line == test_string), None); 6066 }); 6067 6068 handle_child_output(r, &output); 6069 }); 6070 } 6071 6072 #[test] 6073 fn test_counters() { 6074 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6075 let guest = Guest::new(Box::new(focal)); 6076 let api_socket = temp_api_path(&guest.tmp_dir); 6077 6078 let mut cmd = GuestCommand::new(&guest); 6079 cmd.args(["--cpus", "boot=1"]) 6080 .args(["--memory", "size=512M"]) 6081 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 6082 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6083 .default_disks() 6084 .args(["--net", guest.default_net_string().as_str()]) 6085 .args(["--api-socket", &api_socket]) 6086 .capture_output(); 6087 6088 let mut child = cmd.spawn().unwrap(); 6089 6090 let r = std::panic::catch_unwind(|| { 6091 guest.wait_vm_boot(None).unwrap(); 6092 6093 let orig_counters = get_counters(&api_socket); 6094 guest 6095 .ssh_command("dd if=/dev/zero of=test count=8 bs=1M") 6096 .unwrap(); 6097 6098 let new_counters = get_counters(&api_socket); 6099 6100 // Check that all the counters have increased 6101 assert!(new_counters > orig_counters); 6102 }); 6103 6104 kill_child(&mut child); 6105 let output = child.wait_with_output().unwrap(); 6106 6107 handle_child_output(r, &output); 6108 } 6109 6110 #[test] 6111 #[cfg(feature = "guest_debug")] 6112 fn test_coredump() { 6113 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6114 let guest = Guest::new(Box::new(focal)); 6115 let api_socket = temp_api_path(&guest.tmp_dir); 6116 6117 let mut cmd = GuestCommand::new(&guest); 6118 cmd.args(["--cpus", "boot=4"]) 6119 .args(["--memory", "size=4G"]) 6120 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6121 .default_disks() 6122 .args(["--net", guest.default_net_string().as_str()]) 6123 .args(["--api-socket", &api_socket]) 6124 .capture_output(); 6125 6126 let mut child = cmd.spawn().unwrap(); 6127 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6128 6129 let r = std::panic::catch_unwind(|| { 6130 guest.wait_vm_boot(None).unwrap(); 6131 6132 assert!(remote_command(&api_socket, "pause", None)); 6133 6134 assert!(remote_command( 6135 &api_socket, 6136 "coredump", 6137 Some(format!("file://{vmcore_file}").as_str()), 6138 )); 6139 6140 // the num of CORE notes should equals to vcpu 6141 let readelf_core_num_cmd = 6142 format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l"); 6143 let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd); 6144 assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4"); 6145 6146 // the num of QEMU notes should equals to vcpu 6147 let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l"); 6148 let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd); 6149 assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4"); 6150 }); 6151 6152 kill_child(&mut child); 6153 let output = child.wait_with_output().unwrap(); 6154 6155 handle_child_output(r, &output); 6156 } 6157 6158 #[test] 6159 #[cfg(feature = "guest_debug")] 6160 fn test_coredump_no_pause() { 6161 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6162 let guest = Guest::new(Box::new(focal)); 6163 let api_socket = temp_api_path(&guest.tmp_dir); 6164 6165 let mut cmd = GuestCommand::new(&guest); 6166 cmd.args(["--cpus", "boot=4"]) 6167 .args(["--memory", "size=4G"]) 6168 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6169 .default_disks() 6170 .args(["--net", guest.default_net_string().as_str()]) 6171 .args(["--api-socket", &api_socket]) 6172 .capture_output(); 6173 6174 let mut child = cmd.spawn().unwrap(); 6175 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir); 6176 6177 let r = std::panic::catch_unwind(|| { 6178 guest.wait_vm_boot(None).unwrap(); 6179 6180 assert!(remote_command( 6181 &api_socket, 6182 "coredump", 6183 Some(format!("file://{vmcore_file}").as_str()), 6184 )); 6185 6186 assert_eq!(vm_state(&api_socket), "Running"); 6187 }); 6188 6189 kill_child(&mut child); 6190 let output = child.wait_with_output().unwrap(); 6191 6192 handle_child_output(r, &output); 6193 } 6194 6195 #[test] 6196 fn test_watchdog() { 6197 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6198 let guest = Guest::new(Box::new(focal)); 6199 let api_socket = temp_api_path(&guest.tmp_dir); 6200 6201 let kernel_path = direct_kernel_boot_path(); 6202 let event_path = temp_event_monitor_path(&guest.tmp_dir); 6203 6204 let mut cmd = GuestCommand::new(&guest); 6205 cmd.args(["--cpus", "boot=1"]) 6206 .args(["--memory", "size=512M"]) 6207 .args(["--kernel", kernel_path.to_str().unwrap()]) 6208 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6209 .default_disks() 6210 .args(["--net", guest.default_net_string().as_str()]) 6211 .args(["--watchdog"]) 6212 .args(["--api-socket", &api_socket]) 6213 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6214 .capture_output(); 6215 6216 let mut child = cmd.spawn().unwrap(); 6217 6218 let r = std::panic::catch_unwind(|| { 6219 guest.wait_vm_boot(None).unwrap(); 6220 6221 let mut expected_reboot_count = 1; 6222 6223 // Enable the watchdog with a 15s timeout 6224 enable_guest_watchdog(&guest, 15); 6225 6226 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6227 assert_eq!( 6228 guest 6229 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 6230 .unwrap() 6231 .trim() 6232 .parse::<u32>() 6233 .unwrap_or_default(), 6234 1 6235 ); 6236 6237 // Allow some normal time to elapse to check we don't get spurious reboots 6238 thread::sleep(std::time::Duration::new(40, 0)); 6239 // Check no reboot 6240 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6241 6242 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 6243 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 6244 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 6245 guest.wait_vm_boot(Some(50)).unwrap(); 6246 // Check a reboot is triggered by the watchdog 6247 expected_reboot_count += 1; 6248 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6249 6250 #[cfg(target_arch = "x86_64")] 6251 { 6252 // Now pause the VM and remain offline for 30s 6253 assert!(remote_command(&api_socket, "pause", None)); 6254 let latest_events = [ 6255 &MetaEvent { 6256 event: "pausing".to_string(), 6257 device_id: None, 6258 }, 6259 &MetaEvent { 6260 event: "paused".to_string(), 6261 device_id: None, 6262 }, 6263 ]; 6264 assert!(check_latest_events_exact(&latest_events, &event_path)); 6265 assert!(remote_command(&api_socket, "resume", None)); 6266 6267 // Check no reboot 6268 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6269 } 6270 }); 6271 6272 kill_child(&mut child); 6273 let output = child.wait_with_output().unwrap(); 6274 6275 handle_child_output(r, &output); 6276 } 6277 6278 #[test] 6279 fn test_pvpanic() { 6280 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 6281 let guest = Guest::new(Box::new(jammy)); 6282 let api_socket = temp_api_path(&guest.tmp_dir); 6283 let event_path = temp_event_monitor_path(&guest.tmp_dir); 6284 6285 let kernel_path = direct_kernel_boot_path(); 6286 6287 let mut cmd = GuestCommand::new(&guest); 6288 cmd.args(["--cpus", "boot=1"]) 6289 .args(["--memory", "size=512M"]) 6290 .args(["--kernel", kernel_path.to_str().unwrap()]) 6291 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6292 .default_disks() 6293 .args(["--net", guest.default_net_string().as_str()]) 6294 .args(["--pvpanic"]) 6295 .args(["--api-socket", &api_socket]) 6296 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6297 .capture_output(); 6298 6299 let mut child = cmd.spawn().unwrap(); 6300 6301 let r = std::panic::catch_unwind(|| { 6302 guest.wait_vm_boot(None).unwrap(); 6303 6304 // Trigger guest a panic 6305 make_guest_panic(&guest); 6306 6307 // Wait a while for guest 6308 thread::sleep(std::time::Duration::new(10, 0)); 6309 6310 let expected_sequential_events = [&MetaEvent { 6311 event: "panic".to_string(), 6312 device_id: None, 6313 }]; 6314 assert!(check_latest_events_exact( 6315 &expected_sequential_events, 6316 &event_path 6317 )); 6318 }); 6319 6320 kill_child(&mut child); 6321 let output = child.wait_with_output().unwrap(); 6322 6323 handle_child_output(r, &output); 6324 } 6325 6326 #[test] 6327 fn test_tap_from_fd() { 6328 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6329 let guest = Guest::new(Box::new(focal)); 6330 let kernel_path = direct_kernel_boot_path(); 6331 6332 // Create a TAP interface with multi-queue enabled 6333 let num_queue_pairs: usize = 2; 6334 6335 use std::str::FromStr; 6336 let taps = net_util::open_tap( 6337 Some("chtap0"), 6338 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 6339 None, 6340 &mut None, 6341 None, 6342 num_queue_pairs, 6343 Some(libc::O_RDWR | libc::O_NONBLOCK), 6344 ) 6345 .unwrap(); 6346 6347 let mut child = GuestCommand::new(&guest) 6348 .args(["--cpus", &format!("boot={num_queue_pairs}")]) 6349 .args(["--memory", "size=512M"]) 6350 .args(["--kernel", kernel_path.to_str().unwrap()]) 6351 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6352 .default_disks() 6353 .args([ 6354 "--net", 6355 &format!( 6356 "fd=[{},{}],mac={},num_queues={}", 6357 taps[0].as_raw_fd(), 6358 taps[1].as_raw_fd(), 6359 guest.network.guest_mac, 6360 num_queue_pairs * 2 6361 ), 6362 ]) 6363 .capture_output() 6364 .spawn() 6365 .unwrap(); 6366 6367 let r = std::panic::catch_unwind(|| { 6368 guest.wait_vm_boot(None).unwrap(); 6369 6370 assert_eq!( 6371 guest 6372 .ssh_command("ip -o link | wc -l") 6373 .unwrap() 6374 .trim() 6375 .parse::<u32>() 6376 .unwrap_or_default(), 6377 2 6378 ); 6379 6380 guest.reboot_linux(0, None); 6381 6382 assert_eq!( 6383 guest 6384 .ssh_command("ip -o link | wc -l") 6385 .unwrap() 6386 .trim() 6387 .parse::<u32>() 6388 .unwrap_or_default(), 6389 2 6390 ); 6391 }); 6392 6393 kill_child(&mut child); 6394 let output = child.wait_with_output().unwrap(); 6395 6396 handle_child_output(r, &output); 6397 } 6398 6399 // By design, a guest VM won't be able to connect to the host 6400 // machine when using a macvtap network interface (while it can 6401 // communicate externally). As a workaround, this integration 6402 // test creates two macvtap interfaces in 'bridge' mode on the 6403 // same physical net interface, one for the guest and one for 6404 // the host. With additional setup on the IP address and the 6405 // routing table, it enables the communications between the 6406 // guest VM and the host machine. 6407 // Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail 6408 fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) { 6409 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6410 let guest = Guest::new(Box::new(focal)); 6411 let api_socket = temp_api_path(&guest.tmp_dir); 6412 6413 #[cfg(target_arch = "x86_64")] 6414 let kernel_path = direct_kernel_boot_path(); 6415 #[cfg(target_arch = "aarch64")] 6416 let kernel_path = edk2_path(); 6417 6418 let phy_net = "eth0"; 6419 6420 // Create a macvtap interface for the guest VM to use 6421 assert!(exec_host_command_status(&format!( 6422 "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mod bridge" 6423 )) 6424 .success()); 6425 assert!(exec_host_command_status(&format!( 6426 "sudo ip link set {} address {} up", 6427 guest_macvtap_name, guest.network.guest_mac 6428 )) 6429 .success()); 6430 assert!( 6431 exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success() 6432 ); 6433 6434 let tap_index = 6435 fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap(); 6436 let tap_device = format!("/dev/tap{}", tap_index.trim()); 6437 6438 assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success()); 6439 6440 let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap(); 6441 let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6442 assert!(tap_fd1 > 0); 6443 let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6444 assert!(tap_fd2 > 0); 6445 6446 // Create a macvtap on the same physical net interface for 6447 // the host machine to use 6448 assert!(exec_host_command_status(&format!( 6449 "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mod bridge" 6450 )) 6451 .success()); 6452 // Use default mask "255.255.255.0" 6453 assert!(exec_host_command_status(&format!( 6454 "sudo ip address add {}/24 dev {}", 6455 guest.network.host_ip, host_macvtap_name 6456 )) 6457 .success()); 6458 assert!( 6459 exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up")) 6460 .success() 6461 ); 6462 6463 let mut guest_command = GuestCommand::new(&guest); 6464 guest_command 6465 .args(["--cpus", "boot=2"]) 6466 .args(["--memory", "size=512M"]) 6467 .args(["--kernel", kernel_path.to_str().unwrap()]) 6468 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6469 .default_disks() 6470 .args(["--api-socket", &api_socket]); 6471 6472 let net_params = format!( 6473 "fd=[{},{}],mac={},num_queues=4", 6474 tap_fd1, tap_fd2, guest.network.guest_mac 6475 ); 6476 6477 if !hotplug { 6478 guest_command.args(["--net", &net_params]); 6479 } 6480 6481 let mut child = guest_command.capture_output().spawn().unwrap(); 6482 6483 if hotplug { 6484 // Give some time to the VMM process to listen to the API 6485 // socket. This is the only requirement to avoid the following 6486 // call to ch-remote from failing. 6487 thread::sleep(std::time::Duration::new(10, 0)); 6488 // Hotplug the virtio-net device 6489 let (cmd_success, cmd_output) = 6490 remote_command_w_output(&api_socket, "add-net", Some(&net_params)); 6491 assert!(cmd_success); 6492 #[cfg(target_arch = "x86_64")] 6493 assert!(String::from_utf8_lossy(&cmd_output) 6494 .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}")); 6495 #[cfg(target_arch = "aarch64")] 6496 assert!(String::from_utf8_lossy(&cmd_output) 6497 .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}")); 6498 } 6499 6500 // The functional connectivity provided by the virtio-net device 6501 // gets tested through wait_vm_boot() as it expects to receive a 6502 // HTTP request, and through the SSH command as well. 6503 let r = std::panic::catch_unwind(|| { 6504 guest.wait_vm_boot(None).unwrap(); 6505 6506 assert_eq!( 6507 guest 6508 .ssh_command("ip -o link | wc -l") 6509 .unwrap() 6510 .trim() 6511 .parse::<u32>() 6512 .unwrap_or_default(), 6513 2 6514 ); 6515 6516 guest.reboot_linux(0, None); 6517 6518 assert_eq!( 6519 guest 6520 .ssh_command("ip -o link | wc -l") 6521 .unwrap() 6522 .trim() 6523 .parse::<u32>() 6524 .unwrap_or_default(), 6525 2 6526 ); 6527 }); 6528 6529 kill_child(&mut child); 6530 6531 exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}")); 6532 exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}")); 6533 6534 let output = child.wait_with_output().unwrap(); 6535 6536 handle_child_output(r, &output); 6537 } 6538 6539 #[test] 6540 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6541 fn test_macvtap() { 6542 _test_macvtap(false, "guestmacvtap0", "hostmacvtap0") 6543 } 6544 6545 #[test] 6546 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6547 fn test_macvtap_hotplug() { 6548 _test_macvtap(true, "guestmacvtap1", "hostmacvtap1") 6549 } 6550 6551 #[test] 6552 #[cfg(not(feature = "mshv"))] 6553 fn test_ovs_dpdk() { 6554 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6555 let guest1 = Guest::new(Box::new(focal1)); 6556 6557 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6558 let guest2 = Guest::new(Box::new(focal2)); 6559 let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir)); 6560 6561 let (mut child1, mut child2) = 6562 setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false); 6563 6564 // Create the snapshot directory 6565 let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir); 6566 6567 let r = std::panic::catch_unwind(|| { 6568 // Remove one of the two ports from the OVS bridge 6569 assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success()); 6570 6571 // Spawn a new netcat listener in the first VM 6572 let guest_ip = guest1.network.guest_ip.clone(); 6573 thread::spawn(move || { 6574 ssh_command_ip( 6575 "nc -l 12345", 6576 &guest_ip, 6577 DEFAULT_SSH_RETRIES, 6578 DEFAULT_SSH_TIMEOUT, 6579 ) 6580 .unwrap(); 6581 }); 6582 6583 // Wait for the server to be listening 6584 thread::sleep(std::time::Duration::new(5, 0)); 6585 6586 // Check the connection fails this time 6587 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap_err(); 6588 6589 // Add the OVS port back 6590 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 6591 6592 // And finally check the connection is functional again 6593 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6594 6595 // Pause the VM 6596 assert!(remote_command(&api_socket_source, "pause", None)); 6597 6598 // Take a snapshot from the VM 6599 assert!(remote_command( 6600 &api_socket_source, 6601 "snapshot", 6602 Some(format!("file://{snapshot_dir}").as_str()), 6603 )); 6604 6605 // Wait to make sure the snapshot is completed 6606 thread::sleep(std::time::Duration::new(10, 0)); 6607 }); 6608 6609 // Shutdown the source VM 6610 kill_child(&mut child2); 6611 let output = child2.wait_with_output().unwrap(); 6612 handle_child_output(r, &output); 6613 6614 // Remove the vhost-user socket file. 6615 Command::new("rm") 6616 .arg("-f") 6617 .arg("/tmp/dpdkvhostclient2") 6618 .output() 6619 .unwrap(); 6620 6621 let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir)); 6622 // Restore the VM from the snapshot 6623 let mut child2 = GuestCommand::new(&guest2) 6624 .args(["--api-socket", &api_socket_restored]) 6625 .args([ 6626 "--restore", 6627 format!("source_url=file://{snapshot_dir}").as_str(), 6628 ]) 6629 .capture_output() 6630 .spawn() 6631 .unwrap(); 6632 6633 // Wait for the VM to be restored 6634 thread::sleep(std::time::Duration::new(10, 0)); 6635 6636 let r = std::panic::catch_unwind(|| { 6637 // Resume the VM 6638 assert!(remote_command(&api_socket_restored, "resume", None)); 6639 6640 // Spawn a new netcat listener in the first VM 6641 let guest_ip = guest1.network.guest_ip.clone(); 6642 thread::spawn(move || { 6643 ssh_command_ip( 6644 "nc -l 12345", 6645 &guest_ip, 6646 DEFAULT_SSH_RETRIES, 6647 DEFAULT_SSH_TIMEOUT, 6648 ) 6649 .unwrap(); 6650 }); 6651 6652 // Wait for the server to be listening 6653 thread::sleep(std::time::Duration::new(5, 0)); 6654 6655 // And check the connection is still functional after restore 6656 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6657 }); 6658 6659 kill_child(&mut child1); 6660 kill_child(&mut child2); 6661 6662 let output = child1.wait_with_output().unwrap(); 6663 child2.wait().unwrap(); 6664 6665 cleanup_ovs_dpdk(); 6666 6667 handle_child_output(r, &output); 6668 } 6669 6670 fn setup_spdk_nvme(nvme_dir: &std::path::Path) -> Child { 6671 cleanup_spdk_nvme(); 6672 6673 assert!(exec_host_command_status(&format!( 6674 "mkdir -p {}", 6675 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6676 )) 6677 .success()); 6678 assert!(exec_host_command_status(&format!( 6679 "truncate {} -s 128M", 6680 nvme_dir.join("test-disk.raw").to_str().unwrap() 6681 )) 6682 .success()); 6683 assert!(exec_host_command_status(&format!( 6684 "mkfs.ext4 {}", 6685 nvme_dir.join("test-disk.raw").to_str().unwrap() 6686 )) 6687 .success()); 6688 6689 // Start the SPDK nvmf_tgt daemon to present NVMe device as a VFIO user device 6690 let child = Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt") 6691 .args(["-i", "0", "-m", "0x1"]) 6692 .spawn() 6693 .unwrap(); 6694 thread::sleep(std::time::Duration::new(2, 0)); 6695 6696 assert!(exec_host_command_with_retries( 6697 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER", 6698 3, 6699 std::time::Duration::new(5, 0), 6700 )); 6701 assert!(exec_host_command_status(&format!( 6702 "/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512", 6703 nvme_dir.join("test-disk.raw").to_str().unwrap() 6704 )) 6705 .success()); 6706 assert!(exec_host_command_status( 6707 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test" 6708 ) 6709 .success()); 6710 assert!(exec_host_command_status( 6711 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode test" 6712 ) 6713 .success()); 6714 assert!(exec_host_command_status(&format!( 6715 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0", 6716 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6717 )) 6718 .success()); 6719 6720 child 6721 } 6722 6723 fn cleanup_spdk_nvme() { 6724 exec_host_command_status("pkill -f nvmf_tgt"); 6725 } 6726 6727 #[test] 6728 fn test_vfio_user() { 6729 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 6730 let jammy = UbuntuDiskConfig::new(jammy_image); 6731 let guest = Guest::new(Box::new(jammy)); 6732 6733 let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user"); 6734 let mut spdk_child = setup_spdk_nvme(spdk_nvme_dir.as_path()); 6735 6736 let api_socket = temp_api_path(&guest.tmp_dir); 6737 let mut child = GuestCommand::new(&guest) 6738 .args(["--api-socket", &api_socket]) 6739 .args(["--cpus", "boot=1"]) 6740 .args(["--memory", "size=1G,shared=on,hugepages=on"]) 6741 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6742 .args(["--serial", "tty", "--console", "off"]) 6743 .default_disks() 6744 .default_net() 6745 .capture_output() 6746 .spawn() 6747 .unwrap(); 6748 6749 let r = std::panic::catch_unwind(|| { 6750 guest.wait_vm_boot(None).unwrap(); 6751 6752 // Hotplug the SPDK-NVMe device to the VM 6753 let (cmd_success, cmd_output) = remote_command_w_output( 6754 &api_socket, 6755 "add-user-device", 6756 Some(&format!( 6757 "socket={},id=vfio_user0", 6758 spdk_nvme_dir 6759 .as_path() 6760 .join("nvme-vfio-user/cntrl") 6761 .to_str() 6762 .unwrap(), 6763 )), 6764 ); 6765 assert!(cmd_success); 6766 assert!(String::from_utf8_lossy(&cmd_output) 6767 .contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}")); 6768 6769 thread::sleep(std::time::Duration::new(10, 0)); 6770 6771 // Check both if /dev/nvme exists and if the block size is 128M. 6772 assert_eq!( 6773 guest 6774 .ssh_command("lsblk | grep nvme0n1 | grep -c 128M") 6775 .unwrap() 6776 .trim() 6777 .parse::<u32>() 6778 .unwrap_or_default(), 6779 1 6780 ); 6781 6782 // Check changes persist after reboot 6783 assert_eq!( 6784 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6785 "" 6786 ); 6787 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 6788 guest 6789 .ssh_command("echo test123 | sudo tee /mnt/test") 6790 .unwrap(); 6791 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 6792 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 6793 6794 guest.reboot_linux(0, None); 6795 assert_eq!( 6796 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6797 "" 6798 ); 6799 assert_eq!( 6800 guest.ssh_command("sudo cat /mnt/test").unwrap().trim(), 6801 "test123" 6802 ); 6803 }); 6804 6805 let _ = spdk_child.kill(); 6806 let _ = spdk_child.wait(); 6807 6808 kill_child(&mut child); 6809 let output = child.wait_with_output().unwrap(); 6810 6811 handle_child_output(r, &output); 6812 } 6813 6814 #[test] 6815 #[cfg(target_arch = "x86_64")] 6816 fn test_vdpa_block() { 6817 // Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded. 6818 assert!(exec_host_command_status("lsmod | grep vdpa_sim_blk").success()); 6819 6820 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6821 let guest = Guest::new(Box::new(focal)); 6822 let api_socket = temp_api_path(&guest.tmp_dir); 6823 6824 let kernel_path = direct_kernel_boot_path(); 6825 6826 let mut child = GuestCommand::new(&guest) 6827 .args(["--cpus", "boot=2"]) 6828 .args(["--memory", "size=512M,hugepages=on"]) 6829 .args(["--kernel", kernel_path.to_str().unwrap()]) 6830 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6831 .default_disks() 6832 .default_net() 6833 .args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"]) 6834 .args(["--platform", "num_pci_segments=2,iommu_segments=1"]) 6835 .args(["--api-socket", &api_socket]) 6836 .capture_output() 6837 .spawn() 6838 .unwrap(); 6839 6840 let r = std::panic::catch_unwind(|| { 6841 guest.wait_vm_boot(None).unwrap(); 6842 6843 // Check both if /dev/vdc exists and if the block size is 128M. 6844 assert_eq!( 6845 guest 6846 .ssh_command("lsblk | grep vdc | grep -c 128M") 6847 .unwrap() 6848 .trim() 6849 .parse::<u32>() 6850 .unwrap_or_default(), 6851 1 6852 ); 6853 6854 // Check the content of the block device after we wrote to it. 6855 // The vpda-sim-blk should let us read what we previously wrote. 6856 guest 6857 .ssh_command("sudo bash -c 'echo foobar > /dev/vdc'") 6858 .unwrap(); 6859 assert_eq!( 6860 guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(), 6861 "foobar" 6862 ); 6863 6864 // Hotplug an extra vDPA block device behind the vIOMMU 6865 // Add a new vDPA device to the VM 6866 let (cmd_success, cmd_output) = remote_command_w_output( 6867 &api_socket, 6868 "add-vdpa", 6869 Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"), 6870 ); 6871 assert!(cmd_success); 6872 assert!(String::from_utf8_lossy(&cmd_output) 6873 .contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}")); 6874 6875 thread::sleep(std::time::Duration::new(10, 0)); 6876 6877 // Check IOMMU setup 6878 assert!(guest 6879 .does_device_vendor_pair_match("0x1057", "0x1af4") 6880 .unwrap_or_default()); 6881 assert_eq!( 6882 guest 6883 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 6884 .unwrap() 6885 .trim(), 6886 "0001:00:01.0" 6887 ); 6888 6889 // Check both if /dev/vdd exists and if the block size is 128M. 6890 assert_eq!( 6891 guest 6892 .ssh_command("lsblk | grep vdd | grep -c 128M") 6893 .unwrap() 6894 .trim() 6895 .parse::<u32>() 6896 .unwrap_or_default(), 6897 1 6898 ); 6899 6900 // Write some content to the block device we've just plugged. 6901 guest 6902 .ssh_command("sudo bash -c 'echo foobar > /dev/vdd'") 6903 .unwrap(); 6904 6905 // Check we can read the content back. 6906 assert_eq!( 6907 guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(), 6908 "foobar" 6909 ); 6910 6911 // Unplug the device 6912 let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0")); 6913 assert!(cmd_success); 6914 thread::sleep(std::time::Duration::new(10, 0)); 6915 6916 // Check /dev/vdd doesn't exist anymore 6917 assert_eq!( 6918 guest 6919 .ssh_command("lsblk | grep -c vdd || true") 6920 .unwrap() 6921 .trim() 6922 .parse::<u32>() 6923 .unwrap_or(1), 6924 0 6925 ); 6926 }); 6927 6928 kill_child(&mut child); 6929 let output = child.wait_with_output().unwrap(); 6930 6931 handle_child_output(r, &output); 6932 } 6933 6934 #[test] 6935 #[cfg(target_arch = "x86_64")] 6936 #[ignore = "See #5756"] 6937 fn test_vdpa_net() { 6938 // Before trying to run the test, verify the vdpa_sim_net module is correctly loaded. 6939 if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() { 6940 return; 6941 } 6942 6943 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6944 let guest = Guest::new(Box::new(focal)); 6945 6946 let kernel_path = direct_kernel_boot_path(); 6947 6948 let mut child = GuestCommand::new(&guest) 6949 .args(["--cpus", "boot=2"]) 6950 .args(["--memory", "size=512M,hugepages=on"]) 6951 .args(["--kernel", kernel_path.to_str().unwrap()]) 6952 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6953 .default_disks() 6954 .default_net() 6955 .args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"]) 6956 .capture_output() 6957 .spawn() 6958 .unwrap(); 6959 6960 let r = std::panic::catch_unwind(|| { 6961 guest.wait_vm_boot(None).unwrap(); 6962 6963 // Check we can find network interface related to vDPA device 6964 assert_eq!( 6965 guest 6966 .ssh_command("ip -o link | grep -c ens6") 6967 .unwrap() 6968 .trim() 6969 .parse::<u32>() 6970 .unwrap_or(0), 6971 1 6972 ); 6973 6974 guest 6975 .ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6") 6976 .unwrap(); 6977 guest.ssh_command("sudo ip link set up dev ens6").unwrap(); 6978 6979 // Check there is no packet yet on both TX/RX of the network interface 6980 assert_eq!( 6981 guest 6982 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'") 6983 .unwrap() 6984 .trim() 6985 .parse::<u32>() 6986 .unwrap_or(0), 6987 2 6988 ); 6989 6990 // Send 6 packets with ping command 6991 guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap(); 6992 6993 // Check we can find 6 packets on both TX/RX of the network interface 6994 assert_eq!( 6995 guest 6996 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'") 6997 .unwrap() 6998 .trim() 6999 .parse::<u32>() 7000 .unwrap_or(0), 7001 2 7002 ); 7003 7004 // No need to check for hotplug as we already tested it through 7005 // test_vdpa_block() 7006 }); 7007 7008 kill_child(&mut child); 7009 let output = child.wait_with_output().unwrap(); 7010 7011 handle_child_output(r, &output); 7012 } 7013 7014 #[test] 7015 #[cfg(target_arch = "x86_64")] 7016 fn test_tpm() { 7017 let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 7018 let guest = Guest::new(Box::new(focal)); 7019 7020 let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir); 7021 7022 let mut guest_cmd = GuestCommand::new(&guest); 7023 guest_cmd 7024 .args(["--cpus", "boot=1"]) 7025 .args(["--memory", "size=1G"]) 7026 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 7027 .args(["--tpm", &format!("socket={swtpm_socket_path}")]) 7028 .capture_output() 7029 .default_disks() 7030 .default_net(); 7031 7032 // Start swtpm daemon 7033 let mut swtpm_child = swtpm_command.spawn().unwrap(); 7034 thread::sleep(std::time::Duration::new(10, 0)); 7035 let mut child = guest_cmd.spawn().unwrap(); 7036 let r = std::panic::catch_unwind(|| { 7037 guest.wait_vm_boot(None).unwrap(); 7038 assert_eq!( 7039 guest.ssh_command("ls /dev/tpm0").unwrap().trim(), 7040 "/dev/tpm0" 7041 ); 7042 guest.ssh_command("sudo tpm2_selftest -f").unwrap(); 7043 guest 7044 .ssh_command("echo 'hello' > /tmp/checksum_test; ") 7045 .unwrap(); 7046 guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap(); 7047 }); 7048 7049 let _ = swtpm_child.kill(); 7050 let _d_out = swtpm_child.wait_with_output().unwrap(); 7051 7052 kill_child(&mut child); 7053 let output = child.wait_with_output().unwrap(); 7054 7055 handle_child_output(r, &output); 7056 } 7057 7058 #[test] 7059 #[cfg(target_arch = "x86_64")] 7060 fn test_double_tty() { 7061 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7062 let guest = Guest::new(Box::new(focal)); 7063 let mut cmd = GuestCommand::new(&guest); 7064 let api_socket = temp_api_path(&guest.tmp_dir); 7065 let tty_str: &str = "console=hvc0 earlyprintk=ttyS0 "; 7066 // linux printk module enable console log. 7067 let con_dis_str: &str = "console [hvc0] enabled"; 7068 // linux printk module disable console log. 7069 let con_enb_str: &str = "bootconsole [earlyser0] disabled"; 7070 7071 let kernel_path = direct_kernel_boot_path(); 7072 7073 cmd.args(["--cpus", "boot=1"]) 7074 .args(["--memory", "size=512M"]) 7075 .args(["--kernel", kernel_path.to_str().unwrap()]) 7076 .args([ 7077 "--cmdline", 7078 DIRECT_KERNEL_BOOT_CMDLINE 7079 .replace("console=hvc0 ", tty_str) 7080 .as_str(), 7081 ]) 7082 .capture_output() 7083 .default_disks() 7084 .default_net() 7085 .args(["--serial", "tty"]) 7086 .args(["--console", "tty"]) 7087 .args(["--api-socket", &api_socket]); 7088 7089 let mut child = cmd.spawn().unwrap(); 7090 7091 let mut r = std::panic::catch_unwind(|| { 7092 guest.wait_vm_boot(None).unwrap(); 7093 }); 7094 7095 kill_child(&mut child); 7096 let output = child.wait_with_output().unwrap(); 7097 7098 if r.is_ok() { 7099 r = std::panic::catch_unwind(|| { 7100 let s = String::from_utf8_lossy(&output.stdout); 7101 assert!(s.contains(tty_str)); 7102 assert!(s.contains(con_dis_str)); 7103 assert!(s.contains(con_enb_str)); 7104 }); 7105 } 7106 7107 handle_child_output(r, &output); 7108 } 7109 7110 #[test] 7111 #[cfg(target_arch = "x86_64")] 7112 fn test_nmi() { 7113 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 7114 let guest = Guest::new(Box::new(jammy)); 7115 let api_socket = temp_api_path(&guest.tmp_dir); 7116 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7117 7118 let kernel_path = direct_kernel_boot_path(); 7119 let cmd_line = format!("{} {}", DIRECT_KERNEL_BOOT_CMDLINE, "unknown_nmi_panic=1"); 7120 7121 let mut cmd = GuestCommand::new(&guest); 7122 cmd.args(["--cpus", "boot=4"]) 7123 .args(["--memory", "size=512M"]) 7124 .args(["--kernel", kernel_path.to_str().unwrap()]) 7125 .args(["--cmdline", cmd_line.as_str()]) 7126 .default_disks() 7127 .args(["--net", guest.default_net_string().as_str()]) 7128 .args(["--pvpanic"]) 7129 .args(["--api-socket", &api_socket]) 7130 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7131 .capture_output(); 7132 7133 let mut child = cmd.spawn().unwrap(); 7134 7135 let r = std::panic::catch_unwind(|| { 7136 guest.wait_vm_boot(None).unwrap(); 7137 7138 assert!(remote_command(&api_socket, "nmi", None)); 7139 7140 // Wait a while for guest 7141 thread::sleep(std::time::Duration::new(3, 0)); 7142 7143 let expected_sequential_events = [&MetaEvent { 7144 event: "panic".to_string(), 7145 device_id: None, 7146 }]; 7147 assert!(check_latest_events_exact( 7148 &expected_sequential_events, 7149 &event_path 7150 )); 7151 }); 7152 7153 kill_child(&mut child); 7154 let output = child.wait_with_output().unwrap(); 7155 7156 handle_child_output(r, &output); 7157 } 7158 } 7159 7160 mod dbus_api { 7161 use crate::*; 7162 7163 // Start cloud-hypervisor with no VM parameters, running both the HTTP 7164 // and DBus APIs. Alternate calls to the external APIs (HTTP and DBus) 7165 // to create a VM, boot it, and verify that it can be shut down and then 7166 // booted again. 7167 #[test] 7168 fn test_api_dbus_and_http_interleaved() { 7169 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7170 let guest = Guest::new(Box::new(focal)); 7171 let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir); 7172 let http_api = TargetApi::new_http_api(&guest.tmp_dir); 7173 7174 let mut child = GuestCommand::new(&guest) 7175 .args(dbus_api.guest_args()) 7176 .args(http_api.guest_args()) 7177 .capture_output() 7178 .spawn() 7179 .unwrap(); 7180 7181 thread::sleep(std::time::Duration::new(1, 0)); 7182 7183 // Verify API servers are running 7184 assert!(dbus_api.remote_command("ping", None)); 7185 assert!(http_api.remote_command("ping", None)); 7186 7187 // Create the VM first 7188 let cpu_count: u8 = 4; 7189 let request_body = guest.api_create_body( 7190 cpu_count, 7191 direct_kernel_boot_path().to_str().unwrap(), 7192 DIRECT_KERNEL_BOOT_CMDLINE, 7193 ); 7194 7195 let temp_config_path = guest.tmp_dir.as_path().join("config"); 7196 std::fs::write(&temp_config_path, request_body).unwrap(); 7197 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 7198 7199 let r = std::panic::catch_unwind(|| { 7200 // Create the VM 7201 assert!(dbus_api.remote_command("create", Some(create_config),)); 7202 7203 // Then boot it 7204 assert!(http_api.remote_command("boot", None)); 7205 guest.wait_vm_boot(None).unwrap(); 7206 7207 // Check that the VM booted as expected 7208 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7209 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7210 7211 // Sync and shutdown without powering off to prevent filesystem 7212 // corruption. 7213 guest.ssh_command("sync").unwrap(); 7214 guest.ssh_command("sudo shutdown -H now").unwrap(); 7215 7216 // Wait for the guest to be fully shutdown 7217 thread::sleep(std::time::Duration::new(20, 0)); 7218 7219 // Then shutdown the VM 7220 assert!(dbus_api.remote_command("shutdown", None)); 7221 7222 // Then boot it again 7223 assert!(http_api.remote_command("boot", None)); 7224 guest.wait_vm_boot(None).unwrap(); 7225 7226 // Check that the VM booted as expected 7227 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7228 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7229 }); 7230 7231 kill_child(&mut child); 7232 let output = child.wait_with_output().unwrap(); 7233 7234 handle_child_output(r, &output); 7235 } 7236 7237 #[test] 7238 fn test_api_dbus_create_boot() { 7239 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7240 let guest = Guest::new(Box::new(focal)); 7241 7242 _test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7243 } 7244 7245 #[test] 7246 fn test_api_dbus_shutdown() { 7247 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7248 let guest = Guest::new(Box::new(focal)); 7249 7250 _test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7251 } 7252 7253 #[test] 7254 fn test_api_dbus_delete() { 7255 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7256 let guest = Guest::new(Box::new(focal)); 7257 7258 _test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest); 7259 } 7260 7261 #[test] 7262 fn test_api_dbus_pause_resume() { 7263 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7264 let guest = Guest::new(Box::new(focal)); 7265 7266 _test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7267 } 7268 } 7269 7270 mod common_sequential { 7271 use std::fs::remove_dir_all; 7272 7273 use crate::*; 7274 7275 #[test] 7276 #[cfg(not(feature = "mshv"))] 7277 fn test_memory_mergeable_on() { 7278 test_memory_mergeable(true) 7279 } 7280 7281 fn snapshot_and_check_events(api_socket: &str, snapshot_dir: &str, event_path: &str) { 7282 // Pause the VM 7283 assert!(remote_command(api_socket, "pause", None)); 7284 let latest_events: [&MetaEvent; 2] = [ 7285 &MetaEvent { 7286 event: "pausing".to_string(), 7287 device_id: None, 7288 }, 7289 &MetaEvent { 7290 event: "paused".to_string(), 7291 device_id: None, 7292 }, 7293 ]; 7294 // See: #5938 7295 thread::sleep(std::time::Duration::new(1, 0)); 7296 assert!(check_latest_events_exact(&latest_events, event_path)); 7297 7298 // Take a snapshot from the VM 7299 assert!(remote_command( 7300 api_socket, 7301 "snapshot", 7302 Some(format!("file://{snapshot_dir}").as_str()), 7303 )); 7304 7305 // Wait to make sure the snapshot is completed 7306 thread::sleep(std::time::Duration::new(10, 0)); 7307 7308 let latest_events = [ 7309 &MetaEvent { 7310 event: "snapshotting".to_string(), 7311 device_id: None, 7312 }, 7313 &MetaEvent { 7314 event: "snapshotted".to_string(), 7315 device_id: None, 7316 }, 7317 ]; 7318 // See: #5938 7319 thread::sleep(std::time::Duration::new(1, 0)); 7320 assert!(check_latest_events_exact(&latest_events, event_path)); 7321 } 7322 7323 // One thing to note about this test. The virtio-net device is heavily used 7324 // through each ssh command. There's no need to perform a dedicated test to 7325 // verify the migration went well for virtio-net. 7326 #[test] 7327 #[cfg(not(feature = "mshv"))] 7328 fn test_snapshot_restore_hotplug_virtiomem() { 7329 _test_snapshot_restore(true); 7330 } 7331 7332 #[test] 7333 fn test_snapshot_restore_basic() { 7334 _test_snapshot_restore(false); 7335 } 7336 7337 fn _test_snapshot_restore(use_hotplug: bool) { 7338 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7339 let guest = Guest::new(Box::new(focal)); 7340 let kernel_path = direct_kernel_boot_path(); 7341 7342 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7343 7344 let net_id = "net123"; 7345 let net_params = format!( 7346 "id={},tap=,mac={},ip={},mask=255.255.255.0", 7347 net_id, guest.network.guest_mac, guest.network.host_ip 7348 ); 7349 let mut mem_params = "size=2G"; 7350 7351 if use_hotplug { 7352 mem_params = "size=2G,hotplug_method=virtio-mem,hotplug_size=32G" 7353 } 7354 7355 let cloudinit_params = format!( 7356 "path={},iommu=on", 7357 guest.disk_config.disk(DiskType::CloudInit).unwrap() 7358 ); 7359 7360 let socket = temp_vsock_path(&guest.tmp_dir); 7361 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7362 7363 let mut child = GuestCommand::new(&guest) 7364 .args(["--api-socket", &api_socket_source]) 7365 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7366 .args(["--cpus", "boot=4"]) 7367 .args(["--memory", mem_params]) 7368 .args(["--balloon", "size=0"]) 7369 .args(["--kernel", kernel_path.to_str().unwrap()]) 7370 .args([ 7371 "--disk", 7372 format!( 7373 "path={}", 7374 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 7375 ) 7376 .as_str(), 7377 cloudinit_params.as_str(), 7378 ]) 7379 .args(["--net", net_params.as_str()]) 7380 .args(["--vsock", format!("cid=3,socket={socket}").as_str()]) 7381 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7382 .capture_output() 7383 .spawn() 7384 .unwrap(); 7385 7386 let console_text = String::from("On a branch floating down river a cricket, singing."); 7387 // Create the snapshot directory 7388 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7389 7390 let r = std::panic::catch_unwind(|| { 7391 guest.wait_vm_boot(None).unwrap(); 7392 7393 // Check the number of vCPUs 7394 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 7395 // Check the guest RAM 7396 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 7397 if use_hotplug { 7398 // Increase guest RAM with virtio-mem 7399 resize_command( 7400 &api_socket_source, 7401 None, 7402 Some(6 << 30), 7403 None, 7404 Some(&event_path), 7405 ); 7406 thread::sleep(std::time::Duration::new(5, 0)); 7407 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 7408 // Use balloon to remove RAM from the VM 7409 resize_command( 7410 &api_socket_source, 7411 None, 7412 None, 7413 Some(1 << 30), 7414 Some(&event_path), 7415 ); 7416 thread::sleep(std::time::Duration::new(5, 0)); 7417 let total_memory = guest.get_total_memory().unwrap_or_default(); 7418 assert!(total_memory > 4_800_000); 7419 assert!(total_memory < 5_760_000); 7420 } 7421 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 7422 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7423 7424 // x86_64: We check that removing and adding back the virtio-net device 7425 // does not break the snapshot/restore support for virtio-pci. 7426 // This is an important thing to test as the hotplug will 7427 // trigger a PCI BAR reprogramming, which is a good way of 7428 // checking if the stored resources are correctly restored. 7429 // Unplug the virtio-net device 7430 // AArch64: Device hotplug is currently not supported, skipping here. 7431 #[cfg(target_arch = "x86_64")] 7432 { 7433 assert!(remote_command( 7434 &api_socket_source, 7435 "remove-device", 7436 Some(net_id), 7437 )); 7438 thread::sleep(std::time::Duration::new(10, 0)); 7439 let latest_events = [&MetaEvent { 7440 event: "device-removed".to_string(), 7441 device_id: Some(net_id.to_string()), 7442 }]; 7443 // See: #5938 7444 thread::sleep(std::time::Duration::new(1, 0)); 7445 assert!(check_latest_events_exact(&latest_events, &event_path)); 7446 7447 // Plug the virtio-net device again 7448 assert!(remote_command( 7449 &api_socket_source, 7450 "add-net", 7451 Some(net_params.as_str()), 7452 )); 7453 thread::sleep(std::time::Duration::new(10, 0)); 7454 } 7455 7456 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7457 }); 7458 7459 // Shutdown the source VM and check console output 7460 kill_child(&mut child); 7461 let output = child.wait_with_output().unwrap(); 7462 handle_child_output(r, &output); 7463 7464 let r = std::panic::catch_unwind(|| { 7465 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7466 }); 7467 7468 handle_child_output(r, &output); 7469 7470 // Remove the vsock socket file. 7471 Command::new("rm") 7472 .arg("-f") 7473 .arg(socket.as_str()) 7474 .output() 7475 .unwrap(); 7476 7477 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7478 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7479 7480 // Restore the VM from the snapshot 7481 let mut child = GuestCommand::new(&guest) 7482 .args(["--api-socket", &api_socket_restored]) 7483 .args([ 7484 "--event-monitor", 7485 format!("path={event_path_restored}").as_str(), 7486 ]) 7487 .args([ 7488 "--restore", 7489 format!("source_url=file://{snapshot_dir}").as_str(), 7490 ]) 7491 .capture_output() 7492 .spawn() 7493 .unwrap(); 7494 7495 // Wait for the VM to be restored 7496 thread::sleep(std::time::Duration::new(20, 0)); 7497 let expected_events = [ 7498 &MetaEvent { 7499 event: "starting".to_string(), 7500 device_id: None, 7501 }, 7502 &MetaEvent { 7503 event: "activated".to_string(), 7504 device_id: Some("__console".to_string()), 7505 }, 7506 &MetaEvent { 7507 event: "activated".to_string(), 7508 device_id: Some("__rng".to_string()), 7509 }, 7510 &MetaEvent { 7511 event: "restoring".to_string(), 7512 device_id: None, 7513 }, 7514 ]; 7515 assert!(check_sequential_events( 7516 &expected_events, 7517 &event_path_restored 7518 )); 7519 let latest_events = [&MetaEvent { 7520 event: "restored".to_string(), 7521 device_id: None, 7522 }]; 7523 assert!(check_latest_events_exact( 7524 &latest_events, 7525 &event_path_restored 7526 )); 7527 7528 // Remove the snapshot dir 7529 let _ = remove_dir_all(snapshot_dir.as_str()); 7530 7531 let r = std::panic::catch_unwind(|| { 7532 // Resume the VM 7533 assert!(remote_command(&api_socket_restored, "resume", None)); 7534 // There is no way that we can ensure the 'write()' to the 7535 // event file is completed when the 'resume' request is 7536 // returned successfully, because the 'write()' was done 7537 // asynchronously from a different thread of Cloud 7538 // Hypervisor (e.g. the event-monitor thread). 7539 thread::sleep(std::time::Duration::new(1, 0)); 7540 let latest_events = [ 7541 &MetaEvent { 7542 event: "resuming".to_string(), 7543 device_id: None, 7544 }, 7545 &MetaEvent { 7546 event: "resumed".to_string(), 7547 device_id: None, 7548 }, 7549 ]; 7550 assert!(check_latest_events_exact( 7551 &latest_events, 7552 &event_path_restored 7553 )); 7554 7555 // Perform same checks to validate VM has been properly restored 7556 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 7557 let total_memory = guest.get_total_memory().unwrap_or_default(); 7558 if !use_hotplug { 7559 assert!(total_memory > 1_920_000); 7560 } else { 7561 assert!(total_memory > 4_800_000); 7562 assert!(total_memory < 5_760_000); 7563 // Deflate balloon to restore entire RAM to the VM 7564 resize_command(&api_socket_restored, None, None, Some(0), None); 7565 thread::sleep(std::time::Duration::new(5, 0)); 7566 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 7567 // Decrease guest RAM with virtio-mem 7568 resize_command(&api_socket_restored, None, Some(5 << 30), None, None); 7569 thread::sleep(std::time::Duration::new(5, 0)); 7570 let total_memory = guest.get_total_memory().unwrap_or_default(); 7571 assert!(total_memory > 4_800_000); 7572 assert!(total_memory < 5_760_000); 7573 } 7574 7575 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7576 }); 7577 // Shutdown the target VM and check console output 7578 kill_child(&mut child); 7579 let output = child.wait_with_output().unwrap(); 7580 handle_child_output(r, &output); 7581 7582 let r = std::panic::catch_unwind(|| { 7583 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7584 }); 7585 7586 handle_child_output(r, &output); 7587 } 7588 7589 #[test] 7590 fn test_snapshot_restore_with_fd() { 7591 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7592 let guest = Guest::new(Box::new(focal)); 7593 let kernel_path = direct_kernel_boot_path(); 7594 7595 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7596 7597 let net_id = "net123"; 7598 let num_queue_pairs: usize = 2; 7599 // use a name that does not conflict with tap dev created from other tests 7600 let tap_name = "chtap999"; 7601 use std::str::FromStr; 7602 let taps = net_util::open_tap( 7603 Some(tap_name), 7604 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 7605 None, 7606 &mut None, 7607 None, 7608 num_queue_pairs, 7609 Some(libc::O_RDWR | libc::O_NONBLOCK), 7610 ) 7611 .unwrap(); 7612 let net_params = format!( 7613 "id={},fd=[{},{}],mac={},ip={},mask=255.255.255.0,num_queues={}", 7614 net_id, 7615 taps[0].as_raw_fd(), 7616 taps[1].as_raw_fd(), 7617 guest.network.guest_mac, 7618 guest.network.host_ip, 7619 num_queue_pairs * 2 7620 ); 7621 7622 let cloudinit_params = format!( 7623 "path={},iommu=on", 7624 guest.disk_config.disk(DiskType::CloudInit).unwrap() 7625 ); 7626 7627 let n_cpu = 2; 7628 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7629 7630 let mut child = GuestCommand::new(&guest) 7631 .args(["--api-socket", &api_socket_source]) 7632 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7633 .args(["--cpus", format!("boot={}", n_cpu).as_str()]) 7634 .args(["--memory", "size=1G"]) 7635 .args(["--kernel", kernel_path.to_str().unwrap()]) 7636 .args([ 7637 "--disk", 7638 format!( 7639 "path={}", 7640 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 7641 ) 7642 .as_str(), 7643 cloudinit_params.as_str(), 7644 ]) 7645 .args(["--net", net_params.as_str()]) 7646 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7647 .capture_output() 7648 .spawn() 7649 .unwrap(); 7650 7651 let console_text = String::from("On a branch floating down river a cricket, singing."); 7652 // Create the snapshot directory 7653 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7654 7655 let r = std::panic::catch_unwind(|| { 7656 guest.wait_vm_boot(None).unwrap(); 7657 7658 // close the fds after VM boots, as CH duplicates them before using 7659 for tap in taps.iter() { 7660 unsafe { libc::close(tap.as_raw_fd()) }; 7661 } 7662 7663 // Check the number of vCPUs 7664 assert_eq!(guest.get_cpu_count().unwrap_or_default(), n_cpu); 7665 // Check the guest RAM 7666 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 7667 7668 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 7669 guest.check_devices_common(None, Some(&console_text), None); 7670 7671 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7672 }); 7673 7674 // Shutdown the source VM and check console output 7675 kill_child(&mut child); 7676 let output = child.wait_with_output().unwrap(); 7677 handle_child_output(r, &output); 7678 7679 let r = std::panic::catch_unwind(|| { 7680 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7681 }); 7682 7683 handle_child_output(r, &output); 7684 7685 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7686 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7687 7688 // Restore the VM from the snapshot 7689 let mut child = GuestCommand::new(&guest) 7690 .args(["--api-socket", &api_socket_restored]) 7691 .args([ 7692 "--event-monitor", 7693 format!("path={event_path_restored}").as_str(), 7694 ]) 7695 .capture_output() 7696 .spawn() 7697 .unwrap(); 7698 thread::sleep(std::time::Duration::new(2, 0)); 7699 7700 let taps = net_util::open_tap( 7701 Some(tap_name), 7702 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 7703 None, 7704 &mut None, 7705 None, 7706 num_queue_pairs, 7707 Some(libc::O_RDWR | libc::O_NONBLOCK), 7708 ) 7709 .unwrap(); 7710 let restore_params = format!( 7711 "source_url=file://{},net_fds=[{}@[{},{}]]", 7712 snapshot_dir, 7713 net_id, 7714 taps[0].as_raw_fd(), 7715 taps[1].as_raw_fd() 7716 ); 7717 assert!(remote_command( 7718 &api_socket_restored, 7719 "restore", 7720 Some(restore_params.as_str()) 7721 )); 7722 7723 // Wait for the VM to be restored 7724 thread::sleep(std::time::Duration::new(20, 0)); 7725 7726 // close the fds as CH duplicates them before using 7727 for tap in taps.iter() { 7728 unsafe { libc::close(tap.as_raw_fd()) }; 7729 } 7730 7731 let expected_events = [ 7732 &MetaEvent { 7733 event: "starting".to_string(), 7734 device_id: None, 7735 }, 7736 &MetaEvent { 7737 event: "activated".to_string(), 7738 device_id: Some("__console".to_string()), 7739 }, 7740 &MetaEvent { 7741 event: "activated".to_string(), 7742 device_id: Some("__rng".to_string()), 7743 }, 7744 &MetaEvent { 7745 event: "restoring".to_string(), 7746 device_id: None, 7747 }, 7748 ]; 7749 assert!(check_sequential_events( 7750 &expected_events, 7751 &event_path_restored 7752 )); 7753 let latest_events = [&MetaEvent { 7754 event: "restored".to_string(), 7755 device_id: None, 7756 }]; 7757 assert!(check_latest_events_exact( 7758 &latest_events, 7759 &event_path_restored 7760 )); 7761 7762 // Remove the snapshot dir 7763 let _ = remove_dir_all(snapshot_dir.as_str()); 7764 7765 let r = std::panic::catch_unwind(|| { 7766 // Resume the VM 7767 assert!(remote_command(&api_socket_restored, "resume", None)); 7768 // There is no way that we can ensure the 'write()' to the 7769 // event file is completed when the 'resume' request is 7770 // returned successfully, because the 'write()' was done 7771 // asynchronously from a different thread of Cloud 7772 // Hypervisor (e.g. the event-monitor thread). 7773 thread::sleep(std::time::Duration::new(1, 0)); 7774 let latest_events = [ 7775 &MetaEvent { 7776 event: "resuming".to_string(), 7777 device_id: None, 7778 }, 7779 &MetaEvent { 7780 event: "resumed".to_string(), 7781 device_id: None, 7782 }, 7783 ]; 7784 assert!(check_latest_events_exact( 7785 &latest_events, 7786 &event_path_restored 7787 )); 7788 7789 // Perform same checks to validate VM has been properly restored 7790 assert_eq!(guest.get_cpu_count().unwrap_or_default(), n_cpu); 7791 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 7792 7793 guest.check_devices_common(None, Some(&console_text), None); 7794 }); 7795 // Shutdown the target VM and check console output 7796 kill_child(&mut child); 7797 let output = child.wait_with_output().unwrap(); 7798 handle_child_output(r, &output); 7799 7800 let r = std::panic::catch_unwind(|| { 7801 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7802 }); 7803 7804 handle_child_output(r, &output); 7805 } 7806 7807 #[test] 7808 #[cfg(target_arch = "x86_64")] 7809 fn test_snapshot_restore_pvpanic() { 7810 _test_snapshot_restore_devices(true); 7811 } 7812 7813 fn _test_snapshot_restore_devices(pvpanic: bool) { 7814 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7815 let guest = Guest::new(Box::new(focal)); 7816 let kernel_path = direct_kernel_boot_path(); 7817 7818 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7819 7820 let device_params = { 7821 let mut data = vec![]; 7822 if pvpanic { 7823 data.push("--pvpanic"); 7824 } 7825 data 7826 }; 7827 7828 let socket = temp_vsock_path(&guest.tmp_dir); 7829 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7830 7831 let mut child = GuestCommand::new(&guest) 7832 .args(["--api-socket", &api_socket_source]) 7833 .args(["--event-monitor", format!("path={}", event_path).as_str()]) 7834 .args(["--cpus", "boot=2"]) 7835 .args(["--memory", "size=1G"]) 7836 .args(["--kernel", kernel_path.to_str().unwrap()]) 7837 .default_disks() 7838 .default_net() 7839 .args(["--vsock", format!("cid=3,socket={}", socket).as_str()]) 7840 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7841 .args(device_params) 7842 .capture_output() 7843 .spawn() 7844 .unwrap(); 7845 7846 let console_text = String::from("On a branch floating down river a cricket, singing."); 7847 // Create the snapshot directory 7848 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7849 7850 let r = std::panic::catch_unwind(|| { 7851 guest.wait_vm_boot(None).unwrap(); 7852 7853 // Check the number of vCPUs 7854 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 7855 7856 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7857 }); 7858 7859 // Shutdown the source VM and check console output 7860 kill_child(&mut child); 7861 let output = child.wait_with_output().unwrap(); 7862 handle_child_output(r, &output); 7863 7864 // Remove the vsock socket file. 7865 Command::new("rm") 7866 .arg("-f") 7867 .arg(socket.as_str()) 7868 .output() 7869 .unwrap(); 7870 7871 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7872 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7873 7874 // Restore the VM from the snapshot 7875 let mut child = GuestCommand::new(&guest) 7876 .args(["--api-socket", &api_socket_restored]) 7877 .args([ 7878 "--event-monitor", 7879 format!("path={event_path_restored}").as_str(), 7880 ]) 7881 .args([ 7882 "--restore", 7883 format!("source_url=file://{snapshot_dir}").as_str(), 7884 ]) 7885 .capture_output() 7886 .spawn() 7887 .unwrap(); 7888 7889 // Wait for the VM to be restored 7890 thread::sleep(std::time::Duration::new(20, 0)); 7891 7892 let latest_events = [&MetaEvent { 7893 event: "restored".to_string(), 7894 device_id: None, 7895 }]; 7896 assert!(check_latest_events_exact( 7897 &latest_events, 7898 &event_path_restored 7899 )); 7900 7901 // Remove the snapshot dir 7902 let _ = remove_dir_all(snapshot_dir.as_str()); 7903 7904 let r = std::panic::catch_unwind(|| { 7905 // Resume the VM 7906 assert!(remote_command(&api_socket_restored, "resume", None)); 7907 // There is no way that we can ensure the 'write()' to the 7908 // event file is completed when the 'resume' request is 7909 // returned successfully, because the 'write()' was done 7910 // asynchronously from a different thread of Cloud 7911 // Hypervisor (e.g. the event-monitor thread). 7912 thread::sleep(std::time::Duration::new(1, 0)); 7913 let latest_events = [ 7914 &MetaEvent { 7915 event: "resuming".to_string(), 7916 device_id: None, 7917 }, 7918 &MetaEvent { 7919 event: "resumed".to_string(), 7920 device_id: None, 7921 }, 7922 ]; 7923 assert!(check_latest_events_exact( 7924 &latest_events, 7925 &event_path_restored 7926 )); 7927 7928 // Check the number of vCPUs 7929 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 7930 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7931 7932 if pvpanic { 7933 // Trigger guest a panic 7934 make_guest_panic(&guest); 7935 // Wait a while for guest 7936 thread::sleep(std::time::Duration::new(10, 0)); 7937 7938 let expected_sequential_events = [&MetaEvent { 7939 event: "panic".to_string(), 7940 device_id: None, 7941 }]; 7942 assert!(check_latest_events_exact( 7943 &expected_sequential_events, 7944 &event_path_restored 7945 )); 7946 } 7947 }); 7948 // Shutdown the target VM and check console output 7949 kill_child(&mut child); 7950 let output = child.wait_with_output().unwrap(); 7951 handle_child_output(r, &output); 7952 7953 let r = std::panic::catch_unwind(|| { 7954 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7955 }); 7956 7957 handle_child_output(r, &output); 7958 } 7959 } 7960 7961 mod windows { 7962 use once_cell::sync::Lazy; 7963 7964 use crate::*; 7965 7966 static NEXT_DISK_ID: Lazy<Mutex<u8>> = Lazy::new(|| Mutex::new(1)); 7967 7968 struct WindowsGuest { 7969 guest: Guest, 7970 auth: PasswordAuth, 7971 } 7972 7973 trait FsType { 7974 const FS_FAT: u8; 7975 const FS_NTFS: u8; 7976 } 7977 impl FsType for WindowsGuest { 7978 const FS_FAT: u8 = 0; 7979 const FS_NTFS: u8 = 1; 7980 } 7981 7982 impl WindowsGuest { 7983 fn new() -> Self { 7984 let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string()); 7985 let guest = Guest::new(Box::new(disk)); 7986 let auth = PasswordAuth { 7987 username: String::from("administrator"), 7988 password: String::from("Admin123"), 7989 }; 7990 7991 WindowsGuest { guest, auth } 7992 } 7993 7994 fn guest(&self) -> &Guest { 7995 &self.guest 7996 } 7997 7998 fn ssh_cmd(&self, cmd: &str) -> String { 7999 ssh_command_ip_with_auth( 8000 cmd, 8001 &self.auth, 8002 &self.guest.network.guest_ip, 8003 DEFAULT_SSH_RETRIES, 8004 DEFAULT_SSH_TIMEOUT, 8005 ) 8006 .unwrap() 8007 } 8008 8009 fn cpu_count(&self) -> u8 { 8010 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"") 8011 .trim() 8012 .parse::<u8>() 8013 .unwrap_or(0) 8014 } 8015 8016 fn ram_size(&self) -> usize { 8017 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"") 8018 .trim() 8019 .parse::<usize>() 8020 .unwrap_or(0) 8021 } 8022 8023 fn netdev_count(&self) -> u8 { 8024 self.ssh_cmd("powershell -Command \"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"") 8025 .trim() 8026 .parse::<u8>() 8027 .unwrap_or(0) 8028 } 8029 8030 fn disk_count(&self) -> u8 { 8031 self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"") 8032 .trim() 8033 .parse::<u8>() 8034 .unwrap_or(0) 8035 } 8036 8037 fn reboot(&self) { 8038 let _ = self.ssh_cmd("shutdown /r /t 0"); 8039 } 8040 8041 fn shutdown(&self) { 8042 let _ = self.ssh_cmd("shutdown /s /t 0"); 8043 } 8044 8045 fn run_dnsmasq(&self) -> std::process::Child { 8046 let listen_address = format!("--listen-address={}", self.guest.network.host_ip); 8047 let dhcp_host = format!( 8048 "--dhcp-host={},{}", 8049 self.guest.network.guest_mac, self.guest.network.guest_ip 8050 ); 8051 let dhcp_range = format!( 8052 "--dhcp-range=eth,{},{}", 8053 self.guest.network.guest_ip, self.guest.network.guest_ip 8054 ); 8055 8056 Command::new("dnsmasq") 8057 .arg("--no-daemon") 8058 .arg("--log-queries") 8059 .arg(listen_address.as_str()) 8060 .arg("--except-interface=lo") 8061 .arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet. 8062 .arg("--conf-file=/dev/null") 8063 .arg(dhcp_host.as_str()) 8064 .arg(dhcp_range.as_str()) 8065 .spawn() 8066 .unwrap() 8067 } 8068 8069 // TODO Cleanup image file explicitly after test, if there's some space issues. 8070 fn disk_new(&self, fs: u8, sz: usize) -> String { 8071 let mut guard = NEXT_DISK_ID.lock().unwrap(); 8072 let id = *guard; 8073 *guard = id + 1; 8074 8075 let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw")); 8076 let _ = fs::remove_file(&img); 8077 8078 // Create an image file 8079 let out = Command::new("qemu-img") 8080 .args([ 8081 "create", 8082 "-f", 8083 "raw", 8084 img.to_str().unwrap(), 8085 format!("{sz}m").as_str(), 8086 ]) 8087 .output() 8088 .expect("qemu-img command failed") 8089 .stdout; 8090 println!("{out:?}"); 8091 8092 // Associate image to a loop device 8093 let out = Command::new("losetup") 8094 .args(["--show", "-f", img.to_str().unwrap()]) 8095 .output() 8096 .expect("failed to create loop device") 8097 .stdout; 8098 let _tmp = String::from_utf8_lossy(&out); 8099 let loop_dev = _tmp.trim(); 8100 println!("{out:?}"); 8101 8102 // Create a partition table 8103 // echo 'type=7' | sudo sfdisk "${LOOP}" 8104 let mut child = Command::new("sfdisk") 8105 .args([loop_dev]) 8106 .stdin(Stdio::piped()) 8107 .spawn() 8108 .unwrap(); 8109 let stdin = child.stdin.as_mut().expect("failed to open stdin"); 8110 stdin 8111 .write_all("type=7".as_bytes()) 8112 .expect("failed to write stdin"); 8113 let out = child.wait_with_output().expect("sfdisk failed").stdout; 8114 println!("{out:?}"); 8115 8116 // Disengage the loop device 8117 let out = Command::new("losetup") 8118 .args(["-d", loop_dev]) 8119 .output() 8120 .expect("loop device not found") 8121 .stdout; 8122 println!("{out:?}"); 8123 8124 // Re-associate loop device pointing to the partition only 8125 let out = Command::new("losetup") 8126 .args([ 8127 "--show", 8128 "--offset", 8129 (512 * 2048).to_string().as_str(), 8130 "-f", 8131 img.to_str().unwrap(), 8132 ]) 8133 .output() 8134 .expect("failed to create loop device") 8135 .stdout; 8136 let _tmp = String::from_utf8_lossy(&out); 8137 let loop_dev = _tmp.trim(); 8138 println!("{out:?}"); 8139 8140 // Create filesystem. 8141 let fs_cmd = match fs { 8142 WindowsGuest::FS_FAT => "mkfs.msdos", 8143 WindowsGuest::FS_NTFS => "mkfs.ntfs", 8144 _ => panic!("Unknown filesystem type '{fs}'"), 8145 }; 8146 let out = Command::new(fs_cmd) 8147 .args([&loop_dev]) 8148 .output() 8149 .unwrap_or_else(|_| panic!("{fs_cmd} failed")) 8150 .stdout; 8151 println!("{out:?}"); 8152 8153 // Disengage the loop device 8154 let out = Command::new("losetup") 8155 .args(["-d", loop_dev]) 8156 .output() 8157 .unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found")) 8158 .stdout; 8159 println!("{out:?}"); 8160 8161 img.to_str().unwrap().to_string() 8162 } 8163 8164 fn disks_set_rw(&self) { 8165 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\""); 8166 } 8167 8168 fn disks_online(&self) { 8169 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\""); 8170 } 8171 8172 fn disk_file_put(&self, fname: &str, data: &str) { 8173 let _ = self.ssh_cmd(&format!( 8174 "powershell -Command \"'{data}' | Set-Content -Path {fname}\"" 8175 )); 8176 } 8177 8178 fn disk_file_read(&self, fname: &str) -> String { 8179 self.ssh_cmd(&format!( 8180 "powershell -Command \"Get-Content -Path {fname}\"" 8181 )) 8182 } 8183 8184 fn wait_for_boot(&self) -> bool { 8185 let cmd = "dir /b c:\\ | find \"Windows\""; 8186 let tmo_max = 180; 8187 // The timeout increase by n*1+n*2+n*3+..., therefore the initial 8188 // interval must be small. 8189 let tmo_int = 2; 8190 let out = ssh_command_ip_with_auth( 8191 cmd, 8192 &self.auth, 8193 &self.guest.network.guest_ip, 8194 { 8195 let mut ret = 1; 8196 let mut tmo_acc = 0; 8197 loop { 8198 tmo_acc += tmo_int * ret; 8199 if tmo_acc >= tmo_max { 8200 break; 8201 } 8202 ret += 1; 8203 } 8204 ret 8205 }, 8206 tmo_int, 8207 ) 8208 .unwrap(); 8209 8210 if "Windows" == out.trim() { 8211 return true; 8212 } 8213 8214 false 8215 } 8216 } 8217 8218 fn vcpu_threads_count(pid: u32) -> u8 { 8219 // ps -T -p 12345 | grep vcpu | wc -l 8220 let out = Command::new("ps") 8221 .args(["-T", "-p", format!("{pid}").as_str()]) 8222 .output() 8223 .expect("ps command failed") 8224 .stdout; 8225 String::from_utf8_lossy(&out).matches("vcpu").count() as u8 8226 } 8227 8228 fn netdev_ctrl_threads_count(pid: u32) -> u8 { 8229 // ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l 8230 let out = Command::new("ps") 8231 .args(["-T", "-p", format!("{pid}").as_str()]) 8232 .output() 8233 .expect("ps command failed") 8234 .stdout; 8235 let mut n = 0; 8236 String::from_utf8_lossy(&out) 8237 .split_whitespace() 8238 .for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl 8239 n 8240 } 8241 8242 fn disk_ctrl_threads_count(pid: u32) -> u8 { 8243 // ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l 8244 let out = Command::new("ps") 8245 .args(["-T", "-p", format!("{pid}").as_str()]) 8246 .output() 8247 .expect("ps command failed") 8248 .stdout; 8249 let mut n = 0; 8250 String::from_utf8_lossy(&out) 8251 .split_whitespace() 8252 .for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // _disk0_q0, don't care about multiple queues as they're related to the same hdd 8253 n 8254 } 8255 8256 #[test] 8257 fn test_windows_guest() { 8258 let windows_guest = WindowsGuest::new(); 8259 8260 let mut child = GuestCommand::new(windows_guest.guest()) 8261 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8262 .args(["--memory", "size=4G"]) 8263 .args(["--kernel", edk2_path().to_str().unwrap()]) 8264 .args(["--serial", "tty"]) 8265 .args(["--console", "off"]) 8266 .default_disks() 8267 .default_net() 8268 .capture_output() 8269 .spawn() 8270 .unwrap(); 8271 8272 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8273 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8274 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8275 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8276 8277 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8278 8279 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8280 8281 let r = std::panic::catch_unwind(|| { 8282 // Wait to make sure Windows boots up 8283 assert!(windows_guest.wait_for_boot()); 8284 8285 windows_guest.shutdown(); 8286 }); 8287 8288 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8289 let _ = child.kill(); 8290 let output = child.wait_with_output().unwrap(); 8291 8292 let _ = child_dnsmasq.kill(); 8293 let _ = child_dnsmasq.wait(); 8294 8295 handle_child_output(r, &output); 8296 } 8297 8298 #[test] 8299 fn test_windows_guest_multiple_queues() { 8300 let windows_guest = WindowsGuest::new(); 8301 8302 let mut ovmf_path = dirs::home_dir().unwrap(); 8303 ovmf_path.push("workloads"); 8304 ovmf_path.push(OVMF_NAME); 8305 8306 let mut child = GuestCommand::new(windows_guest.guest()) 8307 .args(["--cpus", "boot=4,kvm_hyperv=on"]) 8308 .args(["--memory", "size=4G"]) 8309 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8310 .args(["--serial", "tty"]) 8311 .args(["--console", "off"]) 8312 .args([ 8313 "--disk", 8314 format!( 8315 "path={},num_queues=4", 8316 windows_guest 8317 .guest() 8318 .disk_config 8319 .disk(DiskType::OperatingSystem) 8320 .unwrap() 8321 ) 8322 .as_str(), 8323 ]) 8324 .args([ 8325 "--net", 8326 format!( 8327 "tap=,mac={},ip={},mask=255.255.255.0,num_queues=8", 8328 windows_guest.guest().network.guest_mac, 8329 windows_guest.guest().network.host_ip 8330 ) 8331 .as_str(), 8332 ]) 8333 .capture_output() 8334 .spawn() 8335 .unwrap(); 8336 8337 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8338 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8339 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8340 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8341 8342 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8343 8344 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8345 8346 let r = std::panic::catch_unwind(|| { 8347 // Wait to make sure Windows boots up 8348 assert!(windows_guest.wait_for_boot()); 8349 8350 windows_guest.shutdown(); 8351 }); 8352 8353 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8354 let _ = child.kill(); 8355 let output = child.wait_with_output().unwrap(); 8356 8357 let _ = child_dnsmasq.kill(); 8358 let _ = child_dnsmasq.wait(); 8359 8360 handle_child_output(r, &output); 8361 } 8362 8363 #[test] 8364 #[cfg(not(feature = "mshv"))] 8365 #[ignore = "See #4327"] 8366 fn test_windows_guest_snapshot_restore() { 8367 let windows_guest = WindowsGuest::new(); 8368 8369 let mut ovmf_path = dirs::home_dir().unwrap(); 8370 ovmf_path.push("workloads"); 8371 ovmf_path.push(OVMF_NAME); 8372 8373 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8374 let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir)); 8375 8376 let mut child = GuestCommand::new(windows_guest.guest()) 8377 .args(["--api-socket", &api_socket_source]) 8378 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8379 .args(["--memory", "size=4G"]) 8380 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8381 .args(["--serial", "tty"]) 8382 .args(["--console", "off"]) 8383 .default_disks() 8384 .default_net() 8385 .capture_output() 8386 .spawn() 8387 .unwrap(); 8388 8389 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8390 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8391 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8392 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8393 8394 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8395 8396 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8397 8398 // Wait to make sure Windows boots up 8399 assert!(windows_guest.wait_for_boot()); 8400 8401 let snapshot_dir = temp_snapshot_dir_path(&tmp_dir); 8402 8403 // Pause the VM 8404 assert!(remote_command(&api_socket_source, "pause", None)); 8405 8406 // Take a snapshot from the VM 8407 assert!(remote_command( 8408 &api_socket_source, 8409 "snapshot", 8410 Some(format!("file://{snapshot_dir}").as_str()), 8411 )); 8412 8413 // Wait to make sure the snapshot is completed 8414 thread::sleep(std::time::Duration::new(30, 0)); 8415 8416 let _ = child.kill(); 8417 child.wait().unwrap(); 8418 8419 let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir)); 8420 8421 // Restore the VM from the snapshot 8422 let mut child = GuestCommand::new(windows_guest.guest()) 8423 .args(["--api-socket", &api_socket_restored]) 8424 .args([ 8425 "--restore", 8426 format!("source_url=file://{snapshot_dir}").as_str(), 8427 ]) 8428 .capture_output() 8429 .spawn() 8430 .unwrap(); 8431 8432 // Wait for the VM to be restored 8433 thread::sleep(std::time::Duration::new(20, 0)); 8434 8435 let r = std::panic::catch_unwind(|| { 8436 // Resume the VM 8437 assert!(remote_command(&api_socket_restored, "resume", None)); 8438 8439 windows_guest.shutdown(); 8440 }); 8441 8442 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8443 let _ = child.kill(); 8444 let output = child.wait_with_output().unwrap(); 8445 8446 let _ = child_dnsmasq.kill(); 8447 let _ = child_dnsmasq.wait(); 8448 8449 handle_child_output(r, &output); 8450 } 8451 8452 #[test] 8453 #[cfg(not(feature = "mshv"))] 8454 #[cfg(not(target_arch = "aarch64"))] 8455 fn test_windows_guest_cpu_hotplug() { 8456 let windows_guest = WindowsGuest::new(); 8457 8458 let mut ovmf_path = dirs::home_dir().unwrap(); 8459 ovmf_path.push("workloads"); 8460 ovmf_path.push(OVMF_NAME); 8461 8462 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8463 let api_socket = temp_api_path(&tmp_dir); 8464 8465 let mut child = GuestCommand::new(windows_guest.guest()) 8466 .args(["--api-socket", &api_socket]) 8467 .args(["--cpus", "boot=2,max=8,kvm_hyperv=on"]) 8468 .args(["--memory", "size=4G"]) 8469 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8470 .args(["--serial", "tty"]) 8471 .args(["--console", "off"]) 8472 .default_disks() 8473 .default_net() 8474 .capture_output() 8475 .spawn() 8476 .unwrap(); 8477 8478 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8479 8480 let r = std::panic::catch_unwind(|| { 8481 // Wait to make sure Windows boots up 8482 assert!(windows_guest.wait_for_boot()); 8483 8484 let vcpu_num = 2; 8485 // Check the initial number of CPUs the guest sees 8486 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8487 // Check the initial number of vcpu threads in the CH process 8488 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8489 8490 let vcpu_num = 6; 8491 // Hotplug some CPUs 8492 resize_command(&api_socket, Some(vcpu_num), None, None, None); 8493 // Wait to make sure CPUs are added 8494 thread::sleep(std::time::Duration::new(10, 0)); 8495 // Check the guest sees the correct number 8496 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8497 // Check the CH process has the correct number of vcpu threads 8498 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8499 8500 let vcpu_num = 4; 8501 // Remove some CPUs. Note that Windows doesn't support hot-remove. 8502 resize_command(&api_socket, Some(vcpu_num), None, None, None); 8503 // Wait to make sure CPUs are removed 8504 thread::sleep(std::time::Duration::new(10, 0)); 8505 // Reboot to let Windows catch up 8506 windows_guest.reboot(); 8507 // Wait to make sure Windows completely rebooted 8508 thread::sleep(std::time::Duration::new(60, 0)); 8509 // Check the guest sees the correct number 8510 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8511 // Check the CH process has the correct number of vcpu threads 8512 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8513 8514 windows_guest.shutdown(); 8515 }); 8516 8517 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8518 let _ = child.kill(); 8519 let output = child.wait_with_output().unwrap(); 8520 8521 let _ = child_dnsmasq.kill(); 8522 let _ = child_dnsmasq.wait(); 8523 8524 handle_child_output(r, &output); 8525 } 8526 8527 #[test] 8528 #[cfg(not(feature = "mshv"))] 8529 #[cfg(not(target_arch = "aarch64"))] 8530 fn test_windows_guest_ram_hotplug() { 8531 let windows_guest = WindowsGuest::new(); 8532 8533 let mut ovmf_path = dirs::home_dir().unwrap(); 8534 ovmf_path.push("workloads"); 8535 ovmf_path.push(OVMF_NAME); 8536 8537 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8538 let api_socket = temp_api_path(&tmp_dir); 8539 8540 let mut child = GuestCommand::new(windows_guest.guest()) 8541 .args(["--api-socket", &api_socket]) 8542 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8543 .args(["--memory", "size=2G,hotplug_size=5G"]) 8544 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8545 .args(["--serial", "tty"]) 8546 .args(["--console", "off"]) 8547 .default_disks() 8548 .default_net() 8549 .capture_output() 8550 .spawn() 8551 .unwrap(); 8552 8553 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8554 8555 let r = std::panic::catch_unwind(|| { 8556 // Wait to make sure Windows boots up 8557 assert!(windows_guest.wait_for_boot()); 8558 8559 let ram_size = 2 * 1024 * 1024 * 1024; 8560 // Check the initial number of RAM the guest sees 8561 let current_ram_size = windows_guest.ram_size(); 8562 // This size seems to be reserved by the system and thus the 8563 // reported amount differs by this constant value. 8564 let reserved_ram_size = ram_size - current_ram_size; 8565 // Verify that there's not more than 4mb constant diff wasted 8566 // by the reserved ram. 8567 assert!(reserved_ram_size < 4 * 1024 * 1024); 8568 8569 let ram_size = 4 * 1024 * 1024 * 1024; 8570 // Hotplug some RAM 8571 resize_command(&api_socket, None, Some(ram_size), None, None); 8572 // Wait to make sure RAM has been added 8573 thread::sleep(std::time::Duration::new(10, 0)); 8574 // Check the guest sees the correct number 8575 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 8576 8577 let ram_size = 3 * 1024 * 1024 * 1024; 8578 // Unplug some RAM. Note that hot-remove most likely won't work. 8579 resize_command(&api_socket, None, Some(ram_size), None, None); 8580 // Wait to make sure RAM has been added 8581 thread::sleep(std::time::Duration::new(10, 0)); 8582 // Reboot to let Windows catch up 8583 windows_guest.reboot(); 8584 // Wait to make sure guest completely rebooted 8585 thread::sleep(std::time::Duration::new(60, 0)); 8586 // Check the guest sees the correct number 8587 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 8588 8589 windows_guest.shutdown(); 8590 }); 8591 8592 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8593 let _ = child.kill(); 8594 let output = child.wait_with_output().unwrap(); 8595 8596 let _ = child_dnsmasq.kill(); 8597 let _ = child_dnsmasq.wait(); 8598 8599 handle_child_output(r, &output); 8600 } 8601 8602 #[test] 8603 #[cfg(not(feature = "mshv"))] 8604 fn test_windows_guest_netdev_hotplug() { 8605 let windows_guest = WindowsGuest::new(); 8606 8607 let mut ovmf_path = dirs::home_dir().unwrap(); 8608 ovmf_path.push("workloads"); 8609 ovmf_path.push(OVMF_NAME); 8610 8611 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8612 let api_socket = temp_api_path(&tmp_dir); 8613 8614 let mut child = GuestCommand::new(windows_guest.guest()) 8615 .args(["--api-socket", &api_socket]) 8616 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8617 .args(["--memory", "size=4G"]) 8618 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8619 .args(["--serial", "tty"]) 8620 .args(["--console", "off"]) 8621 .default_disks() 8622 .default_net() 8623 .capture_output() 8624 .spawn() 8625 .unwrap(); 8626 8627 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8628 8629 let r = std::panic::catch_unwind(|| { 8630 // Wait to make sure Windows boots up 8631 assert!(windows_guest.wait_for_boot()); 8632 8633 // Initially present network device 8634 let netdev_num = 1; 8635 assert_eq!(windows_guest.netdev_count(), netdev_num); 8636 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8637 8638 // Hotplug network device 8639 let (cmd_success, cmd_output) = remote_command_w_output( 8640 &api_socket, 8641 "add-net", 8642 Some(windows_guest.guest().default_net_string().as_str()), 8643 ); 8644 assert!(cmd_success); 8645 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\"")); 8646 thread::sleep(std::time::Duration::new(5, 0)); 8647 // Verify the device is on the system 8648 let netdev_num = 2; 8649 assert_eq!(windows_guest.netdev_count(), netdev_num); 8650 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8651 8652 // Remove network device 8653 let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2")); 8654 assert!(cmd_success); 8655 thread::sleep(std::time::Duration::new(5, 0)); 8656 // Verify the device has been removed 8657 let netdev_num = 1; 8658 assert_eq!(windows_guest.netdev_count(), netdev_num); 8659 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8660 8661 windows_guest.shutdown(); 8662 }); 8663 8664 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8665 let _ = child.kill(); 8666 let output = child.wait_with_output().unwrap(); 8667 8668 let _ = child_dnsmasq.kill(); 8669 let _ = child_dnsmasq.wait(); 8670 8671 handle_child_output(r, &output); 8672 } 8673 8674 #[test] 8675 #[ignore = "See #6037"] 8676 #[cfg(not(feature = "mshv"))] 8677 #[cfg(not(target_arch = "aarch64"))] 8678 fn test_windows_guest_disk_hotplug() { 8679 let windows_guest = WindowsGuest::new(); 8680 8681 let mut ovmf_path = dirs::home_dir().unwrap(); 8682 ovmf_path.push("workloads"); 8683 ovmf_path.push(OVMF_NAME); 8684 8685 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8686 let api_socket = temp_api_path(&tmp_dir); 8687 8688 let mut child = GuestCommand::new(windows_guest.guest()) 8689 .args(["--api-socket", &api_socket]) 8690 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8691 .args(["--memory", "size=4G"]) 8692 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8693 .args(["--serial", "tty"]) 8694 .args(["--console", "off"]) 8695 .default_disks() 8696 .default_net() 8697 .capture_output() 8698 .spawn() 8699 .unwrap(); 8700 8701 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8702 8703 let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100); 8704 8705 let r = std::panic::catch_unwind(|| { 8706 // Wait to make sure Windows boots up 8707 assert!(windows_guest.wait_for_boot()); 8708 8709 // Initially present disk device 8710 let disk_num = 1; 8711 assert_eq!(windows_guest.disk_count(), disk_num); 8712 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8713 8714 // Hotplug disk device 8715 let (cmd_success, cmd_output) = remote_command_w_output( 8716 &api_socket, 8717 "add-disk", 8718 Some(format!("path={disk},readonly=off").as_str()), 8719 ); 8720 assert!(cmd_success); 8721 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\"")); 8722 thread::sleep(std::time::Duration::new(5, 0)); 8723 // Online disk device 8724 windows_guest.disks_set_rw(); 8725 windows_guest.disks_online(); 8726 // Verify the device is on the system 8727 let disk_num = 2; 8728 assert_eq!(windows_guest.disk_count(), disk_num); 8729 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8730 8731 let data = "hello"; 8732 let fname = "d:\\world"; 8733 windows_guest.disk_file_put(fname, data); 8734 8735 // Unmount disk device 8736 let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2")); 8737 assert!(cmd_success); 8738 thread::sleep(std::time::Duration::new(5, 0)); 8739 // Verify the device has been removed 8740 let disk_num = 1; 8741 assert_eq!(windows_guest.disk_count(), disk_num); 8742 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8743 8744 // Remount and check the file exists with the expected contents 8745 let (cmd_success, _cmd_output) = remote_command_w_output( 8746 &api_socket, 8747 "add-disk", 8748 Some(format!("path={disk},readonly=off").as_str()), 8749 ); 8750 assert!(cmd_success); 8751 thread::sleep(std::time::Duration::new(5, 0)); 8752 let out = windows_guest.disk_file_read(fname); 8753 assert_eq!(data, out.trim()); 8754 8755 // Intentionally no unmount, it'll happen at shutdown. 8756 8757 windows_guest.shutdown(); 8758 }); 8759 8760 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8761 let _ = child.kill(); 8762 let output = child.wait_with_output().unwrap(); 8763 8764 let _ = child_dnsmasq.kill(); 8765 let _ = child_dnsmasq.wait(); 8766 8767 handle_child_output(r, &output); 8768 } 8769 8770 #[test] 8771 #[ignore = "See #6037"] 8772 #[cfg(not(feature = "mshv"))] 8773 #[cfg(not(target_arch = "aarch64"))] 8774 fn test_windows_guest_disk_hotplug_multi() { 8775 let windows_guest = WindowsGuest::new(); 8776 8777 let mut ovmf_path = dirs::home_dir().unwrap(); 8778 ovmf_path.push("workloads"); 8779 ovmf_path.push(OVMF_NAME); 8780 8781 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8782 let api_socket = temp_api_path(&tmp_dir); 8783 8784 let mut child = GuestCommand::new(windows_guest.guest()) 8785 .args(["--api-socket", &api_socket]) 8786 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8787 .args(["--memory", "size=2G"]) 8788 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8789 .args(["--serial", "tty"]) 8790 .args(["--console", "off"]) 8791 .default_disks() 8792 .default_net() 8793 .capture_output() 8794 .spawn() 8795 .unwrap(); 8796 8797 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8798 8799 // Predefined data to used at various test stages 8800 let disk_test_data: [[String; 4]; 2] = [ 8801 [ 8802 "_disk2".to_string(), 8803 windows_guest.disk_new(WindowsGuest::FS_FAT, 123), 8804 "d:\\world".to_string(), 8805 "hello".to_string(), 8806 ], 8807 [ 8808 "_disk3".to_string(), 8809 windows_guest.disk_new(WindowsGuest::FS_NTFS, 333), 8810 "e:\\hello".to_string(), 8811 "world".to_string(), 8812 ], 8813 ]; 8814 8815 let r = std::panic::catch_unwind(|| { 8816 // Wait to make sure Windows boots up 8817 assert!(windows_guest.wait_for_boot()); 8818 8819 // Initially present disk device 8820 let disk_num = 1; 8821 assert_eq!(windows_guest.disk_count(), disk_num); 8822 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8823 8824 for it in &disk_test_data { 8825 let disk_id = it[0].as_str(); 8826 let disk = it[1].as_str(); 8827 // Hotplug disk device 8828 let (cmd_success, cmd_output) = remote_command_w_output( 8829 &api_socket, 8830 "add-disk", 8831 Some(format!("path={disk},readonly=off").as_str()), 8832 ); 8833 assert!(cmd_success); 8834 assert!(String::from_utf8_lossy(&cmd_output) 8835 .contains(format!("\"id\":\"{disk_id}\"").as_str())); 8836 thread::sleep(std::time::Duration::new(5, 0)); 8837 // Online disk devices 8838 windows_guest.disks_set_rw(); 8839 windows_guest.disks_online(); 8840 } 8841 // Verify the devices are on the system 8842 let disk_num = (disk_test_data.len() + 1) as u8; 8843 assert_eq!(windows_guest.disk_count(), disk_num); 8844 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8845 8846 // Put test data 8847 for it in &disk_test_data { 8848 let fname = it[2].as_str(); 8849 let data = it[3].as_str(); 8850 windows_guest.disk_file_put(fname, data); 8851 } 8852 8853 // Unmount disk devices 8854 for it in &disk_test_data { 8855 let disk_id = it[0].as_str(); 8856 let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id)); 8857 assert!(cmd_success); 8858 thread::sleep(std::time::Duration::new(5, 0)); 8859 } 8860 8861 // Verify the devices have been removed 8862 let disk_num = 1; 8863 assert_eq!(windows_guest.disk_count(), disk_num); 8864 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8865 8866 // Remount 8867 for it in &disk_test_data { 8868 let disk = it[1].as_str(); 8869 let (cmd_success, _cmd_output) = remote_command_w_output( 8870 &api_socket, 8871 "add-disk", 8872 Some(format!("path={disk},readonly=off").as_str()), 8873 ); 8874 assert!(cmd_success); 8875 thread::sleep(std::time::Duration::new(5, 0)); 8876 } 8877 8878 // Check the files exists with the expected contents 8879 for it in &disk_test_data { 8880 let fname = it[2].as_str(); 8881 let data = it[3].as_str(); 8882 let out = windows_guest.disk_file_read(fname); 8883 assert_eq!(data, out.trim()); 8884 } 8885 8886 // Intentionally no unmount, it'll happen at shutdown. 8887 8888 windows_guest.shutdown(); 8889 }); 8890 8891 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8892 let _ = child.kill(); 8893 let output = child.wait_with_output().unwrap(); 8894 8895 let _ = child_dnsmasq.kill(); 8896 let _ = child_dnsmasq.wait(); 8897 8898 handle_child_output(r, &output); 8899 } 8900 8901 #[test] 8902 #[cfg(not(feature = "mshv"))] 8903 #[cfg(not(target_arch = "aarch64"))] 8904 fn test_windows_guest_netdev_multi() { 8905 let windows_guest = WindowsGuest::new(); 8906 8907 let mut ovmf_path = dirs::home_dir().unwrap(); 8908 ovmf_path.push("workloads"); 8909 ovmf_path.push(OVMF_NAME); 8910 8911 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8912 let api_socket = temp_api_path(&tmp_dir); 8913 8914 let mut child = GuestCommand::new(windows_guest.guest()) 8915 .args(["--api-socket", &api_socket]) 8916 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8917 .args(["--memory", "size=4G"]) 8918 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8919 .args(["--serial", "tty"]) 8920 .args(["--console", "off"]) 8921 .default_disks() 8922 // The multi net dev config is borrowed from test_multiple_network_interfaces 8923 .args([ 8924 "--net", 8925 windows_guest.guest().default_net_string().as_str(), 8926 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 8927 "tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 8928 ]) 8929 .capture_output() 8930 .spawn() 8931 .unwrap(); 8932 8933 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8934 8935 let r = std::panic::catch_unwind(|| { 8936 // Wait to make sure Windows boots up 8937 assert!(windows_guest.wait_for_boot()); 8938 8939 let netdev_num = 3; 8940 assert_eq!(windows_guest.netdev_count(), netdev_num); 8941 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8942 8943 let tap_count = exec_host_command_output("ip link | grep -c mytap42"); 8944 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 8945 8946 windows_guest.shutdown(); 8947 }); 8948 8949 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8950 let _ = child.kill(); 8951 let output = child.wait_with_output().unwrap(); 8952 8953 let _ = child_dnsmasq.kill(); 8954 let _ = child_dnsmasq.wait(); 8955 8956 handle_child_output(r, &output); 8957 } 8958 } 8959 8960 #[cfg(target_arch = "x86_64")] 8961 mod sgx { 8962 use crate::*; 8963 8964 #[test] 8965 fn test_sgx() { 8966 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 8967 let jammy = UbuntuDiskConfig::new(jammy_image); 8968 let guest = Guest::new(Box::new(jammy)); 8969 8970 let mut child = GuestCommand::new(&guest) 8971 .args(["--cpus", "boot=1"]) 8972 .args(["--memory", "size=512M"]) 8973 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8974 .default_disks() 8975 .default_net() 8976 .args(["--sgx-epc", "id=epc0,size=64M"]) 8977 .capture_output() 8978 .spawn() 8979 .unwrap(); 8980 8981 let r = std::panic::catch_unwind(|| { 8982 guest.wait_vm_boot(None).unwrap(); 8983 8984 // Check if SGX is correctly detected in the guest. 8985 guest.check_sgx_support().unwrap(); 8986 8987 // Validate the SGX EPC section is 64MiB. 8988 assert_eq!( 8989 guest 8990 .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2") 8991 .unwrap() 8992 .trim(), 8993 "0x0000000004000000" 8994 ); 8995 }); 8996 8997 let _ = child.kill(); 8998 let output = child.wait_with_output().unwrap(); 8999 9000 handle_child_output(r, &output); 9001 } 9002 } 9003 9004 #[cfg(target_arch = "x86_64")] 9005 mod vfio { 9006 use crate::*; 9007 const NVIDIA_VFIO_DEVICE: &str = "/sys/bus/pci/devices/0002:00:01.0"; 9008 9009 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) { 9010 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string()); 9011 let guest = Guest::new(Box::new(jammy)); 9012 let api_socket = temp_api_path(&guest.tmp_dir); 9013 9014 let mut child = GuestCommand::new(&guest) 9015 .args(["--cpus", "boot=4"]) 9016 .args([ 9017 "--memory", 9018 format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(), 9019 ]) 9020 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9021 .args(["--device", format!("path={NVIDIA_VFIO_DEVICE}").as_str()]) 9022 .args(["--api-socket", &api_socket]) 9023 .default_disks() 9024 .default_net() 9025 .capture_output() 9026 .spawn() 9027 .unwrap(); 9028 9029 let r = std::panic::catch_unwind(|| { 9030 guest.wait_vm_boot(None).unwrap(); 9031 9032 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9033 9034 guest.enable_memory_hotplug(); 9035 9036 // Add RAM to the VM 9037 let desired_ram = 6 << 30; 9038 resize_command(&api_socket, None, Some(desired_ram), None, None); 9039 thread::sleep(std::time::Duration::new(30, 0)); 9040 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9041 9042 // Check the VFIO device works when RAM is increased to 6GiB 9043 guest.check_nvidia_gpu(); 9044 }); 9045 9046 let _ = child.kill(); 9047 let output = child.wait_with_output().unwrap(); 9048 9049 handle_child_output(r, &output); 9050 } 9051 9052 #[test] 9053 fn test_nvidia_card_memory_hotplug_acpi() { 9054 test_nvidia_card_memory_hotplug("acpi") 9055 } 9056 9057 #[test] 9058 fn test_nvidia_card_memory_hotplug_virtio_mem() { 9059 test_nvidia_card_memory_hotplug("virtio-mem") 9060 } 9061 9062 #[test] 9063 fn test_nvidia_card_pci_hotplug() { 9064 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string()); 9065 let guest = Guest::new(Box::new(jammy)); 9066 let api_socket = temp_api_path(&guest.tmp_dir); 9067 9068 let mut child = GuestCommand::new(&guest) 9069 .args(["--cpus", "boot=4"]) 9070 .args(["--memory", "size=4G"]) 9071 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9072 .args(["--api-socket", &api_socket]) 9073 .default_disks() 9074 .default_net() 9075 .capture_output() 9076 .spawn() 9077 .unwrap(); 9078 9079 let r = std::panic::catch_unwind(|| { 9080 guest.wait_vm_boot(None).unwrap(); 9081 9082 // Hotplug the card to the VM 9083 let (cmd_success, cmd_output) = remote_command_w_output( 9084 &api_socket, 9085 "add-device", 9086 Some(format!("id=vfio0,path={NVIDIA_VFIO_DEVICE}").as_str()), 9087 ); 9088 assert!(cmd_success); 9089 assert!(String::from_utf8_lossy(&cmd_output) 9090 .contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}")); 9091 9092 thread::sleep(std::time::Duration::new(10, 0)); 9093 9094 // Check the VFIO device works after hotplug 9095 guest.check_nvidia_gpu(); 9096 }); 9097 9098 let _ = child.kill(); 9099 let output = child.wait_with_output().unwrap(); 9100 9101 handle_child_output(r, &output); 9102 } 9103 9104 #[test] 9105 fn test_nvidia_card_reboot() { 9106 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string()); 9107 let guest = Guest::new(Box::new(jammy)); 9108 let api_socket = temp_api_path(&guest.tmp_dir); 9109 9110 let mut child = GuestCommand::new(&guest) 9111 .args(["--cpus", "boot=4"]) 9112 .args(["--memory", "size=4G"]) 9113 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9114 .args(["--device", format!("path={NVIDIA_VFIO_DEVICE}").as_str()]) 9115 .args(["--api-socket", &api_socket]) 9116 .default_disks() 9117 .default_net() 9118 .capture_output() 9119 .spawn() 9120 .unwrap(); 9121 9122 let r = std::panic::catch_unwind(|| { 9123 guest.wait_vm_boot(None).unwrap(); 9124 9125 // Check the VFIO device works after boot 9126 guest.check_nvidia_gpu(); 9127 9128 guest.reboot_linux(0, None); 9129 9130 // Check the VFIO device works after reboot 9131 guest.check_nvidia_gpu(); 9132 }); 9133 9134 let _ = child.kill(); 9135 let output = child.wait_with_output().unwrap(); 9136 9137 handle_child_output(r, &output); 9138 } 9139 } 9140 9141 mod live_migration { 9142 use crate::*; 9143 9144 fn start_live_migration( 9145 migration_socket: &str, 9146 src_api_socket: &str, 9147 dest_api_socket: &str, 9148 local: bool, 9149 ) -> bool { 9150 // Start to receive migration from the destination VM 9151 let mut receive_migration = Command::new(clh_command("ch-remote")) 9152 .args([ 9153 &format!("--api-socket={dest_api_socket}"), 9154 "receive-migration", 9155 &format! {"unix:{migration_socket}"}, 9156 ]) 9157 .stderr(Stdio::piped()) 9158 .stdout(Stdio::piped()) 9159 .spawn() 9160 .unwrap(); 9161 // Give it '1s' to make sure the 'migration_socket' file is properly created 9162 thread::sleep(std::time::Duration::new(1, 0)); 9163 // Start to send migration from the source VM 9164 9165 let mut args = [ 9166 format!("--api-socket={}", &src_api_socket), 9167 "send-migration".to_string(), 9168 format! {"unix:{migration_socket}"}, 9169 ] 9170 .to_vec(); 9171 9172 if local { 9173 args.insert(2, "--local".to_string()); 9174 } 9175 9176 let mut send_migration = Command::new(clh_command("ch-remote")) 9177 .args(&args) 9178 .stderr(Stdio::piped()) 9179 .stdout(Stdio::piped()) 9180 .spawn() 9181 .unwrap(); 9182 9183 // The 'send-migration' command should be executed successfully within the given timeout 9184 let send_success = if let Some(status) = send_migration 9185 .wait_timeout(std::time::Duration::from_secs(30)) 9186 .unwrap() 9187 { 9188 status.success() 9189 } else { 9190 false 9191 }; 9192 9193 if !send_success { 9194 let _ = send_migration.kill(); 9195 let output = send_migration.wait_with_output().unwrap(); 9196 eprintln!( 9197 "\n\n==== Start 'send_migration' output ==== \ 9198 \n\n---stdout---\n{}\n\n---stderr---\n{} \ 9199 \n\n==== End 'send_migration' output ====\n\n", 9200 String::from_utf8_lossy(&output.stdout), 9201 String::from_utf8_lossy(&output.stderr) 9202 ); 9203 } 9204 9205 // The 'receive-migration' command should be executed successfully within the given timeout 9206 let receive_success = if let Some(status) = receive_migration 9207 .wait_timeout(std::time::Duration::from_secs(30)) 9208 .unwrap() 9209 { 9210 status.success() 9211 } else { 9212 false 9213 }; 9214 9215 if !receive_success { 9216 let _ = receive_migration.kill(); 9217 let output = receive_migration.wait_with_output().unwrap(); 9218 eprintln!( 9219 "\n\n==== Start 'receive_migration' output ==== \ 9220 \n\n---stdout---\n{}\n\n---stderr---\n{} \ 9221 \n\n==== End 'receive_migration' output ====\n\n", 9222 String::from_utf8_lossy(&output.stdout), 9223 String::from_utf8_lossy(&output.stderr) 9224 ); 9225 } 9226 9227 send_success && receive_success 9228 } 9229 9230 fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! { 9231 let mut src_vm = src_vm; 9232 let mut dest_vm = dest_vm; 9233 9234 let _ = src_vm.kill(); 9235 let src_output = src_vm.wait_with_output().unwrap(); 9236 eprintln!( 9237 "\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====", 9238 String::from_utf8_lossy(&src_output.stdout) 9239 ); 9240 eprintln!( 9241 "\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====", 9242 String::from_utf8_lossy(&src_output.stderr) 9243 ); 9244 let _ = dest_vm.kill(); 9245 let dest_output = dest_vm.wait_with_output().unwrap(); 9246 eprintln!( 9247 "\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====", 9248 String::from_utf8_lossy(&dest_output.stdout) 9249 ); 9250 eprintln!( 9251 "\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====", 9252 String::from_utf8_lossy(&dest_output.stderr) 9253 ); 9254 9255 if let Some(ovs_vm) = ovs_vm { 9256 let mut ovs_vm = ovs_vm; 9257 let _ = ovs_vm.kill(); 9258 let ovs_output = ovs_vm.wait_with_output().unwrap(); 9259 eprintln!( 9260 "\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====", 9261 String::from_utf8_lossy(&ovs_output.stdout) 9262 ); 9263 eprintln!( 9264 "\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====", 9265 String::from_utf8_lossy(&ovs_output.stderr) 9266 ); 9267 9268 cleanup_ovs_dpdk(); 9269 } 9270 9271 panic!("Test failed: {message}") 9272 } 9273 9274 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 9275 // same host. It ensures the following behaviors: 9276 // 1. The source VM is up and functional (including various virtio-devices are working properly); 9277 // 2. The 'send-migration' and 'receive-migration' command finished successfully; 9278 // 3. The source VM terminated gracefully after live migration; 9279 // 4. The destination VM is functional (including various virtio-devices are working properly) after 9280 // live migration; 9281 // Note: This test does not use vsock as we can't create two identical vsock on the same host. 9282 fn _test_live_migration(upgrade_test: bool, local: bool) { 9283 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9284 let guest = Guest::new(Box::new(focal)); 9285 let kernel_path = direct_kernel_boot_path(); 9286 let console_text = String::from("On a branch floating down river a cricket, singing."); 9287 let net_id = "net123"; 9288 let net_params = format!( 9289 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9290 net_id, guest.network.guest_mac, guest.network.host_ip 9291 ); 9292 9293 let memory_param: &[&str] = if local { 9294 &["--memory", "size=4G,shared=on"] 9295 } else { 9296 &["--memory", "size=4G"] 9297 }; 9298 9299 let boot_vcpus = 2; 9300 let max_vcpus = 4; 9301 9302 let pmem_temp_file = TempFile::new().unwrap(); 9303 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9304 std::process::Command::new("mkfs.ext4") 9305 .arg(pmem_temp_file.as_path()) 9306 .output() 9307 .expect("Expect creating disk image to succeed"); 9308 let pmem_path = String::from("/dev/pmem0"); 9309 9310 // Start the source VM 9311 let src_vm_path = if !upgrade_test { 9312 clh_command("cloud-hypervisor") 9313 } else { 9314 cloud_hypervisor_release_path() 9315 }; 9316 let src_api_socket = temp_api_path(&guest.tmp_dir); 9317 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9318 src_vm_cmd 9319 .args([ 9320 "--cpus", 9321 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9322 ]) 9323 .args(memory_param) 9324 .args(["--kernel", kernel_path.to_str().unwrap()]) 9325 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9326 .default_disks() 9327 .args(["--net", net_params.as_str()]) 9328 .args(["--api-socket", &src_api_socket]) 9329 .args([ 9330 "--pmem", 9331 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9332 ]); 9333 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9334 9335 // Start the destination VM 9336 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9337 dest_api_socket.push_str(".dest"); 9338 let mut dest_child = GuestCommand::new(&guest) 9339 .args(["--api-socket", &dest_api_socket]) 9340 .capture_output() 9341 .spawn() 9342 .unwrap(); 9343 9344 let r = std::panic::catch_unwind(|| { 9345 guest.wait_vm_boot(None).unwrap(); 9346 9347 // Make sure the source VM is functional 9348 // Check the number of vCPUs 9349 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9350 9351 // Check the guest RAM 9352 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9353 9354 // Check the guest virtio-devices, e.g. block, rng, console, and net 9355 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9356 9357 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9358 // to make sure that removing and adding back the virtio-net device does 9359 // not break the live-migration support for virtio-pci. 9360 #[cfg(target_arch = "x86_64")] 9361 { 9362 assert!(remote_command( 9363 &src_api_socket, 9364 "remove-device", 9365 Some(net_id), 9366 )); 9367 thread::sleep(std::time::Duration::new(10, 0)); 9368 9369 // Plug the virtio-net device again 9370 assert!(remote_command( 9371 &src_api_socket, 9372 "add-net", 9373 Some(net_params.as_str()), 9374 )); 9375 thread::sleep(std::time::Duration::new(10, 0)); 9376 } 9377 9378 // Start the live-migration 9379 let migration_socket = String::from( 9380 guest 9381 .tmp_dir 9382 .as_path() 9383 .join("live-migration.sock") 9384 .to_str() 9385 .unwrap(), 9386 ); 9387 9388 assert!( 9389 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9390 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9391 ); 9392 }); 9393 9394 // Check and report any errors occurred during the live-migration 9395 if r.is_err() { 9396 print_and_panic( 9397 src_child, 9398 dest_child, 9399 None, 9400 "Error occurred during live-migration", 9401 ); 9402 } 9403 9404 // Check the source vm has been terminated successful (give it '3s' to settle) 9405 thread::sleep(std::time::Duration::new(3, 0)); 9406 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 9407 print_and_panic( 9408 src_child, 9409 dest_child, 9410 None, 9411 "source VM was not terminated successfully.", 9412 ); 9413 }; 9414 9415 // Post live-migration check to make sure the destination VM is functional 9416 let r = std::panic::catch_unwind(|| { 9417 // Perform same checks to validate VM has been properly migrated 9418 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9419 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9420 9421 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9422 }); 9423 9424 // Clean-up the destination VM and make sure it terminated correctly 9425 let _ = dest_child.kill(); 9426 let dest_output = dest_child.wait_with_output().unwrap(); 9427 handle_child_output(r, &dest_output); 9428 9429 // Check the destination VM has the expected 'console_text' from its output 9430 let r = std::panic::catch_unwind(|| { 9431 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9432 }); 9433 handle_child_output(r, &dest_output); 9434 } 9435 9436 fn _test_live_migration_balloon(upgrade_test: bool, local: bool) { 9437 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9438 let guest = Guest::new(Box::new(focal)); 9439 let kernel_path = direct_kernel_boot_path(); 9440 let console_text = String::from("On a branch floating down river a cricket, singing."); 9441 let net_id = "net123"; 9442 let net_params = format!( 9443 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9444 net_id, guest.network.guest_mac, guest.network.host_ip 9445 ); 9446 9447 let memory_param: &[&str] = if local { 9448 &[ 9449 "--memory", 9450 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on", 9451 "--balloon", 9452 "size=0", 9453 ] 9454 } else { 9455 &[ 9456 "--memory", 9457 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G", 9458 "--balloon", 9459 "size=0", 9460 ] 9461 }; 9462 9463 let boot_vcpus = 2; 9464 let max_vcpus = 4; 9465 9466 let pmem_temp_file = TempFile::new().unwrap(); 9467 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9468 std::process::Command::new("mkfs.ext4") 9469 .arg(pmem_temp_file.as_path()) 9470 .output() 9471 .expect("Expect creating disk image to succeed"); 9472 let pmem_path = String::from("/dev/pmem0"); 9473 9474 // Start the source VM 9475 let src_vm_path = if !upgrade_test { 9476 clh_command("cloud-hypervisor") 9477 } else { 9478 cloud_hypervisor_release_path() 9479 }; 9480 let src_api_socket = temp_api_path(&guest.tmp_dir); 9481 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9482 src_vm_cmd 9483 .args([ 9484 "--cpus", 9485 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9486 ]) 9487 .args(memory_param) 9488 .args(["--kernel", kernel_path.to_str().unwrap()]) 9489 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9490 .default_disks() 9491 .args(["--net", net_params.as_str()]) 9492 .args(["--api-socket", &src_api_socket]) 9493 .args([ 9494 "--pmem", 9495 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9496 ]); 9497 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9498 9499 // Start the destination VM 9500 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9501 dest_api_socket.push_str(".dest"); 9502 let mut dest_child = GuestCommand::new(&guest) 9503 .args(["--api-socket", &dest_api_socket]) 9504 .capture_output() 9505 .spawn() 9506 .unwrap(); 9507 9508 let r = std::panic::catch_unwind(|| { 9509 guest.wait_vm_boot(None).unwrap(); 9510 9511 // Make sure the source VM is functional 9512 // Check the number of vCPUs 9513 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9514 9515 // Check the guest RAM 9516 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9517 // Increase the guest RAM 9518 resize_command(&src_api_socket, None, Some(6 << 30), None, None); 9519 thread::sleep(std::time::Duration::new(5, 0)); 9520 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9521 // Use balloon to remove RAM from the VM 9522 resize_command(&src_api_socket, None, None, Some(1 << 30), None); 9523 thread::sleep(std::time::Duration::new(5, 0)); 9524 let total_memory = guest.get_total_memory().unwrap_or_default(); 9525 assert!(total_memory > 4_800_000); 9526 assert!(total_memory < 5_760_000); 9527 9528 // Check the guest virtio-devices, e.g. block, rng, console, and net 9529 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9530 9531 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9532 // to make sure that removing and adding back the virtio-net device does 9533 // not break the live-migration support for virtio-pci. 9534 #[cfg(target_arch = "x86_64")] 9535 { 9536 assert!(remote_command( 9537 &src_api_socket, 9538 "remove-device", 9539 Some(net_id), 9540 )); 9541 thread::sleep(std::time::Duration::new(10, 0)); 9542 9543 // Plug the virtio-net device again 9544 assert!(remote_command( 9545 &src_api_socket, 9546 "add-net", 9547 Some(net_params.as_str()), 9548 )); 9549 thread::sleep(std::time::Duration::new(10, 0)); 9550 } 9551 9552 // Start the live-migration 9553 let migration_socket = String::from( 9554 guest 9555 .tmp_dir 9556 .as_path() 9557 .join("live-migration.sock") 9558 .to_str() 9559 .unwrap(), 9560 ); 9561 9562 assert!( 9563 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9564 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9565 ); 9566 }); 9567 9568 // Check and report any errors occurred during the live-migration 9569 if r.is_err() { 9570 print_and_panic( 9571 src_child, 9572 dest_child, 9573 None, 9574 "Error occurred during live-migration", 9575 ); 9576 } 9577 9578 // Check the source vm has been terminated successful (give it '3s' to settle) 9579 thread::sleep(std::time::Duration::new(3, 0)); 9580 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 9581 print_and_panic( 9582 src_child, 9583 dest_child, 9584 None, 9585 "source VM was not terminated successfully.", 9586 ); 9587 }; 9588 9589 // Post live-migration check to make sure the destination VM is functional 9590 let r = std::panic::catch_unwind(|| { 9591 // Perform same checks to validate VM has been properly migrated 9592 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9593 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9594 9595 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9596 9597 // Perform checks on guest RAM using balloon 9598 let total_memory = guest.get_total_memory().unwrap_or_default(); 9599 assert!(total_memory > 4_800_000); 9600 assert!(total_memory < 5_760_000); 9601 // Deflate balloon to restore entire RAM to the VM 9602 resize_command(&dest_api_socket, None, None, Some(0), None); 9603 thread::sleep(std::time::Duration::new(5, 0)); 9604 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9605 // Decrease guest RAM with virtio-mem 9606 resize_command(&dest_api_socket, None, Some(5 << 30), None, None); 9607 thread::sleep(std::time::Duration::new(5, 0)); 9608 let total_memory = guest.get_total_memory().unwrap_or_default(); 9609 assert!(total_memory > 4_800_000); 9610 assert!(total_memory < 5_760_000); 9611 }); 9612 9613 // Clean-up the destination VM and make sure it terminated correctly 9614 let _ = dest_child.kill(); 9615 let dest_output = dest_child.wait_with_output().unwrap(); 9616 handle_child_output(r, &dest_output); 9617 9618 // Check the destination VM has the expected 'console_text' from its output 9619 let r = std::panic::catch_unwind(|| { 9620 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9621 }); 9622 handle_child_output(r, &dest_output); 9623 } 9624 9625 fn _test_live_migration_numa(upgrade_test: bool, local: bool) { 9626 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9627 let guest = Guest::new(Box::new(focal)); 9628 let kernel_path = direct_kernel_boot_path(); 9629 let console_text = String::from("On a branch floating down river a cricket, singing."); 9630 let net_id = "net123"; 9631 let net_params = format!( 9632 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9633 net_id, guest.network.guest_mac, guest.network.host_ip 9634 ); 9635 9636 let memory_param: &[&str] = if local { 9637 &[ 9638 "--memory", 9639 "size=0,hotplug_method=virtio-mem,shared=on", 9640 "--memory-zone", 9641 "id=mem0,size=1G,hotplug_size=4G,shared=on", 9642 "id=mem1,size=1G,hotplug_size=4G,shared=on", 9643 "id=mem2,size=2G,hotplug_size=4G,shared=on", 9644 "--numa", 9645 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9646 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9647 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9648 ] 9649 } else { 9650 &[ 9651 "--memory", 9652 "size=0,hotplug_method=virtio-mem", 9653 "--memory-zone", 9654 "id=mem0,size=1G,hotplug_size=4G", 9655 "id=mem1,size=1G,hotplug_size=4G", 9656 "id=mem2,size=2G,hotplug_size=4G", 9657 "--numa", 9658 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9659 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9660 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9661 ] 9662 }; 9663 9664 let boot_vcpus = 6; 9665 let max_vcpus = 12; 9666 9667 let pmem_temp_file = TempFile::new().unwrap(); 9668 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9669 std::process::Command::new("mkfs.ext4") 9670 .arg(pmem_temp_file.as_path()) 9671 .output() 9672 .expect("Expect creating disk image to succeed"); 9673 let pmem_path = String::from("/dev/pmem0"); 9674 9675 // Start the source VM 9676 let src_vm_path = if !upgrade_test { 9677 clh_command("cloud-hypervisor") 9678 } else { 9679 cloud_hypervisor_release_path() 9680 }; 9681 let src_api_socket = temp_api_path(&guest.tmp_dir); 9682 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9683 src_vm_cmd 9684 .args([ 9685 "--cpus", 9686 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9687 ]) 9688 .args(memory_param) 9689 .args(["--kernel", kernel_path.to_str().unwrap()]) 9690 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9691 .default_disks() 9692 .args(["--net", net_params.as_str()]) 9693 .args(["--api-socket", &src_api_socket]) 9694 .args([ 9695 "--pmem", 9696 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9697 ]); 9698 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9699 9700 // Start the destination VM 9701 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9702 dest_api_socket.push_str(".dest"); 9703 let mut dest_child = GuestCommand::new(&guest) 9704 .args(["--api-socket", &dest_api_socket]) 9705 .capture_output() 9706 .spawn() 9707 .unwrap(); 9708 9709 let r = std::panic::catch_unwind(|| { 9710 guest.wait_vm_boot(None).unwrap(); 9711 9712 // Make sure the source VM is functional 9713 // Check the number of vCPUs 9714 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9715 9716 // Check the guest RAM 9717 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 9718 9719 // Check the guest virtio-devices, e.g. block, rng, console, and net 9720 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9721 9722 // Check the NUMA parameters are applied correctly and resize 9723 // each zone to test the case where we migrate a VM with the 9724 // virtio-mem regions being used. 9725 { 9726 guest.check_numa_common( 9727 Some(&[960_000, 960_000, 1_920_000]), 9728 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9729 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9730 ); 9731 9732 // AArch64 currently does not support hotplug, and therefore we only 9733 // test hotplug-related function on x86_64 here. 9734 #[cfg(target_arch = "x86_64")] 9735 { 9736 guest.enable_memory_hotplug(); 9737 9738 // Resize every memory zone and check each associated NUMA node 9739 // has been assigned the right amount of memory. 9740 resize_zone_command(&src_api_socket, "mem0", "2G"); 9741 resize_zone_command(&src_api_socket, "mem1", "2G"); 9742 resize_zone_command(&src_api_socket, "mem2", "3G"); 9743 thread::sleep(std::time::Duration::new(5, 0)); 9744 9745 guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None); 9746 } 9747 } 9748 9749 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9750 // to make sure that removing and adding back the virtio-net device does 9751 // not break the live-migration support for virtio-pci. 9752 #[cfg(target_arch = "x86_64")] 9753 { 9754 assert!(remote_command( 9755 &src_api_socket, 9756 "remove-device", 9757 Some(net_id), 9758 )); 9759 thread::sleep(std::time::Duration::new(10, 0)); 9760 9761 // Plug the virtio-net device again 9762 assert!(remote_command( 9763 &src_api_socket, 9764 "add-net", 9765 Some(net_params.as_str()), 9766 )); 9767 thread::sleep(std::time::Duration::new(10, 0)); 9768 } 9769 9770 // Start the live-migration 9771 let migration_socket = String::from( 9772 guest 9773 .tmp_dir 9774 .as_path() 9775 .join("live-migration.sock") 9776 .to_str() 9777 .unwrap(), 9778 ); 9779 9780 assert!( 9781 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9782 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9783 ); 9784 }); 9785 9786 // Check and report any errors occurred during the live-migration 9787 if r.is_err() { 9788 print_and_panic( 9789 src_child, 9790 dest_child, 9791 None, 9792 "Error occurred during live-migration", 9793 ); 9794 } 9795 9796 // Check the source vm has been terminated successful (give it '3s' to settle) 9797 thread::sleep(std::time::Duration::new(3, 0)); 9798 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 9799 print_and_panic( 9800 src_child, 9801 dest_child, 9802 None, 9803 "source VM was not terminated successfully.", 9804 ); 9805 }; 9806 9807 // Post live-migration check to make sure the destination VM is functional 9808 let r = std::panic::catch_unwind(|| { 9809 // Perform same checks to validate VM has been properly migrated 9810 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9811 #[cfg(target_arch = "x86_64")] 9812 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 9813 #[cfg(target_arch = "aarch64")] 9814 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9815 9816 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9817 9818 // Perform NUMA related checks 9819 { 9820 #[cfg(target_arch = "aarch64")] 9821 { 9822 guest.check_numa_common( 9823 Some(&[960_000, 960_000, 1_920_000]), 9824 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9825 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9826 ); 9827 } 9828 9829 // AArch64 currently does not support hotplug, and therefore we only 9830 // test hotplug-related function on x86_64 here. 9831 #[cfg(target_arch = "x86_64")] 9832 { 9833 guest.check_numa_common( 9834 Some(&[1_920_000, 1_920_000, 2_880_000]), 9835 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9836 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9837 ); 9838 9839 guest.enable_memory_hotplug(); 9840 9841 // Resize every memory zone and check each associated NUMA node 9842 // has been assigned the right amount of memory. 9843 resize_zone_command(&dest_api_socket, "mem0", "4G"); 9844 resize_zone_command(&dest_api_socket, "mem1", "4G"); 9845 resize_zone_command(&dest_api_socket, "mem2", "4G"); 9846 // Resize to the maximum amount of CPUs and check each NUMA 9847 // node has been assigned the right CPUs set. 9848 resize_command(&dest_api_socket, Some(max_vcpus), None, None, None); 9849 thread::sleep(std::time::Duration::new(5, 0)); 9850 9851 guest.check_numa_common( 9852 Some(&[3_840_000, 3_840_000, 3_840_000]), 9853 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 9854 None, 9855 ); 9856 } 9857 } 9858 }); 9859 9860 // Clean-up the destination VM and make sure it terminated correctly 9861 let _ = dest_child.kill(); 9862 let dest_output = dest_child.wait_with_output().unwrap(); 9863 handle_child_output(r, &dest_output); 9864 9865 // Check the destination VM has the expected 'console_text' from its output 9866 let r = std::panic::catch_unwind(|| { 9867 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9868 }); 9869 handle_child_output(r, &dest_output); 9870 } 9871 9872 fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) { 9873 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9874 let guest = Guest::new(Box::new(focal)); 9875 let kernel_path = direct_kernel_boot_path(); 9876 let console_text = String::from("On a branch floating down river a cricket, singing."); 9877 let net_id = "net123"; 9878 let net_params = format!( 9879 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9880 net_id, guest.network.guest_mac, guest.network.host_ip 9881 ); 9882 9883 let memory_param: &[&str] = if local { 9884 &["--memory", "size=4G,shared=on"] 9885 } else { 9886 &["--memory", "size=4G"] 9887 }; 9888 9889 let boot_vcpus = 2; 9890 let max_vcpus = 4; 9891 9892 let pmem_temp_file = TempFile::new().unwrap(); 9893 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9894 std::process::Command::new("mkfs.ext4") 9895 .arg(pmem_temp_file.as_path()) 9896 .output() 9897 .expect("Expect creating disk image to succeed"); 9898 let pmem_path = String::from("/dev/pmem0"); 9899 9900 // Start the source VM 9901 let src_vm_path = if !upgrade_test { 9902 clh_command("cloud-hypervisor") 9903 } else { 9904 cloud_hypervisor_release_path() 9905 }; 9906 let src_api_socket = temp_api_path(&guest.tmp_dir); 9907 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9908 src_vm_cmd 9909 .args([ 9910 "--cpus", 9911 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9912 ]) 9913 .args(memory_param) 9914 .args(["--kernel", kernel_path.to_str().unwrap()]) 9915 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9916 .default_disks() 9917 .args(["--net", net_params.as_str()]) 9918 .args(["--api-socket", &src_api_socket]) 9919 .args([ 9920 "--pmem", 9921 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9922 ]) 9923 .args(["--watchdog"]); 9924 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9925 9926 // Start the destination VM 9927 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9928 dest_api_socket.push_str(".dest"); 9929 let mut dest_child = GuestCommand::new(&guest) 9930 .args(["--api-socket", &dest_api_socket]) 9931 .capture_output() 9932 .spawn() 9933 .unwrap(); 9934 9935 let r = std::panic::catch_unwind(|| { 9936 guest.wait_vm_boot(None).unwrap(); 9937 9938 // Make sure the source VM is functional 9939 // Check the number of vCPUs 9940 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9941 // Check the guest RAM 9942 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9943 // Check the guest virtio-devices, e.g. block, rng, console, and net 9944 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9945 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9946 // to make sure that removing and adding back the virtio-net device does 9947 // not break the live-migration support for virtio-pci. 9948 #[cfg(target_arch = "x86_64")] 9949 { 9950 assert!(remote_command( 9951 &src_api_socket, 9952 "remove-device", 9953 Some(net_id), 9954 )); 9955 thread::sleep(std::time::Duration::new(10, 0)); 9956 9957 // Plug the virtio-net device again 9958 assert!(remote_command( 9959 &src_api_socket, 9960 "add-net", 9961 Some(net_params.as_str()), 9962 )); 9963 thread::sleep(std::time::Duration::new(10, 0)); 9964 } 9965 9966 // Enable watchdog and ensure its functional 9967 let expected_reboot_count = 1; 9968 // Enable the watchdog with a 15s timeout 9969 enable_guest_watchdog(&guest, 15); 9970 9971 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9972 assert_eq!( 9973 guest 9974 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 9975 .unwrap() 9976 .trim() 9977 .parse::<u32>() 9978 .unwrap_or_default(), 9979 1 9980 ); 9981 // Allow some normal time to elapse to check we don't get spurious reboots 9982 thread::sleep(std::time::Duration::new(40, 0)); 9983 // Check no reboot 9984 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 9985 9986 // Start the live-migration 9987 let migration_socket = String::from( 9988 guest 9989 .tmp_dir 9990 .as_path() 9991 .join("live-migration.sock") 9992 .to_str() 9993 .unwrap(), 9994 ); 9995 9996 assert!( 9997 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9998 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9999 ); 10000 }); 10001 10002 // Check and report any errors occurred during the live-migration 10003 if r.is_err() { 10004 print_and_panic( 10005 src_child, 10006 dest_child, 10007 None, 10008 "Error occurred during live-migration", 10009 ); 10010 } 10011 10012 // Check the source vm has been terminated successful (give it '3s' to settle) 10013 thread::sleep(std::time::Duration::new(3, 0)); 10014 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 10015 print_and_panic( 10016 src_child, 10017 dest_child, 10018 None, 10019 "source VM was not terminated successfully.", 10020 ); 10021 }; 10022 10023 // Post live-migration check to make sure the destination VM is functional 10024 let r = std::panic::catch_unwind(|| { 10025 // Perform same checks to validate VM has been properly migrated 10026 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 10027 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 10028 10029 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 10030 10031 // Perform checks on watchdog 10032 let mut expected_reboot_count = 1; 10033 10034 // Allow some normal time to elapse to check we don't get spurious reboots 10035 thread::sleep(std::time::Duration::new(40, 0)); 10036 // Check no reboot 10037 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 10038 10039 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 10040 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 10041 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 10042 guest.wait_vm_boot(Some(50)).unwrap(); 10043 // Check a reboot is triggered by the watchdog 10044 expected_reboot_count += 1; 10045 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 10046 10047 #[cfg(target_arch = "x86_64")] 10048 { 10049 // Now pause the VM and remain offline for 30s 10050 assert!(remote_command(&dest_api_socket, "pause", None)); 10051 thread::sleep(std::time::Duration::new(30, 0)); 10052 assert!(remote_command(&dest_api_socket, "resume", None)); 10053 10054 // Check no reboot 10055 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 10056 } 10057 }); 10058 10059 // Clean-up the destination VM and make sure it terminated correctly 10060 let _ = dest_child.kill(); 10061 let dest_output = dest_child.wait_with_output().unwrap(); 10062 handle_child_output(r, &dest_output); 10063 10064 // Check the destination VM has the expected 'console_text' from its output 10065 let r = std::panic::catch_unwind(|| { 10066 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 10067 }); 10068 handle_child_output(r, &dest_output); 10069 } 10070 10071 fn _test_live_migration_ovs_dpdk(upgrade_test: bool, local: bool) { 10072 let ovs_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10073 let ovs_guest = Guest::new(Box::new(ovs_focal)); 10074 10075 let migration_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10076 let migration_guest = Guest::new(Box::new(migration_focal)); 10077 let src_api_socket = temp_api_path(&migration_guest.tmp_dir); 10078 10079 // Start two VMs that are connected through ovs-dpdk and one of the VMs is the source VM for live-migration 10080 let (mut ovs_child, mut src_child) = 10081 setup_ovs_dpdk_guests(&ovs_guest, &migration_guest, &src_api_socket, upgrade_test); 10082 10083 // Start the destination VM 10084 let mut dest_api_socket = temp_api_path(&migration_guest.tmp_dir); 10085 dest_api_socket.push_str(".dest"); 10086 let mut dest_child = GuestCommand::new(&migration_guest) 10087 .args(["--api-socket", &dest_api_socket]) 10088 .capture_output() 10089 .spawn() 10090 .unwrap(); 10091 10092 let r = std::panic::catch_unwind(|| { 10093 // Give it '1s' to make sure the 'dest_api_socket' file is properly created 10094 thread::sleep(std::time::Duration::new(1, 0)); 10095 10096 // Start the live-migration 10097 let migration_socket = String::from( 10098 migration_guest 10099 .tmp_dir 10100 .as_path() 10101 .join("live-migration.sock") 10102 .to_str() 10103 .unwrap(), 10104 ); 10105 10106 assert!( 10107 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 10108 "Unsuccessful command: 'send-migration' or 'receive-migration'." 10109 ); 10110 }); 10111 10112 // Check and report any errors occurred during the live-migration 10113 if r.is_err() { 10114 print_and_panic( 10115 src_child, 10116 dest_child, 10117 Some(ovs_child), 10118 "Error occurred during live-migration", 10119 ); 10120 } 10121 10122 // Check the source vm has been terminated successful (give it '3s' to settle) 10123 thread::sleep(std::time::Duration::new(3, 0)); 10124 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 10125 print_and_panic( 10126 src_child, 10127 dest_child, 10128 Some(ovs_child), 10129 "source VM was not terminated successfully.", 10130 ); 10131 }; 10132 10133 // Post live-migration check to make sure the destination VM is functional 10134 let r = std::panic::catch_unwind(|| { 10135 // Perform same checks to validate VM has been properly migrated 10136 // Spawn a new netcat listener in the OVS VM 10137 let guest_ip = ovs_guest.network.guest_ip.clone(); 10138 thread::spawn(move || { 10139 ssh_command_ip( 10140 "nc -l 12345", 10141 &guest_ip, 10142 DEFAULT_SSH_RETRIES, 10143 DEFAULT_SSH_TIMEOUT, 10144 ) 10145 .unwrap(); 10146 }); 10147 10148 // Wait for the server to be listening 10149 thread::sleep(std::time::Duration::new(5, 0)); 10150 10151 // And check the connection is still functional after live-migration 10152 migration_guest 10153 .ssh_command("nc -vz 172.100.0.1 12345") 10154 .unwrap(); 10155 }); 10156 10157 // Clean-up the destination VM and OVS VM, and make sure they terminated correctly 10158 let _ = dest_child.kill(); 10159 let _ = ovs_child.kill(); 10160 let dest_output = dest_child.wait_with_output().unwrap(); 10161 let ovs_output = ovs_child.wait_with_output().unwrap(); 10162 10163 cleanup_ovs_dpdk(); 10164 10165 handle_child_output(r, &dest_output); 10166 handle_child_output(Ok(()), &ovs_output); 10167 } 10168 10169 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 10170 // same host with Landlock enabled on both VMs. The test validates the following: 10171 // 1. The source VM is up and functional 10172 // 2. Ensure Landlock is enabled on source VM by hotplugging a disk. As the path for this 10173 // disk is not known to the source VM this step will fail. 10174 // 3. The 'send-migration' and 'receive-migration' command finished successfully; 10175 // 4. The source VM terminated gracefully after live migration; 10176 // 5. The destination VM is functional after live migration; 10177 // 6. Ensure Landlock is enabled on destination VM by hotplugging a disk. As the path for 10178 // this disk is not known to the destination VM this step will fail. 10179 fn _test_live_migration_with_landlock() { 10180 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10181 let guest = Guest::new(Box::new(focal)); 10182 let kernel_path = direct_kernel_boot_path(); 10183 let net_id = "net123"; 10184 let net_params = format!( 10185 "id={},tap=,mac={},ip={},mask=255.255.255.0", 10186 net_id, guest.network.guest_mac, guest.network.host_ip 10187 ); 10188 10189 let boot_vcpus = 2; 10190 let max_vcpus = 4; 10191 10192 let mut blk_file_path = dirs::home_dir().unwrap(); 10193 blk_file_path.push("workloads"); 10194 blk_file_path.push("blk.img"); 10195 10196 let src_api_socket = temp_api_path(&guest.tmp_dir); 10197 let mut src_child = GuestCommand::new(&guest) 10198 .args([ 10199 "--cpus", 10200 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 10201 ]) 10202 .args(["--memory", "size=4G,shared=on"]) 10203 .args(["--kernel", kernel_path.to_str().unwrap()]) 10204 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10205 .default_disks() 10206 .args(["--api-socket", &src_api_socket]) 10207 .args(["--landlock"]) 10208 .args(["--net", net_params.as_str()]) 10209 .args([ 10210 "--landlock-rules", 10211 format!("path={:?},access=rw", guest.tmp_dir.as_path()).as_str(), 10212 ]) 10213 .capture_output() 10214 .spawn() 10215 .unwrap(); 10216 10217 // Start the destination VM 10218 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 10219 dest_api_socket.push_str(".dest"); 10220 let mut dest_child = GuestCommand::new(&guest) 10221 .args(["--api-socket", &dest_api_socket]) 10222 .capture_output() 10223 .spawn() 10224 .unwrap(); 10225 10226 let r = std::panic::catch_unwind(|| { 10227 guest.wait_vm_boot(None).unwrap(); 10228 10229 // Make sure the source VM is functaionl 10230 // Check the number of vCPUs 10231 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 10232 10233 // Check the guest RAM 10234 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 10235 10236 // Check Landlock is enabled by hot-plugging a disk. 10237 assert!(!remote_command( 10238 &src_api_socket, 10239 "add-disk", 10240 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 10241 )); 10242 10243 // Start the live-migration 10244 let migration_socket = String::from( 10245 guest 10246 .tmp_dir 10247 .as_path() 10248 .join("live-migration.sock") 10249 .to_str() 10250 .unwrap(), 10251 ); 10252 10253 assert!( 10254 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, true), 10255 "Unsuccessful command: 'send-migration' or 'receive-migration'." 10256 ); 10257 }); 10258 10259 // Check and report any errors occurred during the live-migration 10260 if r.is_err() { 10261 print_and_panic( 10262 src_child, 10263 dest_child, 10264 None, 10265 "Error occurred during live-migration", 10266 ); 10267 } 10268 10269 // Check the source vm has been terminated successful (give it '3s' to settle) 10270 thread::sleep(std::time::Duration::new(3, 0)); 10271 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 10272 print_and_panic( 10273 src_child, 10274 dest_child, 10275 None, 10276 "source VM was not terminated successfully.", 10277 ); 10278 }; 10279 10280 // Post live-migration check to make sure the destination VM is funcational 10281 let r = std::panic::catch_unwind(|| { 10282 // Perform same checks to validate VM has been properly migrated 10283 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 10284 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 10285 }); 10286 10287 // Check Landlock is enabled on destination VM by hot-plugging a disk. 10288 assert!(!remote_command( 10289 &dest_api_socket, 10290 "add-disk", 10291 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 10292 )); 10293 10294 // Clean-up the destination VM and make sure it terminated correctly 10295 let _ = dest_child.kill(); 10296 let dest_output = dest_child.wait_with_output().unwrap(); 10297 handle_child_output(r, &dest_output); 10298 } 10299 10300 // Function to get an available port 10301 fn get_available_port() -> u16 { 10302 TcpListener::bind("127.0.0.1:0") 10303 .expect("Failed to bind to address") 10304 .local_addr() 10305 .unwrap() 10306 .port() 10307 } 10308 10309 fn start_live_migration_tcp(src_api_socket: &str, dest_api_socket: &str) -> bool { 10310 // Get an available TCP port 10311 let migration_port = get_available_port(); 10312 let host_ip = "127.0.0.1"; 10313 10314 // Start the 'receive-migration' command on the destination 10315 let mut receive_migration = Command::new(clh_command("ch-remote")) 10316 .args([ 10317 &format!("--api-socket={}", dest_api_socket), 10318 "receive-migration", 10319 &format!("tcp:0.0.0.0:{}", migration_port), 10320 ]) 10321 .stdin(Stdio::null()) 10322 .stderr(Stdio::piped()) 10323 .stdout(Stdio::piped()) 10324 .spawn() 10325 .unwrap(); 10326 10327 // Give the destination some time to start listening 10328 thread::sleep(Duration::from_secs(1)); 10329 10330 // Start the 'send-migration' command on the source 10331 let mut send_migration = Command::new(clh_command("ch-remote")) 10332 .args([ 10333 &format!("--api-socket={}", src_api_socket), 10334 "send-migration", 10335 &format!("tcp:{}:{}", host_ip, migration_port), 10336 ]) 10337 .stdin(Stdio::null()) 10338 .stderr(Stdio::piped()) 10339 .stdout(Stdio::piped()) 10340 .spawn() 10341 .unwrap(); 10342 10343 // Check if the 'send-migration' command executed successfully 10344 let send_success = if let Some(status) = send_migration 10345 .wait_timeout(Duration::from_secs(60)) 10346 .unwrap() 10347 { 10348 status.success() 10349 } else { 10350 false 10351 }; 10352 10353 if !send_success { 10354 let _ = send_migration.kill(); 10355 let output = send_migration.wait_with_output().unwrap(); 10356 eprintln!( 10357 "\n\n==== Start 'send_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'send_migration' output ====\n\n", 10358 String::from_utf8_lossy(&output.stdout), 10359 String::from_utf8_lossy(&output.stderr) 10360 ); 10361 } 10362 10363 // Check if the 'receive-migration' command executed successfully 10364 let receive_success = if let Some(status) = receive_migration 10365 .wait_timeout(Duration::from_secs(60)) 10366 .unwrap() 10367 { 10368 status.success() 10369 } else { 10370 false 10371 }; 10372 10373 if !receive_success { 10374 let _ = receive_migration.kill(); 10375 let output = receive_migration.wait_with_output().unwrap(); 10376 eprintln!( 10377 "\n\n==== Start 'receive_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'receive_migration' output ====\n\n", 10378 String::from_utf8_lossy(&output.stdout), 10379 String::from_utf8_lossy(&output.stderr) 10380 ); 10381 } 10382 10383 send_success && receive_success 10384 } 10385 10386 fn _test_live_migration_tcp() { 10387 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10388 let guest = Guest::new(Box::new(focal)); 10389 let kernel_path = direct_kernel_boot_path(); 10390 let console_text = String::from("On a branch floating down river a cricket, singing."); 10391 let net_id = "net123"; 10392 let net_params = format!( 10393 "id={},tap=,mac={},ip={},mask=255.255.255.0", 10394 net_id, guest.network.guest_mac, guest.network.host_ip 10395 ); 10396 let memory_param: &[&str] = &["--memory", "size=4G,shared=on"]; 10397 let boot_vcpus = 2; 10398 let max_vcpus = 4; 10399 let pmem_temp_file = TempFile::new().unwrap(); 10400 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 10401 std::process::Command::new("mkfs.ext4") 10402 .arg(pmem_temp_file.as_path()) 10403 .output() 10404 .expect("Expect creating disk image to succeed"); 10405 let pmem_path = String::from("/dev/pmem0"); 10406 10407 // Start the source VM 10408 let src_vm_path = clh_command("cloud-hypervisor"); 10409 let src_api_socket = temp_api_path(&guest.tmp_dir); 10410 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 10411 src_vm_cmd 10412 .args([ 10413 "--cpus", 10414 format!("boot={},max={}", boot_vcpus, max_vcpus).as_str(), 10415 ]) 10416 .args(memory_param) 10417 .args(["--kernel", kernel_path.to_str().unwrap()]) 10418 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10419 .default_disks() 10420 .args(["--net", net_params.as_str()]) 10421 .args(["--api-socket", &src_api_socket]) 10422 .args([ 10423 "--pmem", 10424 format!( 10425 "file={},discard_writes=on", 10426 pmem_temp_file.as_path().to_str().unwrap(), 10427 ) 10428 .as_str(), 10429 ]) 10430 .capture_output(); 10431 let mut src_child = src_vm_cmd.spawn().unwrap(); 10432 10433 // Start the destination VM 10434 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 10435 dest_api_socket.push_str(".dest"); 10436 let mut dest_child = GuestCommand::new(&guest) 10437 .args(["--api-socket", &dest_api_socket]) 10438 .capture_output() 10439 .spawn() 10440 .unwrap(); 10441 10442 let r = std::panic::catch_unwind(|| { 10443 guest.wait_vm_boot(None).unwrap(); 10444 // Ensure the source VM is running normally 10445 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 10446 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 10447 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 10448 10449 // On x86_64 architecture, remove and re-add the virtio-net device 10450 #[cfg(target_arch = "x86_64")] 10451 { 10452 assert!(remote_command( 10453 &src_api_socket, 10454 "remove-device", 10455 Some(net_id), 10456 )); 10457 thread::sleep(Duration::new(10, 0)); 10458 // Re-add the virtio-net device 10459 assert!(remote_command( 10460 &src_api_socket, 10461 "add-net", 10462 Some(net_params.as_str()), 10463 )); 10464 thread::sleep(Duration::new(10, 0)); 10465 } 10466 // Start TCP live migration 10467 assert!( 10468 start_live_migration_tcp(&src_api_socket, &dest_api_socket), 10469 "Unsuccessful command: 'send-migration' or 'receive-migration'." 10470 ); 10471 }); 10472 10473 // Check and report any errors that occurred during live migration 10474 if r.is_err() { 10475 print_and_panic( 10476 src_child, 10477 dest_child, 10478 None, 10479 "Error occurred during live-migration", 10480 ); 10481 } 10482 10483 // Check the source vm has been terminated successful (give it '3s' to settle) 10484 thread::sleep(std::time::Duration::new(3, 0)); 10485 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 10486 print_and_panic( 10487 src_child, 10488 dest_child, 10489 None, 10490 "Source VM was not terminated successfully.", 10491 ); 10492 }; 10493 10494 // After live migration, ensure the destination VM is running normally 10495 let r = std::panic::catch_unwind(|| { 10496 // Perform the same checks to ensure the VM has migrated correctly 10497 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 10498 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 10499 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 10500 }); 10501 10502 // Clean up the destination VM and ensure it terminates properly 10503 let _ = dest_child.kill(); 10504 let dest_output = dest_child.wait_with_output().unwrap(); 10505 handle_child_output(r, &dest_output); 10506 10507 // Check if the expected `console_text` is present in the destination VM's output 10508 let r = std::panic::catch_unwind(|| { 10509 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 10510 }); 10511 handle_child_output(r, &dest_output); 10512 } 10513 10514 mod live_migration_parallel { 10515 use super::*; 10516 #[test] 10517 fn test_live_migration_basic() { 10518 _test_live_migration(false, false) 10519 } 10520 10521 #[test] 10522 fn test_live_migration_local() { 10523 _test_live_migration(false, true) 10524 } 10525 10526 #[test] 10527 fn test_live_migration_tcp() { 10528 _test_live_migration_tcp(); 10529 } 10530 10531 #[test] 10532 fn test_live_migration_watchdog() { 10533 _test_live_migration_watchdog(false, false) 10534 } 10535 10536 #[test] 10537 fn test_live_migration_watchdog_local() { 10538 _test_live_migration_watchdog(false, true) 10539 } 10540 10541 #[test] 10542 fn test_live_upgrade_basic() { 10543 _test_live_migration(true, false) 10544 } 10545 10546 #[test] 10547 fn test_live_upgrade_local() { 10548 _test_live_migration(true, true) 10549 } 10550 10551 #[test] 10552 fn test_live_upgrade_watchdog() { 10553 _test_live_migration_watchdog(true, false) 10554 } 10555 10556 #[test] 10557 fn test_live_upgrade_watchdog_local() { 10558 _test_live_migration_watchdog(true, true) 10559 } 10560 #[test] 10561 #[cfg(target_arch = "x86_64")] 10562 fn test_live_migration_with_landlock() { 10563 _test_live_migration_with_landlock() 10564 } 10565 } 10566 10567 mod live_migration_sequential { 10568 use super::*; 10569 10570 // NUMA & balloon live migration tests are large so run sequentially 10571 10572 #[test] 10573 fn test_live_migration_balloon() { 10574 _test_live_migration_balloon(false, false) 10575 } 10576 10577 #[test] 10578 fn test_live_migration_balloon_local() { 10579 _test_live_migration_balloon(false, true) 10580 } 10581 10582 #[test] 10583 fn test_live_upgrade_balloon() { 10584 _test_live_migration_balloon(true, false) 10585 } 10586 10587 #[test] 10588 fn test_live_upgrade_balloon_local() { 10589 _test_live_migration_balloon(true, true) 10590 } 10591 10592 #[test] 10593 #[cfg(not(feature = "mshv"))] 10594 fn test_live_migration_numa() { 10595 _test_live_migration_numa(false, false) 10596 } 10597 10598 #[test] 10599 #[cfg(not(feature = "mshv"))] 10600 fn test_live_migration_numa_local() { 10601 _test_live_migration_numa(false, true) 10602 } 10603 10604 #[test] 10605 #[cfg(not(feature = "mshv"))] 10606 fn test_live_upgrade_numa() { 10607 _test_live_migration_numa(true, false) 10608 } 10609 10610 #[test] 10611 #[cfg(not(feature = "mshv"))] 10612 fn test_live_upgrade_numa_local() { 10613 _test_live_migration_numa(true, true) 10614 } 10615 10616 // Require to run ovs-dpdk tests sequentially because they rely on the same ovs-dpdk setup 10617 #[test] 10618 #[ignore = "See #5532"] 10619 #[cfg(target_arch = "x86_64")] 10620 #[cfg(not(feature = "mshv"))] 10621 fn test_live_migration_ovs_dpdk() { 10622 _test_live_migration_ovs_dpdk(false, false); 10623 } 10624 10625 #[test] 10626 #[cfg(target_arch = "x86_64")] 10627 #[cfg(not(feature = "mshv"))] 10628 fn test_live_migration_ovs_dpdk_local() { 10629 _test_live_migration_ovs_dpdk(false, true); 10630 } 10631 10632 #[test] 10633 #[ignore = "See #5532"] 10634 #[cfg(target_arch = "x86_64")] 10635 #[cfg(not(feature = "mshv"))] 10636 fn test_live_upgrade_ovs_dpdk() { 10637 _test_live_migration_ovs_dpdk(true, false); 10638 } 10639 10640 #[test] 10641 #[ignore = "See #5532"] 10642 #[cfg(target_arch = "x86_64")] 10643 #[cfg(not(feature = "mshv"))] 10644 fn test_live_upgrade_ovs_dpdk_local() { 10645 _test_live_migration_ovs_dpdk(true, true); 10646 } 10647 } 10648 } 10649 10650 #[cfg(target_arch = "aarch64")] 10651 mod aarch64_acpi { 10652 use crate::*; 10653 10654 #[test] 10655 fn test_simple_launch_acpi() { 10656 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10657 10658 vec![Box::new(focal)].drain(..).for_each(|disk_config| { 10659 let guest = Guest::new(disk_config); 10660 10661 let mut child = GuestCommand::new(&guest) 10662 .args(["--cpus", "boot=1"]) 10663 .args(["--memory", "size=512M"]) 10664 .args(["--kernel", edk2_path().to_str().unwrap()]) 10665 .default_disks() 10666 .default_net() 10667 .args(["--serial", "tty", "--console", "off"]) 10668 .capture_output() 10669 .spawn() 10670 .unwrap(); 10671 10672 let r = std::panic::catch_unwind(|| { 10673 guest.wait_vm_boot(Some(120)).unwrap(); 10674 10675 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 10676 assert!(guest.get_total_memory().unwrap_or_default() > 400_000); 10677 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 10678 }); 10679 10680 let _ = child.kill(); 10681 let output = child.wait_with_output().unwrap(); 10682 10683 handle_child_output(r, &output); 10684 }); 10685 } 10686 10687 #[test] 10688 fn test_guest_numa_nodes_acpi() { 10689 _test_guest_numa_nodes(true); 10690 } 10691 10692 #[test] 10693 fn test_cpu_topology_421_acpi() { 10694 test_cpu_topology(4, 2, 1, true); 10695 } 10696 10697 #[test] 10698 fn test_cpu_topology_142_acpi() { 10699 test_cpu_topology(1, 4, 2, true); 10700 } 10701 10702 #[test] 10703 fn test_cpu_topology_262_acpi() { 10704 test_cpu_topology(2, 6, 2, true); 10705 } 10706 10707 #[test] 10708 fn test_power_button_acpi() { 10709 _test_power_button(true); 10710 } 10711 10712 #[test] 10713 fn test_virtio_iommu() { 10714 _test_virtio_iommu(true) 10715 } 10716 } 10717 10718 mod rate_limiter { 10719 use super::*; 10720 10721 // Check if the 'measured' rate is within the expected 'difference' (in percentage) 10722 // compared to given 'limit' rate. 10723 fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool { 10724 let upper_limit = limit * (1_f64 + difference); 10725 let lower_limit = limit * (1_f64 - difference); 10726 10727 if measured > lower_limit && measured < upper_limit { 10728 return true; 10729 } 10730 10731 eprintln!( 10732 "\n\n==== Start 'check_rate_limit' failed ==== \ 10733 \n\nmeasured={measured}, , lower_limit={lower_limit}, upper_limit={upper_limit} \ 10734 \n\n==== End 'check_rate_limit' failed ====\n\n" 10735 ); 10736 10737 false 10738 } 10739 10740 fn _test_rate_limiter_net(rx: bool) { 10741 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10742 let guest = Guest::new(Box::new(focal)); 10743 10744 let test_timeout = 10; 10745 let num_queues = 2; 10746 let queue_size = 256; 10747 let bw_size = 10485760_u64; // bytes 10748 let bw_refill_time = 100; // ms 10749 let limit_bps = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64; 10750 10751 let net_params = format!( 10752 "tap=,mac={},ip={},mask=255.255.255.0,num_queues={},queue_size={},bw_size={},bw_refill_time={}", 10753 guest.network.guest_mac, 10754 guest.network.host_ip, 10755 num_queues, 10756 queue_size, 10757 bw_size, 10758 bw_refill_time, 10759 ); 10760 10761 let mut child = GuestCommand::new(&guest) 10762 .args(["--cpus", &format!("boot={}", num_queues / 2)]) 10763 .args(["--memory", "size=4G"]) 10764 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 10765 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10766 .default_disks() 10767 .args(["--net", net_params.as_str()]) 10768 .capture_output() 10769 .spawn() 10770 .unwrap(); 10771 10772 let r = std::panic::catch_unwind(|| { 10773 guest.wait_vm_boot(None).unwrap(); 10774 let measured_bps = 10775 measure_virtio_net_throughput(test_timeout, num_queues / 2, &guest, rx, true) 10776 .unwrap(); 10777 assert!(check_rate_limit(measured_bps, limit_bps, 0.1)); 10778 }); 10779 10780 let _ = child.kill(); 10781 let output = child.wait_with_output().unwrap(); 10782 handle_child_output(r, &output); 10783 } 10784 10785 #[test] 10786 fn test_rate_limiter_net_rx() { 10787 _test_rate_limiter_net(true); 10788 } 10789 10790 #[test] 10791 fn test_rate_limiter_net_tx() { 10792 _test_rate_limiter_net(false); 10793 } 10794 10795 fn _test_rate_limiter_block(bandwidth: bool, num_queues: u32) { 10796 let test_timeout = 10; 10797 let fio_ops = FioOps::RandRW; 10798 10799 let bw_size = if bandwidth { 10800 10485760_u64 // bytes 10801 } else { 10802 100_u64 // I/O 10803 }; 10804 let bw_refill_time = 100; // ms 10805 let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64; 10806 10807 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10808 let guest = Guest::new(Box::new(focal)); 10809 let api_socket = temp_api_path(&guest.tmp_dir); 10810 let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap(); 10811 let blk_rate_limiter_test_img = 10812 String::from(test_img_dir.as_path().join("blk.img").to_str().unwrap()); 10813 10814 // Create the test block image 10815 assert!(exec_host_command_output(&format!( 10816 "dd if=/dev/zero of={blk_rate_limiter_test_img} bs=1M count=1024" 10817 )) 10818 .status 10819 .success()); 10820 10821 let test_blk_params = if bandwidth { 10822 format!( 10823 "path={blk_rate_limiter_test_img},num_queues={num_queues},bw_size={bw_size},bw_refill_time={bw_refill_time}" 10824 ) 10825 } else { 10826 format!( 10827 "path={blk_rate_limiter_test_img},num_queues={num_queues},ops_size={bw_size},ops_refill_time={bw_refill_time}" 10828 ) 10829 }; 10830 10831 let mut child = GuestCommand::new(&guest) 10832 .args(["--cpus", &format!("boot={num_queues}")]) 10833 .args(["--memory", "size=4G"]) 10834 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 10835 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10836 .args([ 10837 "--disk", 10838 format!( 10839 "path={}", 10840 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 10841 ) 10842 .as_str(), 10843 format!( 10844 "path={}", 10845 guest.disk_config.disk(DiskType::CloudInit).unwrap() 10846 ) 10847 .as_str(), 10848 test_blk_params.as_str(), 10849 ]) 10850 .default_net() 10851 .args(["--api-socket", &api_socket]) 10852 .capture_output() 10853 .spawn() 10854 .unwrap(); 10855 10856 let r = std::panic::catch_unwind(|| { 10857 guest.wait_vm_boot(None).unwrap(); 10858 10859 let fio_command = format!( 10860 "sudo fio --filename=/dev/vdc --name=test --output-format=json \ 10861 --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \ 10862 --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}" 10863 ); 10864 let output = guest.ssh_command(&fio_command).unwrap(); 10865 10866 // Parse fio output 10867 let measured_rate = if bandwidth { 10868 parse_fio_output(&output, &fio_ops, num_queues).unwrap() 10869 } else { 10870 parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap() 10871 }; 10872 assert!(check_rate_limit(measured_rate, limit_rate, 0.1)); 10873 }); 10874 10875 let _ = child.kill(); 10876 let output = child.wait_with_output().unwrap(); 10877 handle_child_output(r, &output); 10878 } 10879 10880 fn _test_rate_limiter_group_block(bandwidth: bool, num_queues: u32, num_disks: u32) { 10881 let test_timeout = 10; 10882 let fio_ops = FioOps::RandRW; 10883 10884 let bw_size = if bandwidth { 10885 10485760_u64 // bytes 10886 } else { 10887 100_u64 // I/O 10888 }; 10889 let bw_refill_time = 100; // ms 10890 let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64; 10891 10892 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10893 let guest = Guest::new(Box::new(focal)); 10894 let api_socket = temp_api_path(&guest.tmp_dir); 10895 let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap(); 10896 10897 let rate_limit_group_arg = if bandwidth { 10898 format!("id=group0,bw_size={bw_size},bw_refill_time={bw_refill_time}") 10899 } else { 10900 format!("id=group0,ops_size={bw_size},ops_refill_time={bw_refill_time}") 10901 }; 10902 10903 let mut disk_args = vec![ 10904 "--disk".to_string(), 10905 format!( 10906 "path={}", 10907 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 10908 ), 10909 format!( 10910 "path={}", 10911 guest.disk_config.disk(DiskType::CloudInit).unwrap() 10912 ), 10913 ]; 10914 10915 for i in 0..num_disks { 10916 let test_img_path = String::from( 10917 test_img_dir 10918 .as_path() 10919 .join(format!("blk{}.img", i)) 10920 .to_str() 10921 .unwrap(), 10922 ); 10923 10924 assert!(exec_host_command_output(&format!( 10925 "dd if=/dev/zero of={test_img_path} bs=1M count=1024" 10926 )) 10927 .status 10928 .success()); 10929 10930 disk_args.push(format!( 10931 "path={test_img_path},num_queues={num_queues},rate_limit_group=group0" 10932 )); 10933 } 10934 10935 let mut child = GuestCommand::new(&guest) 10936 .args(["--cpus", &format!("boot={}", num_queues * num_disks)]) 10937 .args(["--memory", "size=4G"]) 10938 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 10939 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10940 .args(["--rate-limit-group", &rate_limit_group_arg]) 10941 .args(disk_args) 10942 .default_net() 10943 .args(["--api-socket", &api_socket]) 10944 .capture_output() 10945 .spawn() 10946 .unwrap(); 10947 10948 let r = std::panic::catch_unwind(|| { 10949 guest.wait_vm_boot(None).unwrap(); 10950 10951 let mut fio_command = format!( 10952 "sudo fio --name=global --output-format=json \ 10953 --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \ 10954 --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}" 10955 ); 10956 10957 // Generate additional argument for each disk: 10958 // --name=job0 --filename=/dev/vdc \ 10959 // --name=job1 --filename=/dev/vdd \ 10960 // --name=job2 --filename=/dev/vde \ 10961 // ... 10962 for i in 0..num_disks { 10963 let c: char = 'c'; 10964 let arg = format!( 10965 " --name=job{i} --filename=/dev/vd{}", 10966 char::from_u32((c as u32) + i).unwrap() 10967 ); 10968 fio_command += &arg; 10969 } 10970 let output = guest.ssh_command(&fio_command).unwrap(); 10971 10972 // Parse fio output 10973 let measured_rate = if bandwidth { 10974 parse_fio_output(&output, &fio_ops, num_queues * num_disks).unwrap() 10975 } else { 10976 parse_fio_output_iops(&output, &fio_ops, num_queues * num_disks).unwrap() 10977 }; 10978 assert!(check_rate_limit(measured_rate, limit_rate, 0.2)); 10979 }); 10980 10981 let _ = child.kill(); 10982 let output = child.wait_with_output().unwrap(); 10983 handle_child_output(r, &output); 10984 } 10985 10986 #[test] 10987 fn test_rate_limiter_block_bandwidth() { 10988 _test_rate_limiter_block(true, 1); 10989 _test_rate_limiter_block(true, 2) 10990 } 10991 10992 #[test] 10993 fn test_rate_limiter_group_block_bandwidth() { 10994 _test_rate_limiter_group_block(true, 1, 1); 10995 _test_rate_limiter_group_block(true, 2, 1); 10996 _test_rate_limiter_group_block(true, 1, 2); 10997 _test_rate_limiter_group_block(true, 2, 2); 10998 } 10999 11000 #[test] 11001 fn test_rate_limiter_block_iops() { 11002 _test_rate_limiter_block(false, 1); 11003 _test_rate_limiter_block(false, 2); 11004 } 11005 11006 #[test] 11007 fn test_rate_limiter_group_block_iops() { 11008 _test_rate_limiter_group_block(false, 1, 1); 11009 _test_rate_limiter_group_block(false, 2, 1); 11010 _test_rate_limiter_group_block(false, 1, 2); 11011 _test_rate_limiter_group_block(false, 2, 2); 11012 } 11013 } 11014