// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::undocumented_unsafe_blocks)]
// When enabling the `mshv` feature, we skip quite a few tests and
// hence have known dead code. This annotation silences dead-code
// related warnings so that our quality workflow can pass.
#![allow(dead_code)]

extern crate test_infra;

use std::collections::HashMap;
use std::io::{BufRead, Read, Seek, Write};
use std::net::TcpListener;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use std::string::String;
use std::sync::mpsc::Receiver;
use std::sync::{mpsc, Mutex};
use std::time::Duration;
use std::{fs, io, thread};

use net_util::MacAddr;
use test_infra::*;
use vmm_sys_util::tempdir::TempDir;
use vmm_sys_util::tempfile::TempFile;
use wait_timeout::ChildExt;

// Constant taken from the VMM crate.
const MAX_NUM_PCI_SEGMENTS: u16 = 96;

#[cfg(target_arch = "x86_64")]
mod x86_64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
    pub const JAMMY_VFIO_IMAGE_NAME: &str =
        "jammy-server-cloudimg-amd64-custom-vfio-20241012-0.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20241017-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw";
    pub const OVMF_NAME: &str = "CLOUDHV.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
}

#[cfg(target_arch = "x86_64")]
use x86_64::*;

#[cfg(target_arch = "aarch64")]
mod aarch64 {
    pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
    pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
    pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
    pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
        "focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2";
    pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
    pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
    pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw";
    pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw";
    pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd";
    pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
    pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true";
}

#[cfg(target_arch = "aarch64")]
use aarch64::*;

const DIRECT_KERNEL_BOOT_CMDLINE: &str =
    "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1";

const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server";

// This enum exists to make it more convenient to
// implement tests for both the D-Bus and REST APIs.
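// Illustrative usage (a sketch based on the helpers defined below; the `guest`
// variable is whatever Guest the calling test has already created): a test
// typically builds one of the two variants from the guest's temporary
// directory and feeds the generated arguments to the VMM and to ch-remote:
//
//   let target_api = TargetApi::new_http_api(&guest.tmp_dir);
//   // or: let target_api = TargetApi::new_dbus_api(&guest.tmp_dir);
//   let vmm_args = target_api.guest_args(); // --api-socket=... or --dbus-*
//   assert!(target_api.remote_command("ping", None));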
80 enum TargetApi { 81 // API socket 82 HttpApi(String), 83 // well known service name, object path 84 DBusApi(String, String), 85 } 86 87 impl TargetApi { 88 fn new_http_api(tmp_dir: &TempDir) -> Self { 89 Self::HttpApi(temp_api_path(tmp_dir)) 90 } 91 92 fn new_dbus_api(tmp_dir: &TempDir) -> Self { 93 // `tmp_dir` is in the form of "/tmp/chXXXXXX" 94 // and we take the `chXXXXXX` part as a unique identifier for the guest 95 let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap(); 96 97 Self::DBusApi( 98 format!("org.cloudhypervisor.{id}"), 99 format!("/org/cloudhypervisor/{id}"), 100 ) 101 } 102 103 fn guest_args(&self) -> Vec<String> { 104 match self { 105 TargetApi::HttpApi(api_socket) => { 106 vec![format!("--api-socket={}", api_socket.as_str())] 107 } 108 TargetApi::DBusApi(service_name, object_path) => { 109 vec![ 110 format!("--dbus-service-name={}", service_name.as_str()), 111 format!("--dbus-object-path={}", object_path.as_str()), 112 ] 113 } 114 } 115 } 116 117 fn remote_args(&self) -> Vec<String> { 118 // `guest_args` and `remote_args` are consistent with each other 119 self.guest_args() 120 } 121 122 fn remote_command(&self, command: &str, arg: Option<&str>) -> bool { 123 let mut cmd = Command::new(clh_command("ch-remote")); 124 cmd.args(self.remote_args()); 125 cmd.arg(command); 126 127 if let Some(arg) = arg { 128 cmd.arg(arg); 129 } 130 131 let output = cmd.output().unwrap(); 132 if output.status.success() { 133 true 134 } else { 135 eprintln!("Error running ch-remote command: {:?}", &cmd); 136 let stderr = String::from_utf8_lossy(&output.stderr); 137 eprintln!("stderr: {stderr}"); 138 false 139 } 140 } 141 } 142 143 // Start cloud-hypervisor with no VM parameters, only the API server running. 144 // From the API: Create a VM, boot it and check that it looks as expected. 145 fn _test_api_create_boot(target_api: TargetApi, guest: Guest) { 146 let mut child = GuestCommand::new(&guest) 147 .args(target_api.guest_args()) 148 .capture_output() 149 .spawn() 150 .unwrap(); 151 152 thread::sleep(std::time::Duration::new(1, 0)); 153 154 // Verify API server is running 155 assert!(target_api.remote_command("ping", None)); 156 157 // Create the VM first 158 let cpu_count: u8 = 4; 159 let request_body = guest.api_create_body( 160 cpu_count, 161 direct_kernel_boot_path().to_str().unwrap(), 162 DIRECT_KERNEL_BOOT_CMDLINE, 163 ); 164 165 let temp_config_path = guest.tmp_dir.as_path().join("config"); 166 std::fs::write(&temp_config_path, request_body).unwrap(); 167 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 168 169 assert!(target_api.remote_command("create", Some(create_config),)); 170 171 // Then boot it 172 assert!(target_api.remote_command("boot", None)); 173 thread::sleep(std::time::Duration::new(20, 0)); 174 175 let r = std::panic::catch_unwind(|| { 176 // Check that the VM booted as expected 177 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 178 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 179 }); 180 181 kill_child(&mut child); 182 let output = child.wait_with_output().unwrap(); 183 184 handle_child_output(r, &output); 185 } 186 187 // Start cloud-hypervisor with no VM parameters, only the API server running. 
// From the API: Create a VM, boot it, and check that it can be shut down and
// then booted again.
fn _test_api_shutdown(target_api: TargetApi, guest: Guest) {
    let mut child = GuestCommand::new(&guest)
        .args(target_api.guest_args())
        .capture_output()
        .spawn()
        .unwrap();

    thread::sleep(std::time::Duration::new(1, 0));

    // Verify API server is running
    assert!(target_api.remote_command("ping", None));

    // Create the VM first
    let cpu_count: u8 = 4;
    let request_body = guest.api_create_body(
        cpu_count,
        direct_kernel_boot_path().to_str().unwrap(),
        DIRECT_KERNEL_BOOT_CMDLINE,
    );

    let temp_config_path = guest.tmp_dir.as_path().join("config");
    std::fs::write(&temp_config_path, request_body).unwrap();
    let create_config = temp_config_path.as_os_str().to_str().unwrap();

    let r = std::panic::catch_unwind(|| {
        assert!(target_api.remote_command("create", Some(create_config)));

        // Then boot it
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);

        // Sync and shut down without powering off to prevent filesystem
        // corruption.
        guest.ssh_command("sync").unwrap();
        guest.ssh_command("sudo shutdown -H now").unwrap();

        // Wait for the guest to be fully shut down
        thread::sleep(std::time::Duration::new(20, 0));

        // Then shut it down
        assert!(target_api.remote_command("shutdown", None));

        // Then boot it again
        assert!(target_api.remote_command("boot", None));

        guest.wait_vm_boot(None).unwrap();

        // Check that the VM booted as expected
        assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
        assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

// Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it, and check that it can be deleted and
// then re-created and booted again.
256 fn _test_api_delete(target_api: TargetApi, guest: Guest) { 257 let mut child = GuestCommand::new(&guest) 258 .args(target_api.guest_args()) 259 .capture_output() 260 .spawn() 261 .unwrap(); 262 263 thread::sleep(std::time::Duration::new(1, 0)); 264 265 // Verify API server is running 266 assert!(target_api.remote_command("ping", None)); 267 268 // Create the VM first 269 let cpu_count: u8 = 4; 270 let request_body = guest.api_create_body( 271 cpu_count, 272 direct_kernel_boot_path().to_str().unwrap(), 273 DIRECT_KERNEL_BOOT_CMDLINE, 274 ); 275 let temp_config_path = guest.tmp_dir.as_path().join("config"); 276 std::fs::write(&temp_config_path, request_body).unwrap(); 277 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 278 279 let r = std::panic::catch_unwind(|| { 280 assert!(target_api.remote_command("create", Some(create_config))); 281 282 // Then boot it 283 assert!(target_api.remote_command("boot", None)); 284 285 guest.wait_vm_boot(None).unwrap(); 286 287 // Check that the VM booted as expected 288 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 289 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 290 291 // Sync and shutdown without powering off to prevent filesystem 292 // corruption. 293 guest.ssh_command("sync").unwrap(); 294 guest.ssh_command("sudo shutdown -H now").unwrap(); 295 296 // Wait for the guest to be fully shutdown 297 thread::sleep(std::time::Duration::new(20, 0)); 298 299 // Then delete it 300 assert!(target_api.remote_command("delete", None)); 301 302 assert!(target_api.remote_command("create", Some(create_config))); 303 304 // Then boot it again 305 assert!(target_api.remote_command("boot", None)); 306 307 guest.wait_vm_boot(None).unwrap(); 308 309 // Check that the VM booted as expected 310 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 311 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 312 }); 313 314 kill_child(&mut child); 315 let output = child.wait_with_output().unwrap(); 316 317 handle_child_output(r, &output); 318 } 319 320 // Start cloud-hypervisor with no VM parameters, only the API server running. 321 // From the API: Create a VM, boot it and check that it looks as expected. 322 // Then we pause the VM, check that it's no longer available. 323 // Finally we resume the VM and check that it's available. 
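// "Available" here means reachable over SSH: while the VM is paused, the
// ssh_command_ip() probe in the body below is expected to fail, and after
// resuming, reading the vCPU count over SSH must succeed again.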
324 fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) { 325 let mut child = GuestCommand::new(&guest) 326 .args(target_api.guest_args()) 327 .capture_output() 328 .spawn() 329 .unwrap(); 330 331 thread::sleep(std::time::Duration::new(1, 0)); 332 333 // Verify API server is running 334 assert!(target_api.remote_command("ping", None)); 335 336 // Create the VM first 337 let cpu_count: u8 = 4; 338 let request_body = guest.api_create_body( 339 cpu_count, 340 direct_kernel_boot_path().to_str().unwrap(), 341 DIRECT_KERNEL_BOOT_CMDLINE, 342 ); 343 344 let temp_config_path = guest.tmp_dir.as_path().join("config"); 345 std::fs::write(&temp_config_path, request_body).unwrap(); 346 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 347 348 assert!(target_api.remote_command("create", Some(create_config))); 349 350 // Then boot it 351 assert!(target_api.remote_command("boot", None)); 352 thread::sleep(std::time::Duration::new(20, 0)); 353 354 let r = std::panic::catch_unwind(|| { 355 // Check that the VM booted as expected 356 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 357 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 358 359 // We now pause the VM 360 assert!(target_api.remote_command("pause", None)); 361 362 // Check pausing again fails 363 assert!(!target_api.remote_command("pause", None)); 364 365 thread::sleep(std::time::Duration::new(2, 0)); 366 367 // SSH into the VM should fail 368 ssh_command_ip( 369 "grep -c processor /proc/cpuinfo", 370 &guest.network.guest_ip, 371 2, 372 5, 373 ) 374 .unwrap_err(); 375 376 // Resume the VM 377 assert!(target_api.remote_command("resume", None)); 378 379 // Check resuming again fails 380 assert!(!target_api.remote_command("resume", None)); 381 382 thread::sleep(std::time::Duration::new(2, 0)); 383 384 // Now we should be able to SSH back in and get the right number of CPUs 385 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 386 }); 387 388 kill_child(&mut child); 389 let output = child.wait_with_output().unwrap(); 390 391 handle_child_output(r, &output); 392 } 393 394 fn _test_pty_interaction(pty_path: PathBuf) { 395 let mut cf = std::fs::OpenOptions::new() 396 .write(true) 397 .read(true) 398 .open(pty_path) 399 .unwrap(); 400 401 // Some dumb sleeps but we don't want to write 402 // before the console is up and we don't want 403 // to try and write the next line before the 404 // login process is ready. 
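    // Note: the expected byte counts below are simply the lengths of the
    // strings written to the pty, including the trailing newline:
    // "cloud\n" = 6, "cloud123\n" = 9, "echo test_pty_console\n" = 22.
    // Each assert therefore checks that the whole line was written in one go.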
405 thread::sleep(std::time::Duration::new(5, 0)); 406 assert_eq!(cf.write(b"cloud\n").unwrap(), 6); 407 thread::sleep(std::time::Duration::new(2, 0)); 408 assert_eq!(cf.write(b"cloud123\n").unwrap(), 9); 409 thread::sleep(std::time::Duration::new(2, 0)); 410 assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22); 411 thread::sleep(std::time::Duration::new(2, 0)); 412 413 // read pty and ensure they have a login shell 414 // some fairly hacky workarounds to avoid looping 415 // forever in case the channel is blocked getting output 416 let ptyc = pty_read(cf); 417 let mut empty = 0; 418 let mut prev = String::new(); 419 loop { 420 thread::sleep(std::time::Duration::new(2, 0)); 421 match ptyc.try_recv() { 422 Ok(line) => { 423 empty = 0; 424 prev = prev + &line; 425 if prev.contains("test_pty_console") { 426 break; 427 } 428 } 429 Err(mpsc::TryRecvError::Empty) => { 430 empty += 1; 431 assert!(empty <= 5, "No login on pty"); 432 } 433 _ => { 434 panic!("No login on pty") 435 } 436 } 437 } 438 } 439 440 fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) { 441 let mut workload_path = dirs::home_dir().unwrap(); 442 workload_path.push("workloads"); 443 444 let mut virtiofsd_path = workload_path; 445 virtiofsd_path.push("virtiofsd"); 446 let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap()); 447 448 let virtiofsd_socket_path = 449 String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap()); 450 451 // Start the daemon 452 let child = Command::new(virtiofsd_path.as_str()) 453 .args(["--shared-dir", shared_dir]) 454 .args(["--socket-path", virtiofsd_socket_path.as_str()]) 455 .args(["--cache", "never"]) 456 .spawn() 457 .unwrap(); 458 459 thread::sleep(std::time::Duration::new(10, 0)); 460 461 (child, virtiofsd_socket_path) 462 } 463 464 fn prepare_vubd( 465 tmp_dir: &TempDir, 466 blk_img: &str, 467 num_queues: usize, 468 rdonly: bool, 469 direct: bool, 470 ) -> (std::process::Child, String) { 471 let mut workload_path = dirs::home_dir().unwrap(); 472 workload_path.push("workloads"); 473 474 let mut blk_file_path = workload_path; 475 blk_file_path.push(blk_img); 476 let blk_file_path = String::from(blk_file_path.to_str().unwrap()); 477 478 let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap()); 479 480 // Start the daemon 481 let child = Command::new(clh_command("vhost_user_block")) 482 .args([ 483 "--block-backend", 484 format!( 485 "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}" 486 ) 487 .as_str(), 488 ]) 489 .spawn() 490 .unwrap(); 491 492 thread::sleep(std::time::Duration::new(10, 0)); 493 494 (child, vubd_socket_path) 495 } 496 497 fn temp_vsock_path(tmp_dir: &TempDir) -> String { 498 String::from(tmp_dir.as_path().join("vsock").to_str().unwrap()) 499 } 500 501 fn temp_api_path(tmp_dir: &TempDir) -> String { 502 String::from( 503 tmp_dir 504 .as_path() 505 .join("cloud-hypervisor.sock") 506 .to_str() 507 .unwrap(), 508 ) 509 } 510 511 fn temp_event_monitor_path(tmp_dir: &TempDir) -> String { 512 String::from(tmp_dir.as_path().join("event.json").to_str().unwrap()) 513 } 514 515 // Creates the directory and returns the path. 
516 fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String { 517 let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap()); 518 std::fs::create_dir(&snapshot_dir).unwrap(); 519 snapshot_dir 520 } 521 522 fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String { 523 let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap()); 524 vmcore_file 525 } 526 527 // Creates the path for direct kernel boot and return the path. 528 // For x86_64, this function returns the vmlinux kernel path. 529 // For AArch64, this function returns the PE kernel path. 530 fn direct_kernel_boot_path() -> PathBuf { 531 let mut workload_path = dirs::home_dir().unwrap(); 532 workload_path.push("workloads"); 533 534 let mut kernel_path = workload_path; 535 #[cfg(target_arch = "x86_64")] 536 kernel_path.push("vmlinux"); 537 #[cfg(target_arch = "aarch64")] 538 kernel_path.push("Image"); 539 540 kernel_path 541 } 542 543 fn edk2_path() -> PathBuf { 544 let mut workload_path = dirs::home_dir().unwrap(); 545 workload_path.push("workloads"); 546 let mut edk2_path = workload_path; 547 edk2_path.push(OVMF_NAME); 548 549 edk2_path 550 } 551 552 fn cloud_hypervisor_release_path() -> String { 553 let mut workload_path = dirs::home_dir().unwrap(); 554 workload_path.push("workloads"); 555 556 let mut ch_release_path = workload_path; 557 #[cfg(target_arch = "x86_64")] 558 ch_release_path.push("cloud-hypervisor-static"); 559 #[cfg(target_arch = "aarch64")] 560 ch_release_path.push("cloud-hypervisor-static-aarch64"); 561 562 ch_release_path.into_os_string().into_string().unwrap() 563 } 564 565 fn prepare_vhost_user_net_daemon( 566 tmp_dir: &TempDir, 567 ip: &str, 568 tap: Option<&str>, 569 mtu: Option<u16>, 570 num_queues: usize, 571 client_mode: bool, 572 ) -> (std::process::Command, String) { 573 let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap()); 574 575 // Start the daemon 576 let mut net_params = format!( 577 "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}" 578 ); 579 580 if let Some(tap) = tap { 581 net_params.push_str(format!(",tap={tap}").as_str()); 582 } 583 584 if let Some(mtu) = mtu { 585 net_params.push_str(format!(",mtu={mtu}").as_str()); 586 } 587 588 let mut command = Command::new(clh_command("vhost_user_net")); 589 command.args(["--net-backend", net_params.as_str()]); 590 591 (command, vunet_socket_path) 592 } 593 594 fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) { 595 let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap()); 596 let swtpm_socket_path = String::from( 597 tmp_dir 598 .as_path() 599 .join("swtpm") 600 .join("swtpm.sock") 601 .to_str() 602 .unwrap(), 603 ); 604 std::fs::create_dir(&swtpm_tpm_dir).unwrap(); 605 606 let mut swtpm_command = Command::new("swtpm"); 607 let swtpm_args = [ 608 "socket", 609 "--tpmstate", 610 &format!("dir={swtpm_tpm_dir}"), 611 "--ctrl", 612 &format!("type=unixio,path={swtpm_socket_path}"), 613 "--flags", 614 "startup-clear", 615 "--tpm2", 616 ]; 617 swtpm_command.args(swtpm_args); 618 619 (swtpm_command, swtpm_socket_path) 620 } 621 622 fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool { 623 let mut cmd = Command::new(clh_command("ch-remote")); 624 cmd.args([&format!("--api-socket={api_socket}"), command]); 625 626 if let Some(arg) = arg { 627 cmd.arg(arg); 628 } 629 let output = cmd.output().unwrap(); 630 if output.status.success() { 
631 true 632 } else { 633 eprintln!("Error running ch-remote command: {:?}", &cmd); 634 let stderr = String::from_utf8_lossy(&output.stderr); 635 eprintln!("stderr: {stderr}"); 636 false 637 } 638 } 639 640 fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) { 641 let mut cmd = Command::new(clh_command("ch-remote")); 642 cmd.args([&format!("--api-socket={api_socket}"), command]); 643 644 if let Some(arg) = arg { 645 cmd.arg(arg); 646 } 647 648 let output = cmd.output().expect("Failed to launch ch-remote"); 649 650 (output.status.success(), output.stdout) 651 } 652 653 fn resize_command( 654 api_socket: &str, 655 desired_vcpus: Option<u8>, 656 desired_ram: Option<usize>, 657 desired_balloon: Option<usize>, 658 event_file: Option<&str>, 659 ) -> bool { 660 let mut cmd = Command::new(clh_command("ch-remote")); 661 cmd.args([&format!("--api-socket={api_socket}"), "resize"]); 662 663 if let Some(desired_vcpus) = desired_vcpus { 664 cmd.arg(format!("--cpus={desired_vcpus}")); 665 } 666 667 if let Some(desired_ram) = desired_ram { 668 cmd.arg(format!("--memory={desired_ram}")); 669 } 670 671 if let Some(desired_balloon) = desired_balloon { 672 cmd.arg(format!("--balloon={desired_balloon}")); 673 } 674 675 let ret = cmd.status().expect("Failed to launch ch-remote").success(); 676 677 if let Some(event_path) = event_file { 678 let latest_events = [ 679 &MetaEvent { 680 event: "resizing".to_string(), 681 device_id: None, 682 }, 683 &MetaEvent { 684 event: "resized".to_string(), 685 device_id: None, 686 }, 687 ]; 688 // See: #5938 689 thread::sleep(std::time::Duration::new(1, 0)); 690 assert!(check_latest_events_exact(&latest_events, event_path)); 691 } 692 693 ret 694 } 695 696 fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool { 697 let mut cmd = Command::new(clh_command("ch-remote")); 698 cmd.args([ 699 &format!("--api-socket={api_socket}"), 700 "resize-zone", 701 &format!("--id={id}"), 702 &format!("--size={desired_size}"), 703 ]); 704 705 cmd.status().expect("Failed to launch ch-remote").success() 706 } 707 708 // setup OVS-DPDK bridge and ports 709 fn setup_ovs_dpdk() { 710 // setup OVS-DPDK 711 assert!(exec_host_command_status("service openvswitch-switch start").success()); 712 assert!(exec_host_command_status("ovs-vsctl init").success()); 713 assert!( 714 exec_host_command_status("ovs-vsctl set Open_vSwitch . 
other_config:dpdk-init=true") 715 .success() 716 ); 717 assert!(exec_host_command_status("service openvswitch-switch restart").success()); 718 719 // Create OVS-DPDK bridge and ports 720 assert!(exec_host_command_status( 721 "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev", 722 ) 723 .success()); 724 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 725 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success()); 726 assert!(exec_host_command_status("ip link set up dev ovsbr0").success()); 727 assert!(exec_host_command_status("service openvswitch-switch restart").success()); 728 } 729 fn cleanup_ovs_dpdk() { 730 assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success()); 731 exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2"); 732 } 733 // Setup two guests and ensure they are connected through ovs-dpdk 734 fn setup_ovs_dpdk_guests( 735 guest1: &Guest, 736 guest2: &Guest, 737 api_socket: &str, 738 release_binary: bool, 739 ) -> (Child, Child) { 740 setup_ovs_dpdk(); 741 742 let clh_path = if !release_binary { 743 clh_command("cloud-hypervisor") 744 } else { 745 cloud_hypervisor_release_path() 746 }; 747 748 let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path) 749 .args(["--cpus", "boot=2"]) 750 .args(["--memory", "size=0,shared=on"]) 751 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"]) 752 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 753 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 754 .default_disks() 755 .args(["--net", guest1.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"]) 756 .capture_output() 757 .spawn() 758 .unwrap(); 759 760 #[cfg(target_arch = "x86_64")] 761 let guest_net_iface = "ens5"; 762 #[cfg(target_arch = "aarch64")] 763 let guest_net_iface = "enp0s5"; 764 765 let r = std::panic::catch_unwind(|| { 766 guest1.wait_vm_boot(None).unwrap(); 767 768 guest1 769 .ssh_command(&format!( 770 "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}" 771 )) 772 .unwrap(); 773 guest1 774 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 775 .unwrap(); 776 777 let guest_ip = guest1.network.guest_ip.clone(); 778 thread::spawn(move || { 779 ssh_command_ip( 780 "nc -l 12345", 781 &guest_ip, 782 DEFAULT_SSH_RETRIES, 783 DEFAULT_SSH_TIMEOUT, 784 ) 785 .unwrap(); 786 }); 787 }); 788 if r.is_err() { 789 cleanup_ovs_dpdk(); 790 791 let _ = child1.kill(); 792 let output = child1.wait_with_output().unwrap(); 793 handle_child_output(r, &output); 794 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 795 } 796 797 let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path) 798 .args(["--api-socket", api_socket]) 799 .args(["--cpus", "boot=2"]) 800 .args(["--memory", "size=0,shared=on"]) 801 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"]) 802 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 803 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 804 .default_disks() 805 .args(["--net", guest2.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"]) 806 
.capture_output() 807 .spawn() 808 .unwrap(); 809 810 let r = std::panic::catch_unwind(|| { 811 guest2.wait_vm_boot(None).unwrap(); 812 813 guest2 814 .ssh_command(&format!( 815 "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}" 816 )) 817 .unwrap(); 818 guest2 819 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}")) 820 .unwrap(); 821 822 // Check the connection works properly between the two VMs 823 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 824 }); 825 if r.is_err() { 826 cleanup_ovs_dpdk(); 827 828 let _ = child1.kill(); 829 let _ = child2.kill(); 830 let output = child2.wait_with_output().unwrap(); 831 handle_child_output(r, &output); 832 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 833 } 834 835 (child1, child2) 836 } 837 838 enum FwType { 839 Ovmf, 840 RustHypervisorFirmware, 841 } 842 843 fn fw_path(_fw_type: FwType) -> String { 844 let mut workload_path = dirs::home_dir().unwrap(); 845 workload_path.push("workloads"); 846 847 let mut fw_path = workload_path; 848 #[cfg(target_arch = "aarch64")] 849 fw_path.push("CLOUDHV_EFI.fd"); 850 #[cfg(target_arch = "x86_64")] 851 { 852 match _fw_type { 853 FwType::Ovmf => fw_path.push(OVMF_NAME), 854 FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"), 855 } 856 } 857 858 fw_path.to_str().unwrap().to_string() 859 } 860 861 #[derive(Debug)] 862 struct MetaEvent { 863 event: String, 864 device_id: Option<String>, 865 } 866 867 impl MetaEvent { 868 pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool { 869 let mut matched = false; 870 if v["event"].as_str().unwrap() == self.event { 871 if let Some(device_id) = &self.device_id { 872 if v["properties"]["id"].as_str().unwrap() == device_id { 873 matched = true 874 } 875 } else { 876 matched = true; 877 } 878 } 879 matched 880 } 881 } 882 883 // Parse the event_monitor file based on the format that each event 884 // is followed by a double newline 885 fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> { 886 let content = fs::read(event_file).unwrap(); 887 let mut ret = Vec::new(); 888 for entry in String::from_utf8_lossy(&content) 889 .trim() 890 .split("\n\n") 891 .collect::<Vec<&str>>() 892 { 893 ret.push(serde_json::from_str(entry).unwrap()); 894 } 895 896 ret 897 } 898 899 // Return true if all events from the input 'expected_events' are matched sequentially 900 // with events from the 'event_file' 901 fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool { 902 let json_events = parse_event_file(event_file); 903 let len = expected_events.len(); 904 let mut idx = 0; 905 for e in &json_events { 906 if idx == len { 907 break; 908 } 909 if expected_events[idx].match_with_json_event(e) { 910 idx += 1; 911 } 912 } 913 914 let ret = idx == len; 915 916 if !ret { 917 eprintln!( 918 "\n\n==== Start 'check_sequential_events' failed ==== \ 919 \n\nexpected_events={:?}\nactual_events={:?} \ 920 \n\n==== End 'check_sequential_events' failed ====", 921 expected_events, json_events, 922 ); 923 } 924 925 ret 926 } 927 928 // Return true if all events from the input 'expected_events' are matched exactly 929 // with events from the 'event_file' 930 fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool { 931 let json_events = parse_event_file(event_file); 932 assert!(expected_events.len() <= json_events.len()); 933 let json_events = &json_events[..expected_events.len()]; 934 935 for (idx, e) in json_events.iter().enumerate() { 
936 if !expected_events[idx].match_with_json_event(e) { 937 eprintln!( 938 "\n\n==== Start 'check_sequential_events_exact' failed ==== \ 939 \n\nexpected_events={:?}\nactual_events={:?} \ 940 \n\n==== End 'check_sequential_events_exact' failed ====", 941 expected_events, json_events, 942 ); 943 944 return false; 945 } 946 } 947 948 true 949 } 950 951 // Return true if events from the input 'latest_events' are matched exactly 952 // with the most recent events from the 'event_file' 953 fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool { 954 let json_events = parse_event_file(event_file); 955 assert!(latest_events.len() <= json_events.len()); 956 let json_events = &json_events[(json_events.len() - latest_events.len())..]; 957 958 for (idx, e) in json_events.iter().enumerate() { 959 if !latest_events[idx].match_with_json_event(e) { 960 eprintln!( 961 "\n\n==== Start 'check_latest_events_exact' failed ==== \ 962 \n\nexpected_events={:?}\nactual_events={:?} \ 963 \n\n==== End 'check_latest_events_exact' failed ====", 964 latest_events, json_events, 965 ); 966 967 return false; 968 } 969 } 970 971 true 972 } 973 974 fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) { 975 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 976 let guest = Guest::new(Box::new(focal)); 977 let total_vcpus = threads_per_core * cores_per_package * packages; 978 let direct_kernel_boot_path = direct_kernel_boot_path(); 979 let mut kernel_path = direct_kernel_boot_path.to_str().unwrap(); 980 let fw_path = fw_path(FwType::RustHypervisorFirmware); 981 if use_fw { 982 kernel_path = fw_path.as_str(); 983 } 984 985 let mut child = GuestCommand::new(&guest) 986 .args([ 987 "--cpus", 988 &format!( 989 "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}" 990 ), 991 ]) 992 .args(["--memory", "size=512M"]) 993 .args(["--kernel", kernel_path]) 994 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 995 .default_disks() 996 .default_net() 997 .capture_output() 998 .spawn() 999 .unwrap(); 1000 1001 let r = std::panic::catch_unwind(|| { 1002 guest.wait_vm_boot(None).unwrap(); 1003 assert_eq!( 1004 guest.get_cpu_count().unwrap_or_default(), 1005 u32::from(total_vcpus) 1006 ); 1007 assert_eq!( 1008 guest 1009 .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1010 .unwrap() 1011 .trim() 1012 .parse::<u8>() 1013 .unwrap_or(0), 1014 threads_per_core 1015 ); 1016 1017 assert_eq!( 1018 guest 1019 .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1020 .unwrap() 1021 .trim() 1022 .parse::<u8>() 1023 .unwrap_or(0), 1024 cores_per_package 1025 ); 1026 1027 assert_eq!( 1028 guest 1029 .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"") 1030 .unwrap() 1031 .trim() 1032 .parse::<u8>() 1033 .unwrap_or(0), 1034 packages 1035 ); 1036 1037 #[cfg(target_arch = "x86_64")] 1038 { 1039 let mut cpu_id = 0; 1040 for package_id in 0..packages { 1041 for core_id in 0..cores_per_package { 1042 for _ in 0..threads_per_core { 1043 assert_eq!( 1044 guest 1045 .ssh_command(&format!("cat /sys/devices/system/cpu/cpu{cpu_id}/topology/physical_package_id")) 1046 .unwrap() 1047 .trim() 1048 .parse::<u8>() 1049 .unwrap_or(0), 1050 package_id 1051 ); 1052 1053 assert_eq!( 1054 guest 1055 .ssh_command(&format!( 1056 "cat /sys/devices/system/cpu/cpu{cpu_id}/topology/core_id" 1057 )) 1058 .unwrap() 1059 .trim() 1060 .parse::<u8>() 1061 .unwrap_or(0), 1062 core_id 1063 ); 
1064 1065 cpu_id += 1; 1066 } 1067 } 1068 } 1069 } 1070 }); 1071 1072 kill_child(&mut child); 1073 let output = child.wait_with_output().unwrap(); 1074 1075 handle_child_output(r, &output); 1076 } 1077 1078 #[allow(unused_variables)] 1079 fn _test_guest_numa_nodes(acpi: bool) { 1080 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1081 let guest = Guest::new(Box::new(focal)); 1082 let api_socket = temp_api_path(&guest.tmp_dir); 1083 #[cfg(target_arch = "x86_64")] 1084 let kernel_path = direct_kernel_boot_path(); 1085 #[cfg(target_arch = "aarch64")] 1086 let kernel_path = if acpi { 1087 edk2_path() 1088 } else { 1089 direct_kernel_boot_path() 1090 }; 1091 1092 let mut child = GuestCommand::new(&guest) 1093 .args(["--cpus", "boot=6,max=12"]) 1094 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 1095 .args([ 1096 "--memory-zone", 1097 "id=mem0,size=1G,hotplug_size=3G", 1098 "id=mem1,size=2G,hotplug_size=3G", 1099 "id=mem2,size=3G,hotplug_size=3G", 1100 ]) 1101 .args([ 1102 "--numa", 1103 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 1104 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 1105 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 1106 ]) 1107 .args(["--kernel", kernel_path.to_str().unwrap()]) 1108 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1109 .args(["--api-socket", &api_socket]) 1110 .capture_output() 1111 .default_disks() 1112 .default_net() 1113 .spawn() 1114 .unwrap(); 1115 1116 let r = std::panic::catch_unwind(|| { 1117 guest.wait_vm_boot(None).unwrap(); 1118 1119 guest.check_numa_common( 1120 Some(&[960_000, 1_920_000, 2_880_000]), 1121 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 1122 Some(&["10 15 20", "20 10 25", "25 30 10"]), 1123 ); 1124 1125 // AArch64 currently does not support hotplug, and therefore we only 1126 // test hotplug-related function on x86_64 here. 1127 #[cfg(target_arch = "x86_64")] 1128 { 1129 guest.enable_memory_hotplug(); 1130 1131 // Resize every memory zone and check each associated NUMA node 1132 // has been assigned the right amount of memory. 1133 resize_zone_command(&api_socket, "mem0", "4G"); 1134 resize_zone_command(&api_socket, "mem1", "4G"); 1135 resize_zone_command(&api_socket, "mem2", "4G"); 1136 // Resize to the maximum amount of CPUs and check each NUMA 1137 // node has been assigned the right CPUs set. 
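            // resize_command() (defined earlier in this file) shells out to
            // `ch-remote --api-socket=<...> resize --cpus=12`; the short sleep
            // that follows is there to give the guest time to bring the
            // hotplugged vCPUs online before the NUMA layout is re-checked.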
1138 resize_command(&api_socket, Some(12), None, None, None); 1139 thread::sleep(std::time::Duration::new(5, 0)); 1140 1141 guest.check_numa_common( 1142 Some(&[3_840_000, 3_840_000, 3_840_000]), 1143 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 1144 None, 1145 ); 1146 } 1147 }); 1148 1149 kill_child(&mut child); 1150 let output = child.wait_with_output().unwrap(); 1151 1152 handle_child_output(r, &output); 1153 } 1154 1155 #[allow(unused_variables)] 1156 fn _test_power_button(acpi: bool) { 1157 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1158 let guest = Guest::new(Box::new(focal)); 1159 let mut cmd = GuestCommand::new(&guest); 1160 let api_socket = temp_api_path(&guest.tmp_dir); 1161 1162 #[cfg(target_arch = "x86_64")] 1163 let kernel_path = direct_kernel_boot_path(); 1164 #[cfg(target_arch = "aarch64")] 1165 let kernel_path = if acpi { 1166 edk2_path() 1167 } else { 1168 direct_kernel_boot_path() 1169 }; 1170 1171 cmd.args(["--cpus", "boot=1"]) 1172 .args(["--memory", "size=512M"]) 1173 .args(["--kernel", kernel_path.to_str().unwrap()]) 1174 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1175 .capture_output() 1176 .default_disks() 1177 .default_net() 1178 .args(["--api-socket", &api_socket]); 1179 1180 let child = cmd.spawn().unwrap(); 1181 1182 let r = std::panic::catch_unwind(|| { 1183 guest.wait_vm_boot(None).unwrap(); 1184 assert!(remote_command(&api_socket, "power-button", None)); 1185 }); 1186 1187 let output = child.wait_with_output().unwrap(); 1188 assert!(output.status.success()); 1189 handle_child_output(r, &output); 1190 } 1191 1192 type PrepareNetDaemon = dyn Fn( 1193 &TempDir, 1194 &str, 1195 Option<&str>, 1196 Option<u16>, 1197 usize, 1198 bool, 1199 ) -> (std::process::Command, String); 1200 1201 fn test_vhost_user_net( 1202 tap: Option<&str>, 1203 num_queues: usize, 1204 prepare_daemon: &PrepareNetDaemon, 1205 generate_host_mac: bool, 1206 client_mode_daemon: bool, 1207 ) { 1208 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1209 let guest = Guest::new(Box::new(focal)); 1210 let api_socket = temp_api_path(&guest.tmp_dir); 1211 1212 let kernel_path = direct_kernel_boot_path(); 1213 1214 let host_mac = if generate_host_mac { 1215 Some(MacAddr::local_random()) 1216 } else { 1217 None 1218 }; 1219 1220 let mtu = Some(3000); 1221 1222 let (mut daemon_command, vunet_socket_path) = prepare_daemon( 1223 &guest.tmp_dir, 1224 &guest.network.host_ip, 1225 tap, 1226 mtu, 1227 num_queues, 1228 client_mode_daemon, 1229 ); 1230 1231 let net_params = format!( 1232 "vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000", 1233 guest.network.guest_mac, 1234 vunet_socket_path, 1235 num_queues, 1236 if let Some(host_mac) = host_mac { 1237 format!(",host_mac={host_mac}") 1238 } else { 1239 "".to_owned() 1240 }, 1241 if client_mode_daemon { 1242 "server" 1243 } else { 1244 "client" 1245 }, 1246 ); 1247 1248 let mut ch_command = GuestCommand::new(&guest); 1249 ch_command 1250 .args(["--cpus", format!("boot={}", num_queues / 2).as_str()]) 1251 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1252 .args(["--kernel", kernel_path.to_str().unwrap()]) 1253 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1254 .default_disks() 1255 .args(["--net", net_params.as_str()]) 1256 .args(["--api-socket", &api_socket]) 1257 .capture_output(); 1258 1259 let mut daemon_child: std::process::Child; 1260 let mut child: std::process::Child; 1261 1262 if client_mode_daemon { 1263 child = 
            ch_command.spawn().unwrap();
        // Make sure the VMM is waiting for the backend to connect
        thread::sleep(std::time::Duration::new(10, 0));
        daemon_child = daemon_command.spawn().unwrap();
    } else {
        daemon_child = daemon_command.spawn().unwrap();
        // Make sure the backend is waiting for the VMM to connect
        thread::sleep(std::time::Duration::new(10, 0));
        child = ch_command.spawn().unwrap();
    }

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        if let Some(tap_name) = tap {
            let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}"));
            assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
        }

        if let Some(host_mac) = tap {
            let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}"));
            assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
        }

        #[cfg(target_arch = "aarch64")]
        let iface = "enp0s4";
        #[cfg(target_arch = "x86_64")]
        let iface = "ens4";

        assert_eq!(
            guest
                .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
                .unwrap()
                .trim(),
            "3000"
        );

        // 1 network interface + default localhost ==> 2 interfaces
        // It's important to note that this test is fully exercising the
        // vhost-user-net implementation and the associated backend since
        // it does not define any --net network interface. That means all
        // the ssh communication in that test happens through the network
        // interface backed by vhost-user-net.
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // The following pci devices will appear on guest with PCI-MSI
        // interrupt vectors assigned.
        // 1 virtio-console with 3 vectors: config, Rx, Tx
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-rng with 2 vectors: config, Request
        // Since virtio-net has 2 queue pairs, its vectors are as follows:
        // 1 virtio-net with 5 vectors: config, Rx (2), Tx (2)
        // Based on the above, the total number of vectors should be 14.
        #[cfg(target_arch = "x86_64")]
        let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
        #[cfg(target_arch = "aarch64")]
        let grep_cmd = "grep -c ITS-PCI-MSIX /proc/interrupts";
        assert_eq!(
            guest
                .ssh_command(grep_cmd)
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            10 + (num_queues as u32)
        );

        // ACPI feature is needed.
        #[cfg(target_arch = "x86_64")]
        {
            guest.enable_memory_hotplug();

            // Add RAM to the VM
            let desired_ram = 1024 << 20;
            resize_command(&api_socket, None, Some(desired_ram), None, None);

            thread::sleep(std::time::Duration::new(10, 0));

            // Here, by simply checking the size (through ssh), we validate that
            // the connection is still working, which means vhost-user-net
            // keeps working after the resize.
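            // The memory thresholds used throughout these tests are expressed
            // in kB as reported by the guest: 960_000 kB is just below the
            // resized 1 GiB (1_048_576 kB), leaving a margin for
            // kernel-reserved memory.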
1353 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1354 } 1355 }); 1356 1357 kill_child(&mut child); 1358 let output = child.wait_with_output().unwrap(); 1359 1360 thread::sleep(std::time::Duration::new(5, 0)); 1361 let _ = daemon_child.kill(); 1362 let _ = daemon_child.wait(); 1363 1364 handle_child_output(r, &output); 1365 } 1366 1367 type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String); 1368 1369 fn test_vhost_user_blk( 1370 num_queues: usize, 1371 readonly: bool, 1372 direct: bool, 1373 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>, 1374 ) { 1375 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1376 let guest = Guest::new(Box::new(focal)); 1377 let api_socket = temp_api_path(&guest.tmp_dir); 1378 1379 let kernel_path = direct_kernel_boot_path(); 1380 1381 let (blk_params, daemon_child) = { 1382 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap(); 1383 // Start the daemon 1384 let (daemon_child, vubd_socket_path) = 1385 prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct); 1386 1387 ( 1388 format!( 1389 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128", 1390 ), 1391 Some(daemon_child), 1392 ) 1393 }; 1394 1395 let mut child = GuestCommand::new(&guest) 1396 .args(["--cpus", format!("boot={num_queues}").as_str()]) 1397 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1398 .args(["--kernel", kernel_path.to_str().unwrap()]) 1399 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1400 .args([ 1401 "--disk", 1402 format!( 1403 "path={}", 1404 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 1405 ) 1406 .as_str(), 1407 format!( 1408 "path={}", 1409 guest.disk_config.disk(DiskType::CloudInit).unwrap() 1410 ) 1411 .as_str(), 1412 blk_params.as_str(), 1413 ]) 1414 .default_net() 1415 .args(["--api-socket", &api_socket]) 1416 .capture_output() 1417 .spawn() 1418 .unwrap(); 1419 1420 let r = std::panic::catch_unwind(|| { 1421 guest.wait_vm_boot(None).unwrap(); 1422 1423 // Check both if /dev/vdc exists and if the block size is 16M. 1424 assert_eq!( 1425 guest 1426 .ssh_command("lsblk | grep vdc | grep -c 16M") 1427 .unwrap() 1428 .trim() 1429 .parse::<u32>() 1430 .unwrap_or_default(), 1431 1 1432 ); 1433 1434 // Check if this block is RO or RW. 1435 assert_eq!( 1436 guest 1437 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 1438 .unwrap() 1439 .trim() 1440 .parse::<u32>() 1441 .unwrap_or_default(), 1442 readonly as u32 1443 ); 1444 1445 // Check if the number of queues in /sys/block/vdc/mq matches the 1446 // expected num_queues. 1447 assert_eq!( 1448 guest 1449 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 1450 .unwrap() 1451 .trim() 1452 .parse::<u32>() 1453 .unwrap_or_default(), 1454 num_queues as u32 1455 ); 1456 1457 // Mount the device 1458 let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" }; 1459 guest.ssh_command("mkdir mount_image").unwrap(); 1460 guest 1461 .ssh_command( 1462 format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(), 1463 ) 1464 .unwrap(); 1465 1466 // Check the content of the block device. The file "foo" should 1467 // contain "bar". 1468 assert_eq!( 1469 guest.ssh_command("cat mount_image/foo").unwrap().trim(), 1470 "bar" 1471 ); 1472 1473 // ACPI feature is needed. 
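        // Memory hotplug (exercised below through resize_command(), i.e.
        // `ch-remote resize --memory=...`) relies on ACPI, which is why this
        // part of the test is only run on x86_64.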
1474 #[cfg(target_arch = "x86_64")] 1475 { 1476 guest.enable_memory_hotplug(); 1477 1478 // Add RAM to the VM 1479 let desired_ram = 1024 << 20; 1480 resize_command(&api_socket, None, Some(desired_ram), None, None); 1481 1482 thread::sleep(std::time::Duration::new(10, 0)); 1483 1484 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1485 1486 // Check again the content of the block device after the resize 1487 // has been performed. 1488 assert_eq!( 1489 guest.ssh_command("cat mount_image/foo").unwrap().trim(), 1490 "bar" 1491 ); 1492 } 1493 1494 // Unmount the device 1495 guest.ssh_command("sudo umount /dev/vdc").unwrap(); 1496 guest.ssh_command("rm -r mount_image").unwrap(); 1497 }); 1498 1499 kill_child(&mut child); 1500 let output = child.wait_with_output().unwrap(); 1501 1502 if let Some(mut daemon_child) = daemon_child { 1503 thread::sleep(std::time::Duration::new(5, 0)); 1504 let _ = daemon_child.kill(); 1505 let _ = daemon_child.wait(); 1506 } 1507 1508 handle_child_output(r, &output); 1509 } 1510 1511 fn test_boot_from_vhost_user_blk( 1512 num_queues: usize, 1513 readonly: bool, 1514 direct: bool, 1515 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>, 1516 ) { 1517 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1518 let guest = Guest::new(Box::new(focal)); 1519 1520 let kernel_path = direct_kernel_boot_path(); 1521 1522 let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap(); 1523 1524 let (blk_boot_params, daemon_child) = { 1525 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap(); 1526 // Start the daemon 1527 let (daemon_child, vubd_socket_path) = prepare_daemon( 1528 &guest.tmp_dir, 1529 disk_path.as_str(), 1530 num_queues, 1531 readonly, 1532 direct, 1533 ); 1534 1535 ( 1536 format!( 1537 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128", 1538 ), 1539 Some(daemon_child), 1540 ) 1541 }; 1542 1543 let mut child = GuestCommand::new(&guest) 1544 .args(["--cpus", format!("boot={num_queues}").as_str()]) 1545 .args(["--memory", "size=512M,shared=on"]) 1546 .args(["--kernel", kernel_path.to_str().unwrap()]) 1547 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1548 .args([ 1549 "--disk", 1550 blk_boot_params.as_str(), 1551 format!( 1552 "path={}", 1553 guest.disk_config.disk(DiskType::CloudInit).unwrap() 1554 ) 1555 .as_str(), 1556 ]) 1557 .default_net() 1558 .capture_output() 1559 .spawn() 1560 .unwrap(); 1561 1562 let r = std::panic::catch_unwind(|| { 1563 guest.wait_vm_boot(None).unwrap(); 1564 1565 // Just check the VM booted correctly. 
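        // `--cpus boot={num_queues}` was passed above, so the vCPU count also
        // serves as a quick check that the intended configuration was applied;
        // 480_000 kB is the usual lower bound used for a 512M guest in these
        // tests, leaving room for kernel-reserved memory.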
1566 assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32); 1567 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 1568 }); 1569 kill_child(&mut child); 1570 let output = child.wait_with_output().unwrap(); 1571 1572 if let Some(mut daemon_child) = daemon_child { 1573 thread::sleep(std::time::Duration::new(5, 0)); 1574 let _ = daemon_child.kill(); 1575 let _ = daemon_child.wait(); 1576 } 1577 1578 handle_child_output(r, &output); 1579 } 1580 1581 fn _test_virtio_fs( 1582 prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String), 1583 hotplug: bool, 1584 pci_segment: Option<u16>, 1585 ) { 1586 #[cfg(target_arch = "aarch64")] 1587 let focal_image = if hotplug { 1588 FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string() 1589 } else { 1590 FOCAL_IMAGE_NAME.to_string() 1591 }; 1592 #[cfg(target_arch = "x86_64")] 1593 let focal_image = FOCAL_IMAGE_NAME.to_string(); 1594 let focal = UbuntuDiskConfig::new(focal_image); 1595 let guest = Guest::new(Box::new(focal)); 1596 let api_socket = temp_api_path(&guest.tmp_dir); 1597 1598 let mut workload_path = dirs::home_dir().unwrap(); 1599 workload_path.push("workloads"); 1600 1601 let mut shared_dir = workload_path; 1602 shared_dir.push("shared_dir"); 1603 1604 #[cfg(target_arch = "x86_64")] 1605 let kernel_path = direct_kernel_boot_path(); 1606 #[cfg(target_arch = "aarch64")] 1607 let kernel_path = if hotplug { 1608 edk2_path() 1609 } else { 1610 direct_kernel_boot_path() 1611 }; 1612 1613 let (mut daemon_child, virtiofsd_socket_path) = 1614 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap()); 1615 1616 let mut guest_command = GuestCommand::new(&guest); 1617 guest_command 1618 .args(["--cpus", "boot=1"]) 1619 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"]) 1620 .args(["--kernel", kernel_path.to_str().unwrap()]) 1621 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1622 .default_disks() 1623 .default_net() 1624 .args(["--api-socket", &api_socket]); 1625 if pci_segment.is_some() { 1626 guest_command.args([ 1627 "--platform", 1628 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 1629 ]); 1630 } 1631 1632 let fs_params = format!( 1633 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}", 1634 virtiofsd_socket_path, 1635 if let Some(pci_segment) = pci_segment { 1636 format!(",pci_segment={pci_segment}") 1637 } else { 1638 "".to_owned() 1639 } 1640 ); 1641 1642 if !hotplug { 1643 guest_command.args(["--fs", fs_params.as_str()]); 1644 } 1645 1646 let mut child = guest_command.capture_output().spawn().unwrap(); 1647 1648 let r = std::panic::catch_unwind(|| { 1649 guest.wait_vm_boot(None).unwrap(); 1650 1651 if hotplug { 1652 // Add fs to the VM 1653 let (cmd_success, cmd_output) = 1654 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params)); 1655 assert!(cmd_success); 1656 1657 if let Some(pci_segment) = pci_segment { 1658 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 1659 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 1660 ))); 1661 } else { 1662 assert!(String::from_utf8_lossy(&cmd_output) 1663 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}")); 1664 } 1665 1666 thread::sleep(std::time::Duration::new(10, 0)); 1667 } 1668 1669 // Mount shared directory through virtio_fs filesystem 1670 guest 1671 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/") 1672 .unwrap(); 1673 1674 // Check file1 exists and its content is "foo" 1675 assert_eq!( 1676 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1677 "foo" 1678 ); 
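        // The checks above and below rely on the shared_dir fixture under
        // ~/workloads that virtiofsd exports: it is expected to contain
        // file1 ("foo") and file3 ("bar"), but no file2.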
1679 // Check file2 does not exist 1680 guest 1681 .ssh_command("[ ! -f 'mount_dir/file2' ] || true") 1682 .unwrap(); 1683 1684 // Check file3 exists and its content is "bar" 1685 assert_eq!( 1686 guest.ssh_command("cat mount_dir/file3").unwrap().trim(), 1687 "bar" 1688 ); 1689 1690 // ACPI feature is needed. 1691 #[cfg(target_arch = "x86_64")] 1692 { 1693 guest.enable_memory_hotplug(); 1694 1695 // Add RAM to the VM 1696 let desired_ram = 1024 << 20; 1697 resize_command(&api_socket, None, Some(desired_ram), None, None); 1698 1699 thread::sleep(std::time::Duration::new(30, 0)); 1700 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 1701 1702 // After the resize, check again that file1 exists and its 1703 // content is "foo". 1704 assert_eq!( 1705 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1706 "foo" 1707 ); 1708 } 1709 1710 if hotplug { 1711 // Remove from VM 1712 guest.ssh_command("sudo umount mount_dir").unwrap(); 1713 assert!(remote_command(&api_socket, "remove-device", Some("myfs0"))); 1714 } 1715 }); 1716 1717 let (r, hotplug_daemon_child) = if r.is_ok() && hotplug { 1718 thread::sleep(std::time::Duration::new(10, 0)); 1719 let (daemon_child, virtiofsd_socket_path) = 1720 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap()); 1721 1722 let r = std::panic::catch_unwind(|| { 1723 thread::sleep(std::time::Duration::new(10, 0)); 1724 let fs_params = format!( 1725 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}", 1726 virtiofsd_socket_path, 1727 if let Some(pci_segment) = pci_segment { 1728 format!(",pci_segment={pci_segment}") 1729 } else { 1730 "".to_owned() 1731 } 1732 ); 1733 1734 // Add back and check it works 1735 let (cmd_success, cmd_output) = 1736 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params)); 1737 assert!(cmd_success); 1738 if let Some(pci_segment) = pci_segment { 1739 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 1740 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 1741 ))); 1742 } else { 1743 assert!(String::from_utf8_lossy(&cmd_output) 1744 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}")); 1745 } 1746 1747 thread::sleep(std::time::Duration::new(10, 0)); 1748 // Mount shared directory through virtio_fs filesystem 1749 guest 1750 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/") 1751 .unwrap(); 1752 1753 // Check file1 exists and its content is "foo" 1754 assert_eq!( 1755 guest.ssh_command("cat mount_dir/file1").unwrap().trim(), 1756 "foo" 1757 ); 1758 }); 1759 1760 (r, Some(daemon_child)) 1761 } else { 1762 (r, None) 1763 }; 1764 1765 kill_child(&mut child); 1766 let output = child.wait_with_output().unwrap(); 1767 1768 let _ = daemon_child.kill(); 1769 let _ = daemon_child.wait(); 1770 1771 if let Some(mut daemon_child) = hotplug_daemon_child { 1772 let _ = daemon_child.kill(); 1773 let _ = daemon_child.wait(); 1774 } 1775 1776 handle_child_output(r, &output); 1777 } 1778 1779 fn test_virtio_pmem(discard_writes: bool, specify_size: bool) { 1780 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1781 let guest = Guest::new(Box::new(focal)); 1782 1783 let kernel_path = direct_kernel_boot_path(); 1784 1785 let pmem_temp_file = TempFile::new().unwrap(); 1786 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 1787 1788 std::process::Command::new("mkfs.ext4") 1789 .arg(pmem_temp_file.as_path()) 1790 .output() 1791 .expect("Expect creating disk image to succeed"); 1792 1793 let mut child = GuestCommand::new(&guest) 1794 
.args(["--cpus", "boot=1"]) 1795 .args(["--memory", "size=512M"]) 1796 .args(["--kernel", kernel_path.to_str().unwrap()]) 1797 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1798 .default_disks() 1799 .default_net() 1800 .args([ 1801 "--pmem", 1802 format!( 1803 "file={}{}{}", 1804 pmem_temp_file.as_path().to_str().unwrap(), 1805 if specify_size { ",size=128M" } else { "" }, 1806 if discard_writes { 1807 ",discard_writes=on" 1808 } else { 1809 "" 1810 } 1811 ) 1812 .as_str(), 1813 ]) 1814 .capture_output() 1815 .spawn() 1816 .unwrap(); 1817 1818 let r = std::panic::catch_unwind(|| { 1819 guest.wait_vm_boot(None).unwrap(); 1820 1821 // Check for the presence of /dev/pmem0 1822 assert_eq!( 1823 guest.ssh_command("ls /dev/pmem0").unwrap().trim(), 1824 "/dev/pmem0" 1825 ); 1826 1827 // Check changes persist after reboot 1828 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1829 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 1830 guest 1831 .ssh_command("echo test123 | sudo tee /mnt/test") 1832 .unwrap(); 1833 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 1834 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 1835 1836 guest.reboot_linux(0, None); 1837 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), ""); 1838 assert_eq!( 1839 guest 1840 .ssh_command("sudo cat /mnt/test || true") 1841 .unwrap() 1842 .trim(), 1843 if discard_writes { "" } else { "test123" } 1844 ); 1845 }); 1846 1847 kill_child(&mut child); 1848 let output = child.wait_with_output().unwrap(); 1849 1850 handle_child_output(r, &output); 1851 } 1852 1853 fn get_fd_count(pid: u32) -> usize { 1854 fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count() 1855 } 1856 1857 fn _test_virtio_vsock(hotplug: bool) { 1858 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1859 let guest = Guest::new(Box::new(focal)); 1860 1861 #[cfg(target_arch = "x86_64")] 1862 let kernel_path = direct_kernel_boot_path(); 1863 #[cfg(target_arch = "aarch64")] 1864 let kernel_path = if hotplug { 1865 edk2_path() 1866 } else { 1867 direct_kernel_boot_path() 1868 }; 1869 1870 let socket = temp_vsock_path(&guest.tmp_dir); 1871 let api_socket = temp_api_path(&guest.tmp_dir); 1872 1873 let mut cmd = GuestCommand::new(&guest); 1874 cmd.args(["--api-socket", &api_socket]); 1875 cmd.args(["--cpus", "boot=1"]); 1876 cmd.args(["--memory", "size=512M"]); 1877 cmd.args(["--kernel", kernel_path.to_str().unwrap()]); 1878 cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]); 1879 cmd.default_disks(); 1880 cmd.default_net(); 1881 1882 if !hotplug { 1883 cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]); 1884 } 1885 1886 let mut child = cmd.capture_output().spawn().unwrap(); 1887 1888 let r = std::panic::catch_unwind(|| { 1889 guest.wait_vm_boot(None).unwrap(); 1890 1891 if hotplug { 1892 let (cmd_success, cmd_output) = remote_command_w_output( 1893 &api_socket, 1894 "add-vsock", 1895 Some(format!("cid=3,socket={socket},id=test0").as_str()), 1896 ); 1897 assert!(cmd_success); 1898 assert!(String::from_utf8_lossy(&cmd_output) 1899 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 1900 thread::sleep(std::time::Duration::new(10, 0)); 1901 // Check adding a second one fails 1902 assert!(!remote_command( 1903 &api_socket, 1904 "add-vsock", 1905 Some("cid=1234,socket=/tmp/fail") 1906 )); 1907 } 1908 1909 // Validate vsock works as expected. 
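        // check_vsock() comes from the test_infra crate; it exercises the
        // host/guest vsock path through the UNIX socket configured above
        // (cid=3,socket=...).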
1910 guest.check_vsock(socket.as_str()); 1911 guest.reboot_linux(0, None); 1912 // Validate vsock still works after a reboot. 1913 guest.check_vsock(socket.as_str()); 1914 1915 if hotplug { 1916 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 1917 } 1918 }); 1919 1920 kill_child(&mut child); 1921 let output = child.wait_with_output().unwrap(); 1922 1923 handle_child_output(r, &output); 1924 } 1925 1926 fn get_ksm_pages_shared() -> u32 { 1927 fs::read_to_string("/sys/kernel/mm/ksm/pages_shared") 1928 .unwrap() 1929 .trim() 1930 .parse::<u32>() 1931 .unwrap() 1932 } 1933 1934 fn test_memory_mergeable(mergeable: bool) { 1935 let memory_param = if mergeable { 1936 "mergeable=on" 1937 } else { 1938 "mergeable=off" 1939 }; 1940 1941 // We are assuming the rest of the system in our CI is not using mergeable memory 1942 let ksm_ps_init = get_ksm_pages_shared(); 1943 assert!(ksm_ps_init == 0); 1944 1945 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1946 let guest1 = Guest::new(Box::new(focal1)); 1947 let mut child1 = GuestCommand::new(&guest1) 1948 .args(["--cpus", "boot=1"]) 1949 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1950 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1951 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1952 .default_disks() 1953 .args(["--net", guest1.default_net_string().as_str()]) 1954 .args(["--serial", "tty", "--console", "off"]) 1955 .capture_output() 1956 .spawn() 1957 .unwrap(); 1958 1959 let r = std::panic::catch_unwind(|| { 1960 guest1.wait_vm_boot(None).unwrap(); 1961 }); 1962 if r.is_err() { 1963 kill_child(&mut child1); 1964 let output = child1.wait_with_output().unwrap(); 1965 handle_child_output(r, &output); 1966 panic!("Test should already be failed/panicked"); // To explicitly mark this block never return 1967 } 1968 1969 let ksm_ps_guest1 = get_ksm_pages_shared(); 1970 1971 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 1972 let guest2 = Guest::new(Box::new(focal2)); 1973 let mut child2 = GuestCommand::new(&guest2) 1974 .args(["--cpus", "boot=1"]) 1975 .args(["--memory", format!("size=512M,{memory_param}").as_str()]) 1976 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 1977 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 1978 .default_disks() 1979 .args(["--net", guest2.default_net_string().as_str()]) 1980 .args(["--serial", "tty", "--console", "off"]) 1981 .capture_output() 1982 .spawn() 1983 .unwrap(); 1984 1985 let r = std::panic::catch_unwind(|| { 1986 guest2.wait_vm_boot(None).unwrap(); 1987 let ksm_ps_guest2 = get_ksm_pages_shared(); 1988 1989 if mergeable { 1990 println!( 1991 "ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'" 1992 ); 1993 // We are expecting the number of shared pages to increase as the number of VM increases 1994 assert!(ksm_ps_guest1 < ksm_ps_guest2); 1995 } else { 1996 assert!(ksm_ps_guest1 == 0); 1997 assert!(ksm_ps_guest2 == 0); 1998 } 1999 }); 2000 2001 kill_child(&mut child1); 2002 kill_child(&mut child2); 2003 2004 let output = child1.wait_with_output().unwrap(); 2005 child2.wait().unwrap(); 2006 2007 handle_child_output(r, &output); 2008 } 2009 2010 fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> { 2011 let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap(); 2012 let reader = io::BufReader::new(smaps); 2013 2014 let mut skip_map: bool = false; 2015 let mut region_name: String = "".to_string(); 2016 let mut 
region_maps = HashMap::new(); 2017 for line in reader.lines() { 2018 let l = line.unwrap(); 2019 2020 if l.contains('-') { 2021 let values: Vec<&str> = l.split_whitespace().collect(); 2022 region_name = values.last().unwrap().trim().to_string(); 2023 if region_name == "0" { 2024 region_name = "anonymous".to_string() 2025 } 2026 } 2027 2028 // Each section begins with something that looks like: 2029 // Size: 2184 kB 2030 if l.starts_with("Size:") { 2031 let values: Vec<&str> = l.split_whitespace().collect(); 2032 let map_size = values[1].parse::<u32>().unwrap(); 2033 // We skip the assigned guest RAM map, its RSS is only 2034 // dependent on the guest actual memory usage. 2035 // Everything else can be added to the VMM overhead. 2036 skip_map = map_size >= guest_memory_size; 2037 continue; 2038 } 2039 2040 // If this is a map we're taking into account, then we only 2041 // count the RSS. The sum of all counted RSS is the VMM overhead. 2042 if !skip_map && l.starts_with("Rss:") { 2043 let values: Vec<&str> = l.split_whitespace().collect(); 2044 let value = values[1].trim().parse::<u32>().unwrap(); 2045 *region_maps.entry(region_name.clone()).or_insert(0) += value; 2046 } 2047 } 2048 2049 region_maps 2050 } 2051 2052 fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 { 2053 let mut total = 0; 2054 2055 for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) { 2056 eprintln!("{region_name}: {value}"); 2057 total += value; 2058 } 2059 2060 total 2061 } 2062 2063 fn process_rss_kib(pid: u32) -> usize { 2064 let command = format!("ps -q {pid} -o rss="); 2065 let rss = exec_host_command_output(&command); 2066 String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap() 2067 } 2068 2069 // 10MB is our maximum accepted overhead. 2070 const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024; 2071 2072 #[derive(PartialEq, Eq, PartialOrd)] 2073 struct Counters { 2074 rx_bytes: u64, 2075 rx_frames: u64, 2076 tx_bytes: u64, 2077 tx_frames: u64, 2078 read_bytes: u64, 2079 write_bytes: u64, 2080 read_ops: u64, 2081 write_ops: u64, 2082 } 2083 2084 fn get_counters(api_socket: &str) -> Counters { 2085 // Get counters 2086 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None); 2087 assert!(cmd_success); 2088 2089 let counters: HashMap<&str, HashMap<&str, u64>> = 2090 serde_json::from_slice(&cmd_output).unwrap_or_default(); 2091 2092 let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap(); 2093 let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap(); 2094 let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap(); 2095 let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap(); 2096 2097 let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap(); 2098 let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap(); 2099 let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap(); 2100 let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap(); 2101 2102 Counters { 2103 rx_bytes, 2104 rx_frames, 2105 tx_bytes, 2106 tx_frames, 2107 read_bytes, 2108 write_bytes, 2109 read_ops, 2110 write_ops, 2111 } 2112 } 2113 2114 fn pty_read(mut pty: std::fs::File) -> Receiver<String> { 2115 let (tx, rx) = mpsc::channel::<String>(); 2116 thread::spawn(move || loop { 2117 thread::sleep(std::time::Duration::new(1, 0)); 2118 let mut buf = [0; 512]; 2119 match pty.read(&mut buf) { 2120 Ok(_bytes) => { 2121 let output = 
std::str::from_utf8(&buf).unwrap().to_string(); 2122 match tx.send(output) { 2123 Ok(_) => (), 2124 Err(_) => break, 2125 } 2126 } 2127 Err(_) => break, 2128 } 2129 }); 2130 rx 2131 } 2132 2133 fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf { 2134 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2135 assert!(cmd_success); 2136 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2137 assert_eq!("Pty", info["config"][pty_type]["mode"]); 2138 PathBuf::from( 2139 info["config"][pty_type]["file"] 2140 .as_str() 2141 .expect("Missing pty path"), 2142 ) 2143 } 2144 2145 // VFIO test network setup. 2146 // We reserve a different IP class for it: 172.18.0.0/24. 2147 #[cfg(target_arch = "x86_64")] 2148 fn setup_vfio_network_interfaces() { 2149 // 'vfio-br0' 2150 assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success()); 2151 assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success()); 2152 assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success()); 2153 // 'vfio-tap0' 2154 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success()); 2155 assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success()); 2156 assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success()); 2157 // 'vfio-tap1' 2158 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success()); 2159 assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success()); 2160 assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success()); 2161 // 'vfio-tap2' 2162 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success()); 2163 assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success()); 2164 assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success()); 2165 // 'vfio-tap3' 2166 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success()); 2167 assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success()); 2168 assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success()); 2169 } 2170 2171 // Tear VFIO test network down 2172 #[cfg(target_arch = "x86_64")] 2173 fn cleanup_vfio_network_interfaces() { 2174 assert!(exec_host_command_status("sudo ip link del vfio-br0").success()); 2175 assert!(exec_host_command_status("sudo ip link del vfio-tap0").success()); 2176 assert!(exec_host_command_status("sudo ip link del vfio-tap1").success()); 2177 assert!(exec_host_command_status("sudo ip link del vfio-tap2").success()); 2178 assert!(exec_host_command_status("sudo ip link del vfio-tap3").success()); 2179 } 2180 2181 fn balloon_size(api_socket: &str) -> u64 { 2182 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2183 assert!(cmd_success); 2184 2185 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default(); 2186 let total_mem = &info["config"]["memory"]["size"] 2187 .to_string() 2188 .parse::<u64>() 2189 .unwrap(); 2190 let actual_mem = &info["memory_actual_size"] 2191 .to_string() 2192 .parse::<u64>() 2193 .unwrap(); 2194 total_mem - actual_mem 2195 } 2196 2197 fn vm_state(api_socket: &str) -> String { 2198 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None); 2199 assert!(cmd_success); 2200 2201 let info: serde_json::Value = 
serde_json::from_slice(&cmd_output).unwrap_or_default(); 2202 let state = &info["state"].as_str().unwrap(); 2203 2204 state.to_string() 2205 } 2206 2207 // This test validates that it can find the virtio-iommu device at first. 2208 // It also verifies that both disks and the network card are attached to 2209 // the virtual IOMMU by looking at /sys/kernel/iommu_groups directory. 2210 // The last interesting part of this test is that it exercises the network 2211 // interface attached to the virtual IOMMU since this is the one used to 2212 // send all commands through SSH. 2213 fn _test_virtio_iommu(acpi: bool) { 2214 // Virtio-iommu support is ready in recent kernel (v5.14). But the kernel in 2215 // Focal image is still old. 2216 // So if ACPI is enabled on AArch64, we use a modified Focal image in which 2217 // the kernel binary has been updated. 2218 #[cfg(target_arch = "aarch64")] 2219 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 2220 #[cfg(target_arch = "x86_64")] 2221 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2222 let focal = UbuntuDiskConfig::new(focal_image); 2223 let guest = Guest::new(Box::new(focal)); 2224 2225 #[cfg(target_arch = "x86_64")] 2226 let kernel_path = direct_kernel_boot_path(); 2227 #[cfg(target_arch = "aarch64")] 2228 let kernel_path = if acpi { 2229 edk2_path() 2230 } else { 2231 direct_kernel_boot_path() 2232 }; 2233 2234 let mut child = GuestCommand::new(&guest) 2235 .args(["--cpus", "boot=1"]) 2236 .args(["--memory", "size=512M"]) 2237 .args(["--kernel", kernel_path.to_str().unwrap()]) 2238 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2239 .args([ 2240 "--disk", 2241 format!( 2242 "path={},iommu=on", 2243 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2244 ) 2245 .as_str(), 2246 format!( 2247 "path={},iommu=on", 2248 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2249 ) 2250 .as_str(), 2251 ]) 2252 .args(["--net", guest.default_net_string_w_iommu().as_str()]) 2253 .capture_output() 2254 .spawn() 2255 .unwrap(); 2256 2257 let r = std::panic::catch_unwind(|| { 2258 guest.wait_vm_boot(None).unwrap(); 2259 2260 // Verify the virtio-iommu device is present. 2261 assert!(guest 2262 .does_device_vendor_pair_match("0x1057", "0x1af4") 2263 .unwrap_or_default()); 2264 2265 // On AArch64, if the guest system boots from FDT, the behavior of IOMMU is a bit 2266 // different with ACPI. 2267 // All devices on the PCI bus will be attached to the virtual IOMMU, except the 2268 // virtio-iommu device itself. So these devices will all be added to IOMMU groups, 2269 // and appear under folder '/sys/kernel/iommu_groups/'. 2270 // The result is, in the case of FDT, IOMMU group '0' contains "0000:00:01.0" 2271 // which is the console. The first disk "0000:00:02.0" is in group '1'. 2272 // While on ACPI, console device is not attached to IOMMU. So the IOMMU group '0' 2273 // contains "0000:00:02.0" which is the first disk. 2274 // 2275 // Verify the iommu group of the first disk. 2276 let iommu_group = !acpi as i32; 2277 assert_eq!( 2278 guest 2279 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2280 .unwrap() 2281 .trim(), 2282 "0000:00:02.0" 2283 ); 2284 2285 // Verify the iommu group of the second disk. 2286 let iommu_group = if acpi { 1 } else { 2 }; 2287 assert_eq!( 2288 guest 2289 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2290 .unwrap() 2291 .trim(), 2292 "0000:00:03.0" 2293 ); 2294 2295 // Verify the iommu group of the network card. 
2296 let iommu_group = if acpi { 2 } else { 3 }; 2297 assert_eq!( 2298 guest 2299 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str()) 2300 .unwrap() 2301 .trim(), 2302 "0000:00:04.0" 2303 ); 2304 }); 2305 2306 kill_child(&mut child); 2307 let output = child.wait_with_output().unwrap(); 2308 2309 handle_child_output(r, &output); 2310 } 2311 2312 fn get_reboot_count(guest: &Guest) -> u32 { 2313 guest 2314 .ssh_command("sudo last | grep -c reboot") 2315 .unwrap() 2316 .trim() 2317 .parse::<u32>() 2318 .unwrap_or_default() 2319 } 2320 2321 fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) { 2322 // Check for PCI device 2323 assert!(guest 2324 .does_device_vendor_pair_match("0x1063", "0x1af4") 2325 .unwrap_or_default()); 2326 2327 // Enable systemd watchdog 2328 guest 2329 .ssh_command(&format!( 2330 "echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf" 2331 )) 2332 .unwrap(); 2333 2334 guest.ssh_command("sudo systemctl daemon-reexec").unwrap(); 2335 } 2336 2337 fn make_guest_panic(guest: &Guest) { 2338 // Check for pvpanic device 2339 assert!(guest 2340 .does_device_vendor_pair_match("0x0011", "0x1b36") 2341 .unwrap_or_default()); 2342 2343 // Trigger guest a panic 2344 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 2345 } 2346 2347 mod common_parallel { 2348 use std::fs::OpenOptions; 2349 use std::io::SeekFrom; 2350 2351 use crate::*; 2352 2353 #[test] 2354 #[cfg(target_arch = "x86_64")] 2355 fn test_focal_hypervisor_fw() { 2356 test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME) 2357 } 2358 2359 #[test] 2360 #[cfg(target_arch = "x86_64")] 2361 fn test_focal_ovmf() { 2362 test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME) 2363 } 2364 2365 #[cfg(target_arch = "x86_64")] 2366 fn test_simple_launch(fw_path: String, disk_path: &str) { 2367 let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string())); 2368 let guest = Guest::new(disk_config); 2369 let event_path = temp_event_monitor_path(&guest.tmp_dir); 2370 2371 let mut child = GuestCommand::new(&guest) 2372 .args(["--cpus", "boot=1"]) 2373 .args(["--memory", "size=512M"]) 2374 .args(["--kernel", fw_path.as_str()]) 2375 .default_disks() 2376 .default_net() 2377 .args(["--serial", "tty", "--console", "off"]) 2378 .args(["--event-monitor", format!("path={event_path}").as_str()]) 2379 .capture_output() 2380 .spawn() 2381 .unwrap(); 2382 2383 let r = std::panic::catch_unwind(|| { 2384 guest.wait_vm_boot(Some(120)).unwrap(); 2385 2386 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 2387 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 2388 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 2389 2390 let expected_sequential_events = [ 2391 &MetaEvent { 2392 event: "starting".to_string(), 2393 device_id: None, 2394 }, 2395 &MetaEvent { 2396 event: "booting".to_string(), 2397 device_id: None, 2398 }, 2399 &MetaEvent { 2400 event: "booted".to_string(), 2401 device_id: None, 2402 }, 2403 &MetaEvent { 2404 event: "activated".to_string(), 2405 device_id: Some("_disk0".to_string()), 2406 }, 2407 &MetaEvent { 2408 event: "reset".to_string(), 2409 device_id: Some("_disk0".to_string()), 2410 }, 2411 ]; 2412 assert!(check_sequential_events( 2413 &expected_sequential_events, 2414 &event_path 2415 )); 2416 2417 // It's been observed on the Bionic image that udev and snapd 2418 // services can cause some delay 
in the VM's shutdown. Disabling 2419 // them improves the reliability of this test. 2420 let _ = guest.ssh_command("sudo systemctl disable udev"); 2421 let _ = guest.ssh_command("sudo systemctl stop udev"); 2422 let _ = guest.ssh_command("sudo systemctl disable snapd"); 2423 let _ = guest.ssh_command("sudo systemctl stop snapd"); 2424 2425 guest.ssh_command("sudo poweroff").unwrap(); 2426 thread::sleep(std::time::Duration::new(20, 0)); 2427 let latest_events = [ 2428 &MetaEvent { 2429 event: "shutdown".to_string(), 2430 device_id: None, 2431 }, 2432 &MetaEvent { 2433 event: "deleted".to_string(), 2434 device_id: None, 2435 }, 2436 &MetaEvent { 2437 event: "shutdown".to_string(), 2438 device_id: None, 2439 }, 2440 ]; 2441 assert!(check_latest_events_exact(&latest_events, &event_path)); 2442 }); 2443 2444 kill_child(&mut child); 2445 let output = child.wait_with_output().unwrap(); 2446 2447 handle_child_output(r, &output); 2448 } 2449 2450 #[test] 2451 fn test_multi_cpu() { 2452 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 2453 let jammy = UbuntuDiskConfig::new(jammy_image); 2454 let guest = Guest::new(Box::new(jammy)); 2455 2456 let mut cmd = GuestCommand::new(&guest); 2457 cmd.args(["--cpus", "boot=2,max=4"]) 2458 .args(["--memory", "size=512M"]) 2459 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2460 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2461 .capture_output() 2462 .default_disks() 2463 .default_net(); 2464 2465 let mut child = cmd.spawn().unwrap(); 2466 2467 let r = std::panic::catch_unwind(|| { 2468 guest.wait_vm_boot(Some(120)).unwrap(); 2469 2470 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 2471 2472 assert_eq!( 2473 guest 2474 .ssh_command( 2475 r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""# 2476 ) 2477 .unwrap() 2478 .trim(), 2479 "smp: Brought up 1 node, 2 CPUs" 2480 ); 2481 }); 2482 2483 kill_child(&mut child); 2484 let output = child.wait_with_output().unwrap(); 2485 2486 handle_child_output(r, &output); 2487 } 2488 2489 #[test] 2490 fn test_cpu_topology_421() { 2491 test_cpu_topology(4, 2, 1, false); 2492 } 2493 2494 #[test] 2495 fn test_cpu_topology_142() { 2496 test_cpu_topology(1, 4, 2, false); 2497 } 2498 2499 #[test] 2500 fn test_cpu_topology_262() { 2501 test_cpu_topology(2, 6, 2, false); 2502 } 2503 2504 #[test] 2505 #[cfg(target_arch = "x86_64")] 2506 #[cfg(not(feature = "mshv"))] 2507 fn test_cpu_physical_bits() { 2508 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2509 let guest = Guest::new(Box::new(focal)); 2510 let max_phys_bits: u8 = 36; 2511 let mut child = GuestCommand::new(&guest) 2512 .args(["--cpus", &format!("max_phys_bits={max_phys_bits}")]) 2513 .args(["--memory", "size=512M"]) 2514 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2515 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2516 .default_disks() 2517 .default_net() 2518 .capture_output() 2519 .spawn() 2520 .unwrap(); 2521 2522 let r = std::panic::catch_unwind(|| { 2523 guest.wait_vm_boot(None).unwrap(); 2524 2525 assert!( 2526 guest 2527 .ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"") 2528 .unwrap() 2529 .trim() 2530 .parse::<u8>() 2531 .unwrap_or(max_phys_bits + 1) <= max_phys_bits, 2532 ); 2533 }); 2534 2535 kill_child(&mut child); 2536 let output = child.wait_with_output().unwrap(); 2537 2538 handle_child_output(r, &output); 2539 } 2540 2541 #[test] 2542 fn test_cpu_affinity() { 2543 let focal = 
UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2544 let guest = Guest::new(Box::new(focal)); 2545 2546 // We need the host to have at least 4 CPUs if we want to be able 2547 // to run this test. 2548 let host_cpus_count = exec_host_command_output("nproc"); 2549 assert!( 2550 String::from_utf8_lossy(&host_cpus_count.stdout) 2551 .trim() 2552 .parse::<u16>() 2553 .unwrap_or(0) 2554 >= 4 2555 ); 2556 2557 let mut child = GuestCommand::new(&guest) 2558 .args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"]) 2559 .args(["--memory", "size=512M"]) 2560 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2561 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2562 .default_disks() 2563 .default_net() 2564 .capture_output() 2565 .spawn() 2566 .unwrap(); 2567 2568 let r = std::panic::catch_unwind(|| { 2569 guest.wait_vm_boot(None).unwrap(); 2570 let pid = child.id(); 2571 let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2572 assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2"); 2573 let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2574 assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3"); 2575 }); 2576 2577 kill_child(&mut child); 2578 let output = child.wait_with_output().unwrap(); 2579 handle_child_output(r, &output); 2580 } 2581 2582 #[test] 2583 fn test_virtio_queue_affinity() { 2584 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2585 let guest = Guest::new(Box::new(focal)); 2586 2587 // We need the host to have at least 4 CPUs if we want to be able 2588 // to run this test. 
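        // Note on the syntax exercised below: the --disk option uses
        // `queue_affinity=[0@[0,2],1@[1,3],2@[1],3@[3]]`, which reads as
        // "queue <index> @ [host CPU list]". The assertions further down run
        // `taskset -pc` against the per-queue worker threads (named
        // `disk1_q0`..`disk1_q3`) to confirm that the pinning took effect.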
2589 let host_cpus_count = exec_host_command_output("nproc"); 2590 assert!( 2591 String::from_utf8_lossy(&host_cpus_count.stdout) 2592 .trim() 2593 .parse::<u16>() 2594 .unwrap_or(0) 2595 >= 4 2596 ); 2597 2598 let mut child = GuestCommand::new(&guest) 2599 .args(["--cpus", "boot=4"]) 2600 .args(["--memory", "size=512M"]) 2601 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2602 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2603 .args([ 2604 "--disk", 2605 format!( 2606 "path={}", 2607 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2608 ) 2609 .as_str(), 2610 format!( 2611 "path={},num_queues=4,queue_affinity=[0@[0,2],1@[1,3],2@[1],3@[3]]", 2612 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2613 ) 2614 .as_str(), 2615 ]) 2616 .default_net() 2617 .capture_output() 2618 .spawn() 2619 .unwrap(); 2620 2621 let r = std::panic::catch_unwind(|| { 2622 guest.wait_vm_boot(None).unwrap(); 2623 let pid = child.id(); 2624 let taskset_q0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2625 assert_eq!(String::from_utf8_lossy(&taskset_q0.stdout).trim(), "0,2"); 2626 let taskset_q1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2627 assert_eq!(String::from_utf8_lossy(&taskset_q1.stdout).trim(), "1,3"); 2628 let taskset_q2 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q2 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2629 assert_eq!(String::from_utf8_lossy(&taskset_q2.stdout).trim(), "1"); 2630 let taskset_q3 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q3 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str()); 2631 assert_eq!(String::from_utf8_lossy(&taskset_q3.stdout).trim(), "3"); 2632 }); 2633 2634 kill_child(&mut child); 2635 let output = child.wait_with_output().unwrap(); 2636 handle_child_output(r, &output); 2637 } 2638 2639 #[test] 2640 #[cfg(not(feature = "mshv"))] 2641 fn test_large_vm() { 2642 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2643 let guest = Guest::new(Box::new(focal)); 2644 let mut cmd = GuestCommand::new(&guest); 2645 cmd.args(["--cpus", "boot=48"]) 2646 .args(["--memory", "size=5120M"]) 2647 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2648 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2649 .args(["--serial", "tty"]) 2650 .args(["--console", "off"]) 2651 .capture_output() 2652 .default_disks() 2653 .default_net(); 2654 2655 let mut child = cmd.spawn().unwrap(); 2656 2657 guest.wait_vm_boot(None).unwrap(); 2658 2659 let r = std::panic::catch_unwind(|| { 2660 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48); 2661 assert_eq!( 2662 guest 2663 .ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"") 2664 .unwrap() 2665 .trim(), 2666 "0-47" 2667 ); 2668 2669 assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000); 2670 }); 2671 2672 kill_child(&mut child); 2673 let output = child.wait_with_output().unwrap(); 2674 2675 handle_child_output(r, &output); 2676 } 2677 2678 #[test] 2679 #[cfg(not(feature = "mshv"))] 2680 fn test_huge_memory() { 2681 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2682 let guest = Guest::new(Box::new(focal)); 2683 let mut cmd = GuestCommand::new(&guest); 2684 cmd.args(["--cpus", "boot=1"]) 2685 .args(["--memory", "size=128G"]) 2686 .args(["--kernel", 
direct_kernel_boot_path().to_str().unwrap()]) 2687 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2688 .capture_output() 2689 .default_disks() 2690 .default_net(); 2691 2692 let mut child = cmd.spawn().unwrap(); 2693 2694 guest.wait_vm_boot(Some(120)).unwrap(); 2695 2696 let r = std::panic::catch_unwind(|| { 2697 assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000); 2698 }); 2699 2700 kill_child(&mut child); 2701 let output = child.wait_with_output().unwrap(); 2702 2703 handle_child_output(r, &output); 2704 } 2705 2706 #[test] 2707 fn test_power_button() { 2708 _test_power_button(false); 2709 } 2710 2711 #[test] 2712 #[cfg(not(feature = "mshv"))] 2713 fn test_user_defined_memory_regions() { 2714 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2715 let guest = Guest::new(Box::new(focal)); 2716 let api_socket = temp_api_path(&guest.tmp_dir); 2717 2718 let kernel_path = direct_kernel_boot_path(); 2719 2720 let mut child = GuestCommand::new(&guest) 2721 .args(["--cpus", "boot=1"]) 2722 .args(["--memory", "size=0,hotplug_method=virtio-mem"]) 2723 .args([ 2724 "--memory-zone", 2725 "id=mem0,size=1G,hotplug_size=2G", 2726 "id=mem1,size=1G,shared=on", 2727 "id=mem2,size=1G,host_numa_node=0,hotplug_size=2G", 2728 ]) 2729 .args(["--kernel", kernel_path.to_str().unwrap()]) 2730 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2731 .args(["--api-socket", &api_socket]) 2732 .capture_output() 2733 .default_disks() 2734 .default_net() 2735 .spawn() 2736 .unwrap(); 2737 2738 let r = std::panic::catch_unwind(|| { 2739 guest.wait_vm_boot(None).unwrap(); 2740 2741 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 2742 2743 guest.enable_memory_hotplug(); 2744 2745 resize_zone_command(&api_socket, "mem0", "3G"); 2746 thread::sleep(std::time::Duration::new(5, 0)); 2747 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2748 resize_zone_command(&api_socket, "mem2", "3G"); 2749 thread::sleep(std::time::Duration::new(5, 0)); 2750 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 2751 resize_zone_command(&api_socket, "mem0", "2G"); 2752 thread::sleep(std::time::Duration::new(5, 0)); 2753 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 2754 resize_zone_command(&api_socket, "mem2", "2G"); 2755 thread::sleep(std::time::Duration::new(5, 0)); 2756 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2757 2758 guest.reboot_linux(0, None); 2759 2760 // Check the amount of RAM after reboot 2761 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000); 2762 assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000); 2763 2764 // Check if we can still resize down to the initial 'boot'size 2765 resize_zone_command(&api_socket, "mem0", "1G"); 2766 thread::sleep(std::time::Duration::new(5, 0)); 2767 assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000); 2768 resize_zone_command(&api_socket, "mem2", "1G"); 2769 thread::sleep(std::time::Duration::new(5, 0)); 2770 assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000); 2771 }); 2772 2773 kill_child(&mut child); 2774 let output = child.wait_with_output().unwrap(); 2775 2776 handle_child_output(r, &output); 2777 } 2778 2779 #[test] 2780 #[cfg(not(feature = "mshv"))] 2781 fn test_guest_numa_nodes() { 2782 _test_guest_numa_nodes(false); 2783 } 2784 2785 #[test] 2786 #[cfg(target_arch = "x86_64")] 2787 fn test_iommu_segments() { 2788 let focal_image = FOCAL_IMAGE_NAME.to_string(); 2789 let focal = UbuntuDiskConfig::new(focal_image); 
2790 let guest = Guest::new(Box::new(focal)); 2791 2792 // Prepare another disk file for the virtio-disk device 2793 let test_disk_path = String::from( 2794 guest 2795 .tmp_dir 2796 .as_path() 2797 .join("test-disk.raw") 2798 .to_str() 2799 .unwrap(), 2800 ); 2801 assert!( 2802 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2803 ); 2804 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2805 2806 let api_socket = temp_api_path(&guest.tmp_dir); 2807 let mut cmd = GuestCommand::new(&guest); 2808 2809 cmd.args(["--cpus", "boot=1"]) 2810 .args(["--api-socket", &api_socket]) 2811 .args(["--memory", "size=512M"]) 2812 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2813 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2814 .args([ 2815 "--platform", 2816 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"), 2817 ]) 2818 .default_disks() 2819 .capture_output() 2820 .default_net(); 2821 2822 let mut child = cmd.spawn().unwrap(); 2823 2824 guest.wait_vm_boot(None).unwrap(); 2825 2826 let r = std::panic::catch_unwind(|| { 2827 let (cmd_success, cmd_output) = remote_command_w_output( 2828 &api_socket, 2829 "add-disk", 2830 Some( 2831 format!( 2832 "path={},id=test0,pci_segment=1,iommu=on", 2833 test_disk_path.as_str() 2834 ) 2835 .as_str(), 2836 ), 2837 ); 2838 assert!(cmd_success); 2839 assert!(String::from_utf8_lossy(&cmd_output) 2840 .contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}")); 2841 2842 // Check IOMMU setup 2843 assert!(guest 2844 .does_device_vendor_pair_match("0x1057", "0x1af4") 2845 .unwrap_or_default()); 2846 assert_eq!( 2847 guest 2848 .ssh_command("ls /sys/kernel/iommu_groups/0/devices") 2849 .unwrap() 2850 .trim(), 2851 "0001:00:01.0" 2852 ); 2853 }); 2854 2855 kill_child(&mut child); 2856 let output = child.wait_with_output().unwrap(); 2857 2858 handle_child_output(r, &output); 2859 } 2860 2861 #[test] 2862 fn test_pci_msi() { 2863 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2864 let guest = Guest::new(Box::new(focal)); 2865 let mut cmd = GuestCommand::new(&guest); 2866 cmd.args(["--cpus", "boot=1"]) 2867 .args(["--memory", "size=512M"]) 2868 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2869 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2870 .capture_output() 2871 .default_disks() 2872 .default_net(); 2873 2874 let mut child = cmd.spawn().unwrap(); 2875 2876 guest.wait_vm_boot(None).unwrap(); 2877 2878 #[cfg(target_arch = "x86_64")] 2879 let grep_cmd = "grep -c PCI-MSI /proc/interrupts"; 2880 #[cfg(target_arch = "aarch64")] 2881 let grep_cmd = "grep -c ITS-PCI-MSIX /proc/interrupts"; 2882 2883 let r = std::panic::catch_unwind(|| { 2884 assert_eq!( 2885 guest 2886 .ssh_command(grep_cmd) 2887 .unwrap() 2888 .trim() 2889 .parse::<u32>() 2890 .unwrap_or_default(), 2891 12 2892 ); 2893 }); 2894 2895 kill_child(&mut child); 2896 let output = child.wait_with_output().unwrap(); 2897 2898 handle_child_output(r, &output); 2899 } 2900 2901 #[test] 2902 fn test_virtio_net_ctrl_queue() { 2903 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2904 let guest = Guest::new(Box::new(focal)); 2905 let mut cmd = GuestCommand::new(&guest); 2906 cmd.args(["--cpus", "boot=1"]) 2907 .args(["--memory", "size=512M"]) 2908 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2909 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2910 .args(["--net", guest.default_net_string_w_mtu(3000).as_str()]) 2911 
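            // The guest-visible MTU comes from the --net string built by
            // default_net_string_w_mtu(3000) above. Later on, the ethtool
            // offload toggle is the part that (as the test name suggests)
            // should exercise the virtio-net control queue, while the sysfs
            // read simply checks that the 3000-byte MTU was advertised.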
.capture_output() 2912 .default_disks(); 2913 2914 let mut child = cmd.spawn().unwrap(); 2915 2916 guest.wait_vm_boot(None).unwrap(); 2917 2918 #[cfg(target_arch = "aarch64")] 2919 let iface = "enp0s4"; 2920 #[cfg(target_arch = "x86_64")] 2921 let iface = "ens4"; 2922 2923 let r = std::panic::catch_unwind(|| { 2924 assert_eq!( 2925 guest 2926 .ssh_command( 2927 format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str() 2928 ) 2929 .unwrap() 2930 .trim(), 2931 "success" 2932 ); 2933 assert_eq!( 2934 guest 2935 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str()) 2936 .unwrap() 2937 .trim(), 2938 "3000" 2939 ); 2940 }); 2941 2942 kill_child(&mut child); 2943 let output = child.wait_with_output().unwrap(); 2944 2945 handle_child_output(r, &output); 2946 } 2947 2948 #[test] 2949 fn test_pci_multiple_segments() { 2950 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 2951 let guest = Guest::new(Box::new(focal)); 2952 2953 // Prepare another disk file for the virtio-disk device 2954 let test_disk_path = String::from( 2955 guest 2956 .tmp_dir 2957 .as_path() 2958 .join("test-disk.raw") 2959 .to_str() 2960 .unwrap(), 2961 ); 2962 assert!( 2963 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 2964 ); 2965 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 2966 2967 let mut cmd = GuestCommand::new(&guest); 2968 cmd.args(["--cpus", "boot=1"]) 2969 .args(["--memory", "size=512M"]) 2970 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 2971 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 2972 .args([ 2973 "--platform", 2974 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 2975 ]) 2976 .args([ 2977 "--disk", 2978 format!( 2979 "path={}", 2980 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 2981 ) 2982 .as_str(), 2983 format!( 2984 "path={}", 2985 guest.disk_config.disk(DiskType::CloudInit).unwrap() 2986 ) 2987 .as_str(), 2988 format!("path={test_disk_path},pci_segment=15").as_str(), 2989 ]) 2990 .capture_output() 2991 .default_net(); 2992 2993 let mut child = cmd.spawn().unwrap(); 2994 2995 guest.wait_vm_boot(None).unwrap(); 2996 2997 let grep_cmd = "lspci | grep \"Host bridge\" | wc -l"; 2998 2999 let r = std::panic::catch_unwind(|| { 3000 // There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest. 3001 assert_eq!( 3002 guest 3003 .ssh_command(grep_cmd) 3004 .unwrap() 3005 .trim() 3006 .parse::<u16>() 3007 .unwrap_or_default(), 3008 MAX_NUM_PCI_SEGMENTS 3009 ); 3010 3011 // Check both if /dev/vdc exists and if the block size is 4M. 3012 assert_eq!( 3013 guest 3014 .ssh_command("lsblk | grep vdc | grep -c 4M") 3015 .unwrap() 3016 .trim() 3017 .parse::<u32>() 3018 .unwrap_or_default(), 3019 1 3020 ); 3021 3022 // Mount the device. 3023 guest.ssh_command("mkdir mount_image").unwrap(); 3024 guest 3025 .ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/") 3026 .unwrap(); 3027 // Grant all users with write permission. 3028 guest.ssh_command("sudo chmod a+w mount_image/").unwrap(); 3029 3030 // Write something to the device. 3031 guest 3032 .ssh_command("sudo echo \"bar\" >> mount_image/foo") 3033 .unwrap(); 3034 3035 // Check the content of the block device. The file "foo" should 3036 // contain "bar". 
3037 assert_eq!( 3038 guest 3039 .ssh_command("sudo cat mount_image/foo") 3040 .unwrap() 3041 .trim(), 3042 "bar" 3043 ); 3044 }); 3045 3046 kill_child(&mut child); 3047 let output = child.wait_with_output().unwrap(); 3048 3049 handle_child_output(r, &output); 3050 } 3051 3052 #[test] 3053 fn test_pci_multiple_segments_numa_node() { 3054 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3055 let guest = Guest::new(Box::new(focal)); 3056 let api_socket = temp_api_path(&guest.tmp_dir); 3057 #[cfg(target_arch = "x86_64")] 3058 let kernel_path = direct_kernel_boot_path(); 3059 #[cfg(target_arch = "aarch64")] 3060 let kernel_path = edk2_path(); 3061 3062 // Prepare another disk file for the virtio-disk device 3063 let test_disk_path = String::from( 3064 guest 3065 .tmp_dir 3066 .as_path() 3067 .join("test-disk.raw") 3068 .to_str() 3069 .unwrap(), 3070 ); 3071 assert!( 3072 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success() 3073 ); 3074 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success()); 3075 const TEST_DISK_NODE: u16 = 1; 3076 3077 let mut child = GuestCommand::new(&guest) 3078 .args(["--platform", "num_pci_segments=2"]) 3079 .args(["--cpus", "boot=2"]) 3080 .args(["--memory", "size=0"]) 3081 .args(["--memory-zone", "id=mem0,size=256M", "id=mem1,size=256M"]) 3082 .args([ 3083 "--numa", 3084 "guest_numa_id=0,cpus=[0],distances=[1@20],memory_zones=mem0,pci_segments=[0]", 3085 "guest_numa_id=1,cpus=[1],distances=[0@20],memory_zones=mem1,pci_segments=[1]", 3086 ]) 3087 .args(["--kernel", kernel_path.to_str().unwrap()]) 3088 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3089 .args(["--api-socket", &api_socket]) 3090 .capture_output() 3091 .args([ 3092 "--disk", 3093 format!( 3094 "path={}", 3095 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3096 ) 3097 .as_str(), 3098 format!( 3099 "path={}", 3100 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3101 ) 3102 .as_str(), 3103 format!("path={test_disk_path},pci_segment={TEST_DISK_NODE}").as_str(), 3104 ]) 3105 .default_net() 3106 .spawn() 3107 .unwrap(); 3108 3109 let cmd = "cat /sys/block/vdc/device/../numa_node"; 3110 3111 let r = std::panic::catch_unwind(|| { 3112 guest.wait_vm_boot(None).unwrap(); 3113 3114 assert_eq!( 3115 guest 3116 .ssh_command(cmd) 3117 .unwrap() 3118 .trim() 3119 .parse::<u16>() 3120 .unwrap_or_default(), 3121 TEST_DISK_NODE 3122 ); 3123 }); 3124 3125 kill_child(&mut child); 3126 let output = child.wait_with_output().unwrap(); 3127 3128 handle_child_output(r, &output); 3129 } 3130 3131 #[test] 3132 fn test_direct_kernel_boot() { 3133 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3134 let guest = Guest::new(Box::new(focal)); 3135 3136 let kernel_path = direct_kernel_boot_path(); 3137 3138 let mut child = GuestCommand::new(&guest) 3139 .args(["--cpus", "boot=1"]) 3140 .args(["--memory", "size=512M"]) 3141 .args(["--kernel", kernel_path.to_str().unwrap()]) 3142 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3143 .default_disks() 3144 .default_net() 3145 .capture_output() 3146 .spawn() 3147 .unwrap(); 3148 3149 let r = std::panic::catch_unwind(|| { 3150 guest.wait_vm_boot(None).unwrap(); 3151 3152 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3153 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3154 3155 let grep_cmd = if cfg!(target_arch = "x86_64") { 3156 "grep -c PCI-MSI /proc/interrupts" 3157 } else { 3158 "grep -c ITS-PCI-MSIX /proc/interrupts" 3159 }; 3160 
assert_eq!( 3161 guest 3162 .ssh_command(grep_cmd) 3163 .unwrap() 3164 .trim() 3165 .parse::<u32>() 3166 .unwrap_or_default(), 3167 12 3168 ); 3169 }); 3170 3171 kill_child(&mut child); 3172 let output = child.wait_with_output().unwrap(); 3173 3174 handle_child_output(r, &output); 3175 } 3176 3177 #[test] 3178 #[cfg(target_arch = "x86_64")] 3179 fn test_direct_kernel_boot_bzimage() { 3180 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3181 let guest = Guest::new(Box::new(focal)); 3182 3183 let mut kernel_path = direct_kernel_boot_path(); 3184 // Replace the default kernel with the bzImage. 3185 kernel_path.pop(); 3186 kernel_path.push("bzImage"); 3187 3188 let mut child = GuestCommand::new(&guest) 3189 .args(["--cpus", "boot=1"]) 3190 .args(["--memory", "size=512M"]) 3191 .args(["--kernel", kernel_path.to_str().unwrap()]) 3192 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3193 .default_disks() 3194 .default_net() 3195 .capture_output() 3196 .spawn() 3197 .unwrap(); 3198 3199 let r = std::panic::catch_unwind(|| { 3200 guest.wait_vm_boot(None).unwrap(); 3201 3202 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3203 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3204 3205 let grep_cmd = if cfg!(target_arch = "x86_64") { 3206 "grep -c PCI-MSI /proc/interrupts" 3207 } else { 3208 "grep -c ITS-PCI-MSIX /proc/interrupts" 3209 }; 3210 assert_eq!( 3211 guest 3212 .ssh_command(grep_cmd) 3213 .unwrap() 3214 .trim() 3215 .parse::<u32>() 3216 .unwrap_or_default(), 3217 12 3218 ); 3219 }); 3220 3221 kill_child(&mut child); 3222 let output = child.wait_with_output().unwrap(); 3223 3224 handle_child_output(r, &output); 3225 } 3226 3227 fn _test_virtio_block(image_name: &str, disable_io_uring: bool, disable_aio: bool) { 3228 let focal = UbuntuDiskConfig::new(image_name.to_string()); 3229 let guest = Guest::new(Box::new(focal)); 3230 3231 let mut workload_path = dirs::home_dir().unwrap(); 3232 workload_path.push("workloads"); 3233 3234 let mut blk_file_path = workload_path; 3235 blk_file_path.push("blk.img"); 3236 3237 let kernel_path = direct_kernel_boot_path(); 3238 3239 let mut cloud_child = GuestCommand::new(&guest) 3240 .args(["--cpus", "boot=4"]) 3241 .args(["--memory", "size=512M,shared=on"]) 3242 .args(["--kernel", kernel_path.to_str().unwrap()]) 3243 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3244 .args([ 3245 "--disk", 3246 format!( 3247 "path={}", 3248 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3249 ) 3250 .as_str(), 3251 format!( 3252 "path={}", 3253 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3254 ) 3255 .as_str(), 3256 format!( 3257 "path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={},_disable_aio={}", 3258 blk_file_path.to_str().unwrap(), 3259 disable_io_uring, 3260 disable_aio, 3261 ) 3262 .as_str(), 3263 ]) 3264 .default_net() 3265 .capture_output() 3266 .spawn() 3267 .unwrap(); 3268 3269 let r = std::panic::catch_unwind(|| { 3270 guest.wait_vm_boot(None).unwrap(); 3271 3272 // Check both if /dev/vdc exists and if the block size is 16M. 3273 assert_eq!( 3274 guest 3275 .ssh_command("lsblk | grep vdc | grep -c 16M") 3276 .unwrap() 3277 .trim() 3278 .parse::<u32>() 3279 .unwrap_or_default(), 3280 1 3281 ); 3282 3283 // Check both if /dev/vdc exists and if this block is RO. 3284 assert_eq!( 3285 guest 3286 .ssh_command("lsblk | grep vdc | awk '{print $5}'") 3287 .unwrap() 3288 .trim() 3289 .parse::<u32>() 3290 .unwrap_or_default(), 3291 1 3292 ); 3293 3294 // Check if the number of queues is 4. 
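            // (Each directory under /sys/block/vdc/mq/ should correspond to
            // one hardware queue seen by the guest block layer, so with
            // `num_queues=4` on the --disk argument we expect four entries.)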
3295 assert_eq!( 3296 guest 3297 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l") 3298 .unwrap() 3299 .trim() 3300 .parse::<u32>() 3301 .unwrap_or_default(), 3302 4 3303 ); 3304 }); 3305 3306 let _ = cloud_child.kill(); 3307 let output = cloud_child.wait_with_output().unwrap(); 3308 3309 handle_child_output(r, &output); 3310 } 3311 3312 #[test] 3313 fn test_virtio_block_io_uring() { 3314 _test_virtio_block(FOCAL_IMAGE_NAME, false, true) 3315 } 3316 3317 #[test] 3318 fn test_virtio_block_aio() { 3319 _test_virtio_block(FOCAL_IMAGE_NAME, true, false) 3320 } 3321 3322 #[test] 3323 fn test_virtio_block_sync() { 3324 _test_virtio_block(FOCAL_IMAGE_NAME, true, true) 3325 } 3326 3327 #[test] 3328 fn test_virtio_block_qcow2() { 3329 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false, false) 3330 } 3331 3332 #[test] 3333 fn test_virtio_block_qcow2_backing_file() { 3334 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE, false, false) 3335 } 3336 3337 #[test] 3338 fn test_virtio_block_vhd() { 3339 let mut workload_path = dirs::home_dir().unwrap(); 3340 workload_path.push("workloads"); 3341 3342 let mut raw_file_path = workload_path.clone(); 3343 let mut vhd_file_path = workload_path; 3344 raw_file_path.push(FOCAL_IMAGE_NAME); 3345 vhd_file_path.push(FOCAL_IMAGE_NAME_VHD); 3346 3347 // Generate VHD file from RAW file 3348 std::process::Command::new("qemu-img") 3349 .arg("convert") 3350 .arg("-p") 3351 .args(["-f", "raw"]) 3352 .args(["-O", "vpc"]) 3353 .args(["-o", "subformat=fixed"]) 3354 .arg(raw_file_path.to_str().unwrap()) 3355 .arg(vhd_file_path.to_str().unwrap()) 3356 .output() 3357 .expect("Expect generating VHD image from RAW image"); 3358 3359 _test_virtio_block(FOCAL_IMAGE_NAME_VHD, false, false) 3360 } 3361 3362 #[test] 3363 fn test_virtio_block_vhdx() { 3364 let mut workload_path = dirs::home_dir().unwrap(); 3365 workload_path.push("workloads"); 3366 3367 let mut raw_file_path = workload_path.clone(); 3368 let mut vhdx_file_path = workload_path; 3369 raw_file_path.push(FOCAL_IMAGE_NAME); 3370 vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX); 3371 3372 // Generate dynamic VHDX file from RAW file 3373 std::process::Command::new("qemu-img") 3374 .arg("convert") 3375 .arg("-p") 3376 .args(["-f", "raw"]) 3377 .args(["-O", "vhdx"]) 3378 .arg(raw_file_path.to_str().unwrap()) 3379 .arg(vhdx_file_path.to_str().unwrap()) 3380 .output() 3381 .expect("Expect generating dynamic VHDx image from RAW image"); 3382 3383 _test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false, false) 3384 } 3385 3386 #[test] 3387 fn test_virtio_block_dynamic_vhdx_expand() { 3388 const VIRTUAL_DISK_SIZE: u64 = 100 << 20; 3389 const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20; 3390 const FULL_VHDX_FILE_SIZE: u64 = 112 << 20; 3391 const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx"; 3392 3393 let mut workload_path = dirs::home_dir().unwrap(); 3394 workload_path.push("workloads"); 3395 3396 let mut vhdx_file_path = workload_path; 3397 vhdx_file_path.push(DYNAMIC_VHDX_NAME); 3398 let vhdx_path = vhdx_file_path.to_str().unwrap(); 3399 3400 // Generate a 100 MiB dynamic VHDX file 3401 std::process::Command::new("qemu-img") 3402 .arg("create") 3403 .args(["-f", "vhdx"]) 3404 .arg(vhdx_path) 3405 .arg(VIRTUAL_DISK_SIZE.to_string()) 3406 .output() 3407 .expect("Expect generating dynamic VHDx image from RAW image"); 3408 3409 // Check if the size matches with empty VHDx file size 3410 assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE); 3411 3412 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3413 let guest = 
Guest::new(Box::new(focal)); 3414 let kernel_path = direct_kernel_boot_path(); 3415 3416 let mut cloud_child = GuestCommand::new(&guest) 3417 .args(["--cpus", "boot=1"]) 3418 .args(["--memory", "size=512M"]) 3419 .args(["--kernel", kernel_path.to_str().unwrap()]) 3420 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3421 .args([ 3422 "--disk", 3423 format!( 3424 "path={}", 3425 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 3426 ) 3427 .as_str(), 3428 format!( 3429 "path={}", 3430 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3431 ) 3432 .as_str(), 3433 format!("path={vhdx_path}").as_str(), 3434 ]) 3435 .default_net() 3436 .capture_output() 3437 .spawn() 3438 .unwrap(); 3439 3440 let r = std::panic::catch_unwind(|| { 3441 guest.wait_vm_boot(None).unwrap(); 3442 3443 // Check both if /dev/vdc exists and if the block size is 100 MiB. 3444 assert_eq!( 3445 guest 3446 .ssh_command("lsblk | grep vdc | grep -c 100M") 3447 .unwrap() 3448 .trim() 3449 .parse::<u32>() 3450 .unwrap_or_default(), 3451 1 3452 ); 3453 3454 // Write 100 MB of data to the VHDx disk 3455 guest 3456 .ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100") 3457 .unwrap(); 3458 }); 3459 3460 // Check if the size matches with expected expanded VHDx file size 3461 assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE); 3462 3463 kill_child(&mut cloud_child); 3464 let output = cloud_child.wait_with_output().unwrap(); 3465 3466 handle_child_output(r, &output); 3467 } 3468 3469 fn vhdx_image_size(disk_name: &str) -> u64 { 3470 std::fs::File::open(disk_name) 3471 .unwrap() 3472 .seek(SeekFrom::End(0)) 3473 .unwrap() 3474 } 3475 3476 #[test] 3477 fn test_virtio_block_direct_and_firmware() { 3478 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3479 let guest = Guest::new(Box::new(focal)); 3480 3481 // The OS disk must be copied to a location that is not backed by 3482 // tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails 3483 // with EINVAL because tmpfs doesn't support this flag. 
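        // (If in doubt, the backing filesystem type can be checked on the host
        // with something like `stat -f -c %T ~/workloads`; a result of `tmpfs`
        // would make the `direct=on` disk below fail to open.)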
3484 let mut workloads_path = dirs::home_dir().unwrap(); 3485 workloads_path.push("workloads"); 3486 let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap(); 3487 let mut os_path = os_dir.as_path().to_path_buf(); 3488 os_path.push("osdisk.img"); 3489 rate_limited_copy( 3490 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3491 os_path.as_path(), 3492 ) 3493 .expect("copying of OS disk failed"); 3494 3495 let mut child = GuestCommand::new(&guest) 3496 .args(["--cpus", "boot=1"]) 3497 .args(["--memory", "size=512M"]) 3498 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 3499 .args([ 3500 "--disk", 3501 format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(), 3502 format!( 3503 "path={}", 3504 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3505 ) 3506 .as_str(), 3507 ]) 3508 .default_net() 3509 .capture_output() 3510 .spawn() 3511 .unwrap(); 3512 3513 let r = std::panic::catch_unwind(|| { 3514 guest.wait_vm_boot(Some(120)).unwrap(); 3515 }); 3516 3517 kill_child(&mut child); 3518 let output = child.wait_with_output().unwrap(); 3519 3520 handle_child_output(r, &output); 3521 } 3522 3523 #[test] 3524 fn test_vhost_user_net_default() { 3525 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false) 3526 } 3527 3528 #[test] 3529 fn test_vhost_user_net_named_tap() { 3530 test_vhost_user_net( 3531 Some("mytap0"), 3532 2, 3533 &prepare_vhost_user_net_daemon, 3534 false, 3535 false, 3536 ) 3537 } 3538 3539 #[test] 3540 fn test_vhost_user_net_existing_tap() { 3541 test_vhost_user_net( 3542 Some("vunet-tap0"), 3543 2, 3544 &prepare_vhost_user_net_daemon, 3545 false, 3546 false, 3547 ) 3548 } 3549 3550 #[test] 3551 fn test_vhost_user_net_multiple_queues() { 3552 test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false) 3553 } 3554 3555 #[test] 3556 fn test_vhost_user_net_tap_multiple_queues() { 3557 test_vhost_user_net( 3558 Some("vunet-tap1"), 3559 4, 3560 &prepare_vhost_user_net_daemon, 3561 false, 3562 false, 3563 ) 3564 } 3565 3566 #[test] 3567 fn test_vhost_user_net_host_mac() { 3568 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false) 3569 } 3570 3571 #[test] 3572 fn test_vhost_user_net_client_mode() { 3573 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true) 3574 } 3575 3576 #[test] 3577 #[cfg(not(target_arch = "aarch64"))] 3578 fn test_vhost_user_blk_default() { 3579 test_vhost_user_blk(2, false, false, Some(&prepare_vubd)) 3580 } 3581 3582 #[test] 3583 #[cfg(not(target_arch = "aarch64"))] 3584 fn test_vhost_user_blk_readonly() { 3585 test_vhost_user_blk(1, true, false, Some(&prepare_vubd)) 3586 } 3587 3588 #[test] 3589 #[cfg(not(target_arch = "aarch64"))] 3590 fn test_vhost_user_blk_direct() { 3591 test_vhost_user_blk(1, false, true, Some(&prepare_vubd)) 3592 } 3593 3594 #[test] 3595 fn test_boot_from_vhost_user_blk_default() { 3596 test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd)) 3597 } 3598 3599 #[test] 3600 #[cfg(target_arch = "x86_64")] 3601 fn test_split_irqchip() { 3602 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3603 let guest = Guest::new(Box::new(focal)); 3604 3605 let mut child = GuestCommand::new(&guest) 3606 .args(["--cpus", "boot=1"]) 3607 .args(["--memory", "size=512M"]) 3608 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3609 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3610 .default_disks() 3611 .default_net() 3612 .capture_output() 3613 .spawn() 3614 .unwrap(); 3615 3616 let r = 
std::panic::catch_unwind(|| { 3617 guest.wait_vm_boot(None).unwrap(); 3618 3619 assert_eq!( 3620 guest 3621 .ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true") 3622 .unwrap() 3623 .trim() 3624 .parse::<u32>() 3625 .unwrap_or(1), 3626 0 3627 ); 3628 assert_eq!( 3629 guest 3630 .ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true") 3631 .unwrap() 3632 .trim() 3633 .parse::<u32>() 3634 .unwrap_or(1), 3635 0 3636 ); 3637 }); 3638 3639 kill_child(&mut child); 3640 let output = child.wait_with_output().unwrap(); 3641 3642 handle_child_output(r, &output); 3643 } 3644 3645 #[test] 3646 #[cfg(target_arch = "x86_64")] 3647 fn test_dmi_serial_number() { 3648 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3649 let guest = Guest::new(Box::new(focal)); 3650 3651 let mut child = GuestCommand::new(&guest) 3652 .args(["--cpus", "boot=1"]) 3653 .args(["--memory", "size=512M"]) 3654 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3655 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3656 .args(["--platform", "serial_number=a=b;c=d"]) 3657 .default_disks() 3658 .default_net() 3659 .capture_output() 3660 .spawn() 3661 .unwrap(); 3662 3663 let r = std::panic::catch_unwind(|| { 3664 guest.wait_vm_boot(None).unwrap(); 3665 3666 assert_eq!( 3667 guest 3668 .ssh_command("sudo cat /sys/class/dmi/id/product_serial") 3669 .unwrap() 3670 .trim(), 3671 "a=b;c=d" 3672 ); 3673 }); 3674 3675 kill_child(&mut child); 3676 let output = child.wait_with_output().unwrap(); 3677 3678 handle_child_output(r, &output); 3679 } 3680 3681 #[test] 3682 #[cfg(target_arch = "x86_64")] 3683 fn test_dmi_uuid() { 3684 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3685 let guest = Guest::new(Box::new(focal)); 3686 3687 let mut child = GuestCommand::new(&guest) 3688 .args(["--cpus", "boot=1"]) 3689 .args(["--memory", "size=512M"]) 3690 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3691 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3692 .args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"]) 3693 .default_disks() 3694 .default_net() 3695 .capture_output() 3696 .spawn() 3697 .unwrap(); 3698 3699 let r = std::panic::catch_unwind(|| { 3700 guest.wait_vm_boot(None).unwrap(); 3701 3702 assert_eq!( 3703 guest 3704 .ssh_command("sudo cat /sys/class/dmi/id/product_uuid") 3705 .unwrap() 3706 .trim(), 3707 "1e8aa28a-435d-4027-87f4-40dceff1fa0a" 3708 ); 3709 }); 3710 3711 kill_child(&mut child); 3712 let output = child.wait_with_output().unwrap(); 3713 3714 handle_child_output(r, &output); 3715 } 3716 3717 #[test] 3718 #[cfg(target_arch = "x86_64")] 3719 fn test_dmi_oem_strings() { 3720 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3721 let guest = Guest::new(Box::new(focal)); 3722 3723 let s1 = "io.systemd.credential:xx=yy"; 3724 let s2 = "This is a test string"; 3725 3726 let oem_strings = format!("oem_strings=[{s1},{s2}]"); 3727 3728 let mut child = GuestCommand::new(&guest) 3729 .args(["--cpus", "boot=1"]) 3730 .args(["--memory", "size=512M"]) 3731 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3732 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3733 .args(["--platform", &oem_strings]) 3734 .default_disks() 3735 .default_net() 3736 .capture_output() 3737 .spawn() 3738 .unwrap(); 3739 3740 let r = std::panic::catch_unwind(|| { 3741 guest.wait_vm_boot(None).unwrap(); 3742 3743 assert_eq!( 3744 guest 3745 .ssh_command("sudo dmidecode --oem-string count") 3746 .unwrap() 3747 .trim(), 3748 "2" 3749 ); 3750 
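            // (The "io.systemd.credential:..." form used for s1 mirrors the
            // prefix systemd recognizes when importing credentials from SMBIOS
            // type 11 OEM strings; here the test only checks that the string
            // is reported back verbatim by dmidecode.)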
3751 assert_eq!( 3752 guest 3753 .ssh_command("sudo dmidecode --oem-string 1") 3754 .unwrap() 3755 .trim(), 3756 s1 3757 ); 3758 3759 assert_eq!( 3760 guest 3761 .ssh_command("sudo dmidecode --oem-string 2") 3762 .unwrap() 3763 .trim(), 3764 s2 3765 ); 3766 }); 3767 3768 kill_child(&mut child); 3769 let output = child.wait_with_output().unwrap(); 3770 3771 handle_child_output(r, &output); 3772 } 3773 3774 #[test] 3775 fn test_virtio_fs() { 3776 _test_virtio_fs(&prepare_virtiofsd, false, None) 3777 } 3778 3779 #[test] 3780 fn test_virtio_fs_hotplug() { 3781 _test_virtio_fs(&prepare_virtiofsd, true, None) 3782 } 3783 3784 #[test] 3785 #[cfg(not(feature = "mshv"))] 3786 fn test_virtio_fs_multi_segment_hotplug() { 3787 _test_virtio_fs(&prepare_virtiofsd, true, Some(15)) 3788 } 3789 3790 #[test] 3791 #[cfg(not(feature = "mshv"))] 3792 fn test_virtio_fs_multi_segment() { 3793 _test_virtio_fs(&prepare_virtiofsd, false, Some(15)) 3794 } 3795 3796 #[test] 3797 fn test_virtio_pmem_persist_writes() { 3798 test_virtio_pmem(false, false) 3799 } 3800 3801 #[test] 3802 fn test_virtio_pmem_discard_writes() { 3803 test_virtio_pmem(true, false) 3804 } 3805 3806 #[test] 3807 fn test_virtio_pmem_with_size() { 3808 test_virtio_pmem(true, true) 3809 } 3810 3811 #[test] 3812 fn test_boot_from_virtio_pmem() { 3813 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3814 let guest = Guest::new(Box::new(focal)); 3815 3816 let kernel_path = direct_kernel_boot_path(); 3817 3818 let mut child = GuestCommand::new(&guest) 3819 .args(["--cpus", "boot=1"]) 3820 .args(["--memory", "size=512M"]) 3821 .args(["--kernel", kernel_path.to_str().unwrap()]) 3822 .args([ 3823 "--disk", 3824 format!( 3825 "path={}", 3826 guest.disk_config.disk(DiskType::CloudInit).unwrap() 3827 ) 3828 .as_str(), 3829 ]) 3830 .default_net() 3831 .args([ 3832 "--pmem", 3833 format!( 3834 "file={},size={}", 3835 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), 3836 fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) 3837 .unwrap() 3838 .len() 3839 ) 3840 .as_str(), 3841 ]) 3842 .args([ 3843 "--cmdline", 3844 DIRECT_KERNEL_BOOT_CMDLINE 3845 .replace("vda1", "pmem0p1") 3846 .as_str(), 3847 ]) 3848 .capture_output() 3849 .spawn() 3850 .unwrap(); 3851 3852 let r = std::panic::catch_unwind(|| { 3853 guest.wait_vm_boot(None).unwrap(); 3854 3855 // Simple checks to validate the VM booted properly 3856 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 3857 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 3858 }); 3859 3860 kill_child(&mut child); 3861 let output = child.wait_with_output().unwrap(); 3862 3863 handle_child_output(r, &output); 3864 } 3865 3866 #[test] 3867 fn test_multiple_network_interfaces() { 3868 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3869 let guest = Guest::new(Box::new(focal)); 3870 3871 let kernel_path = direct_kernel_boot_path(); 3872 3873 let mut child = GuestCommand::new(&guest) 3874 .args(["--cpus", "boot=1"]) 3875 .args(["--memory", "size=512M"]) 3876 .args(["--kernel", kernel_path.to_str().unwrap()]) 3877 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3878 .default_disks() 3879 .args([ 3880 "--net", 3881 guest.default_net_string().as_str(), 3882 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 3883 "tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 3884 ]) 3885 .capture_output() 3886 .spawn() 3887 .unwrap(); 3888 3889 let r = std::panic::catch_unwind(|| { 3890 guest.wait_vm_boot(None).unwrap(); 3891 3892 
let tap_count = exec_host_command_output("ip link | grep -c mytap1"); 3893 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 3894 3895 // 3 network interfaces + default localhost ==> 4 interfaces 3896 assert_eq!( 3897 guest 3898 .ssh_command("ip -o link | wc -l") 3899 .unwrap() 3900 .trim() 3901 .parse::<u32>() 3902 .unwrap_or_default(), 3903 4 3904 ); 3905 }); 3906 3907 kill_child(&mut child); 3908 let output = child.wait_with_output().unwrap(); 3909 3910 handle_child_output(r, &output); 3911 } 3912 3913 #[test] 3914 #[cfg(target_arch = "aarch64")] 3915 fn test_pmu_on() { 3916 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3917 let guest = Guest::new(Box::new(focal)); 3918 let mut child = GuestCommand::new(&guest) 3919 .args(["--cpus", "boot=1"]) 3920 .args(["--memory", "size=512M"]) 3921 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3922 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3923 .default_disks() 3924 .default_net() 3925 .capture_output() 3926 .spawn() 3927 .unwrap(); 3928 3929 let r = std::panic::catch_unwind(|| { 3930 guest.wait_vm_boot(None).unwrap(); 3931 3932 // Test that PMU exists. 3933 assert_eq!( 3934 guest 3935 .ssh_command(GREP_PMU_IRQ_CMD) 3936 .unwrap() 3937 .trim() 3938 .parse::<u32>() 3939 .unwrap_or_default(), 3940 1 3941 ); 3942 }); 3943 3944 kill_child(&mut child); 3945 let output = child.wait_with_output().unwrap(); 3946 3947 handle_child_output(r, &output); 3948 } 3949 3950 #[test] 3951 fn test_serial_off() { 3952 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3953 let guest = Guest::new(Box::new(focal)); 3954 let mut child = GuestCommand::new(&guest) 3955 .args(["--cpus", "boot=1"]) 3956 .args(["--memory", "size=512M"]) 3957 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 3958 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 3959 .default_disks() 3960 .default_net() 3961 .args(["--serial", "off"]) 3962 .capture_output() 3963 .spawn() 3964 .unwrap(); 3965 3966 let r = std::panic::catch_unwind(|| { 3967 guest.wait_vm_boot(None).unwrap(); 3968 3969 // Test that there is no ttyS0 3970 assert_eq!( 3971 guest 3972 .ssh_command(GREP_SERIAL_IRQ_CMD) 3973 .unwrap() 3974 .trim() 3975 .parse::<u32>() 3976 .unwrap_or(1), 3977 0 3978 ); 3979 }); 3980 3981 kill_child(&mut child); 3982 let output = child.wait_with_output().unwrap(); 3983 3984 handle_child_output(r, &output); 3985 } 3986 3987 #[test] 3988 fn test_serial_null() { 3989 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 3990 let guest = Guest::new(Box::new(focal)); 3991 let mut cmd = GuestCommand::new(&guest); 3992 #[cfg(target_arch = "x86_64")] 3993 let console_str: &str = "console=ttyS0"; 3994 #[cfg(target_arch = "aarch64")] 3995 let console_str: &str = "console=ttyAMA0"; 3996 3997 cmd.args(["--cpus", "boot=1"]) 3998 .args(["--memory", "size=512M"]) 3999 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4000 .args([ 4001 "--cmdline", 4002 DIRECT_KERNEL_BOOT_CMDLINE 4003 .replace("console=hvc0 ", console_str) 4004 .as_str(), 4005 ]) 4006 .default_disks() 4007 .default_net() 4008 .args(["--serial", "null"]) 4009 .args(["--console", "off"]) 4010 .capture_output(); 4011 4012 let mut child = cmd.spawn().unwrap(); 4013 4014 let r = std::panic::catch_unwind(|| { 4015 guest.wait_vm_boot(None).unwrap(); 4016 4017 // Test that there is a ttyS0 4018 assert_eq!( 4019 guest 4020 .ssh_command(GREP_SERIAL_IRQ_CMD) 4021 .unwrap() 4022 .trim() 4023 .parse::<u32>() 4024 .unwrap_or_default(), 4025 1 4026 ); 
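        // With `--serial null` the guest still sees a serial device (hence
        // exactly one serial IRQ above), but whatever the guest writes to it
        // is discarded by the VMM, which is why the check after the VM is
        // stopped expects CONSOLE_TEST_STRING to be absent from the captured
        // stdout.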
4027 }); 4028 4029 kill_child(&mut child); 4030 let output = child.wait_with_output().unwrap(); 4031 handle_child_output(r, &output); 4032 4033 let r = std::panic::catch_unwind(|| { 4034 assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 4035 }); 4036 4037 handle_child_output(r, &output); 4038 } 4039 4040 #[test] 4041 fn test_serial_tty() { 4042 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4043 let guest = Guest::new(Box::new(focal)); 4044 4045 let kernel_path = direct_kernel_boot_path(); 4046 4047 #[cfg(target_arch = "x86_64")] 4048 let console_str: &str = "console=ttyS0"; 4049 #[cfg(target_arch = "aarch64")] 4050 let console_str: &str = "console=ttyAMA0"; 4051 4052 let mut child = GuestCommand::new(&guest) 4053 .args(["--cpus", "boot=1"]) 4054 .args(["--memory", "size=512M"]) 4055 .args(["--kernel", kernel_path.to_str().unwrap()]) 4056 .args([ 4057 "--cmdline", 4058 DIRECT_KERNEL_BOOT_CMDLINE 4059 .replace("console=hvc0 ", console_str) 4060 .as_str(), 4061 ]) 4062 .default_disks() 4063 .default_net() 4064 .args(["--serial", "tty"]) 4065 .args(["--console", "off"]) 4066 .capture_output() 4067 .spawn() 4068 .unwrap(); 4069 4070 let r = std::panic::catch_unwind(|| { 4071 guest.wait_vm_boot(None).unwrap(); 4072 4073 // Test that there is a ttyS0 4074 assert_eq!( 4075 guest 4076 .ssh_command(GREP_SERIAL_IRQ_CMD) 4077 .unwrap() 4078 .trim() 4079 .parse::<u32>() 4080 .unwrap_or_default(), 4081 1 4082 ); 4083 }); 4084 4085 // This sleep is needed to wait for the login prompt 4086 thread::sleep(std::time::Duration::new(2, 0)); 4087 4088 kill_child(&mut child); 4089 let output = child.wait_with_output().unwrap(); 4090 handle_child_output(r, &output); 4091 4092 let r = std::panic::catch_unwind(|| { 4093 assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING)); 4094 }); 4095 4096 handle_child_output(r, &output); 4097 } 4098 4099 #[test] 4100 fn test_serial_file() { 4101 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4102 let guest = Guest::new(Box::new(focal)); 4103 4104 let serial_path = guest.tmp_dir.as_path().join("serial-output"); 4105 #[cfg(target_arch = "x86_64")] 4106 let console_str: &str = "console=ttyS0"; 4107 #[cfg(target_arch = "aarch64")] 4108 let console_str: &str = "console=ttyAMA0"; 4109 4110 let mut child = GuestCommand::new(&guest) 4111 .args(["--cpus", "boot=1"]) 4112 .args(["--memory", "size=512M"]) 4113 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4114 .args([ 4115 "--cmdline", 4116 DIRECT_KERNEL_BOOT_CMDLINE 4117 .replace("console=hvc0 ", console_str) 4118 .as_str(), 4119 ]) 4120 .default_disks() 4121 .default_net() 4122 .args([ 4123 "--serial", 4124 format!("file={}", serial_path.to_str().unwrap()).as_str(), 4125 ]) 4126 .capture_output() 4127 .spawn() 4128 .unwrap(); 4129 4130 let r = std::panic::catch_unwind(|| { 4131 guest.wait_vm_boot(None).unwrap(); 4132 4133 // Test that there is a ttyS0 4134 assert_eq!( 4135 guest 4136 .ssh_command(GREP_SERIAL_IRQ_CMD) 4137 .unwrap() 4138 .trim() 4139 .parse::<u32>() 4140 .unwrap_or_default(), 4141 1 4142 ); 4143 4144 guest.ssh_command("sudo shutdown -h now").unwrap(); 4145 }); 4146 4147 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4148 kill_child(&mut child); 4149 let output = child.wait_with_output().unwrap(); 4150 handle_child_output(r, &output); 4151 4152 let r = std::panic::catch_unwind(|| { 4153 // Check that the cloud-hypervisor binary actually terminated 4154 assert!(output.status.success()); 4155 4156 
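        // A clean exit status is only expected here because the guest was
        // asked to power off via `shutdown -h now`; the kill_child() call
        // above is just a fallback in case the VMM did not exit within the
        // 20 second wait_timeout().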
// Do this check after shutdown of the VM as an easy way to ensure 4157 // all writes are flushed to disk 4158 let mut f = std::fs::File::open(serial_path).unwrap(); 4159 let mut buf = String::new(); 4160 f.read_to_string(&mut buf).unwrap(); 4161 assert!(buf.contains(CONSOLE_TEST_STRING)); 4162 }); 4163 4164 handle_child_output(r, &output); 4165 } 4166 4167 #[test] 4168 fn test_pty_interaction() { 4169 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4170 let guest = Guest::new(Box::new(focal)); 4171 let api_socket = temp_api_path(&guest.tmp_dir); 4172 let serial_option = if cfg!(target_arch = "x86_64") { 4173 " console=ttyS0" 4174 } else { 4175 " console=ttyAMA0" 4176 }; 4177 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4178 4179 let mut child = GuestCommand::new(&guest) 4180 .args(["--cpus", "boot=1"]) 4181 .args(["--memory", "size=512M"]) 4182 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4183 .args(["--cmdline", &cmdline]) 4184 .default_disks() 4185 .default_net() 4186 .args(["--serial", "null"]) 4187 .args(["--console", "pty"]) 4188 .args(["--api-socket", &api_socket]) 4189 .spawn() 4190 .unwrap(); 4191 4192 let r = std::panic::catch_unwind(|| { 4193 guest.wait_vm_boot(None).unwrap(); 4194 // Get pty fd for console 4195 let console_path = get_pty_path(&api_socket, "console"); 4196 _test_pty_interaction(console_path); 4197 4198 guest.ssh_command("sudo shutdown -h now").unwrap(); 4199 }); 4200 4201 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4202 let _ = child.kill(); 4203 let output = child.wait_with_output().unwrap(); 4204 handle_child_output(r, &output); 4205 4206 let r = std::panic::catch_unwind(|| { 4207 // Check that the cloud-hypervisor binary actually terminated 4208 assert!(output.status.success()) 4209 }); 4210 handle_child_output(r, &output); 4211 } 4212 4213 #[test] 4214 fn test_serial_socket_interaction() { 4215 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4216 let guest = Guest::new(Box::new(focal)); 4217 let serial_socket = guest.tmp_dir.as_path().join("serial.socket"); 4218 let serial_socket_pty = guest.tmp_dir.as_path().join("serial.pty"); 4219 let serial_option = if cfg!(target_arch = "x86_64") { 4220 " console=ttyS0" 4221 } else { 4222 " console=ttyAMA0" 4223 }; 4224 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option; 4225 4226 let mut child = GuestCommand::new(&guest) 4227 .args(["--cpus", "boot=1"]) 4228 .args(["--memory", "size=512M"]) 4229 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4230 .args(["--cmdline", &cmdline]) 4231 .default_disks() 4232 .default_net() 4233 .args(["--console", "null"]) 4234 .args([ 4235 "--serial", 4236 format!("socket={}", serial_socket.to_str().unwrap()).as_str(), 4237 ]) 4238 .spawn() 4239 .unwrap(); 4240 4241 let _ = std::panic::catch_unwind(|| { 4242 guest.wait_vm_boot(None).unwrap(); 4243 }); 4244 4245 let mut socat_command = Command::new("socat"); 4246 let socat_args = [ 4247 &format!("pty,link={},raw", serial_socket_pty.display()), 4248 &format!("UNIX-CONNECT:{}", serial_socket.display()), 4249 ]; 4250 socat_command.args(socat_args); 4251 4252 let mut socat_child = socat_command.spawn().unwrap(); 4253 thread::sleep(std::time::Duration::new(1, 0)); 4254 4255 let _ = std::panic::catch_unwind(|| { 4256 _test_pty_interaction(serial_socket_pty); 4257 }); 4258 4259 let _ = socat_child.kill(); 4260 let _ = socat_child.wait(); 4261 4262 let r = std::panic::catch_unwind(|| { 4263 guest.ssh_command("sudo 
shutdown -h now").unwrap(); 4264 }); 4265 4266 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4267 kill_child(&mut child); 4268 let output = child.wait_with_output().unwrap(); 4269 handle_child_output(r, &output); 4270 4271 let r = std::panic::catch_unwind(|| { 4272 // Check that the cloud-hypervisor binary actually terminated 4273 if !output.status.success() { 4274 panic!( 4275 "Cloud Hypervisor process failed to terminate gracefully: {:?}", 4276 output.status 4277 ); 4278 } 4279 }); 4280 handle_child_output(r, &output); 4281 } 4282 4283 #[test] 4284 fn test_virtio_console() { 4285 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4286 let guest = Guest::new(Box::new(focal)); 4287 4288 let kernel_path = direct_kernel_boot_path(); 4289 4290 let mut child = GuestCommand::new(&guest) 4291 .args(["--cpus", "boot=1"]) 4292 .args(["--memory", "size=512M"]) 4293 .args(["--kernel", kernel_path.to_str().unwrap()]) 4294 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4295 .default_disks() 4296 .default_net() 4297 .args(["--console", "tty"]) 4298 .args(["--serial", "null"]) 4299 .capture_output() 4300 .spawn() 4301 .unwrap(); 4302 4303 let text = String::from("On a branch floating down river a cricket, singing."); 4304 let cmd = format!("echo {text} | sudo tee /dev/hvc0"); 4305 4306 let r = std::panic::catch_unwind(|| { 4307 guest.wait_vm_boot(None).unwrap(); 4308 4309 assert!(guest 4310 .does_device_vendor_pair_match("0x1043", "0x1af4") 4311 .unwrap_or_default()); 4312 4313 guest.ssh_command(&cmd).unwrap(); 4314 }); 4315 4316 kill_child(&mut child); 4317 let output = child.wait_with_output().unwrap(); 4318 handle_child_output(r, &output); 4319 4320 let r = std::panic::catch_unwind(|| { 4321 assert!(String::from_utf8_lossy(&output.stdout).contains(&text)); 4322 }); 4323 4324 handle_child_output(r, &output); 4325 } 4326 4327 #[test] 4328 fn test_console_file() { 4329 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4330 let guest = Guest::new(Box::new(focal)); 4331 4332 let console_path = guest.tmp_dir.as_path().join("console-output"); 4333 let mut child = GuestCommand::new(&guest) 4334 .args(["--cpus", "boot=1"]) 4335 .args(["--memory", "size=512M"]) 4336 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 4337 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4338 .default_disks() 4339 .default_net() 4340 .args([ 4341 "--console", 4342 format!("file={}", console_path.to_str().unwrap()).as_str(), 4343 ]) 4344 .capture_output() 4345 .spawn() 4346 .unwrap(); 4347 4348 guest.wait_vm_boot(None).unwrap(); 4349 4350 guest.ssh_command("sudo shutdown -h now").unwrap(); 4351 4352 let _ = child.wait_timeout(std::time::Duration::from_secs(20)); 4353 kill_child(&mut child); 4354 let output = child.wait_with_output().unwrap(); 4355 4356 let r = std::panic::catch_unwind(|| { 4357 // Check that the cloud-hypervisor binary actually terminated 4358 assert!(output.status.success()); 4359 4360 // Do this check after shutdown of the VM as an easy way to ensure 4361 // all writes are flushed to disk 4362 let mut f = std::fs::File::open(console_path).unwrap(); 4363 let mut buf = String::new(); 4364 f.read_to_string(&mut buf).unwrap(); 4365 4366 if !buf.contains(CONSOLE_TEST_STRING) { 4367 eprintln!( 4368 "\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ====" 4369 ); 4370 } 4371 assert!(buf.contains(CONSOLE_TEST_STRING)); 4372 }); 4373 4374 handle_child_output(r, &output); 4375 } 4376 4377 #[test] 4378 #[cfg(target_arch = "x86_64")] 
4379 #[cfg(not(feature = "mshv"))] 4380 // The VFIO integration test starts cloud-hypervisor guest with 3 TAP 4381 // backed networking interfaces, bound through a simple bridge on the host. 4382 // So if the nested cloud-hypervisor succeeds in getting a directly 4383 // assigned interface from its cloud-hypervisor host, we should be able to 4384 // ssh into it, and verify that it's running with the right kernel command 4385 // line (We tag the command line from cloud-hypervisor for that purpose). 4386 // The third device is added to validate that hotplug works correctly since 4387 // it is being added to the L2 VM through hotplugging mechanism. 4388 // Also, we pass-through a virtio-blk device to the L2 VM to test the 32-bit 4389 // vfio device support 4390 fn test_vfio() { 4391 setup_vfio_network_interfaces(); 4392 4393 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 4394 let guest = Guest::new_from_ip_range(Box::new(jammy), "172.18", 0); 4395 4396 let mut workload_path = dirs::home_dir().unwrap(); 4397 workload_path.push("workloads"); 4398 4399 let kernel_path = direct_kernel_boot_path(); 4400 4401 let mut vfio_path = workload_path.clone(); 4402 vfio_path.push("vfio"); 4403 4404 let mut cloud_init_vfio_base_path = vfio_path.clone(); 4405 cloud_init_vfio_base_path.push("cloudinit.img"); 4406 4407 // We copy our cloudinit into the vfio mount point, for the nested 4408 // cloud-hypervisor guest to use. 4409 rate_limited_copy( 4410 guest.disk_config.disk(DiskType::CloudInit).unwrap(), 4411 &cloud_init_vfio_base_path, 4412 ) 4413 .expect("copying of cloud-init disk failed"); 4414 4415 let mut vfio_disk_path = workload_path.clone(); 4416 vfio_disk_path.push("vfio.img"); 4417 4418 // Create the vfio disk image 4419 let output = Command::new("mkfs.ext4") 4420 .arg("-d") 4421 .arg(vfio_path.to_str().unwrap()) 4422 .arg(vfio_disk_path.to_str().unwrap()) 4423 .arg("2g") 4424 .output() 4425 .unwrap(); 4426 if !output.status.success() { 4427 eprintln!("{}", String::from_utf8_lossy(&output.stderr)); 4428 panic!("mkfs.ext4 command generated an error"); 4429 } 4430 4431 let mut blk_file_path = workload_path; 4432 blk_file_path.push("blk.img"); 4433 4434 let vfio_tap0 = "vfio-tap0"; 4435 let vfio_tap1 = "vfio-tap1"; 4436 let vfio_tap2 = "vfio-tap2"; 4437 let vfio_tap3 = "vfio-tap3"; 4438 4439 let mut child = GuestCommand::new(&guest) 4440 .args(["--cpus", "boot=4"]) 4441 .args(["--memory", "size=2G,hugepages=on,shared=on"]) 4442 .args(["--kernel", kernel_path.to_str().unwrap()]) 4443 .args([ 4444 "--disk", 4445 format!( 4446 "path={}", 4447 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 4448 ) 4449 .as_str(), 4450 format!( 4451 "path={}", 4452 guest.disk_config.disk(DiskType::CloudInit).unwrap() 4453 ) 4454 .as_str(), 4455 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(), 4456 format!("path={},iommu=on", blk_file_path.to_str().unwrap()).as_str(), 4457 ]) 4458 .args([ 4459 "--cmdline", 4460 format!( 4461 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts" 4462 ) 4463 .as_str(), 4464 ]) 4465 .args([ 4466 "--net", 4467 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(), 4468 format!( 4469 "tap={},mac={},iommu=on", 4470 vfio_tap1, guest.network.l2_guest_mac1 4471 ) 4472 .as_str(), 4473 format!( 4474 "tap={},mac={},iommu=on", 4475 vfio_tap2, guest.network.l2_guest_mac2 4476 ) 4477 .as_str(), 4478 format!( 4479 "tap={},mac={},iommu=on", 4480 vfio_tap3, guest.network.l2_guest_mac3 4481 ) 4482 .as_str(), 4483 ]) 
4484 .capture_output() 4485 .spawn() 4486 .unwrap(); 4487 4488 thread::sleep(std::time::Duration::new(30, 0)); 4489 4490 let r = std::panic::catch_unwind(|| { 4491 guest.ssh_command_l1("sudo systemctl start vfio").unwrap(); 4492 thread::sleep(std::time::Duration::new(120, 0)); 4493 4494 // We booted our cloud hypervisor L2 guest with a "VFIOTAG" tag 4495 // added to its kernel command line. 4496 // Let's ssh into it and verify that it's there. If it is it means 4497 // we're in the right guest (The L2 one) because the QEMU L1 guest 4498 // does not have this command line tag. 4499 assert!(check_matched_lines_count( 4500 guest.ssh_command_l2_1("cat /proc/cmdline").unwrap().trim(), 4501 vec!["VFIOTAG"], 4502 1 4503 )); 4504 4505 // Let's also verify from the second virtio-net device passed to 4506 // the L2 VM. 4507 assert!(check_matched_lines_count( 4508 guest.ssh_command_l2_2("cat /proc/cmdline").unwrap().trim(), 4509 vec!["VFIOTAG"], 4510 1 4511 )); 4512 4513 // Check the amount of PCI devices appearing in L2 VM. 4514 assert!(check_lines_count( 4515 guest 4516 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4517 .unwrap() 4518 .trim(), 4519 8 4520 )); 4521 4522 // Check both if /dev/vdc exists and if the block size is 16M in L2 VM 4523 assert!(check_matched_lines_count( 4524 guest.ssh_command_l2_1("lsblk").unwrap().trim(), 4525 vec!["vdc", "16M"], 4526 1 4527 )); 4528 4529 // Hotplug an extra virtio-net device through L2 VM. 4530 guest 4531 .ssh_command_l1( 4532 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind", 4533 ) 4534 .unwrap(); 4535 guest 4536 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind") 4537 .unwrap(); 4538 let vfio_hotplug_output = guest 4539 .ssh_command_l1( 4540 "sudo /mnt/ch-remote \ 4541 --api-socket=/tmp/ch_api.sock \ 4542 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123", 4543 ) 4544 .unwrap(); 4545 assert!(check_matched_lines_count( 4546 vfio_hotplug_output.trim(), 4547 vec!["{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}"], 4548 1 4549 )); 4550 4551 thread::sleep(std::time::Duration::new(10, 0)); 4552 4553 // Let's also verify from the third virtio-net device passed to 4554 // the L2 VM. This third device has been hotplugged through the L2 4555 // VM, so this is our way to validate hotplug works for VFIO PCI. 4556 assert!(check_matched_lines_count( 4557 guest.ssh_command_l2_3("cat /proc/cmdline").unwrap().trim(), 4558 vec!["VFIOTAG"], 4559 1 4560 )); 4561 4562 // Check the amount of PCI devices appearing in L2 VM. 4563 // There should be one more device than before, raising the count 4564 // up to 9 PCI devices. 4565 assert!(check_lines_count( 4566 guest 4567 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4568 .unwrap() 4569 .trim(), 4570 9 4571 )); 4572 4573 // Let's now verify that we can correctly remove the virtio-net 4574 // device through the "remove-device" command responsible for 4575 // unplugging VFIO devices. 4576 guest 4577 .ssh_command_l1( 4578 "sudo /mnt/ch-remote \ 4579 --api-socket=/tmp/ch_api.sock \ 4580 remove-device vfio123", 4581 ) 4582 .unwrap(); 4583 thread::sleep(std::time::Duration::new(10, 0)); 4584 4585 // Check the amount of PCI devices appearing in L2 VM is back down 4586 // to 8 devices. 4587 assert!(check_lines_count( 4588 guest 4589 .ssh_command_l2_1("ls /sys/bus/pci/devices") 4590 .unwrap() 4591 .trim(), 4592 8 4593 )); 4594 4595 // Perform memory hotplug in L2 and validate the memory is showing 4596 // up as expected. 
In order to check, we will use the virtio-net 4597 // device already passed through L2 as a VFIO device, this will 4598 // verify that VFIO devices are functional with memory hotplug. 4599 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000); 4600 guest 4601 .ssh_command_l2_1( 4602 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'", 4603 ) 4604 .unwrap(); 4605 guest 4606 .ssh_command_l1( 4607 "sudo /mnt/ch-remote \ 4608 --api-socket=/tmp/ch_api.sock \ 4609 resize --memory=1073741824", 4610 ) 4611 .unwrap(); 4612 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000); 4613 }); 4614 4615 kill_child(&mut child); 4616 let output = child.wait_with_output().unwrap(); 4617 4618 cleanup_vfio_network_interfaces(); 4619 4620 handle_child_output(r, &output); 4621 } 4622 4623 #[test] 4624 fn test_direct_kernel_boot_noacpi() { 4625 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4626 let guest = Guest::new(Box::new(focal)); 4627 4628 let kernel_path = direct_kernel_boot_path(); 4629 4630 let mut child = GuestCommand::new(&guest) 4631 .args(["--cpus", "boot=1"]) 4632 .args(["--memory", "size=512M"]) 4633 .args(["--kernel", kernel_path.to_str().unwrap()]) 4634 .args([ 4635 "--cmdline", 4636 format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(), 4637 ]) 4638 .default_disks() 4639 .default_net() 4640 .capture_output() 4641 .spawn() 4642 .unwrap(); 4643 4644 let r = std::panic::catch_unwind(|| { 4645 guest.wait_vm_boot(None).unwrap(); 4646 4647 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 4648 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4649 }); 4650 4651 kill_child(&mut child); 4652 let output = child.wait_with_output().unwrap(); 4653 4654 handle_child_output(r, &output); 4655 } 4656 4657 #[test] 4658 fn test_virtio_vsock() { 4659 _test_virtio_vsock(false) 4660 } 4661 4662 #[test] 4663 fn test_virtio_vsock_hotplug() { 4664 _test_virtio_vsock(true); 4665 } 4666 4667 #[test] 4668 fn test_api_http_shutdown() { 4669 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4670 let guest = Guest::new(Box::new(focal)); 4671 4672 _test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest) 4673 } 4674 4675 #[test] 4676 fn test_api_http_delete() { 4677 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4678 let guest = Guest::new(Box::new(focal)); 4679 4680 _test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest); 4681 } 4682 4683 #[test] 4684 fn test_api_http_pause_resume() { 4685 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4686 let guest = Guest::new(Box::new(focal)); 4687 4688 _test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest) 4689 } 4690 4691 #[test] 4692 fn test_api_http_create_boot() { 4693 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4694 let guest = Guest::new(Box::new(focal)); 4695 4696 _test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest) 4697 } 4698 4699 #[test] 4700 fn test_virtio_iommu() { 4701 _test_virtio_iommu(cfg!(target_arch = "x86_64")) 4702 } 4703 4704 #[test] 4705 // We cannot force the software running in the guest to reprogram the BAR 4706 // with some different addresses, but we have a reliable way of testing it 4707 // with a standard Linux kernel. 4708 // By removing a device from the PCI tree, and then rescanning the tree, 4709 // Linux consistently chooses to reorganize the PCI device BARs to other 4710 // locations in the guest address space. 
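// The first whitespace-separated field on each line of
// /sys/bus/pci/devices/<bdf>/resource is the start address of the
// corresponding resource entry (e.g. a line of the form
// "0x00000000fd000000 0x00000000fdffffff 0x0000000000040200" -- illustrative
// values only), so the `awk '{print $1; exit}'` invocations below capture the
// device's first BAR base address before and after the rescan.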
// This test creates a dedicated PCI network device, checks that it is
// properly probed, then removes it and adds it back by rescanning the
// PCI bus.
fn test_pci_bar_reprogramming() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = edk2_path();

    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .default_disks()
        .args([
            "--net",
            guest.default_net_string().as_str(),
            "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
        ])
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // 2 network interfaces + default localhost ==> 3 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            3
        );

        let init_bar_addr = guest
            .ssh_command(
                "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
            )
            .unwrap();

        // Remove the PCI device
        guest
            .ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove")
            .unwrap();

        // Only 1 network interface left + default localhost ==> 2 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            2
        );

        // Rescan the PCI bus to rediscover the removed device
        guest
            .ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan")
            .unwrap();

        // Back to 2 network interfaces + default localhost ==> 3 interfaces
        assert_eq!(
            guest
                .ssh_command("ip -o link | wc -l")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or_default(),
            3
        );

        let new_bar_addr = guest
            .ssh_command(
                "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
            )
            .unwrap();

        // Let's compare the BAR addresses for our virtio-net device.
        // They should be different as we expect the BAR reprogramming
        // to have happened.
4799 assert_ne!(init_bar_addr, new_bar_addr); 4800 }); 4801 4802 kill_child(&mut child); 4803 let output = child.wait_with_output().unwrap(); 4804 4805 handle_child_output(r, &output); 4806 } 4807 4808 #[test] 4809 fn test_memory_mergeable_off() { 4810 test_memory_mergeable(false) 4811 } 4812 4813 #[test] 4814 #[cfg(target_arch = "x86_64")] 4815 fn test_cpu_hotplug() { 4816 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4817 let guest = Guest::new(Box::new(focal)); 4818 let api_socket = temp_api_path(&guest.tmp_dir); 4819 4820 let kernel_path = direct_kernel_boot_path(); 4821 4822 let mut child = GuestCommand::new(&guest) 4823 .args(["--cpus", "boot=2,max=4"]) 4824 .args(["--memory", "size=512M"]) 4825 .args(["--kernel", kernel_path.to_str().unwrap()]) 4826 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4827 .default_disks() 4828 .default_net() 4829 .args(["--api-socket", &api_socket]) 4830 .capture_output() 4831 .spawn() 4832 .unwrap(); 4833 4834 let r = std::panic::catch_unwind(|| { 4835 guest.wait_vm_boot(None).unwrap(); 4836 4837 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 4838 4839 // Resize the VM 4840 let desired_vcpus = 4; 4841 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4842 4843 guest 4844 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4845 .unwrap(); 4846 guest 4847 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4848 .unwrap(); 4849 thread::sleep(std::time::Duration::new(10, 0)); 4850 assert_eq!( 4851 guest.get_cpu_count().unwrap_or_default(), 4852 u32::from(desired_vcpus) 4853 ); 4854 4855 guest.reboot_linux(0, None); 4856 4857 assert_eq!( 4858 guest.get_cpu_count().unwrap_or_default(), 4859 u32::from(desired_vcpus) 4860 ); 4861 4862 // Resize the VM 4863 let desired_vcpus = 2; 4864 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4865 4866 thread::sleep(std::time::Duration::new(10, 0)); 4867 assert_eq!( 4868 guest.get_cpu_count().unwrap_or_default(), 4869 u32::from(desired_vcpus) 4870 ); 4871 4872 // Resize the VM back up to 4 4873 let desired_vcpus = 4; 4874 resize_command(&api_socket, Some(desired_vcpus), None, None, None); 4875 4876 guest 4877 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 4878 .unwrap(); 4879 guest 4880 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 4881 .unwrap(); 4882 thread::sleep(std::time::Duration::new(10, 0)); 4883 assert_eq!( 4884 guest.get_cpu_count().unwrap_or_default(), 4885 u32::from(desired_vcpus) 4886 ); 4887 }); 4888 4889 kill_child(&mut child); 4890 let output = child.wait_with_output().unwrap(); 4891 4892 handle_child_output(r, &output); 4893 } 4894 4895 #[test] 4896 fn test_memory_hotplug() { 4897 #[cfg(target_arch = "aarch64")] 4898 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 4899 #[cfg(target_arch = "x86_64")] 4900 let focal_image = FOCAL_IMAGE_NAME.to_string(); 4901 let focal = UbuntuDiskConfig::new(focal_image); 4902 let guest = Guest::new(Box::new(focal)); 4903 let api_socket = temp_api_path(&guest.tmp_dir); 4904 4905 #[cfg(target_arch = "aarch64")] 4906 let kernel_path = edk2_path(); 4907 #[cfg(target_arch = "x86_64")] 4908 let kernel_path = direct_kernel_boot_path(); 4909 4910 let mut child = GuestCommand::new(&guest) 4911 .args(["--cpus", "boot=2,max=4"]) 4912 .args(["--memory", "size=512M,hotplug_size=8192M"]) 4913 .args(["--kernel", kernel_path.to_str().unwrap()]) 4914 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4915 .default_disks() 4916 
.default_net() 4917 .args(["--balloon", "size=0"]) 4918 .args(["--api-socket", &api_socket]) 4919 .capture_output() 4920 .spawn() 4921 .unwrap(); 4922 4923 let r = std::panic::catch_unwind(|| { 4924 guest.wait_vm_boot(None).unwrap(); 4925 4926 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4927 4928 guest.enable_memory_hotplug(); 4929 4930 // Add RAM to the VM 4931 let desired_ram = 1024 << 20; 4932 resize_command(&api_socket, None, Some(desired_ram), None, None); 4933 4934 thread::sleep(std::time::Duration::new(10, 0)); 4935 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4936 4937 // Use balloon to remove RAM from the VM 4938 let desired_balloon = 512 << 20; 4939 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4940 4941 thread::sleep(std::time::Duration::new(10, 0)); 4942 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 4943 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4944 4945 guest.reboot_linux(0, None); 4946 4947 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 4948 4949 // Use balloon add RAM to the VM 4950 let desired_balloon = 0; 4951 resize_command(&api_socket, None, None, Some(desired_balloon), None); 4952 4953 thread::sleep(std::time::Duration::new(10, 0)); 4954 4955 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4956 4957 guest.enable_memory_hotplug(); 4958 4959 // Add RAM to the VM 4960 let desired_ram = 2048 << 20; 4961 resize_command(&api_socket, None, Some(desired_ram), None, None); 4962 4963 thread::sleep(std::time::Duration::new(10, 0)); 4964 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 4965 4966 // Remove RAM to the VM (only applies after reboot) 4967 let desired_ram = 1024 << 20; 4968 resize_command(&api_socket, None, Some(desired_ram), None, None); 4969 4970 guest.reboot_linux(1, None); 4971 4972 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 4973 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 4974 }); 4975 4976 kill_child(&mut child); 4977 let output = child.wait_with_output().unwrap(); 4978 4979 handle_child_output(r, &output); 4980 } 4981 4982 #[test] 4983 #[cfg(not(feature = "mshv"))] 4984 fn test_virtio_mem() { 4985 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 4986 let guest = Guest::new(Box::new(focal)); 4987 let api_socket = temp_api_path(&guest.tmp_dir); 4988 4989 let kernel_path = direct_kernel_boot_path(); 4990 4991 let mut child = GuestCommand::new(&guest) 4992 .args(["--cpus", "boot=2,max=4"]) 4993 .args([ 4994 "--memory", 4995 "size=512M,hotplug_method=virtio-mem,hotplug_size=8192M", 4996 ]) 4997 .args(["--kernel", kernel_path.to_str().unwrap()]) 4998 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 4999 .default_disks() 5000 .default_net() 5001 .args(["--api-socket", &api_socket]) 5002 .capture_output() 5003 .spawn() 5004 .unwrap(); 5005 5006 let r = std::panic::catch_unwind(|| { 5007 guest.wait_vm_boot(None).unwrap(); 5008 5009 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5010 5011 guest.enable_memory_hotplug(); 5012 5013 // Add RAM to the VM 5014 let desired_ram = 1024 << 20; 5015 resize_command(&api_socket, None, Some(desired_ram), None, None); 5016 5017 thread::sleep(std::time::Duration::new(10, 0)); 5018 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5019 5020 // Add RAM to the VM 5021 let desired_ram = 2048 << 20; 5022 resize_command(&api_socket, None, Some(desired_ram), None, None); 5023 5024 
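        // Sizes passed to resize_command() are given in bytes (2048 << 20 ==
        // 2 GiB), while get_total_memory() appears to report the guest's
        // memory in KiB (a 512M guest is checked against > 480_000 above), so
        // the 1_920_000 threshold below is roughly 2 GiB minus headroom for
        // memory reserved by the guest kernel.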
thread::sleep(std::time::Duration::new(10, 0)); 5025 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 5026 5027 // Remove RAM from the VM 5028 let desired_ram = 1024 << 20; 5029 resize_command(&api_socket, None, Some(desired_ram), None, None); 5030 5031 thread::sleep(std::time::Duration::new(10, 0)); 5032 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5033 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 5034 5035 guest.reboot_linux(0, None); 5036 5037 // Check the amount of memory after reboot is 1GiB 5038 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5039 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000); 5040 5041 // Check we can still resize to 512MiB 5042 let desired_ram = 512 << 20; 5043 resize_command(&api_socket, None, Some(desired_ram), None, None); 5044 thread::sleep(std::time::Duration::new(10, 0)); 5045 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5046 assert!(guest.get_total_memory().unwrap_or_default() < 960_000); 5047 }); 5048 5049 kill_child(&mut child); 5050 let output = child.wait_with_output().unwrap(); 5051 5052 handle_child_output(r, &output); 5053 } 5054 5055 #[test] 5056 #[cfg(target_arch = "x86_64")] 5057 #[cfg(not(feature = "mshv"))] 5058 // Test both vCPU and memory resizing together 5059 fn test_resize() { 5060 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5061 let guest = Guest::new(Box::new(focal)); 5062 let api_socket = temp_api_path(&guest.tmp_dir); 5063 5064 let kernel_path = direct_kernel_boot_path(); 5065 5066 let mut child = GuestCommand::new(&guest) 5067 .args(["--cpus", "boot=2,max=4"]) 5068 .args(["--memory", "size=512M,hotplug_size=8192M"]) 5069 .args(["--kernel", kernel_path.to_str().unwrap()]) 5070 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5071 .default_disks() 5072 .default_net() 5073 .args(["--api-socket", &api_socket]) 5074 .capture_output() 5075 .spawn() 5076 .unwrap(); 5077 5078 let r = std::panic::catch_unwind(|| { 5079 guest.wait_vm_boot(None).unwrap(); 5080 5081 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 5082 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 5083 5084 guest.enable_memory_hotplug(); 5085 5086 // Resize the VM 5087 let desired_vcpus = 4; 5088 let desired_ram = 1024 << 20; 5089 resize_command( 5090 &api_socket, 5091 Some(desired_vcpus), 5092 Some(desired_ram), 5093 None, 5094 None, 5095 ); 5096 5097 guest 5098 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online") 5099 .unwrap(); 5100 guest 5101 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online") 5102 .unwrap(); 5103 thread::sleep(std::time::Duration::new(10, 0)); 5104 assert_eq!( 5105 guest.get_cpu_count().unwrap_or_default(), 5106 u32::from(desired_vcpus) 5107 ); 5108 5109 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 5110 }); 5111 5112 kill_child(&mut child); 5113 let output = child.wait_with_output().unwrap(); 5114 5115 handle_child_output(r, &output); 5116 } 5117 5118 #[test] 5119 fn test_memory_overhead() { 5120 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5121 let guest = Guest::new(Box::new(focal)); 5122 5123 let kernel_path = direct_kernel_boot_path(); 5124 5125 let guest_memory_size_kb = 512 * 1024; 5126 5127 let mut child = GuestCommand::new(&guest) 5128 .args(["--cpus", "boot=1"]) 5129 .args(["--memory", format!("size={guest_memory_size_kb}K").as_str()]) 5130 .args(["--kernel", kernel_path.to_str().unwrap()]) 5131 .args(["--cmdline", 
DIRECT_KERNEL_BOOT_CMDLINE])
        .default_net()
        .default_disks()
        .capture_output()
        .spawn()
        .unwrap();

    guest.wait_vm_boot(None).unwrap();

    let r = std::panic::catch_unwind(|| {
        let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb);
        eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}");
        assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB);
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
#[cfg(target_arch = "x86_64")]
// This test runs a guest with Landlock enabled and hotplugs a new disk. As
// the path for the hotplug disk is not pre-added to the Landlock rules, the
// hotplug request is expected to fail.
fn test_landlock() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    #[cfg(target_arch = "x86_64")]
    let kernel_path = direct_kernel_boot_path();
    #[cfg(target_arch = "aarch64")]
    let kernel_path = edk2_path();

    let api_socket = temp_api_path(&guest.tmp_dir);

    let mut child = GuestCommand::new(&guest)
        .args(["--api-socket", &api_socket])
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=512M"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--landlock"])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check /dev/vdc is not there
        assert_eq!(
            guest
                .ssh_command("lsblk | grep -c vdc.*16M || true")
                .unwrap()
                .trim()
                .parse::<u32>()
                .unwrap_or(1),
            0
        );

        // Now let's add the extra disk.
        let mut blk_file_path = dirs::home_dir().unwrap();
        blk_file_path.push("workloads");
        blk_file_path.push("blk.img");
        // As the path to the hotplug disk is not pre-added, this remote
        // command will fail.
5201 assert!(!remote_command( 5202 &api_socket, 5203 "add-disk", 5204 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5205 )); 5206 }); 5207 5208 let _ = child.kill(); 5209 let output = child.wait_with_output().unwrap(); 5210 5211 handle_child_output(r, &output); 5212 } 5213 5214 fn _test_disk_hotplug(landlock_enabled: bool) { 5215 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5216 let guest = Guest::new(Box::new(focal)); 5217 5218 #[cfg(target_arch = "x86_64")] 5219 let kernel_path = direct_kernel_boot_path(); 5220 #[cfg(target_arch = "aarch64")] 5221 let kernel_path = edk2_path(); 5222 5223 let api_socket = temp_api_path(&guest.tmp_dir); 5224 5225 let mut blk_file_path = dirs::home_dir().unwrap(); 5226 blk_file_path.push("workloads"); 5227 blk_file_path.push("blk.img"); 5228 5229 let mut cmd = GuestCommand::new(&guest); 5230 if landlock_enabled { 5231 cmd.args(["--landlock"]).args([ 5232 "--landlock-rules", 5233 format!("path={:?},access=rw", blk_file_path).as_str(), 5234 ]); 5235 } 5236 5237 cmd.args(["--api-socket", &api_socket]) 5238 .args(["--cpus", "boot=1"]) 5239 .args(["--memory", "size=512M"]) 5240 .args(["--kernel", kernel_path.to_str().unwrap()]) 5241 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5242 .default_disks() 5243 .default_net() 5244 .capture_output(); 5245 5246 let mut child = cmd.spawn().unwrap(); 5247 5248 let r = std::panic::catch_unwind(|| { 5249 guest.wait_vm_boot(None).unwrap(); 5250 5251 // Check /dev/vdc is not there 5252 assert_eq!( 5253 guest 5254 .ssh_command("lsblk | grep -c vdc.*16M || true") 5255 .unwrap() 5256 .trim() 5257 .parse::<u32>() 5258 .unwrap_or(1), 5259 0 5260 ); 5261 5262 // Now let's add the extra disk. 5263 let (cmd_success, cmd_output) = remote_command_w_output( 5264 &api_socket, 5265 "add-disk", 5266 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5267 ); 5268 assert!(cmd_success); 5269 assert!(String::from_utf8_lossy(&cmd_output) 5270 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5271 5272 thread::sleep(std::time::Duration::new(10, 0)); 5273 5274 // Check that /dev/vdc exists and the block size is 16M. 5275 assert_eq!( 5276 guest 5277 .ssh_command("lsblk | grep vdc | grep -c 16M") 5278 .unwrap() 5279 .trim() 5280 .parse::<u32>() 5281 .unwrap_or_default(), 5282 1 5283 ); 5284 // And check the block device can be read. 5285 guest 5286 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5287 .unwrap(); 5288 5289 // Let's remove it the extra disk. 5290 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5291 thread::sleep(std::time::Duration::new(5, 0)); 5292 // And check /dev/vdc is not there 5293 assert_eq!( 5294 guest 5295 .ssh_command("lsblk | grep -c vdc.*16M || true") 5296 .unwrap() 5297 .trim() 5298 .parse::<u32>() 5299 .unwrap_or(1), 5300 0 5301 ); 5302 5303 // And add it back to validate unplug did work correctly. 5304 let (cmd_success, cmd_output) = remote_command_w_output( 5305 &api_socket, 5306 "add-disk", 5307 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()), 5308 ); 5309 assert!(cmd_success); 5310 assert!(String::from_utf8_lossy(&cmd_output) 5311 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5312 5313 thread::sleep(std::time::Duration::new(10, 0)); 5314 5315 // Check that /dev/vdc exists and the block size is 16M. 
5316 assert_eq!( 5317 guest 5318 .ssh_command("lsblk | grep vdc | grep -c 16M") 5319 .unwrap() 5320 .trim() 5321 .parse::<u32>() 5322 .unwrap_or_default(), 5323 1 5324 ); 5325 // And check the block device can be read. 5326 guest 5327 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16") 5328 .unwrap(); 5329 5330 // Reboot the VM. 5331 guest.reboot_linux(0, None); 5332 5333 // Check still there after reboot 5334 assert_eq!( 5335 guest 5336 .ssh_command("lsblk | grep vdc | grep -c 16M") 5337 .unwrap() 5338 .trim() 5339 .parse::<u32>() 5340 .unwrap_or_default(), 5341 1 5342 ); 5343 5344 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5345 5346 thread::sleep(std::time::Duration::new(20, 0)); 5347 5348 // Check device has gone away 5349 assert_eq!( 5350 guest 5351 .ssh_command("lsblk | grep -c vdc.*16M || true") 5352 .unwrap() 5353 .trim() 5354 .parse::<u32>() 5355 .unwrap_or(1), 5356 0 5357 ); 5358 5359 guest.reboot_linux(1, None); 5360 5361 // Check device still absent 5362 assert_eq!( 5363 guest 5364 .ssh_command("lsblk | grep -c vdc.*16M || true") 5365 .unwrap() 5366 .trim() 5367 .parse::<u32>() 5368 .unwrap_or(1), 5369 0 5370 ); 5371 }); 5372 5373 kill_child(&mut child); 5374 let output = child.wait_with_output().unwrap(); 5375 5376 handle_child_output(r, &output); 5377 } 5378 5379 #[test] 5380 fn test_disk_hotplug() { 5381 _test_disk_hotplug(false) 5382 } 5383 5384 #[test] 5385 #[cfg(target_arch = "x86_64")] 5386 fn test_disk_hotplug_with_landlock() { 5387 _test_disk_hotplug(true) 5388 } 5389 5390 fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String { 5391 const LOOP_CONFIGURE: u64 = 0x4c0a; 5392 const LOOP_CTL_GET_FREE: u64 = 0x4c82; 5393 const LOOP_CTL_PATH: &str = "/dev/loop-control"; 5394 const LOOP_DEVICE_PREFIX: &str = "/dev/loop"; 5395 5396 #[repr(C)] 5397 struct LoopInfo64 { 5398 lo_device: u64, 5399 lo_inode: u64, 5400 lo_rdevice: u64, 5401 lo_offset: u64, 5402 lo_sizelimit: u64, 5403 lo_number: u32, 5404 lo_encrypt_type: u32, 5405 lo_encrypt_key_size: u32, 5406 lo_flags: u32, 5407 lo_file_name: [u8; 64], 5408 lo_crypt_name: [u8; 64], 5409 lo_encrypt_key: [u8; 32], 5410 lo_init: [u64; 2], 5411 } 5412 5413 impl Default for LoopInfo64 { 5414 fn default() -> Self { 5415 LoopInfo64 { 5416 lo_device: 0, 5417 lo_inode: 0, 5418 lo_rdevice: 0, 5419 lo_offset: 0, 5420 lo_sizelimit: 0, 5421 lo_number: 0, 5422 lo_encrypt_type: 0, 5423 lo_encrypt_key_size: 0, 5424 lo_flags: 0, 5425 lo_file_name: [0; 64], 5426 lo_crypt_name: [0; 64], 5427 lo_encrypt_key: [0; 32], 5428 lo_init: [0; 2], 5429 } 5430 } 5431 } 5432 5433 #[derive(Default)] 5434 #[repr(C)] 5435 struct LoopConfig { 5436 fd: u32, 5437 block_size: u32, 5438 info: LoopInfo64, 5439 _reserved: [u64; 8], 5440 } 5441 5442 // Open loop-control device 5443 let loop_ctl_file = OpenOptions::new() 5444 .read(true) 5445 .write(true) 5446 .open(LOOP_CTL_PATH) 5447 .unwrap(); 5448 5449 // Request a free loop device 5450 let loop_device_number = 5451 unsafe { libc::ioctl(loop_ctl_file.as_raw_fd(), LOOP_CTL_GET_FREE as _) }; 5452 5453 if loop_device_number < 0 { 5454 panic!("Couldn't find a free loop device"); 5455 } 5456 5457 // Create loop device path 5458 let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}"); 5459 5460 // Open loop device 5461 let loop_device_file = OpenOptions::new() 5462 .read(true) 5463 .write(true) 5464 .open(&loop_device_path) 5465 .unwrap(); 5466 5467 // Open backing file 5468 let backing_file = OpenOptions::new() 5469 
.read(true) 5470 .write(true) 5471 .open(backing_file_path) 5472 .unwrap(); 5473 5474 let loop_config = LoopConfig { 5475 fd: backing_file.as_raw_fd() as u32, 5476 block_size, 5477 ..Default::default() 5478 }; 5479 5480 for i in 0..num_retries { 5481 let ret = unsafe { 5482 libc::ioctl( 5483 loop_device_file.as_raw_fd(), 5484 LOOP_CONFIGURE as _, 5485 &loop_config, 5486 ) 5487 }; 5488 if ret != 0 { 5489 if i < num_retries - 1 { 5490 println!( 5491 "Iteration {}: Failed to configure the loop device {}: {}", 5492 i, 5493 loop_device_path, 5494 std::io::Error::last_os_error() 5495 ); 5496 } else { 5497 panic!( 5498 "Failed {} times trying to configure the loop device {}: {}", 5499 num_retries, 5500 loop_device_path, 5501 std::io::Error::last_os_error() 5502 ); 5503 } 5504 } else { 5505 break; 5506 } 5507 5508 // Wait for a bit before retrying 5509 thread::sleep(std::time::Duration::new(5, 0)); 5510 } 5511 5512 loop_device_path 5513 } 5514 5515 #[test] 5516 fn test_virtio_block_topology() { 5517 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5518 let guest = Guest::new(Box::new(focal)); 5519 5520 let kernel_path = direct_kernel_boot_path(); 5521 let test_disk_path = guest.tmp_dir.as_path().join("test.img"); 5522 5523 let output = exec_host_command_output( 5524 format!( 5525 "qemu-img create -f raw {} 16M", 5526 test_disk_path.to_str().unwrap() 5527 ) 5528 .as_str(), 5529 ); 5530 if !output.status.success() { 5531 let stdout = String::from_utf8_lossy(&output.stdout); 5532 let stderr = String::from_utf8_lossy(&output.stderr); 5533 panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}"); 5534 } 5535 5536 let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5); 5537 5538 let mut child = GuestCommand::new(&guest) 5539 .args(["--cpus", "boot=1"]) 5540 .args(["--memory", "size=512M"]) 5541 .args(["--kernel", kernel_path.to_str().unwrap()]) 5542 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5543 .args([ 5544 "--disk", 5545 format!( 5546 "path={}", 5547 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 5548 ) 5549 .as_str(), 5550 format!( 5551 "path={}", 5552 guest.disk_config.disk(DiskType::CloudInit).unwrap() 5553 ) 5554 .as_str(), 5555 format!("path={}", &loop_dev).as_str(), 5556 ]) 5557 .default_net() 5558 .capture_output() 5559 .spawn() 5560 .unwrap(); 5561 5562 let r = std::panic::catch_unwind(|| { 5563 guest.wait_vm_boot(None).unwrap(); 5564 5565 // MIN-IO column 5566 assert_eq!( 5567 guest 5568 .ssh_command("lsblk -t| grep vdc | awk '{print $3}'") 5569 .unwrap() 5570 .trim() 5571 .parse::<u32>() 5572 .unwrap_or_default(), 5573 4096 5574 ); 5575 // PHY-SEC column 5576 assert_eq!( 5577 guest 5578 .ssh_command("lsblk -t| grep vdc | awk '{print $5}'") 5579 .unwrap() 5580 .trim() 5581 .parse::<u32>() 5582 .unwrap_or_default(), 5583 4096 5584 ); 5585 // LOG-SEC column 5586 assert_eq!( 5587 guest 5588 .ssh_command("lsblk -t| grep vdc | awk '{print $6}'") 5589 .unwrap() 5590 .trim() 5591 .parse::<u32>() 5592 .unwrap_or_default(), 5593 4096 5594 ); 5595 }); 5596 5597 kill_child(&mut child); 5598 let output = child.wait_with_output().unwrap(); 5599 5600 handle_child_output(r, &output); 5601 5602 Command::new("losetup") 5603 .args(["-d", &loop_dev]) 5604 .output() 5605 .expect("loop device not found"); 5606 } 5607 5608 #[test] 5609 fn test_virtio_balloon_deflate_on_oom() { 5610 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5611 let guest = Guest::new(Box::new(focal)); 5612 5613 let kernel_path = direct_kernel_boot_path(); 
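    // deflate_on_oom lets the guest's virtio-balloon driver hand ballooned
    // pages back to the guest when it hits an out-of-memory condition, so the
    // balloon size reported by the VMM is expected to shrink after the forced
    // OOM (sysrq 'f') triggered further below.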

    let api_socket = temp_api_path(&guest.tmp_dir);

    // Let's start a 4G guest with the balloon occupying 2G of its memory
    let mut child = GuestCommand::new(&guest)
        .args(["--api-socket", &api_socket])
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=4G"])
        .args(["--kernel", kernel_path.to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--balloon", "size=2G,deflate_on_oom=on"])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Wait for balloon memory's initialization and check its size.
        // The virtio-balloon driver might take a few seconds to report the
        // balloon effective size back to the VMM.
        thread::sleep(std::time::Duration::new(20, 0));

        let orig_balloon = balloon_size(&api_socket);
        println!("The original balloon memory size is {orig_balloon} bytes");
        assert!(orig_balloon == 2147483648);

        // Two steps to verify if the 'deflate_on_oom' parameter works.
        // 1st: run a command to trigger an OOM in the guest.
        guest
            .ssh_command("echo f | sudo tee /proc/sysrq-trigger")
            .unwrap();

        // Give some time for the OOM to happen in the guest and be reported
        // back to the host.
        thread::sleep(std::time::Duration::new(20, 0));

        // 2nd: check the balloon size to verify it has been automatically deflated.
        let deflated_balloon = balloon_size(&api_socket);
        println!("After deflating, balloon memory size is {deflated_balloon} bytes");
        // Verify the balloon size deflated
        assert!(deflated_balloon < 2147483648);
    });

    kill_child(&mut child);
    let output = child.wait_with_output().unwrap();

    handle_child_output(r, &output);
}

#[test]
#[cfg(not(feature = "mshv"))]
fn test_virtio_balloon_free_page_reporting() {
    let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
    let guest = Guest::new(Box::new(focal));

    // Let's start a 4G guest with a balloon device configured for free page
    // reporting (size=0,free_page_reporting=on)
    let mut child = GuestCommand::new(&guest)
        .args(["--cpus", "boot=1"])
        .args(["--memory", "size=4G"])
        .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
        .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
        .args(["--balloon", "size=0,free_page_reporting=on"])
        .default_disks()
        .default_net()
        .capture_output()
        .spawn()
        .unwrap();

    let pid = child.id();
    let r = std::panic::catch_unwind(|| {
        guest.wait_vm_boot(None).unwrap();

        // Check the initial RSS is less than 1GiB
        let rss = process_rss_kib(pid);
        println!("RSS {rss} < 1048576");
        assert!(rss < 1048576);

        // Spawn a command inside the guest to consume 2GiB of RAM for 60
        // seconds
        let guest_ip = guest.network.guest_ip.clone();
        thread::spawn(move || {
            ssh_command_ip(
                "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60",
                &guest_ip,
                DEFAULT_SSH_RETRIES,
                DEFAULT_SSH_TIMEOUT,
            )
            .unwrap();
        });

        // Wait for 50 seconds to make sure the stress command is consuming
        // the expected amount of memory.
5709 thread::sleep(std::time::Duration::new(50, 0)); 5710 let rss = process_rss_kib(pid); 5711 println!("RSS {rss} >= 2097152"); 5712 assert!(rss >= 2097152); 5713 5714 // Wait for an extra minute to make sure the stress command has 5715 // completed and that the guest reported the free pages to the VMM 5716 // through the virtio-balloon device. We expect the RSS to be under 5717 // 2GiB. 5718 thread::sleep(std::time::Duration::new(60, 0)); 5719 let rss = process_rss_kib(pid); 5720 println!("RSS {rss} < 2097152"); 5721 assert!(rss < 2097152); 5722 }); 5723 5724 kill_child(&mut child); 5725 let output = child.wait_with_output().unwrap(); 5726 5727 handle_child_output(r, &output); 5728 } 5729 5730 #[test] 5731 fn test_pmem_hotplug() { 5732 _test_pmem_hotplug(None) 5733 } 5734 5735 #[test] 5736 fn test_pmem_multi_segment_hotplug() { 5737 _test_pmem_hotplug(Some(15)) 5738 } 5739 5740 fn _test_pmem_hotplug(pci_segment: Option<u16>) { 5741 #[cfg(target_arch = "aarch64")] 5742 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string(); 5743 #[cfg(target_arch = "x86_64")] 5744 let focal_image = FOCAL_IMAGE_NAME.to_string(); 5745 let focal = UbuntuDiskConfig::new(focal_image); 5746 let guest = Guest::new(Box::new(focal)); 5747 5748 #[cfg(target_arch = "x86_64")] 5749 let kernel_path = direct_kernel_boot_path(); 5750 #[cfg(target_arch = "aarch64")] 5751 let kernel_path = edk2_path(); 5752 5753 let api_socket = temp_api_path(&guest.tmp_dir); 5754 5755 let mut cmd = GuestCommand::new(&guest); 5756 5757 cmd.args(["--api-socket", &api_socket]) 5758 .args(["--cpus", "boot=1"]) 5759 .args(["--memory", "size=512M"]) 5760 .args(["--kernel", kernel_path.to_str().unwrap()]) 5761 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5762 .default_disks() 5763 .default_net() 5764 .capture_output(); 5765 5766 if pci_segment.is_some() { 5767 cmd.args([ 5768 "--platform", 5769 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5770 ]); 5771 } 5772 5773 let mut child = cmd.spawn().unwrap(); 5774 5775 let r = std::panic::catch_unwind(|| { 5776 guest.wait_vm_boot(None).unwrap(); 5777 5778 // Check /dev/pmem0 is not there 5779 assert_eq!( 5780 guest 5781 .ssh_command("lsblk | grep -c pmem0 || true") 5782 .unwrap() 5783 .trim() 5784 .parse::<u32>() 5785 .unwrap_or(1), 5786 0 5787 ); 5788 5789 let pmem_temp_file = TempFile::new().unwrap(); 5790 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 5791 let (cmd_success, cmd_output) = remote_command_w_output( 5792 &api_socket, 5793 "add-pmem", 5794 Some(&format!( 5795 "file={},id=test0{}", 5796 pmem_temp_file.as_path().to_str().unwrap(), 5797 if let Some(pci_segment) = pci_segment { 5798 format!(",pci_segment={pci_segment}") 5799 } else { 5800 "".to_owned() 5801 } 5802 )), 5803 ); 5804 assert!(cmd_success); 5805 if let Some(pci_segment) = pci_segment { 5806 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5807 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5808 ))); 5809 } else { 5810 assert!(String::from_utf8_lossy(&cmd_output) 5811 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}")); 5812 } 5813 5814 // Check that /dev/pmem0 exists and the block size is 128M 5815 assert_eq!( 5816 guest 5817 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5818 .unwrap() 5819 .trim() 5820 .parse::<u32>() 5821 .unwrap_or_default(), 5822 1 5823 ); 5824 5825 guest.reboot_linux(0, None); 5826 5827 // Check still there after reboot 5828 assert_eq!( 5829 guest 5830 .ssh_command("lsblk | grep pmem0 | grep -c 128M") 5831 .unwrap() 5832 .trim() 5833 
.parse::<u32>() 5834 .unwrap_or_default(), 5835 1 5836 ); 5837 5838 assert!(remote_command(&api_socket, "remove-device", Some("test0"))); 5839 5840 thread::sleep(std::time::Duration::new(20, 0)); 5841 5842 // Check device has gone away 5843 assert_eq!( 5844 guest 5845 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5846 .unwrap() 5847 .trim() 5848 .parse::<u32>() 5849 .unwrap_or(1), 5850 0 5851 ); 5852 5853 guest.reboot_linux(1, None); 5854 5855 // Check still absent after reboot 5856 assert_eq!( 5857 guest 5858 .ssh_command("lsblk | grep -c pmem0.*128M || true") 5859 .unwrap() 5860 .trim() 5861 .parse::<u32>() 5862 .unwrap_or(1), 5863 0 5864 ); 5865 }); 5866 5867 kill_child(&mut child); 5868 let output = child.wait_with_output().unwrap(); 5869 5870 handle_child_output(r, &output); 5871 } 5872 5873 #[test] 5874 fn test_net_hotplug() { 5875 _test_net_hotplug(None) 5876 } 5877 5878 #[test] 5879 fn test_net_multi_segment_hotplug() { 5880 _test_net_hotplug(Some(15)) 5881 } 5882 5883 fn _test_net_hotplug(pci_segment: Option<u16>) { 5884 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 5885 let guest = Guest::new(Box::new(focal)); 5886 5887 #[cfg(target_arch = "x86_64")] 5888 let kernel_path = direct_kernel_boot_path(); 5889 #[cfg(target_arch = "aarch64")] 5890 let kernel_path = edk2_path(); 5891 5892 let api_socket = temp_api_path(&guest.tmp_dir); 5893 5894 // Boot without network 5895 let mut cmd = GuestCommand::new(&guest); 5896 5897 cmd.args(["--api-socket", &api_socket]) 5898 .args(["--cpus", "boot=1"]) 5899 .args(["--memory", "size=512M"]) 5900 .args(["--kernel", kernel_path.to_str().unwrap()]) 5901 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 5902 .default_disks() 5903 .capture_output(); 5904 5905 if pci_segment.is_some() { 5906 cmd.args([ 5907 "--platform", 5908 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"), 5909 ]); 5910 } 5911 5912 let mut child = cmd.spawn().unwrap(); 5913 5914 thread::sleep(std::time::Duration::new(20, 0)); 5915 5916 let r = std::panic::catch_unwind(|| { 5917 // Add network 5918 let (cmd_success, cmd_output) = remote_command_w_output( 5919 &api_socket, 5920 "add-net", 5921 Some( 5922 format!( 5923 "{}{},id=test0", 5924 guest.default_net_string(), 5925 if let Some(pci_segment) = pci_segment { 5926 format!(",pci_segment={pci_segment}") 5927 } else { 5928 "".to_owned() 5929 } 5930 ) 5931 .as_str(), 5932 ), 5933 ); 5934 assert!(cmd_success); 5935 5936 if let Some(pci_segment) = pci_segment { 5937 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5938 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5939 ))); 5940 } else { 5941 assert!(String::from_utf8_lossy(&cmd_output) 5942 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}")); 5943 } 5944 5945 thread::sleep(std::time::Duration::new(5, 0)); 5946 5947 // 1 network interfaces + default localhost ==> 2 interfaces 5948 assert_eq!( 5949 guest 5950 .ssh_command("ip -o link | wc -l") 5951 .unwrap() 5952 .trim() 5953 .parse::<u32>() 5954 .unwrap_or_default(), 5955 2 5956 ); 5957 5958 // Remove network 5959 assert!(remote_command(&api_socket, "remove-device", Some("test0"),)); 5960 thread::sleep(std::time::Duration::new(5, 0)); 5961 5962 let (cmd_success, cmd_output) = remote_command_w_output( 5963 &api_socket, 5964 "add-net", 5965 Some( 5966 format!( 5967 "{}{},id=test1", 5968 guest.default_net_string(), 5969 if let Some(pci_segment) = pci_segment { 5970 format!(",pci_segment={pci_segment}") 5971 } else { 5972 "".to_owned() 5973 } 5974 ) 5975 .as_str(), 5976 ), 5977 
); 5978 assert!(cmd_success); 5979 5980 if let Some(pci_segment) = pci_segment { 5981 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!( 5982 "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}" 5983 ))); 5984 } else { 5985 assert!(String::from_utf8_lossy(&cmd_output) 5986 .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}")); 5987 } 5988 5989 thread::sleep(std::time::Duration::new(5, 0)); 5990 5991 // 1 network interfaces + default localhost ==> 2 interfaces 5992 assert_eq!( 5993 guest 5994 .ssh_command("ip -o link | wc -l") 5995 .unwrap() 5996 .trim() 5997 .parse::<u32>() 5998 .unwrap_or_default(), 5999 2 6000 ); 6001 6002 guest.reboot_linux(0, None); 6003 6004 // Check still there after reboot 6005 // 1 network interfaces + default localhost ==> 2 interfaces 6006 assert_eq!( 6007 guest 6008 .ssh_command("ip -o link | wc -l") 6009 .unwrap() 6010 .trim() 6011 .parse::<u32>() 6012 .unwrap_or_default(), 6013 2 6014 ); 6015 }); 6016 6017 kill_child(&mut child); 6018 let output = child.wait_with_output().unwrap(); 6019 6020 handle_child_output(r, &output); 6021 } 6022 6023 #[test] 6024 fn test_initramfs() { 6025 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6026 let guest = Guest::new(Box::new(focal)); 6027 let mut workload_path = dirs::home_dir().unwrap(); 6028 workload_path.push("workloads"); 6029 6030 #[cfg(target_arch = "x86_64")] 6031 let mut kernels = vec![direct_kernel_boot_path()]; 6032 #[cfg(target_arch = "aarch64")] 6033 let kernels = [direct_kernel_boot_path()]; 6034 6035 #[cfg(target_arch = "x86_64")] 6036 { 6037 let mut pvh_kernel_path = workload_path.clone(); 6038 pvh_kernel_path.push("vmlinux"); 6039 kernels.push(pvh_kernel_path); 6040 } 6041 6042 let mut initramfs_path = workload_path; 6043 initramfs_path.push("alpine_initramfs.img"); 6044 6045 let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg"); 6046 let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}"); 6047 6048 kernels.iter().for_each(|k_path| { 6049 let mut child = GuestCommand::new(&guest) 6050 .args(["--kernel", k_path.to_str().unwrap()]) 6051 .args(["--initramfs", initramfs_path.to_str().unwrap()]) 6052 .args(["--cmdline", &cmdline]) 6053 .capture_output() 6054 .spawn() 6055 .unwrap(); 6056 6057 thread::sleep(std::time::Duration::new(20, 0)); 6058 6059 kill_child(&mut child); 6060 let output = child.wait_with_output().unwrap(); 6061 6062 let r = std::panic::catch_unwind(|| { 6063 let s = String::from_utf8_lossy(&output.stdout); 6064 6065 assert_ne!(s.lines().position(|line| line == test_string), None); 6066 }); 6067 6068 handle_child_output(r, &output); 6069 }); 6070 } 6071 6072 #[test] 6073 fn test_counters() { 6074 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6075 let guest = Guest::new(Box::new(focal)); 6076 let api_socket = temp_api_path(&guest.tmp_dir); 6077 6078 let mut cmd = GuestCommand::new(&guest); 6079 cmd.args(["--cpus", "boot=1"]) 6080 .args(["--memory", "size=512M"]) 6081 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 6082 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6083 .default_disks() 6084 .args(["--net", guest.default_net_string().as_str()]) 6085 .args(["--api-socket", &api_socket]) 6086 .capture_output(); 6087 6088 let mut child = cmd.spawn().unwrap(); 6089 6090 let r = std::panic::catch_unwind(|| { 6091 guest.wait_vm_boot(None).unwrap(); 6092 6093 let orig_counters = get_counters(&api_socket); 6094 guest 6095 .ssh_command("dd if=/dev/zero of=test count=8 bs=1M") 6096 .unwrap(); 
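// Note: the dd above writes 8 MiB (count=8, bs=1M) through the virtio-blk
// disk, and the SSH session itself generates virtio-net traffic, so both the
// block and net counters exposed by the API are expected to grow.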
6097
6098 let new_counters = get_counters(&api_socket);
6099
6100 // Check that all the counters have increased
6101 assert!(new_counters > orig_counters);
6102 });
6103
6104 kill_child(&mut child);
6105 let output = child.wait_with_output().unwrap();
6106
6107 handle_child_output(r, &output);
6108 }
6109
6110 #[test]
6111 #[cfg(feature = "guest_debug")]
6112 fn test_coredump() {
6113 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6114 let guest = Guest::new(Box::new(focal));
6115 let api_socket = temp_api_path(&guest.tmp_dir);
6116
6117 let mut cmd = GuestCommand::new(&guest);
6118 cmd.args(["--cpus", "boot=4"])
6119 .args(["--memory", "size=4G"])
6120 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
6121 .default_disks()
6122 .args(["--net", guest.default_net_string().as_str()])
6123 .args(["--api-socket", &api_socket])
6124 .capture_output();
6125
6126 let mut child = cmd.spawn().unwrap();
6127 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir);
6128
6129 let r = std::panic::catch_unwind(|| {
6130 guest.wait_vm_boot(None).unwrap();
6131
6132 assert!(remote_command(&api_socket, "pause", None));
6133
6134 assert!(remote_command(
6135 &api_socket,
6136 "coredump",
6137 Some(format!("file://{vmcore_file}").as_str()),
6138 ));
6139
6140 // The number of CORE notes should equal the number of vCPUs
6141 let readelf_core_num_cmd =
6142 format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l");
6143 let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd);
6144 assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4");
6145
6146 // The number of QEMU notes should equal the number of vCPUs
6147 let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l");
6148 let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd);
6149 assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4");
6150 });
6151
6152 kill_child(&mut child);
6153 let output = child.wait_with_output().unwrap();
6154
6155 handle_child_output(r, &output);
6156 }
6157
6158 #[test]
6159 #[cfg(feature = "guest_debug")]
6160 fn test_coredump_no_pause() {
6161 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6162 let guest = Guest::new(Box::new(focal));
6163 let api_socket = temp_api_path(&guest.tmp_dir);
6164
6165 let mut cmd = GuestCommand::new(&guest);
6166 cmd.args(["--cpus", "boot=4"])
6167 .args(["--memory", "size=4G"])
6168 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
6169 .default_disks()
6170 .args(["--net", guest.default_net_string().as_str()])
6171 .args(["--api-socket", &api_socket])
6172 .capture_output();
6173
6174 let mut child = cmd.spawn().unwrap();
6175 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir);
6176
6177 let r = std::panic::catch_unwind(|| {
6178 guest.wait_vm_boot(None).unwrap();
6179
6180 assert!(remote_command(
6181 &api_socket,
6182 "coredump",
6183 Some(format!("file://{vmcore_file}").as_str()),
6184 ));
6185
6186 assert_eq!(vm_state(&api_socket), "Running");
6187 });
6188
6189 kill_child(&mut child);
6190 let output = child.wait_with_output().unwrap();
6191
6192 handle_child_output(r, &output);
6193 }
6194
6195 #[test]
6196 fn test_watchdog() {
6197 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6198 let guest = Guest::new(Box::new(focal));
6199 let api_socket = temp_api_path(&guest.tmp_dir);
6200
6201 let kernel_path = direct_kernel_boot_path();
6202 let event_path = temp_event_monitor_path(&guest.tmp_dir);
6203
6204 let mut cmd =
GuestCommand::new(&guest); 6205 cmd.args(["--cpus", "boot=1"]) 6206 .args(["--memory", "size=512M"]) 6207 .args(["--kernel", kernel_path.to_str().unwrap()]) 6208 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6209 .default_disks() 6210 .args(["--net", guest.default_net_string().as_str()]) 6211 .args(["--watchdog"]) 6212 .args(["--api-socket", &api_socket]) 6213 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6214 .capture_output(); 6215 6216 let mut child = cmd.spawn().unwrap(); 6217 6218 let r = std::panic::catch_unwind(|| { 6219 guest.wait_vm_boot(None).unwrap(); 6220 6221 let mut expected_reboot_count = 1; 6222 6223 // Enable the watchdog with a 15s timeout 6224 enable_guest_watchdog(&guest, 15); 6225 6226 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6227 assert_eq!( 6228 guest 6229 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"") 6230 .unwrap() 6231 .trim() 6232 .parse::<u32>() 6233 .unwrap_or_default(), 6234 1 6235 ); 6236 6237 // Allow some normal time to elapse to check we don't get spurious reboots 6238 thread::sleep(std::time::Duration::new(40, 0)); 6239 // Check no reboot 6240 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6241 6242 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns. 6243 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 6244 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 6245 guest.wait_vm_boot(Some(50)).unwrap(); 6246 // Check a reboot is triggered by the watchdog 6247 expected_reboot_count += 1; 6248 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6249 6250 #[cfg(target_arch = "x86_64")] 6251 { 6252 // Now pause the VM and remain offline for 30s 6253 assert!(remote_command(&api_socket, "pause", None)); 6254 let latest_events = [ 6255 &MetaEvent { 6256 event: "pausing".to_string(), 6257 device_id: None, 6258 }, 6259 &MetaEvent { 6260 event: "paused".to_string(), 6261 device_id: None, 6262 }, 6263 ]; 6264 assert!(check_latest_events_exact(&latest_events, &event_path)); 6265 assert!(remote_command(&api_socket, "resume", None)); 6266 6267 // Check no reboot 6268 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 6269 } 6270 }); 6271 6272 kill_child(&mut child); 6273 let output = child.wait_with_output().unwrap(); 6274 6275 handle_child_output(r, &output); 6276 } 6277 6278 #[test] 6279 fn test_pvpanic() { 6280 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 6281 let guest = Guest::new(Box::new(jammy)); 6282 let api_socket = temp_api_path(&guest.tmp_dir); 6283 let event_path = temp_event_monitor_path(&guest.tmp_dir); 6284 6285 let kernel_path = direct_kernel_boot_path(); 6286 6287 let mut cmd = GuestCommand::new(&guest); 6288 cmd.args(["--cpus", "boot=1"]) 6289 .args(["--memory", "size=512M"]) 6290 .args(["--kernel", kernel_path.to_str().unwrap()]) 6291 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6292 .default_disks() 6293 .args(["--net", guest.default_net_string().as_str()]) 6294 .args(["--pvpanic"]) 6295 .args(["--api-socket", &api_socket]) 6296 .args(["--event-monitor", format!("path={event_path}").as_str()]) 6297 .capture_output(); 6298 6299 let mut child = cmd.spawn().unwrap(); 6300 6301 let r = std::panic::catch_unwind(|| { 6302 guest.wait_vm_boot(None).unwrap(); 6303 6304 // Trigger guest a panic 6305 make_guest_panic(&guest); 6306 6307 // Wait a while for guest 6308 
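// Note: the pvpanic device forwards the guest panic to the VMM, which then
// records a "panic" event through the event monitor; the sleep below leaves
// time for that event to be written out asynchronously.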
thread::sleep(std::time::Duration::new(10, 0)); 6309 6310 let expected_sequential_events = [&MetaEvent { 6311 event: "panic".to_string(), 6312 device_id: None, 6313 }]; 6314 assert!(check_latest_events_exact( 6315 &expected_sequential_events, 6316 &event_path 6317 )); 6318 }); 6319 6320 kill_child(&mut child); 6321 let output = child.wait_with_output().unwrap(); 6322 6323 handle_child_output(r, &output); 6324 } 6325 6326 #[test] 6327 fn test_tap_from_fd() { 6328 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6329 let guest = Guest::new(Box::new(focal)); 6330 let kernel_path = direct_kernel_boot_path(); 6331 6332 // Create a TAP interface with multi-queue enabled 6333 let num_queue_pairs: usize = 2; 6334 6335 use std::str::FromStr; 6336 let taps = net_util::open_tap( 6337 Some("chtap0"), 6338 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 6339 None, 6340 &mut None, 6341 None, 6342 num_queue_pairs, 6343 Some(libc::O_RDWR | libc::O_NONBLOCK), 6344 ) 6345 .unwrap(); 6346 6347 let mut child = GuestCommand::new(&guest) 6348 .args(["--cpus", &format!("boot={num_queue_pairs}")]) 6349 .args(["--memory", "size=512M"]) 6350 .args(["--kernel", kernel_path.to_str().unwrap()]) 6351 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6352 .default_disks() 6353 .args([ 6354 "--net", 6355 &format!( 6356 "fd=[{},{}],mac={},num_queues={}", 6357 taps[0].as_raw_fd(), 6358 taps[1].as_raw_fd(), 6359 guest.network.guest_mac, 6360 num_queue_pairs * 2 6361 ), 6362 ]) 6363 .capture_output() 6364 .spawn() 6365 .unwrap(); 6366 6367 let r = std::panic::catch_unwind(|| { 6368 guest.wait_vm_boot(None).unwrap(); 6369 6370 assert_eq!( 6371 guest 6372 .ssh_command("ip -o link | wc -l") 6373 .unwrap() 6374 .trim() 6375 .parse::<u32>() 6376 .unwrap_or_default(), 6377 2 6378 ); 6379 6380 guest.reboot_linux(0, None); 6381 6382 assert_eq!( 6383 guest 6384 .ssh_command("ip -o link | wc -l") 6385 .unwrap() 6386 .trim() 6387 .parse::<u32>() 6388 .unwrap_or_default(), 6389 2 6390 ); 6391 }); 6392 6393 kill_child(&mut child); 6394 let output = child.wait_with_output().unwrap(); 6395 6396 handle_child_output(r, &output); 6397 } 6398 6399 // By design, a guest VM won't be able to connect to the host 6400 // machine when using a macvtap network interface (while it can 6401 // communicate externally). As a workaround, this integration 6402 // test creates two macvtap interfaces in 'bridge' mode on the 6403 // same physical net interface, one for the guest and one for 6404 // the host. With additional setup on the IP address and the 6405 // routing table, it enables the communications between the 6406 // guest VM and the host machine. 
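// Roughly speaking (a sketch of the setup performed by the code below, with
// placeholder interface names), the host-side commands boil down to:
//   ip link add link eth0 name <guest_macvtap> type macvtap mode bridge
//   ip link add link eth0 name <host_macvtap> type macvtap mode bridge
//   ip address add <host_ip>/24 dev <host_macvtap>
// and the guest macvtap's /dev/tapN is then opened and handed to the VM
// through the 'fd=' net parameter.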
6407 // Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail 6408 fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) { 6409 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6410 let guest = Guest::new(Box::new(focal)); 6411 let api_socket = temp_api_path(&guest.tmp_dir); 6412 6413 #[cfg(target_arch = "x86_64")] 6414 let kernel_path = direct_kernel_boot_path(); 6415 #[cfg(target_arch = "aarch64")] 6416 let kernel_path = edk2_path(); 6417 6418 let phy_net = "eth0"; 6419 6420 // Create a macvtap interface for the guest VM to use 6421 assert!(exec_host_command_status(&format!( 6422 "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mod bridge" 6423 )) 6424 .success()); 6425 assert!(exec_host_command_status(&format!( 6426 "sudo ip link set {} address {} up", 6427 guest_macvtap_name, guest.network.guest_mac 6428 )) 6429 .success()); 6430 assert!( 6431 exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success() 6432 ); 6433 6434 let tap_index = 6435 fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap(); 6436 let tap_device = format!("/dev/tap{}", tap_index.trim()); 6437 6438 assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success()); 6439 6440 let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap(); 6441 let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6442 assert!(tap_fd1 > 0); 6443 let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) }; 6444 assert!(tap_fd2 > 0); 6445 6446 // Create a macvtap on the same physical net interface for 6447 // the host machine to use 6448 assert!(exec_host_command_status(&format!( 6449 "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mod bridge" 6450 )) 6451 .success()); 6452 // Use default mask "255.255.255.0" 6453 assert!(exec_host_command_status(&format!( 6454 "sudo ip address add {}/24 dev {}", 6455 guest.network.host_ip, host_macvtap_name 6456 )) 6457 .success()); 6458 assert!( 6459 exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up")) 6460 .success() 6461 ); 6462 6463 let mut guest_command = GuestCommand::new(&guest); 6464 guest_command 6465 .args(["--cpus", "boot=2"]) 6466 .args(["--memory", "size=512M"]) 6467 .args(["--kernel", kernel_path.to_str().unwrap()]) 6468 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6469 .default_disks() 6470 .args(["--api-socket", &api_socket]); 6471 6472 let net_params = format!( 6473 "fd=[{},{}],mac={},num_queues=4", 6474 tap_fd1, tap_fd2, guest.network.guest_mac 6475 ); 6476 6477 if !hotplug { 6478 guest_command.args(["--net", &net_params]); 6479 } 6480 6481 let mut child = guest_command.capture_output().spawn().unwrap(); 6482 6483 if hotplug { 6484 // Give some time to the VMM process to listen to the API 6485 // socket. This is the only requirement to avoid the following 6486 // call to ch-remote from failing. 
6487 thread::sleep(std::time::Duration::new(10, 0)); 6488 // Hotplug the virtio-net device 6489 let (cmd_success, cmd_output) = 6490 remote_command_w_output(&api_socket, "add-net", Some(&net_params)); 6491 assert!(cmd_success); 6492 #[cfg(target_arch = "x86_64")] 6493 assert!(String::from_utf8_lossy(&cmd_output) 6494 .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}")); 6495 #[cfg(target_arch = "aarch64")] 6496 assert!(String::from_utf8_lossy(&cmd_output) 6497 .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}")); 6498 } 6499 6500 // The functional connectivity provided by the virtio-net device 6501 // gets tested through wait_vm_boot() as it expects to receive a 6502 // HTTP request, and through the SSH command as well. 6503 let r = std::panic::catch_unwind(|| { 6504 guest.wait_vm_boot(None).unwrap(); 6505 6506 assert_eq!( 6507 guest 6508 .ssh_command("ip -o link | wc -l") 6509 .unwrap() 6510 .trim() 6511 .parse::<u32>() 6512 .unwrap_or_default(), 6513 2 6514 ); 6515 6516 guest.reboot_linux(0, None); 6517 6518 assert_eq!( 6519 guest 6520 .ssh_command("ip -o link | wc -l") 6521 .unwrap() 6522 .trim() 6523 .parse::<u32>() 6524 .unwrap_or_default(), 6525 2 6526 ); 6527 }); 6528 6529 kill_child(&mut child); 6530 6531 exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}")); 6532 exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}")); 6533 6534 let output = child.wait_with_output().unwrap(); 6535 6536 handle_child_output(r, &output); 6537 } 6538 6539 #[test] 6540 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6541 fn test_macvtap() { 6542 _test_macvtap(false, "guestmacvtap0", "hostmacvtap0") 6543 } 6544 6545 #[test] 6546 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")] 6547 fn test_macvtap_hotplug() { 6548 _test_macvtap(true, "guestmacvtap1", "hostmacvtap1") 6549 } 6550 6551 #[test] 6552 #[cfg(not(feature = "mshv"))] 6553 fn test_ovs_dpdk() { 6554 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6555 let guest1 = Guest::new(Box::new(focal1)); 6556 6557 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6558 let guest2 = Guest::new(Box::new(focal2)); 6559 let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir)); 6560 6561 let (mut child1, mut child2) = 6562 setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false); 6563 6564 // Create the snapshot directory 6565 let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir); 6566 6567 let r = std::panic::catch_unwind(|| { 6568 // Remove one of the two ports from the OVS bridge 6569 assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success()); 6570 6571 // Spawn a new netcat listener in the first VM 6572 let guest_ip = guest1.network.guest_ip.clone(); 6573 thread::spawn(move || { 6574 ssh_command_ip( 6575 "nc -l 12345", 6576 &guest_ip, 6577 DEFAULT_SSH_RETRIES, 6578 DEFAULT_SSH_TIMEOUT, 6579 ) 6580 .unwrap(); 6581 }); 6582 6583 // Wait for the server to be listening 6584 thread::sleep(std::time::Duration::new(5, 0)); 6585 6586 // Check the connection fails this time 6587 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap_err(); 6588 6589 // Add the OVS port back 6590 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success()); 6591 6592 // And finally check the connection is functional again 6593 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6594 6595 // Pause the VM 
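// Note: pausing stops the vCPUs so the snapshot below captures a consistent
// view of the guest; the VM is later restored from that snapshot and resumed.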
6596 assert!(remote_command(&api_socket_source, "pause", None)); 6597 6598 // Take a snapshot from the VM 6599 assert!(remote_command( 6600 &api_socket_source, 6601 "snapshot", 6602 Some(format!("file://{snapshot_dir}").as_str()), 6603 )); 6604 6605 // Wait to make sure the snapshot is completed 6606 thread::sleep(std::time::Duration::new(10, 0)); 6607 }); 6608 6609 // Shutdown the source VM 6610 kill_child(&mut child2); 6611 let output = child2.wait_with_output().unwrap(); 6612 handle_child_output(r, &output); 6613 6614 // Remove the vhost-user socket file. 6615 Command::new("rm") 6616 .arg("-f") 6617 .arg("/tmp/dpdkvhostclient2") 6618 .output() 6619 .unwrap(); 6620 6621 let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir)); 6622 // Restore the VM from the snapshot 6623 let mut child2 = GuestCommand::new(&guest2) 6624 .args(["--api-socket", &api_socket_restored]) 6625 .args([ 6626 "--restore", 6627 format!("source_url=file://{snapshot_dir}").as_str(), 6628 ]) 6629 .capture_output() 6630 .spawn() 6631 .unwrap(); 6632 6633 // Wait for the VM to be restored 6634 thread::sleep(std::time::Duration::new(10, 0)); 6635 6636 let r = std::panic::catch_unwind(|| { 6637 // Resume the VM 6638 assert!(remote_command(&api_socket_restored, "resume", None)); 6639 6640 // Spawn a new netcat listener in the first VM 6641 let guest_ip = guest1.network.guest_ip.clone(); 6642 thread::spawn(move || { 6643 ssh_command_ip( 6644 "nc -l 12345", 6645 &guest_ip, 6646 DEFAULT_SSH_RETRIES, 6647 DEFAULT_SSH_TIMEOUT, 6648 ) 6649 .unwrap(); 6650 }); 6651 6652 // Wait for the server to be listening 6653 thread::sleep(std::time::Duration::new(5, 0)); 6654 6655 // And check the connection is still functional after restore 6656 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap(); 6657 }); 6658 6659 kill_child(&mut child1); 6660 kill_child(&mut child2); 6661 6662 let output = child1.wait_with_output().unwrap(); 6663 child2.wait().unwrap(); 6664 6665 cleanup_ovs_dpdk(); 6666 6667 handle_child_output(r, &output); 6668 } 6669 6670 fn setup_spdk_nvme(nvme_dir: &std::path::Path) -> Child { 6671 cleanup_spdk_nvme(); 6672 6673 assert!(exec_host_command_status(&format!( 6674 "mkdir -p {}", 6675 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6676 )) 6677 .success()); 6678 assert!(exec_host_command_status(&format!( 6679 "truncate {} -s 128M", 6680 nvme_dir.join("test-disk.raw").to_str().unwrap() 6681 )) 6682 .success()); 6683 assert!(exec_host_command_status(&format!( 6684 "mkfs.ext4 {}", 6685 nvme_dir.join("test-disk.raw").to_str().unwrap() 6686 )) 6687 .success()); 6688 6689 // Start the SPDK nvmf_tgt daemon to present NVMe device as a VFIO user device 6690 let child = Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt") 6691 .args(["-i", "0", "-m", "0x1"]) 6692 .spawn() 6693 .unwrap(); 6694 thread::sleep(std::time::Duration::new(2, 0)); 6695 6696 assert!(exec_host_command_with_retries( 6697 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER", 6698 3, 6699 std::time::Duration::new(5, 0), 6700 )); 6701 assert!(exec_host_command_status(&format!( 6702 "/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512", 6703 nvme_dir.join("test-disk.raw").to_str().unwrap() 6704 )) 6705 .success()); 6706 assert!(exec_host_command_status( 6707 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test" 6708 ) 6709 .success()); 6710 assert!(exec_host_command_status( 6711 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode test" 6712 ) 6713 
.success()); 6714 assert!(exec_host_command_status(&format!( 6715 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0", 6716 nvme_dir.join("nvme-vfio-user").to_str().unwrap() 6717 )) 6718 .success()); 6719 6720 child 6721 } 6722 6723 fn cleanup_spdk_nvme() { 6724 exec_host_command_status("pkill -f nvmf_tgt"); 6725 } 6726 6727 #[test] 6728 fn test_vfio_user() { 6729 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 6730 let jammy = UbuntuDiskConfig::new(jammy_image); 6731 let guest = Guest::new(Box::new(jammy)); 6732 6733 let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user"); 6734 let mut spdk_child = setup_spdk_nvme(spdk_nvme_dir.as_path()); 6735 6736 let api_socket = temp_api_path(&guest.tmp_dir); 6737 let mut child = GuestCommand::new(&guest) 6738 .args(["--api-socket", &api_socket]) 6739 .args(["--cpus", "boot=1"]) 6740 .args(["--memory", "size=1G,shared=on,hugepages=on"]) 6741 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 6742 .args(["--serial", "tty", "--console", "off"]) 6743 .default_disks() 6744 .default_net() 6745 .capture_output() 6746 .spawn() 6747 .unwrap(); 6748 6749 let r = std::panic::catch_unwind(|| { 6750 guest.wait_vm_boot(None).unwrap(); 6751 6752 // Hotplug the SPDK-NVMe device to the VM 6753 let (cmd_success, cmd_output) = remote_command_w_output( 6754 &api_socket, 6755 "add-user-device", 6756 Some(&format!( 6757 "socket={},id=vfio_user0", 6758 spdk_nvme_dir 6759 .as_path() 6760 .join("nvme-vfio-user/cntrl") 6761 .to_str() 6762 .unwrap(), 6763 )), 6764 ); 6765 assert!(cmd_success); 6766 assert!(String::from_utf8_lossy(&cmd_output) 6767 .contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}")); 6768 6769 thread::sleep(std::time::Duration::new(10, 0)); 6770 6771 // Check both if /dev/nvme exists and if the block size is 128M. 6772 assert_eq!( 6773 guest 6774 .ssh_command("lsblk | grep nvme0n1 | grep -c 128M") 6775 .unwrap() 6776 .trim() 6777 .parse::<u32>() 6778 .unwrap_or_default(), 6779 1 6780 ); 6781 6782 // Check changes persist after reboot 6783 assert_eq!( 6784 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6785 "" 6786 ); 6787 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n"); 6788 guest 6789 .ssh_command("echo test123 | sudo tee /mnt/test") 6790 .unwrap(); 6791 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), ""); 6792 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), ""); 6793 6794 guest.reboot_linux(0, None); 6795 assert_eq!( 6796 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(), 6797 "" 6798 ); 6799 assert_eq!( 6800 guest.ssh_command("sudo cat /mnt/test").unwrap().trim(), 6801 "test123" 6802 ); 6803 }); 6804 6805 let _ = spdk_child.kill(); 6806 let _ = spdk_child.wait(); 6807 6808 kill_child(&mut child); 6809 let output = child.wait_with_output().unwrap(); 6810 6811 handle_child_output(r, &output); 6812 } 6813 6814 #[test] 6815 #[cfg(target_arch = "x86_64")] 6816 fn test_vdpa_block() { 6817 // Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded. 
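// (The module can typically be loaded with `modprobe vdpa_sim_blk`; it backs
// the /dev/vhost-vdpa-* devices used below with simulated 128M block devices.)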
6818 assert!(exec_host_command_status("lsmod | grep vdpa_sim_blk").success());
6819
6820 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6821 let guest = Guest::new(Box::new(focal));
6822 let api_socket = temp_api_path(&guest.tmp_dir);
6823
6824 let kernel_path = direct_kernel_boot_path();
6825
6826 let mut child = GuestCommand::new(&guest)
6827 .args(["--cpus", "boot=2"])
6828 .args(["--memory", "size=512M,hugepages=on"])
6829 .args(["--kernel", kernel_path.to_str().unwrap()])
6830 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
6831 .default_disks()
6832 .default_net()
6833 .args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"])
6834 .args(["--platform", "num_pci_segments=2,iommu_segments=1"])
6835 .args(["--api-socket", &api_socket])
6836 .capture_output()
6837 .spawn()
6838 .unwrap();
6839
6840 let r = std::panic::catch_unwind(|| {
6841 guest.wait_vm_boot(None).unwrap();
6842
6843 // Check both that /dev/vdc exists and that its size is 128M.
6844 assert_eq!(
6845 guest
6846 .ssh_command("lsblk | grep vdc | grep -c 128M")
6847 .unwrap()
6848 .trim()
6849 .parse::<u32>()
6850 .unwrap_or_default(),
6851 1
6852 );
6853
6854 // Write to the block device and check the content can be read back.
6855 // The vdpa-sim-blk simulator should return what we previously wrote.
6856 guest
6857 .ssh_command("sudo bash -c 'echo foobar > /dev/vdc'")
6858 .unwrap();
6859 assert_eq!(
6860 guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(),
6861 "foobar"
6862 );
6863
6864 // Hotplug an extra vDPA block device behind the vIOMMU
6865 // by adding a new vDPA device to the VM through the API
6866 let (cmd_success, cmd_output) = remote_command_w_output(
6867 &api_socket,
6868 "add-vdpa",
6869 Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"),
6870 );
6871 assert!(cmd_success);
6872 assert!(String::from_utf8_lossy(&cmd_output)
6873 .contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}"));
6874
6875 thread::sleep(std::time::Duration::new(10, 0));
6876
6877 // Check IOMMU setup
6878 assert!(guest
6879 .does_device_vendor_pair_match("0x1057", "0x1af4")
6880 .unwrap_or_default());
6881 assert_eq!(
6882 guest
6883 .ssh_command("ls /sys/kernel/iommu_groups/0/devices")
6884 .unwrap()
6885 .trim(),
6886 "0001:00:01.0"
6887 );
6888
6889 // Check both that /dev/vdd exists and that its size is 128M.
6890 assert_eq!(
6891 guest
6892 .ssh_command("lsblk | grep vdd | grep -c 128M")
6893 .unwrap()
6894 .trim()
6895 .parse::<u32>()
6896 .unwrap_or_default(),
6897 1
6898 );
6899
6900 // Write some content to the block device we've just plugged.
6901 guest
6902 .ssh_command("sudo bash -c 'echo foobar > /dev/vdd'")
6903 .unwrap();
6904
6905 // Check we can read the content back.
6906 assert_eq!( 6907 guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(), 6908 "foobar" 6909 ); 6910 6911 // Unplug the device 6912 let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0")); 6913 assert!(cmd_success); 6914 thread::sleep(std::time::Duration::new(10, 0)); 6915 6916 // Check /dev/vdd doesn't exist anymore 6917 assert_eq!( 6918 guest 6919 .ssh_command("lsblk | grep -c vdd || true") 6920 .unwrap() 6921 .trim() 6922 .parse::<u32>() 6923 .unwrap_or(1), 6924 0 6925 ); 6926 }); 6927 6928 kill_child(&mut child); 6929 let output = child.wait_with_output().unwrap(); 6930 6931 handle_child_output(r, &output); 6932 } 6933 6934 #[test] 6935 #[cfg(target_arch = "x86_64")] 6936 #[ignore = "See #5756"] 6937 fn test_vdpa_net() { 6938 // Before trying to run the test, verify the vdpa_sim_net module is correctly loaded. 6939 if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() { 6940 return; 6941 } 6942 6943 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 6944 let guest = Guest::new(Box::new(focal)); 6945 6946 let kernel_path = direct_kernel_boot_path(); 6947 6948 let mut child = GuestCommand::new(&guest) 6949 .args(["--cpus", "boot=2"]) 6950 .args(["--memory", "size=512M,hugepages=on"]) 6951 .args(["--kernel", kernel_path.to_str().unwrap()]) 6952 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 6953 .default_disks() 6954 .default_net() 6955 .args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"]) 6956 .capture_output() 6957 .spawn() 6958 .unwrap(); 6959 6960 let r = std::panic::catch_unwind(|| { 6961 guest.wait_vm_boot(None).unwrap(); 6962 6963 // Check we can find network interface related to vDPA device 6964 assert_eq!( 6965 guest 6966 .ssh_command("ip -o link | grep -c ens6") 6967 .unwrap() 6968 .trim() 6969 .parse::<u32>() 6970 .unwrap_or(0), 6971 1 6972 ); 6973 6974 guest 6975 .ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6") 6976 .unwrap(); 6977 guest.ssh_command("sudo ip link set up dev ens6").unwrap(); 6978 6979 // Check there is no packet yet on both TX/RX of the network interface 6980 assert_eq!( 6981 guest 6982 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'") 6983 .unwrap() 6984 .trim() 6985 .parse::<u32>() 6986 .unwrap_or(0), 6987 2 6988 ); 6989 6990 // Send 6 packets with ping command 6991 guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap(); 6992 6993 // Check we can find 6 packets on both TX/RX of the network interface 6994 assert_eq!( 6995 guest 6996 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'") 6997 .unwrap() 6998 .trim() 6999 .parse::<u32>() 7000 .unwrap_or(0), 7001 2 7002 ); 7003 7004 // No need to check for hotplug as we already tested it through 7005 // test_vdpa_block() 7006 }); 7007 7008 kill_child(&mut child); 7009 let output = child.wait_with_output().unwrap(); 7010 7011 handle_child_output(r, &output); 7012 } 7013 7014 #[test] 7015 #[cfg(target_arch = "x86_64")] 7016 fn test_tpm() { 7017 let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 7018 let guest = Guest::new(Box::new(focal)); 7019 7020 let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir); 7021 7022 let mut guest_cmd = GuestCommand::new(&guest); 7023 guest_cmd 7024 .args(["--cpus", "boot=1"]) 7025 .args(["--memory", "size=1G"]) 7026 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 7027 .args(["--tpm", &format!("socket={swtpm_socket_path}")]) 7028 .capture_output() 7029 .default_disks() 7030 .default_net(); 7031 7032 // Start swtpm 
daemon 7033 let mut swtpm_child = swtpm_command.spawn().unwrap(); 7034 thread::sleep(std::time::Duration::new(10, 0)); 7035 let mut child = guest_cmd.spawn().unwrap(); 7036 let r = std::panic::catch_unwind(|| { 7037 guest.wait_vm_boot(None).unwrap(); 7038 assert_eq!( 7039 guest.ssh_command("ls /dev/tpm0").unwrap().trim(), 7040 "/dev/tpm0" 7041 ); 7042 guest.ssh_command("sudo tpm2_selftest -f").unwrap(); 7043 guest 7044 .ssh_command("echo 'hello' > /tmp/checksum_test; ") 7045 .unwrap(); 7046 guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap(); 7047 }); 7048 7049 let _ = swtpm_child.kill(); 7050 let _d_out = swtpm_child.wait_with_output().unwrap(); 7051 7052 kill_child(&mut child); 7053 let output = child.wait_with_output().unwrap(); 7054 7055 handle_child_output(r, &output); 7056 } 7057 7058 #[test] 7059 #[cfg(target_arch = "x86_64")] 7060 fn test_double_tty() { 7061 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7062 let guest = Guest::new(Box::new(focal)); 7063 let mut cmd = GuestCommand::new(&guest); 7064 let api_socket = temp_api_path(&guest.tmp_dir); 7065 let tty_str: &str = "console=hvc0 earlyprintk=ttyS0 "; 7066 // linux printk module enable console log. 7067 let con_dis_str: &str = "console [hvc0] enabled"; 7068 // linux printk module disable console log. 7069 let con_enb_str: &str = "bootconsole [earlyser0] disabled"; 7070 7071 let kernel_path = direct_kernel_boot_path(); 7072 7073 cmd.args(["--cpus", "boot=1"]) 7074 .args(["--memory", "size=512M"]) 7075 .args(["--kernel", kernel_path.to_str().unwrap()]) 7076 .args([ 7077 "--cmdline", 7078 DIRECT_KERNEL_BOOT_CMDLINE 7079 .replace("console=hvc0 ", tty_str) 7080 .as_str(), 7081 ]) 7082 .capture_output() 7083 .default_disks() 7084 .default_net() 7085 .args(["--serial", "tty"]) 7086 .args(["--console", "tty"]) 7087 .args(["--api-socket", &api_socket]); 7088 7089 let mut child = cmd.spawn().unwrap(); 7090 7091 let mut r = std::panic::catch_unwind(|| { 7092 guest.wait_vm_boot(None).unwrap(); 7093 }); 7094 7095 kill_child(&mut child); 7096 let output = child.wait_with_output().unwrap(); 7097 7098 if r.is_ok() { 7099 r = std::panic::catch_unwind(|| { 7100 let s = String::from_utf8_lossy(&output.stdout); 7101 assert!(s.contains(tty_str)); 7102 assert!(s.contains(con_dis_str)); 7103 assert!(s.contains(con_enb_str)); 7104 }); 7105 } 7106 7107 handle_child_output(r, &output); 7108 } 7109 7110 #[test] 7111 #[cfg(target_arch = "x86_64")] 7112 fn test_nmi() { 7113 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string()); 7114 let guest = Guest::new(Box::new(jammy)); 7115 let api_socket = temp_api_path(&guest.tmp_dir); 7116 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7117 7118 let kernel_path = direct_kernel_boot_path(); 7119 let cmd_line = format!("{} {}", DIRECT_KERNEL_BOOT_CMDLINE, "unknown_nmi_panic=1"); 7120 7121 let mut cmd = GuestCommand::new(&guest); 7122 cmd.args(["--cpus", "boot=4"]) 7123 .args(["--memory", "size=512M"]) 7124 .args(["--kernel", kernel_path.to_str().unwrap()]) 7125 .args(["--cmdline", cmd_line.as_str()]) 7126 .default_disks() 7127 .args(["--net", guest.default_net_string().as_str()]) 7128 .args(["--pvpanic"]) 7129 .args(["--api-socket", &api_socket]) 7130 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7131 .capture_output(); 7132 7133 let mut child = cmd.spawn().unwrap(); 7134 7135 let r = std::panic::catch_unwind(|| { 7136 guest.wait_vm_boot(None).unwrap(); 
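// With 'unknown_nmi_panic=1' on the kernel command line, the NMI injected
// through the API makes the guest kernel panic, which the pvpanic device
// then surfaces as a "panic" event.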
7137 7138 assert!(remote_command(&api_socket, "nmi", None)); 7139 7140 // Wait a while for guest 7141 thread::sleep(std::time::Duration::new(3, 0)); 7142 7143 let expected_sequential_events = [&MetaEvent { 7144 event: "panic".to_string(), 7145 device_id: None, 7146 }]; 7147 assert!(check_latest_events_exact( 7148 &expected_sequential_events, 7149 &event_path 7150 )); 7151 }); 7152 7153 kill_child(&mut child); 7154 let output = child.wait_with_output().unwrap(); 7155 7156 handle_child_output(r, &output); 7157 } 7158 } 7159 7160 mod dbus_api { 7161 use crate::*; 7162 7163 // Start cloud-hypervisor with no VM parameters, running both the HTTP 7164 // and DBus APIs. Alternate calls to the external APIs (HTTP and DBus) 7165 // to create a VM, boot it, and verify that it can be shut down and then 7166 // booted again. 7167 #[test] 7168 fn test_api_dbus_and_http_interleaved() { 7169 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7170 let guest = Guest::new(Box::new(focal)); 7171 let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir); 7172 let http_api = TargetApi::new_http_api(&guest.tmp_dir); 7173 7174 let mut child = GuestCommand::new(&guest) 7175 .args(dbus_api.guest_args()) 7176 .args(http_api.guest_args()) 7177 .capture_output() 7178 .spawn() 7179 .unwrap(); 7180 7181 thread::sleep(std::time::Duration::new(1, 0)); 7182 7183 // Verify API servers are running 7184 assert!(dbus_api.remote_command("ping", None)); 7185 assert!(http_api.remote_command("ping", None)); 7186 7187 // Create the VM first 7188 let cpu_count: u8 = 4; 7189 let request_body = guest.api_create_body( 7190 cpu_count, 7191 direct_kernel_boot_path().to_str().unwrap(), 7192 DIRECT_KERNEL_BOOT_CMDLINE, 7193 ); 7194 7195 let temp_config_path = guest.tmp_dir.as_path().join("config"); 7196 std::fs::write(&temp_config_path, request_body).unwrap(); 7197 let create_config = temp_config_path.as_os_str().to_str().unwrap(); 7198 7199 let r = std::panic::catch_unwind(|| { 7200 // Create the VM 7201 assert!(dbus_api.remote_command("create", Some(create_config),)); 7202 7203 // Then boot it 7204 assert!(http_api.remote_command("boot", None)); 7205 guest.wait_vm_boot(None).unwrap(); 7206 7207 // Check that the VM booted as expected 7208 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7209 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7210 7211 // Sync and shutdown without powering off to prevent filesystem 7212 // corruption. 
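// Note: 'shutdown -H' halts the guest without an ACPI power-off, so the VMM
// process stays alive and the VM can be shut down and booted again through
// the API calls below.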
7213 guest.ssh_command("sync").unwrap(); 7214 guest.ssh_command("sudo shutdown -H now").unwrap(); 7215 7216 // Wait for the guest to be fully shutdown 7217 thread::sleep(std::time::Duration::new(20, 0)); 7218 7219 // Then shutdown the VM 7220 assert!(dbus_api.remote_command("shutdown", None)); 7221 7222 // Then boot it again 7223 assert!(http_api.remote_command("boot", None)); 7224 guest.wait_vm_boot(None).unwrap(); 7225 7226 // Check that the VM booted as expected 7227 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count); 7228 assert!(guest.get_total_memory().unwrap_or_default() > 480_000); 7229 }); 7230 7231 kill_child(&mut child); 7232 let output = child.wait_with_output().unwrap(); 7233 7234 handle_child_output(r, &output); 7235 } 7236 7237 #[test] 7238 fn test_api_dbus_create_boot() { 7239 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7240 let guest = Guest::new(Box::new(focal)); 7241 7242 _test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7243 } 7244 7245 #[test] 7246 fn test_api_dbus_shutdown() { 7247 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7248 let guest = Guest::new(Box::new(focal)); 7249 7250 _test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7251 } 7252 7253 #[test] 7254 fn test_api_dbus_delete() { 7255 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7256 let guest = Guest::new(Box::new(focal)); 7257 7258 _test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest); 7259 } 7260 7261 #[test] 7262 fn test_api_dbus_pause_resume() { 7263 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7264 let guest = Guest::new(Box::new(focal)); 7265 7266 _test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest) 7267 } 7268 } 7269 7270 mod common_sequential { 7271 use std::fs::remove_dir_all; 7272 7273 use crate::*; 7274 7275 #[test] 7276 #[cfg(not(feature = "mshv"))] 7277 fn test_memory_mergeable_on() { 7278 test_memory_mergeable(true) 7279 } 7280 7281 fn snapshot_and_check_events(api_socket: &str, snapshot_dir: &str, event_path: &str) { 7282 // Pause the VM 7283 assert!(remote_command(api_socket, "pause", None)); 7284 let latest_events: [&MetaEvent; 2] = [ 7285 &MetaEvent { 7286 event: "pausing".to_string(), 7287 device_id: None, 7288 }, 7289 &MetaEvent { 7290 event: "paused".to_string(), 7291 device_id: None, 7292 }, 7293 ]; 7294 // See: #5938 7295 thread::sleep(std::time::Duration::new(1, 0)); 7296 assert!(check_latest_events_exact(&latest_events, event_path)); 7297 7298 // Take a snapshot from the VM 7299 assert!(remote_command( 7300 api_socket, 7301 "snapshot", 7302 Some(format!("file://{snapshot_dir}").as_str()), 7303 )); 7304 7305 // Wait to make sure the snapshot is completed 7306 thread::sleep(std::time::Duration::new(10, 0)); 7307 7308 let latest_events = [ 7309 &MetaEvent { 7310 event: "snapshotting".to_string(), 7311 device_id: None, 7312 }, 7313 &MetaEvent { 7314 event: "snapshotted".to_string(), 7315 device_id: None, 7316 }, 7317 ]; 7318 // See: #5938 7319 thread::sleep(std::time::Duration::new(1, 0)); 7320 assert!(check_latest_events_exact(&latest_events, event_path)); 7321 } 7322 7323 // One thing to note about this test. The virtio-net device is heavily used 7324 // through each ssh command. There's no need to perform a dedicated test to 7325 // verify the migration went well for virtio-net. 
7326 #[test] 7327 #[cfg(not(feature = "mshv"))] 7328 fn test_snapshot_restore_hotplug_virtiomem() { 7329 _test_snapshot_restore(true); 7330 } 7331 7332 #[test] 7333 fn test_snapshot_restore_basic() { 7334 _test_snapshot_restore(false); 7335 } 7336 7337 fn _test_snapshot_restore(use_hotplug: bool) { 7338 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7339 let guest = Guest::new(Box::new(focal)); 7340 let kernel_path = direct_kernel_boot_path(); 7341 7342 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7343 7344 let net_id = "net123"; 7345 let net_params = format!( 7346 "id={},tap=,mac={},ip={},mask=255.255.255.0", 7347 net_id, guest.network.guest_mac, guest.network.host_ip 7348 ); 7349 let mut mem_params = "size=2G"; 7350 7351 if use_hotplug { 7352 mem_params = "size=2G,hotplug_method=virtio-mem,hotplug_size=32G" 7353 } 7354 7355 let cloudinit_params = format!( 7356 "path={},iommu=on", 7357 guest.disk_config.disk(DiskType::CloudInit).unwrap() 7358 ); 7359 7360 let socket = temp_vsock_path(&guest.tmp_dir); 7361 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7362 7363 let mut child = GuestCommand::new(&guest) 7364 .args(["--api-socket", &api_socket_source]) 7365 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7366 .args(["--cpus", "boot=4"]) 7367 .args(["--memory", mem_params]) 7368 .args(["--balloon", "size=0"]) 7369 .args(["--kernel", kernel_path.to_str().unwrap()]) 7370 .args([ 7371 "--disk", 7372 format!( 7373 "path={}", 7374 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 7375 ) 7376 .as_str(), 7377 cloudinit_params.as_str(), 7378 ]) 7379 .args(["--net", net_params.as_str()]) 7380 .args(["--vsock", format!("cid=3,socket={socket}").as_str()]) 7381 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7382 .capture_output() 7383 .spawn() 7384 .unwrap(); 7385 7386 let console_text = String::from("On a branch floating down river a cricket, singing."); 7387 // Create the snapshot directory 7388 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7389 7390 let r = std::panic::catch_unwind(|| { 7391 guest.wait_vm_boot(None).unwrap(); 7392 7393 // Check the number of vCPUs 7394 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 7395 // Check the guest RAM 7396 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000); 7397 if use_hotplug { 7398 // Increase guest RAM with virtio-mem 7399 resize_command( 7400 &api_socket_source, 7401 None, 7402 Some(6 << 30), 7403 None, 7404 Some(&event_path), 7405 ); 7406 thread::sleep(std::time::Duration::new(5, 0)); 7407 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 7408 // Use balloon to remove RAM from the VM 7409 resize_command( 7410 &api_socket_source, 7411 None, 7412 None, 7413 Some(1 << 30), 7414 Some(&event_path), 7415 ); 7416 thread::sleep(std::time::Duration::new(5, 0)); 7417 let total_memory = guest.get_total_memory().unwrap_or_default(); 7418 assert!(total_memory > 4_800_000); 7419 assert!(total_memory < 5_760_000); 7420 } 7421 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 7422 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7423 7424 // x86_64: We check that removing and adding back the virtio-net device 7425 // does not break the snapshot/restore support for virtio-pci. 7426 // This is an important thing to test as the hotplug will 7427 // trigger a PCI BAR reprogramming, which is a good way of 7428 // checking if the stored resources are correctly restored. 
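// In other words, the device removed and re-added here ends up with freshly
// programmed BARs, and the restore path must bring it back with the same PCI
// resources for the restored guest to keep working.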
7429 // Unplug the virtio-net device 7430 // AArch64: Device hotplug is currently not supported, skipping here. 7431 #[cfg(target_arch = "x86_64")] 7432 { 7433 assert!(remote_command( 7434 &api_socket_source, 7435 "remove-device", 7436 Some(net_id), 7437 )); 7438 thread::sleep(std::time::Duration::new(10, 0)); 7439 let latest_events = [&MetaEvent { 7440 event: "device-removed".to_string(), 7441 device_id: Some(net_id.to_string()), 7442 }]; 7443 // See: #5938 7444 thread::sleep(std::time::Duration::new(1, 0)); 7445 assert!(check_latest_events_exact(&latest_events, &event_path)); 7446 7447 // Plug the virtio-net device again 7448 assert!(remote_command( 7449 &api_socket_source, 7450 "add-net", 7451 Some(net_params.as_str()), 7452 )); 7453 thread::sleep(std::time::Duration::new(10, 0)); 7454 } 7455 7456 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7457 }); 7458 7459 // Shutdown the source VM and check console output 7460 kill_child(&mut child); 7461 let output = child.wait_with_output().unwrap(); 7462 handle_child_output(r, &output); 7463 7464 let r = std::panic::catch_unwind(|| { 7465 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7466 }); 7467 7468 handle_child_output(r, &output); 7469 7470 // Remove the vsock socket file. 7471 Command::new("rm") 7472 .arg("-f") 7473 .arg(socket.as_str()) 7474 .output() 7475 .unwrap(); 7476 7477 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7478 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7479 7480 // Restore the VM from the snapshot 7481 let mut child = GuestCommand::new(&guest) 7482 .args(["--api-socket", &api_socket_restored]) 7483 .args([ 7484 "--event-monitor", 7485 format!("path={event_path_restored}").as_str(), 7486 ]) 7487 .args([ 7488 "--restore", 7489 format!("source_url=file://{snapshot_dir}").as_str(), 7490 ]) 7491 .capture_output() 7492 .spawn() 7493 .unwrap(); 7494 7495 // Wait for the VM to be restored 7496 thread::sleep(std::time::Duration::new(20, 0)); 7497 let expected_events = [ 7498 &MetaEvent { 7499 event: "starting".to_string(), 7500 device_id: None, 7501 }, 7502 &MetaEvent { 7503 event: "activated".to_string(), 7504 device_id: Some("__console".to_string()), 7505 }, 7506 &MetaEvent { 7507 event: "activated".to_string(), 7508 device_id: Some("__rng".to_string()), 7509 }, 7510 &MetaEvent { 7511 event: "restoring".to_string(), 7512 device_id: None, 7513 }, 7514 ]; 7515 assert!(check_sequential_events( 7516 &expected_events, 7517 &event_path_restored 7518 )); 7519 let latest_events = [&MetaEvent { 7520 event: "restored".to_string(), 7521 device_id: None, 7522 }]; 7523 assert!(check_latest_events_exact( 7524 &latest_events, 7525 &event_path_restored 7526 )); 7527 7528 // Remove the snapshot dir 7529 let _ = remove_dir_all(snapshot_dir.as_str()); 7530 7531 let r = std::panic::catch_unwind(|| { 7532 // Resume the VM 7533 assert!(remote_command(&api_socket_restored, "resume", None)); 7534 // There is no way that we can ensure the 'write()' to the 7535 // event file is completed when the 'resume' request is 7536 // returned successfully, because the 'write()' was done 7537 // asynchronously from a different thread of Cloud 7538 // Hypervisor (e.g. the event-monitor thread). 
7539 thread::sleep(std::time::Duration::new(1, 0)); 7540 let latest_events = [ 7541 &MetaEvent { 7542 event: "resuming".to_string(), 7543 device_id: None, 7544 }, 7545 &MetaEvent { 7546 event: "resumed".to_string(), 7547 device_id: None, 7548 }, 7549 ]; 7550 assert!(check_latest_events_exact( 7551 &latest_events, 7552 &event_path_restored 7553 )); 7554 7555 // Perform same checks to validate VM has been properly restored 7556 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4); 7557 let total_memory = guest.get_total_memory().unwrap_or_default(); 7558 if !use_hotplug { 7559 assert!(total_memory > 1_920_000); 7560 } else { 7561 assert!(total_memory > 4_800_000); 7562 assert!(total_memory < 5_760_000); 7563 // Deflate balloon to restore entire RAM to the VM 7564 resize_command(&api_socket_restored, None, None, Some(0), None); 7565 thread::sleep(std::time::Duration::new(5, 0)); 7566 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 7567 // Decrease guest RAM with virtio-mem 7568 resize_command(&api_socket_restored, None, Some(5 << 30), None, None); 7569 thread::sleep(std::time::Duration::new(5, 0)); 7570 let total_memory = guest.get_total_memory().unwrap_or_default(); 7571 assert!(total_memory > 4_800_000); 7572 assert!(total_memory < 5_760_000); 7573 } 7574 7575 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7576 }); 7577 // Shutdown the target VM and check console output 7578 kill_child(&mut child); 7579 let output = child.wait_with_output().unwrap(); 7580 handle_child_output(r, &output); 7581 7582 let r = std::panic::catch_unwind(|| { 7583 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7584 }); 7585 7586 handle_child_output(r, &output); 7587 } 7588 7589 #[test] 7590 #[cfg_attr(target_arch = "aarch64", ignore = "See #6970")] 7591 fn test_snapshot_restore_with_fd() { 7592 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7593 let guest = Guest::new(Box::new(focal)); 7594 let kernel_path = direct_kernel_boot_path(); 7595 7596 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7597 7598 let net_id = "net123"; 7599 let num_queue_pairs: usize = 2; 7600 // use a name that does not conflict with tap dev created from other tests 7601 let tap_name = "chtap999"; 7602 use std::str::FromStr; 7603 let taps = net_util::open_tap( 7604 Some(tap_name), 7605 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 7606 None, 7607 &mut None, 7608 None, 7609 num_queue_pairs, 7610 Some(libc::O_RDWR | libc::O_NONBLOCK), 7611 ) 7612 .unwrap(); 7613 let net_params = format!( 7614 "id={},fd=[{},{}],mac={},ip={},mask=255.255.255.0,num_queues={}", 7615 net_id, 7616 taps[0].as_raw_fd(), 7617 taps[1].as_raw_fd(), 7618 guest.network.guest_mac, 7619 guest.network.host_ip, 7620 num_queue_pairs * 2 7621 ); 7622 7623 let cloudinit_params = format!( 7624 "path={},iommu=on", 7625 guest.disk_config.disk(DiskType::CloudInit).unwrap() 7626 ); 7627 7628 let n_cpu = 2; 7629 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7630 7631 let mut child = GuestCommand::new(&guest) 7632 .args(["--api-socket", &api_socket_source]) 7633 .args(["--event-monitor", format!("path={event_path}").as_str()]) 7634 .args(["--cpus", format!("boot={}", n_cpu).as_str()]) 7635 .args(["--memory", "size=1G"]) 7636 .args(["--kernel", kernel_path.to_str().unwrap()]) 7637 .args([ 7638 "--disk", 7639 format!( 7640 "path={}", 7641 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 7642 ) 7643 .as_str(), 7644 
cloudinit_params.as_str(), 7645 ]) 7646 .args(["--net", net_params.as_str()]) 7647 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7648 .capture_output() 7649 .spawn() 7650 .unwrap(); 7651 7652 let console_text = String::from("On a branch floating down river a cricket, singing."); 7653 // Create the snapshot directory 7654 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7655 7656 let r = std::panic::catch_unwind(|| { 7657 guest.wait_vm_boot(None).unwrap(); 7658 7659 // close the fds after VM boots, as CH duplicates them before using 7660 for tap in taps.iter() { 7661 unsafe { libc::close(tap.as_raw_fd()) }; 7662 } 7663 7664 // Check the number of vCPUs 7665 assert_eq!(guest.get_cpu_count().unwrap_or_default(), n_cpu); 7666 // Check the guest RAM 7667 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 7668 7669 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net 7670 guest.check_devices_common(None, Some(&console_text), None); 7671 7672 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7673 }); 7674 7675 // Shutdown the source VM and check console output 7676 kill_child(&mut child); 7677 let output = child.wait_with_output().unwrap(); 7678 handle_child_output(r, &output); 7679 7680 let r = std::panic::catch_unwind(|| { 7681 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7682 }); 7683 7684 handle_child_output(r, &output); 7685 7686 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7687 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7688 7689 // Restore the VM from the snapshot 7690 let mut child = GuestCommand::new(&guest) 7691 .args(["--api-socket", &api_socket_restored]) 7692 .args([ 7693 "--event-monitor", 7694 format!("path={event_path_restored}").as_str(), 7695 ]) 7696 .capture_output() 7697 .spawn() 7698 .unwrap(); 7699 thread::sleep(std::time::Duration::new(2, 0)); 7700 7701 let taps = net_util::open_tap( 7702 Some(tap_name), 7703 Some(std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap()), 7704 None, 7705 &mut None, 7706 None, 7707 num_queue_pairs, 7708 Some(libc::O_RDWR | libc::O_NONBLOCK), 7709 ) 7710 .unwrap(); 7711 let restore_params = format!( 7712 "source_url=file://{},net_fds=[{}@[{},{}]]", 7713 snapshot_dir, 7714 net_id, 7715 taps[0].as_raw_fd(), 7716 taps[1].as_raw_fd() 7717 ); 7718 assert!(remote_command( 7719 &api_socket_restored, 7720 "restore", 7721 Some(restore_params.as_str()) 7722 )); 7723 7724 // Wait for the VM to be restored 7725 thread::sleep(std::time::Duration::new(20, 0)); 7726 7727 // close the fds as CH duplicates them before using 7728 for tap in taps.iter() { 7729 unsafe { libc::close(tap.as_raw_fd()) }; 7730 } 7731 7732 let expected_events = [ 7733 &MetaEvent { 7734 event: "starting".to_string(), 7735 device_id: None, 7736 }, 7737 &MetaEvent { 7738 event: "activated".to_string(), 7739 device_id: Some("__console".to_string()), 7740 }, 7741 &MetaEvent { 7742 event: "activated".to_string(), 7743 device_id: Some("__rng".to_string()), 7744 }, 7745 &MetaEvent { 7746 event: "restoring".to_string(), 7747 device_id: None, 7748 }, 7749 ]; 7750 assert!(check_sequential_events( 7751 &expected_events, 7752 &event_path_restored 7753 )); 7754 let latest_events = [&MetaEvent { 7755 event: "restored".to_string(), 7756 device_id: None, 7757 }]; 7758 assert!(check_latest_events_exact( 7759 &latest_events, 7760 &event_path_restored 7761 )); 7762 7763 // Remove the snapshot dir 7764 let _ = 
remove_dir_all(snapshot_dir.as_str()); 7765 7766 let r = std::panic::catch_unwind(|| { 7767 // Resume the VM 7768 assert!(remote_command(&api_socket_restored, "resume", None)); 7769 // There is no way that we can ensure the 'write()' to the 7770 // event file is completed when the 'resume' request is 7771 // returned successfully, because the 'write()' was done 7772 // asynchronously from a different thread of Cloud 7773 // Hypervisor (e.g. the event-monitor thread). 7774 thread::sleep(std::time::Duration::new(1, 0)); 7775 let latest_events = [ 7776 &MetaEvent { 7777 event: "resuming".to_string(), 7778 device_id: None, 7779 }, 7780 &MetaEvent { 7781 event: "resumed".to_string(), 7782 device_id: None, 7783 }, 7784 ]; 7785 assert!(check_latest_events_exact( 7786 &latest_events, 7787 &event_path_restored 7788 )); 7789 7790 // Perform same checks to validate VM has been properly restored 7791 assert_eq!(guest.get_cpu_count().unwrap_or_default(), n_cpu); 7792 assert!(guest.get_total_memory().unwrap_or_default() > 960_000); 7793 7794 guest.check_devices_common(None, Some(&console_text), None); 7795 }); 7796 // Shutdown the target VM and check console output 7797 kill_child(&mut child); 7798 let output = child.wait_with_output().unwrap(); 7799 handle_child_output(r, &output); 7800 7801 let r = std::panic::catch_unwind(|| { 7802 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7803 }); 7804 7805 handle_child_output(r, &output); 7806 } 7807 7808 #[test] 7809 #[cfg(target_arch = "x86_64")] 7810 fn test_snapshot_restore_pvpanic() { 7811 _test_snapshot_restore_devices(true); 7812 } 7813 7814 fn _test_snapshot_restore_devices(pvpanic: bool) { 7815 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 7816 let guest = Guest::new(Box::new(focal)); 7817 let kernel_path = direct_kernel_boot_path(); 7818 7819 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir)); 7820 7821 let device_params = { 7822 let mut data = vec![]; 7823 if pvpanic { 7824 data.push("--pvpanic"); 7825 } 7826 data 7827 }; 7828 7829 let socket = temp_vsock_path(&guest.tmp_dir); 7830 let event_path = temp_event_monitor_path(&guest.tmp_dir); 7831 7832 let mut child = GuestCommand::new(&guest) 7833 .args(["--api-socket", &api_socket_source]) 7834 .args(["--event-monitor", format!("path={}", event_path).as_str()]) 7835 .args(["--cpus", "boot=2"]) 7836 .args(["--memory", "size=1G"]) 7837 .args(["--kernel", kernel_path.to_str().unwrap()]) 7838 .default_disks() 7839 .default_net() 7840 .args(["--vsock", format!("cid=3,socket={}", socket).as_str()]) 7841 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 7842 .args(device_params) 7843 .capture_output() 7844 .spawn() 7845 .unwrap(); 7846 7847 let console_text = String::from("On a branch floating down river a cricket, singing."); 7848 // Create the snapshot directory 7849 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir); 7850 7851 let r = std::panic::catch_unwind(|| { 7852 guest.wait_vm_boot(None).unwrap(); 7853 7854 // Check the number of vCPUs 7855 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 7856 7857 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path); 7858 }); 7859 7860 // Shutdown the source VM and check console output 7861 kill_child(&mut child); 7862 let output = child.wait_with_output().unwrap(); 7863 handle_child_output(r, &output); 7864 7865 // Remove the vsock socket file. 
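        // A commented-out sketch of doing the same cleanup in-process instead of
        // shelling out to `rm`; discarding the Result mirrors the "ignore a missing
        // file" behavior of `rm -f`. Kept as a comment so the test is unchanged.
        // let _ = std::fs::remove_file(socket.as_str());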
7866 Command::new("rm") 7867 .arg("-f") 7868 .arg(socket.as_str()) 7869 .output() 7870 .unwrap(); 7871 7872 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir)); 7873 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir)); 7874 7875 // Restore the VM from the snapshot 7876 let mut child = GuestCommand::new(&guest) 7877 .args(["--api-socket", &api_socket_restored]) 7878 .args([ 7879 "--event-monitor", 7880 format!("path={event_path_restored}").as_str(), 7881 ]) 7882 .args([ 7883 "--restore", 7884 format!("source_url=file://{snapshot_dir}").as_str(), 7885 ]) 7886 .capture_output() 7887 .spawn() 7888 .unwrap(); 7889 7890 // Wait for the VM to be restored 7891 thread::sleep(std::time::Duration::new(20, 0)); 7892 7893 let latest_events = [&MetaEvent { 7894 event: "restored".to_string(), 7895 device_id: None, 7896 }]; 7897 assert!(check_latest_events_exact( 7898 &latest_events, 7899 &event_path_restored 7900 )); 7901 7902 // Remove the snapshot dir 7903 let _ = remove_dir_all(snapshot_dir.as_str()); 7904 7905 let r = std::panic::catch_unwind(|| { 7906 // Resume the VM 7907 assert!(remote_command(&api_socket_restored, "resume", None)); 7908 // There is no way that we can ensure the 'write()' to the 7909 // event file is completed when the 'resume' request is 7910 // returned successfully, because the 'write()' was done 7911 // asynchronously from a different thread of Cloud 7912 // Hypervisor (e.g. the event-monitor thread). 7913 thread::sleep(std::time::Duration::new(1, 0)); 7914 let latest_events = [ 7915 &MetaEvent { 7916 event: "resuming".to_string(), 7917 device_id: None, 7918 }, 7919 &MetaEvent { 7920 event: "resumed".to_string(), 7921 device_id: None, 7922 }, 7923 ]; 7924 assert!(check_latest_events_exact( 7925 &latest_events, 7926 &event_path_restored 7927 )); 7928 7929 // Check the number of vCPUs 7930 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2); 7931 guest.check_devices_common(Some(&socket), Some(&console_text), None); 7932 7933 if pvpanic { 7934 // Trigger guest a panic 7935 make_guest_panic(&guest); 7936 // Wait a while for guest 7937 thread::sleep(std::time::Duration::new(10, 0)); 7938 7939 let expected_sequential_events = [&MetaEvent { 7940 event: "panic".to_string(), 7941 device_id: None, 7942 }]; 7943 assert!(check_latest_events_exact( 7944 &expected_sequential_events, 7945 &event_path_restored 7946 )); 7947 } 7948 }); 7949 // Shutdown the target VM and check console output 7950 kill_child(&mut child); 7951 let output = child.wait_with_output().unwrap(); 7952 handle_child_output(r, &output); 7953 7954 let r = std::panic::catch_unwind(|| { 7955 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text)); 7956 }); 7957 7958 handle_child_output(r, &output); 7959 } 7960 } 7961 7962 mod windows { 7963 use once_cell::sync::Lazy; 7964 7965 use crate::*; 7966 7967 static NEXT_DISK_ID: Lazy<Mutex<u8>> = Lazy::new(|| Mutex::new(1)); 7968 7969 struct WindowsGuest { 7970 guest: Guest, 7971 auth: PasswordAuth, 7972 } 7973 7974 trait FsType { 7975 const FS_FAT: u8; 7976 const FS_NTFS: u8; 7977 } 7978 impl FsType for WindowsGuest { 7979 const FS_FAT: u8 = 0; 7980 const FS_NTFS: u8 = 1; 7981 } 7982 7983 impl WindowsGuest { 7984 fn new() -> Self { 7985 let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string()); 7986 let guest = Guest::new(Box::new(disk)); 7987 let auth = PasswordAuth { 7988 username: String::from("administrator"), 7989 password: String::from("Admin123"), 7990 }; 7991 7992 WindowsGuest { guest, auth } 
7993 } 7994 7995 fn guest(&self) -> &Guest { 7996 &self.guest 7997 } 7998 7999 fn ssh_cmd(&self, cmd: &str) -> String { 8000 ssh_command_ip_with_auth( 8001 cmd, 8002 &self.auth, 8003 &self.guest.network.guest_ip, 8004 DEFAULT_SSH_RETRIES, 8005 DEFAULT_SSH_TIMEOUT, 8006 ) 8007 .unwrap() 8008 } 8009 8010 fn cpu_count(&self) -> u8 { 8011 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"") 8012 .trim() 8013 .parse::<u8>() 8014 .unwrap_or(0) 8015 } 8016 8017 fn ram_size(&self) -> usize { 8018 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"") 8019 .trim() 8020 .parse::<usize>() 8021 .unwrap_or(0) 8022 } 8023 8024 fn netdev_count(&self) -> u8 { 8025 self.ssh_cmd("powershell -Command \"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"") 8026 .trim() 8027 .parse::<u8>() 8028 .unwrap_or(0) 8029 } 8030 8031 fn disk_count(&self) -> u8 { 8032 self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"") 8033 .trim() 8034 .parse::<u8>() 8035 .unwrap_or(0) 8036 } 8037 8038 fn reboot(&self) { 8039 let _ = self.ssh_cmd("shutdown /r /t 0"); 8040 } 8041 8042 fn shutdown(&self) { 8043 let _ = self.ssh_cmd("shutdown /s /t 0"); 8044 } 8045 8046 fn run_dnsmasq(&self) -> std::process::Child { 8047 let listen_address = format!("--listen-address={}", self.guest.network.host_ip); 8048 let dhcp_host = format!( 8049 "--dhcp-host={},{}", 8050 self.guest.network.guest_mac, self.guest.network.guest_ip 8051 ); 8052 let dhcp_range = format!( 8053 "--dhcp-range=eth,{},{}", 8054 self.guest.network.guest_ip, self.guest.network.guest_ip 8055 ); 8056 8057 Command::new("dnsmasq") 8058 .arg("--no-daemon") 8059 .arg("--log-queries") 8060 .arg(listen_address.as_str()) 8061 .arg("--except-interface=lo") 8062 .arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet. 8063 .arg("--conf-file=/dev/null") 8064 .arg(dhcp_host.as_str()) 8065 .arg(dhcp_range.as_str()) 8066 .spawn() 8067 .unwrap() 8068 } 8069 8070 // TODO Cleanup image file explicitly after test, if there's some space issues. 
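        // A commented-out sketch of what such an explicit cleanup could look like on
        // the caller side, assuming the caller keeps the path returned by `disk_new()`
        // (e.g. the `disk` binding used in the hotplug tests below); illustrative only:
        // let _ = std::fs::remove_file(&disk);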
8071 fn disk_new(&self, fs: u8, sz: usize) -> String { 8072 let mut guard = NEXT_DISK_ID.lock().unwrap(); 8073 let id = *guard; 8074 *guard = id + 1; 8075 8076 let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw")); 8077 let _ = fs::remove_file(&img); 8078 8079 // Create an image file 8080 let out = Command::new("qemu-img") 8081 .args([ 8082 "create", 8083 "-f", 8084 "raw", 8085 img.to_str().unwrap(), 8086 format!("{sz}m").as_str(), 8087 ]) 8088 .output() 8089 .expect("qemu-img command failed") 8090 .stdout; 8091 println!("{out:?}"); 8092 8093 // Associate image to a loop device 8094 let out = Command::new("losetup") 8095 .args(["--show", "-f", img.to_str().unwrap()]) 8096 .output() 8097 .expect("failed to create loop device") 8098 .stdout; 8099 let _tmp = String::from_utf8_lossy(&out); 8100 let loop_dev = _tmp.trim(); 8101 println!("{out:?}"); 8102 8103 // Create a partition table 8104 // echo 'type=7' | sudo sfdisk "${LOOP}" 8105 let mut child = Command::new("sfdisk") 8106 .args([loop_dev]) 8107 .stdin(Stdio::piped()) 8108 .spawn() 8109 .unwrap(); 8110 let stdin = child.stdin.as_mut().expect("failed to open stdin"); 8111 stdin 8112 .write_all("type=7".as_bytes()) 8113 .expect("failed to write stdin"); 8114 let out = child.wait_with_output().expect("sfdisk failed").stdout; 8115 println!("{out:?}"); 8116 8117 // Disengage the loop device 8118 let out = Command::new("losetup") 8119 .args(["-d", loop_dev]) 8120 .output() 8121 .expect("loop device not found") 8122 .stdout; 8123 println!("{out:?}"); 8124 8125 // Re-associate loop device pointing to the partition only 8126 let out = Command::new("losetup") 8127 .args([ 8128 "--show", 8129 "--offset", 8130 (512 * 2048).to_string().as_str(), 8131 "-f", 8132 img.to_str().unwrap(), 8133 ]) 8134 .output() 8135 .expect("failed to create loop device") 8136 .stdout; 8137 let _tmp = String::from_utf8_lossy(&out); 8138 let loop_dev = _tmp.trim(); 8139 println!("{out:?}"); 8140 8141 // Create filesystem. 8142 let fs_cmd = match fs { 8143 WindowsGuest::FS_FAT => "mkfs.msdos", 8144 WindowsGuest::FS_NTFS => "mkfs.ntfs", 8145 _ => panic!("Unknown filesystem type '{fs}'"), 8146 }; 8147 let out = Command::new(fs_cmd) 8148 .args([&loop_dev]) 8149 .output() 8150 .unwrap_or_else(|_| panic!("{fs_cmd} failed")) 8151 .stdout; 8152 println!("{out:?}"); 8153 8154 // Disengage the loop device 8155 let out = Command::new("losetup") 8156 .args(["-d", loop_dev]) 8157 .output() 8158 .unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found")) 8159 .stdout; 8160 println!("{out:?}"); 8161 8162 img.to_str().unwrap().to_string() 8163 } 8164 8165 fn disks_set_rw(&self) { 8166 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\""); 8167 } 8168 8169 fn disks_online(&self) { 8170 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\""); 8171 } 8172 8173 fn disk_file_put(&self, fname: &str, data: &str) { 8174 let _ = self.ssh_cmd(&format!( 8175 "powershell -Command \"'{data}' | Set-Content -Path {fname}\"" 8176 )); 8177 } 8178 8179 fn disk_file_read(&self, fname: &str) -> String { 8180 self.ssh_cmd(&format!( 8181 "powershell -Command \"Get-Content -Path {fname}\"" 8182 )) 8183 } 8184 8185 fn wait_for_boot(&self) -> bool { 8186 let cmd = "dir /b c:\\ | find \"Windows\""; 8187 let tmo_max = 180; 8188 // The timeout increase by n*1+n*2+n*3+..., therefore the initial 8189 // interval must be small. 
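            // Worked example of the retry computation below: with tmo_int = 2 the
            // accumulated wait after n retries is 2*1 + 2*2 + ... + 2*n = n*(n+1)
            // seconds, so the loop settles on the first n where n*(n+1) >= 180,
            // i.e. 13 retries.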
8190 let tmo_int = 2; 8191 let out = ssh_command_ip_with_auth( 8192 cmd, 8193 &self.auth, 8194 &self.guest.network.guest_ip, 8195 { 8196 let mut ret = 1; 8197 let mut tmo_acc = 0; 8198 loop { 8199 tmo_acc += tmo_int * ret; 8200 if tmo_acc >= tmo_max { 8201 break; 8202 } 8203 ret += 1; 8204 } 8205 ret 8206 }, 8207 tmo_int, 8208 ) 8209 .unwrap(); 8210 8211 if "Windows" == out.trim() { 8212 return true; 8213 } 8214 8215 false 8216 } 8217 } 8218 8219 fn vcpu_threads_count(pid: u32) -> u8 { 8220 // ps -T -p 12345 | grep vcpu | wc -l 8221 let out = Command::new("ps") 8222 .args(["-T", "-p", format!("{pid}").as_str()]) 8223 .output() 8224 .expect("ps command failed") 8225 .stdout; 8226 String::from_utf8_lossy(&out).matches("vcpu").count() as u8 8227 } 8228 8229 fn netdev_ctrl_threads_count(pid: u32) -> u8 { 8230 // ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l 8231 let out = Command::new("ps") 8232 .args(["-T", "-p", format!("{pid}").as_str()]) 8233 .output() 8234 .expect("ps command failed") 8235 .stdout; 8236 let mut n = 0; 8237 String::from_utf8_lossy(&out) 8238 .split_whitespace() 8239 .for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl 8240 n 8241 } 8242 8243 fn disk_ctrl_threads_count(pid: u32) -> u8 { 8244 // ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l 8245 let out = Command::new("ps") 8246 .args(["-T", "-p", format!("{pid}").as_str()]) 8247 .output() 8248 .expect("ps command failed") 8249 .stdout; 8250 let mut n = 0; 8251 String::from_utf8_lossy(&out) 8252 .split_whitespace() 8253 .for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // _disk0_q0, don't care about multiple queues as they're related to the same hdd 8254 n 8255 } 8256 8257 #[test] 8258 fn test_windows_guest() { 8259 let windows_guest = WindowsGuest::new(); 8260 8261 let mut child = GuestCommand::new(windows_guest.guest()) 8262 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8263 .args(["--memory", "size=4G"]) 8264 .args(["--kernel", edk2_path().to_str().unwrap()]) 8265 .args(["--serial", "tty"]) 8266 .args(["--console", "off"]) 8267 .default_disks() 8268 .default_net() 8269 .capture_output() 8270 .spawn() 8271 .unwrap(); 8272 8273 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8274 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8275 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8276 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8277 8278 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8279 8280 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8281 8282 let r = std::panic::catch_unwind(|| { 8283 // Wait to make sure Windows boots up 8284 assert!(windows_guest.wait_for_boot()); 8285 8286 windows_guest.shutdown(); 8287 }); 8288 8289 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8290 let _ = child.kill(); 8291 let output = child.wait_with_output().unwrap(); 8292 8293 let _ = child_dnsmasq.kill(); 8294 let _ = child_dnsmasq.wait(); 8295 8296 handle_child_output(r, &output); 8297 } 8298 8299 #[test] 8300 fn test_windows_guest_multiple_queues() { 8301 let windows_guest = WindowsGuest::new(); 8302 8303 let mut ovmf_path = dirs::home_dir().unwrap(); 8304 ovmf_path.push("workloads"); 8305 ovmf_path.push(OVMF_NAME); 8306 8307 let mut child = GuestCommand::new(windows_guest.guest()) 8308 .args(["--cpus", "boot=4,kvm_hyperv=on"]) 8309 .args(["--memory", "size=4G"]) 8310 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8311 .args(["--serial", "tty"]) 8312 .args(["--console", "off"]) 
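            // The disk and net arguments below size their queues against the 4 boot
            // vCPUs above: num_queues=4 gives the disk one queue per vCPU, while
            // num_queues=8 on the net device means 4 RX/TX queue pairs (num_queues
            // counts both directions, as in the fd-passing test earlier in this file).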
8313 .args([ 8314 "--disk", 8315 format!( 8316 "path={},num_queues=4", 8317 windows_guest 8318 .guest() 8319 .disk_config 8320 .disk(DiskType::OperatingSystem) 8321 .unwrap() 8322 ) 8323 .as_str(), 8324 ]) 8325 .args([ 8326 "--net", 8327 format!( 8328 "tap=,mac={},ip={},mask=255.255.255.0,num_queues=8", 8329 windows_guest.guest().network.guest_mac, 8330 windows_guest.guest().network.host_ip 8331 ) 8332 .as_str(), 8333 ]) 8334 .capture_output() 8335 .spawn() 8336 .unwrap(); 8337 8338 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8339 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8340 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8341 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8342 8343 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8344 8345 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8346 8347 let r = std::panic::catch_unwind(|| { 8348 // Wait to make sure Windows boots up 8349 assert!(windows_guest.wait_for_boot()); 8350 8351 windows_guest.shutdown(); 8352 }); 8353 8354 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8355 let _ = child.kill(); 8356 let output = child.wait_with_output().unwrap(); 8357 8358 let _ = child_dnsmasq.kill(); 8359 let _ = child_dnsmasq.wait(); 8360 8361 handle_child_output(r, &output); 8362 } 8363 8364 #[test] 8365 #[cfg(not(feature = "mshv"))] 8366 #[ignore = "See #4327"] 8367 fn test_windows_guest_snapshot_restore() { 8368 let windows_guest = WindowsGuest::new(); 8369 8370 let mut ovmf_path = dirs::home_dir().unwrap(); 8371 ovmf_path.push("workloads"); 8372 ovmf_path.push(OVMF_NAME); 8373 8374 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8375 let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir)); 8376 8377 let mut child = GuestCommand::new(windows_guest.guest()) 8378 .args(["--api-socket", &api_socket_source]) 8379 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8380 .args(["--memory", "size=4G"]) 8381 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8382 .args(["--serial", "tty"]) 8383 .args(["--console", "off"]) 8384 .default_disks() 8385 .default_net() 8386 .capture_output() 8387 .spawn() 8388 .unwrap(); 8389 8390 let fd = child.stdout.as_ref().unwrap().as_raw_fd(); 8391 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8392 let fd = child.stderr.as_ref().unwrap().as_raw_fd(); 8393 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) }; 8394 8395 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE); 8396 8397 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8398 8399 // Wait to make sure Windows boots up 8400 assert!(windows_guest.wait_for_boot()); 8401 8402 let snapshot_dir = temp_snapshot_dir_path(&tmp_dir); 8403 8404 // Pause the VM 8405 assert!(remote_command(&api_socket_source, "pause", None)); 8406 8407 // Take a snapshot from the VM 8408 assert!(remote_command( 8409 &api_socket_source, 8410 "snapshot", 8411 Some(format!("file://{snapshot_dir}").as_str()), 8412 )); 8413 8414 // Wait to make sure the snapshot is completed 8415 thread::sleep(std::time::Duration::new(30, 0)); 8416 8417 let _ = child.kill(); 8418 child.wait().unwrap(); 8419 8420 let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir)); 8421 8422 // Restore the VM from the snapshot 8423 let mut child = GuestCommand::new(windows_guest.guest()) 8424 .args(["--api-socket", &api_socket_restored]) 8425 .args([ 8426 "--restore", 8427 format!("source_url=file://{snapshot_dir}").as_str(), 8428 ]) 8429 
.capture_output() 8430 .spawn() 8431 .unwrap(); 8432 8433 // Wait for the VM to be restored 8434 thread::sleep(std::time::Duration::new(20, 0)); 8435 8436 let r = std::panic::catch_unwind(|| { 8437 // Resume the VM 8438 assert!(remote_command(&api_socket_restored, "resume", None)); 8439 8440 windows_guest.shutdown(); 8441 }); 8442 8443 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8444 let _ = child.kill(); 8445 let output = child.wait_with_output().unwrap(); 8446 8447 let _ = child_dnsmasq.kill(); 8448 let _ = child_dnsmasq.wait(); 8449 8450 handle_child_output(r, &output); 8451 } 8452 8453 #[test] 8454 #[cfg(not(feature = "mshv"))] 8455 #[cfg(not(target_arch = "aarch64"))] 8456 fn test_windows_guest_cpu_hotplug() { 8457 let windows_guest = WindowsGuest::new(); 8458 8459 let mut ovmf_path = dirs::home_dir().unwrap(); 8460 ovmf_path.push("workloads"); 8461 ovmf_path.push(OVMF_NAME); 8462 8463 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8464 let api_socket = temp_api_path(&tmp_dir); 8465 8466 let mut child = GuestCommand::new(windows_guest.guest()) 8467 .args(["--api-socket", &api_socket]) 8468 .args(["--cpus", "boot=2,max=8,kvm_hyperv=on"]) 8469 .args(["--memory", "size=4G"]) 8470 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8471 .args(["--serial", "tty"]) 8472 .args(["--console", "off"]) 8473 .default_disks() 8474 .default_net() 8475 .capture_output() 8476 .spawn() 8477 .unwrap(); 8478 8479 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8480 8481 let r = std::panic::catch_unwind(|| { 8482 // Wait to make sure Windows boots up 8483 assert!(windows_guest.wait_for_boot()); 8484 8485 let vcpu_num = 2; 8486 // Check the initial number of CPUs the guest sees 8487 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8488 // Check the initial number of vcpu threads in the CH process 8489 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8490 8491 let vcpu_num = 6; 8492 // Hotplug some CPUs 8493 resize_command(&api_socket, Some(vcpu_num), None, None, None); 8494 // Wait to make sure CPUs are added 8495 thread::sleep(std::time::Duration::new(10, 0)); 8496 // Check the guest sees the correct number 8497 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8498 // Check the CH process has the correct number of vcpu threads 8499 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8500 8501 let vcpu_num = 4; 8502 // Remove some CPUs. Note that Windows doesn't support hot-remove. 
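            // For reference, the resize_command() helper used below boils down to a
            // ch-remote invocation along these lines (socket path abbreviated; the
            // exact flags are an assumption about the installed ch-remote):
            //   ch-remote --api-socket <api_socket> resize --cpus 4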
8503 resize_command(&api_socket, Some(vcpu_num), None, None, None); 8504 // Wait to make sure CPUs are removed 8505 thread::sleep(std::time::Duration::new(10, 0)); 8506 // Reboot to let Windows catch up 8507 windows_guest.reboot(); 8508 // Wait to make sure Windows completely rebooted 8509 thread::sleep(std::time::Duration::new(60, 0)); 8510 // Check the guest sees the correct number 8511 assert_eq!(windows_guest.cpu_count(), vcpu_num); 8512 // Check the CH process has the correct number of vcpu threads 8513 assert_eq!(vcpu_threads_count(child.id()), vcpu_num); 8514 8515 windows_guest.shutdown(); 8516 }); 8517 8518 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8519 let _ = child.kill(); 8520 let output = child.wait_with_output().unwrap(); 8521 8522 let _ = child_dnsmasq.kill(); 8523 let _ = child_dnsmasq.wait(); 8524 8525 handle_child_output(r, &output); 8526 } 8527 8528 #[test] 8529 #[cfg(not(feature = "mshv"))] 8530 #[cfg(not(target_arch = "aarch64"))] 8531 fn test_windows_guest_ram_hotplug() { 8532 let windows_guest = WindowsGuest::new(); 8533 8534 let mut ovmf_path = dirs::home_dir().unwrap(); 8535 ovmf_path.push("workloads"); 8536 ovmf_path.push(OVMF_NAME); 8537 8538 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8539 let api_socket = temp_api_path(&tmp_dir); 8540 8541 let mut child = GuestCommand::new(windows_guest.guest()) 8542 .args(["--api-socket", &api_socket]) 8543 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8544 .args(["--memory", "size=2G,hotplug_size=5G"]) 8545 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8546 .args(["--serial", "tty"]) 8547 .args(["--console", "off"]) 8548 .default_disks() 8549 .default_net() 8550 .capture_output() 8551 .spawn() 8552 .unwrap(); 8553 8554 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8555 8556 let r = std::panic::catch_unwind(|| { 8557 // Wait to make sure Windows boots up 8558 assert!(windows_guest.wait_for_boot()); 8559 8560 let ram_size = 2 * 1024 * 1024 * 1024; 8561 // Check the initial number of RAM the guest sees 8562 let current_ram_size = windows_guest.ram_size(); 8563 // This size seems to be reserved by the system and thus the 8564 // reported amount differs by this constant value. 8565 let reserved_ram_size = ram_size - current_ram_size; 8566 // Verify that there's not more than 4mb constant diff wasted 8567 // by the reserved ram. 8568 assert!(reserved_ram_size < 4 * 1024 * 1024); 8569 8570 let ram_size = 4 * 1024 * 1024 * 1024; 8571 // Hotplug some RAM 8572 resize_command(&api_socket, None, Some(ram_size), None, None); 8573 // Wait to make sure RAM has been added 8574 thread::sleep(std::time::Duration::new(10, 0)); 8575 // Check the guest sees the correct number 8576 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 8577 8578 let ram_size = 3 * 1024 * 1024 * 1024; 8579 // Unplug some RAM. Note that hot-remove most likely won't work. 
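            // Worked example of the bookkeeping above, assuming the guest reported,
            // say, 2 GiB - 2 MiB at boot: reserved_ram_size would be 2 MiB, so after
            // the 4 GiB hotplug the test expects ram_size() == 4 GiB - 2 MiB, and
            // after the 3 GiB resize below (plus a reboot) 3 GiB - 2 MiB.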
8580 resize_command(&api_socket, None, Some(ram_size), None, None); 8581 // Wait to make sure RAM has been added 8582 thread::sleep(std::time::Duration::new(10, 0)); 8583 // Reboot to let Windows catch up 8584 windows_guest.reboot(); 8585 // Wait to make sure guest completely rebooted 8586 thread::sleep(std::time::Duration::new(60, 0)); 8587 // Check the guest sees the correct number 8588 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size); 8589 8590 windows_guest.shutdown(); 8591 }); 8592 8593 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8594 let _ = child.kill(); 8595 let output = child.wait_with_output().unwrap(); 8596 8597 let _ = child_dnsmasq.kill(); 8598 let _ = child_dnsmasq.wait(); 8599 8600 handle_child_output(r, &output); 8601 } 8602 8603 #[test] 8604 #[cfg(not(feature = "mshv"))] 8605 fn test_windows_guest_netdev_hotplug() { 8606 let windows_guest = WindowsGuest::new(); 8607 8608 let mut ovmf_path = dirs::home_dir().unwrap(); 8609 ovmf_path.push("workloads"); 8610 ovmf_path.push(OVMF_NAME); 8611 8612 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8613 let api_socket = temp_api_path(&tmp_dir); 8614 8615 let mut child = GuestCommand::new(windows_guest.guest()) 8616 .args(["--api-socket", &api_socket]) 8617 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8618 .args(["--memory", "size=4G"]) 8619 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8620 .args(["--serial", "tty"]) 8621 .args(["--console", "off"]) 8622 .default_disks() 8623 .default_net() 8624 .capture_output() 8625 .spawn() 8626 .unwrap(); 8627 8628 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8629 8630 let r = std::panic::catch_unwind(|| { 8631 // Wait to make sure Windows boots up 8632 assert!(windows_guest.wait_for_boot()); 8633 8634 // Initially present network device 8635 let netdev_num = 1; 8636 assert_eq!(windows_guest.netdev_count(), netdev_num); 8637 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8638 8639 // Hotplug network device 8640 let (cmd_success, cmd_output) = remote_command_w_output( 8641 &api_socket, 8642 "add-net", 8643 Some(windows_guest.guest().default_net_string().as_str()), 8644 ); 8645 assert!(cmd_success); 8646 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\"")); 8647 thread::sleep(std::time::Duration::new(5, 0)); 8648 // Verify the device is on the system 8649 let netdev_num = 2; 8650 assert_eq!(windows_guest.netdev_count(), netdev_num); 8651 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8652 8653 // Remove network device 8654 let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2")); 8655 assert!(cmd_success); 8656 thread::sleep(std::time::Duration::new(5, 0)); 8657 // Verify the device has been removed 8658 let netdev_num = 1; 8659 assert_eq!(windows_guest.netdev_count(), netdev_num); 8660 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8661 8662 windows_guest.shutdown(); 8663 }); 8664 8665 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8666 let _ = child.kill(); 8667 let output = child.wait_with_output().unwrap(); 8668 8669 let _ = child_dnsmasq.kill(); 8670 let _ = child_dnsmasq.wait(); 8671 8672 handle_child_output(r, &output); 8673 } 8674 8675 #[test] 8676 #[ignore = "See #6037"] 8677 #[cfg(not(feature = "mshv"))] 8678 #[cfg(not(target_arch = "aarch64"))] 8679 fn test_windows_guest_disk_hotplug() { 8680 let windows_guest = WindowsGuest::new(); 8681 8682 let mut ovmf_path = dirs::home_dir().unwrap(); 8683 ovmf_path.push("workloads"); 
8684 ovmf_path.push(OVMF_NAME); 8685 8686 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8687 let api_socket = temp_api_path(&tmp_dir); 8688 8689 let mut child = GuestCommand::new(windows_guest.guest()) 8690 .args(["--api-socket", &api_socket]) 8691 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8692 .args(["--memory", "size=4G"]) 8693 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8694 .args(["--serial", "tty"]) 8695 .args(["--console", "off"]) 8696 .default_disks() 8697 .default_net() 8698 .capture_output() 8699 .spawn() 8700 .unwrap(); 8701 8702 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8703 8704 let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100); 8705 8706 let r = std::panic::catch_unwind(|| { 8707 // Wait to make sure Windows boots up 8708 assert!(windows_guest.wait_for_boot()); 8709 8710 // Initially present disk device 8711 let disk_num = 1; 8712 assert_eq!(windows_guest.disk_count(), disk_num); 8713 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8714 8715 // Hotplug disk device 8716 let (cmd_success, cmd_output) = remote_command_w_output( 8717 &api_socket, 8718 "add-disk", 8719 Some(format!("path={disk},readonly=off").as_str()), 8720 ); 8721 assert!(cmd_success); 8722 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\"")); 8723 thread::sleep(std::time::Duration::new(5, 0)); 8724 // Online disk device 8725 windows_guest.disks_set_rw(); 8726 windows_guest.disks_online(); 8727 // Verify the device is on the system 8728 let disk_num = 2; 8729 assert_eq!(windows_guest.disk_count(), disk_num); 8730 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8731 8732 let data = "hello"; 8733 let fname = "d:\\world"; 8734 windows_guest.disk_file_put(fname, data); 8735 8736 // Unmount disk device 8737 let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2")); 8738 assert!(cmd_success); 8739 thread::sleep(std::time::Duration::new(5, 0)); 8740 // Verify the device has been removed 8741 let disk_num = 1; 8742 assert_eq!(windows_guest.disk_count(), disk_num); 8743 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8744 8745 // Remount and check the file exists with the expected contents 8746 let (cmd_success, _cmd_output) = remote_command_w_output( 8747 &api_socket, 8748 "add-disk", 8749 Some(format!("path={disk},readonly=off").as_str()), 8750 ); 8751 assert!(cmd_success); 8752 thread::sleep(std::time::Duration::new(5, 0)); 8753 let out = windows_guest.disk_file_read(fname); 8754 assert_eq!(data, out.trim()); 8755 8756 // Intentionally no unmount, it'll happen at shutdown. 
8757 8758 windows_guest.shutdown(); 8759 }); 8760 8761 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8762 let _ = child.kill(); 8763 let output = child.wait_with_output().unwrap(); 8764 8765 let _ = child_dnsmasq.kill(); 8766 let _ = child_dnsmasq.wait(); 8767 8768 handle_child_output(r, &output); 8769 } 8770 8771 #[test] 8772 #[ignore = "See #6037"] 8773 #[cfg(not(feature = "mshv"))] 8774 #[cfg(not(target_arch = "aarch64"))] 8775 fn test_windows_guest_disk_hotplug_multi() { 8776 let windows_guest = WindowsGuest::new(); 8777 8778 let mut ovmf_path = dirs::home_dir().unwrap(); 8779 ovmf_path.push("workloads"); 8780 ovmf_path.push(OVMF_NAME); 8781 8782 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8783 let api_socket = temp_api_path(&tmp_dir); 8784 8785 let mut child = GuestCommand::new(windows_guest.guest()) 8786 .args(["--api-socket", &api_socket]) 8787 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8788 .args(["--memory", "size=2G"]) 8789 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8790 .args(["--serial", "tty"]) 8791 .args(["--console", "off"]) 8792 .default_disks() 8793 .default_net() 8794 .capture_output() 8795 .spawn() 8796 .unwrap(); 8797 8798 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8799 8800 // Predefined data to used at various test stages 8801 let disk_test_data: [[String; 4]; 2] = [ 8802 [ 8803 "_disk2".to_string(), 8804 windows_guest.disk_new(WindowsGuest::FS_FAT, 123), 8805 "d:\\world".to_string(), 8806 "hello".to_string(), 8807 ], 8808 [ 8809 "_disk3".to_string(), 8810 windows_guest.disk_new(WindowsGuest::FS_NTFS, 333), 8811 "e:\\hello".to_string(), 8812 "world".to_string(), 8813 ], 8814 ]; 8815 8816 let r = std::panic::catch_unwind(|| { 8817 // Wait to make sure Windows boots up 8818 assert!(windows_guest.wait_for_boot()); 8819 8820 // Initially present disk device 8821 let disk_num = 1; 8822 assert_eq!(windows_guest.disk_count(), disk_num); 8823 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8824 8825 for it in &disk_test_data { 8826 let disk_id = it[0].as_str(); 8827 let disk = it[1].as_str(); 8828 // Hotplug disk device 8829 let (cmd_success, cmd_output) = remote_command_w_output( 8830 &api_socket, 8831 "add-disk", 8832 Some(format!("path={disk},readonly=off").as_str()), 8833 ); 8834 assert!(cmd_success); 8835 assert!(String::from_utf8_lossy(&cmd_output) 8836 .contains(format!("\"id\":\"{disk_id}\"").as_str())); 8837 thread::sleep(std::time::Duration::new(5, 0)); 8838 // Online disk devices 8839 windows_guest.disks_set_rw(); 8840 windows_guest.disks_online(); 8841 } 8842 // Verify the devices are on the system 8843 let disk_num = (disk_test_data.len() + 1) as u8; 8844 assert_eq!(windows_guest.disk_count(), disk_num); 8845 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8846 8847 // Put test data 8848 for it in &disk_test_data { 8849 let fname = it[2].as_str(); 8850 let data = it[3].as_str(); 8851 windows_guest.disk_file_put(fname, data); 8852 } 8853 8854 // Unmount disk devices 8855 for it in &disk_test_data { 8856 let disk_id = it[0].as_str(); 8857 let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id)); 8858 assert!(cmd_success); 8859 thread::sleep(std::time::Duration::new(5, 0)); 8860 } 8861 8862 // Verify the devices have been removed 8863 let disk_num = 1; 8864 assert_eq!(windows_guest.disk_count(), disk_num); 8865 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num); 8866 8867 // Remount 8868 for it in &disk_test_data { 8869 let disk = it[1].as_str(); 8870 let (cmd_success, 
_cmd_output) = remote_command_w_output( 8871 &api_socket, 8872 "add-disk", 8873 Some(format!("path={disk},readonly=off").as_str()), 8874 ); 8875 assert!(cmd_success); 8876 thread::sleep(std::time::Duration::new(5, 0)); 8877 } 8878 8879 // Check the files exists with the expected contents 8880 for it in &disk_test_data { 8881 let fname = it[2].as_str(); 8882 let data = it[3].as_str(); 8883 let out = windows_guest.disk_file_read(fname); 8884 assert_eq!(data, out.trim()); 8885 } 8886 8887 // Intentionally no unmount, it'll happen at shutdown. 8888 8889 windows_guest.shutdown(); 8890 }); 8891 8892 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8893 let _ = child.kill(); 8894 let output = child.wait_with_output().unwrap(); 8895 8896 let _ = child_dnsmasq.kill(); 8897 let _ = child_dnsmasq.wait(); 8898 8899 handle_child_output(r, &output); 8900 } 8901 8902 #[test] 8903 #[cfg(not(feature = "mshv"))] 8904 #[cfg(not(target_arch = "aarch64"))] 8905 fn test_windows_guest_netdev_multi() { 8906 let windows_guest = WindowsGuest::new(); 8907 8908 let mut ovmf_path = dirs::home_dir().unwrap(); 8909 ovmf_path.push("workloads"); 8910 ovmf_path.push(OVMF_NAME); 8911 8912 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap(); 8913 let api_socket = temp_api_path(&tmp_dir); 8914 8915 let mut child = GuestCommand::new(windows_guest.guest()) 8916 .args(["--api-socket", &api_socket]) 8917 .args(["--cpus", "boot=2,kvm_hyperv=on"]) 8918 .args(["--memory", "size=4G"]) 8919 .args(["--kernel", ovmf_path.to_str().unwrap()]) 8920 .args(["--serial", "tty"]) 8921 .args(["--console", "off"]) 8922 .default_disks() 8923 // The multi net dev config is borrowed from test_multiple_network_interfaces 8924 .args([ 8925 "--net", 8926 windows_guest.guest().default_net_string().as_str(), 8927 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0", 8928 "tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0", 8929 ]) 8930 .capture_output() 8931 .spawn() 8932 .unwrap(); 8933 8934 let mut child_dnsmasq = windows_guest.run_dnsmasq(); 8935 8936 let r = std::panic::catch_unwind(|| { 8937 // Wait to make sure Windows boots up 8938 assert!(windows_guest.wait_for_boot()); 8939 8940 let netdev_num = 3; 8941 assert_eq!(windows_guest.netdev_count(), netdev_num); 8942 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num); 8943 8944 let tap_count = exec_host_command_output("ip link | grep -c mytap42"); 8945 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1"); 8946 8947 windows_guest.shutdown(); 8948 }); 8949 8950 let _ = child.wait_timeout(std::time::Duration::from_secs(60)); 8951 let _ = child.kill(); 8952 let output = child.wait_with_output().unwrap(); 8953 8954 let _ = child_dnsmasq.kill(); 8955 let _ = child_dnsmasq.wait(); 8956 8957 handle_child_output(r, &output); 8958 } 8959 } 8960 8961 #[cfg(target_arch = "x86_64")] 8962 mod sgx { 8963 use crate::*; 8964 8965 #[test] 8966 fn test_sgx() { 8967 let jammy_image = JAMMY_IMAGE_NAME.to_string(); 8968 let jammy = UbuntuDiskConfig::new(jammy_image); 8969 let guest = Guest::new(Box::new(jammy)); 8970 8971 let mut child = GuestCommand::new(&guest) 8972 .args(["--cpus", "boot=1"]) 8973 .args(["--memory", "size=512M"]) 8974 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 8975 .default_disks() 8976 .default_net() 8977 .args(["--sgx-epc", "id=epc0,size=64M"]) 8978 .capture_output() 8979 .spawn() 8980 .unwrap(); 8981 8982 let r = std::panic::catch_unwind(|| { 8983 guest.wait_vm_boot(None).unwrap(); 8984 8985 // Check 
if SGX is correctly detected in the guest. 8986 guest.check_sgx_support().unwrap(); 8987 8988 // Validate the SGX EPC section is 64MiB. 8989 assert_eq!( 8990 guest 8991 .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2") 8992 .unwrap() 8993 .trim(), 8994 "0x0000000004000000" 8995 ); 8996 }); 8997 8998 let _ = child.kill(); 8999 let output = child.wait_with_output().unwrap(); 9000 9001 handle_child_output(r, &output); 9002 } 9003 } 9004 9005 #[cfg(target_arch = "x86_64")] 9006 mod vfio { 9007 use crate::*; 9008 const NVIDIA_VFIO_DEVICE: &str = "/sys/bus/pci/devices/0002:00:01.0"; 9009 9010 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) { 9011 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string()); 9012 let guest = Guest::new(Box::new(jammy)); 9013 let api_socket = temp_api_path(&guest.tmp_dir); 9014 9015 let mut child = GuestCommand::new(&guest) 9016 .args(["--cpus", "boot=4"]) 9017 .args([ 9018 "--memory", 9019 format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(), 9020 ]) 9021 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9022 .args(["--device", format!("path={NVIDIA_VFIO_DEVICE}").as_str()]) 9023 .args(["--api-socket", &api_socket]) 9024 .default_disks() 9025 .default_net() 9026 .capture_output() 9027 .spawn() 9028 .unwrap(); 9029 9030 let r = std::panic::catch_unwind(|| { 9031 guest.wait_vm_boot(None).unwrap(); 9032 9033 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9034 9035 guest.enable_memory_hotplug(); 9036 9037 // Add RAM to the VM 9038 let desired_ram = 6 << 30; 9039 resize_command(&api_socket, None, Some(desired_ram), None, None); 9040 thread::sleep(std::time::Duration::new(30, 0)); 9041 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9042 9043 // Check the VFIO device works when RAM is increased to 6GiB 9044 guest.check_nvidia_gpu(); 9045 }); 9046 9047 let _ = child.kill(); 9048 let output = child.wait_with_output().unwrap(); 9049 9050 handle_child_output(r, &output); 9051 } 9052 9053 #[test] 9054 fn test_nvidia_card_memory_hotplug_acpi() { 9055 test_nvidia_card_memory_hotplug("acpi") 9056 } 9057 9058 #[test] 9059 fn test_nvidia_card_memory_hotplug_virtio_mem() { 9060 test_nvidia_card_memory_hotplug("virtio-mem") 9061 } 9062 9063 #[test] 9064 fn test_nvidia_card_pci_hotplug() { 9065 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string()); 9066 let guest = Guest::new(Box::new(jammy)); 9067 let api_socket = temp_api_path(&guest.tmp_dir); 9068 9069 let mut child = GuestCommand::new(&guest) 9070 .args(["--cpus", "boot=4"]) 9071 .args(["--memory", "size=4G"]) 9072 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9073 .args(["--api-socket", &api_socket]) 9074 .default_disks() 9075 .default_net() 9076 .capture_output() 9077 .spawn() 9078 .unwrap(); 9079 9080 let r = std::panic::catch_unwind(|| { 9081 guest.wait_vm_boot(None).unwrap(); 9082 9083 // Hotplug the card to the VM 9084 let (cmd_success, cmd_output) = remote_command_w_output( 9085 &api_socket, 9086 "add-device", 9087 Some(format!("id=vfio0,path={NVIDIA_VFIO_DEVICE}").as_str()), 9088 ); 9089 assert!(cmd_success); 9090 assert!(String::from_utf8_lossy(&cmd_output) 9091 .contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}")); 9092 9093 thread::sleep(std::time::Duration::new(10, 0)); 9094 9095 // Check the VFIO device works after hotplug 9096 guest.check_nvidia_gpu(); 9097 }); 9098 9099 let _ = child.kill(); 9100 let output = child.wait_with_output().unwrap(); 9101 
9102 handle_child_output(r, &output); 9103 } 9104 9105 #[test] 9106 fn test_nvidia_card_reboot() { 9107 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string()); 9108 let guest = Guest::new(Box::new(jammy)); 9109 let api_socket = temp_api_path(&guest.tmp_dir); 9110 9111 let mut child = GuestCommand::new(&guest) 9112 .args(["--cpus", "boot=4"]) 9113 .args(["--memory", "size=4G"]) 9114 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9115 .args([ 9116 "--device", 9117 format!("path={NVIDIA_VFIO_DEVICE},iommu=on").as_str(), 9118 ]) 9119 .args(["--api-socket", &api_socket]) 9120 .default_disks() 9121 .default_net() 9122 .capture_output() 9123 .spawn() 9124 .unwrap(); 9125 9126 let r = std::panic::catch_unwind(|| { 9127 guest.wait_vm_boot(None).unwrap(); 9128 9129 // Check the VFIO device works after boot 9130 guest.check_nvidia_gpu(); 9131 9132 guest.reboot_linux(0, None); 9133 9134 // Check the VFIO device works after reboot 9135 guest.check_nvidia_gpu(); 9136 }); 9137 9138 let _ = child.kill(); 9139 let output = child.wait_with_output().unwrap(); 9140 9141 handle_child_output(r, &output); 9142 } 9143 9144 #[test] 9145 fn test_nvidia_card_iommu_address_width() { 9146 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string()); 9147 let guest = Guest::new(Box::new(jammy)); 9148 let api_socket = temp_api_path(&guest.tmp_dir); 9149 9150 let mut child = GuestCommand::new(&guest) 9151 .args(["--cpus", "boot=4"]) 9152 .args(["--memory", "size=4G"]) 9153 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()]) 9154 .args(["--device", format!("path={NVIDIA_VFIO_DEVICE}").as_str()]) 9155 .args([ 9156 "--platform", 9157 "num_pci_segments=2,iommu_segments=1,iommu_address_width=42", 9158 ]) 9159 .args(["--api-socket", &api_socket]) 9160 .default_disks() 9161 .default_net() 9162 .capture_output() 9163 .spawn() 9164 .unwrap(); 9165 9166 let r = std::panic::catch_unwind(|| { 9167 guest.wait_vm_boot(None).unwrap(); 9168 9169 assert!(guest 9170 .ssh_command("sudo dmesg") 9171 .unwrap() 9172 .contains("input address: 42 bits")); 9173 }); 9174 9175 let _ = child.kill(); 9176 let output = child.wait_with_output().unwrap(); 9177 9178 handle_child_output(r, &output); 9179 } 9180 } 9181 9182 mod live_migration { 9183 use crate::*; 9184 9185 fn start_live_migration( 9186 migration_socket: &str, 9187 src_api_socket: &str, 9188 dest_api_socket: &str, 9189 local: bool, 9190 ) -> bool { 9191 // Start to receive migration from the destination VM 9192 let mut receive_migration = Command::new(clh_command("ch-remote")) 9193 .args([ 9194 &format!("--api-socket={dest_api_socket}"), 9195 "receive-migration", 9196 &format! {"unix:{migration_socket}"}, 9197 ]) 9198 .stderr(Stdio::piped()) 9199 .stdout(Stdio::piped()) 9200 .spawn() 9201 .unwrap(); 9202 // Give it '1s' to make sure the 'migration_socket' file is properly created 9203 thread::sleep(std::time::Duration::new(1, 0)); 9204 // Start to send migration from the source VM 9205 9206 let mut args = [ 9207 format!("--api-socket={}", &src_api_socket), 9208 "send-migration".to_string(), 9209 format! 
{"unix:{migration_socket}"}, 9210 ] 9211 .to_vec(); 9212 9213 if local { 9214 args.insert(2, "--local".to_string()); 9215 } 9216 9217 let mut send_migration = Command::new(clh_command("ch-remote")) 9218 .args(&args) 9219 .stderr(Stdio::piped()) 9220 .stdout(Stdio::piped()) 9221 .spawn() 9222 .unwrap(); 9223 9224 // The 'send-migration' command should be executed successfully within the given timeout 9225 let send_success = if let Some(status) = send_migration 9226 .wait_timeout(std::time::Duration::from_secs(30)) 9227 .unwrap() 9228 { 9229 status.success() 9230 } else { 9231 false 9232 }; 9233 9234 if !send_success { 9235 let _ = send_migration.kill(); 9236 let output = send_migration.wait_with_output().unwrap(); 9237 eprintln!( 9238 "\n\n==== Start 'send_migration' output ==== \ 9239 \n\n---stdout---\n{}\n\n---stderr---\n{} \ 9240 \n\n==== End 'send_migration' output ====\n\n", 9241 String::from_utf8_lossy(&output.stdout), 9242 String::from_utf8_lossy(&output.stderr) 9243 ); 9244 } 9245 9246 // The 'receive-migration' command should be executed successfully within the given timeout 9247 let receive_success = if let Some(status) = receive_migration 9248 .wait_timeout(std::time::Duration::from_secs(30)) 9249 .unwrap() 9250 { 9251 status.success() 9252 } else { 9253 false 9254 }; 9255 9256 if !receive_success { 9257 let _ = receive_migration.kill(); 9258 let output = receive_migration.wait_with_output().unwrap(); 9259 eprintln!( 9260 "\n\n==== Start 'receive_migration' output ==== \ 9261 \n\n---stdout---\n{}\n\n---stderr---\n{} \ 9262 \n\n==== End 'receive_migration' output ====\n\n", 9263 String::from_utf8_lossy(&output.stdout), 9264 String::from_utf8_lossy(&output.stderr) 9265 ); 9266 } 9267 9268 send_success && receive_success 9269 } 9270 9271 fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! { 9272 let mut src_vm = src_vm; 9273 let mut dest_vm = dest_vm; 9274 9275 let _ = src_vm.kill(); 9276 let src_output = src_vm.wait_with_output().unwrap(); 9277 eprintln!( 9278 "\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====", 9279 String::from_utf8_lossy(&src_output.stdout) 9280 ); 9281 eprintln!( 9282 "\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====", 9283 String::from_utf8_lossy(&src_output.stderr) 9284 ); 9285 let _ = dest_vm.kill(); 9286 let dest_output = dest_vm.wait_with_output().unwrap(); 9287 eprintln!( 9288 "\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====", 9289 String::from_utf8_lossy(&dest_output.stdout) 9290 ); 9291 eprintln!( 9292 "\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====", 9293 String::from_utf8_lossy(&dest_output.stderr) 9294 ); 9295 9296 if let Some(ovs_vm) = ovs_vm { 9297 let mut ovs_vm = ovs_vm; 9298 let _ = ovs_vm.kill(); 9299 let ovs_output = ovs_vm.wait_with_output().unwrap(); 9300 eprintln!( 9301 "\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====", 9302 String::from_utf8_lossy(&ovs_output.stdout) 9303 ); 9304 eprintln!( 9305 "\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====", 9306 String::from_utf8_lossy(&ovs_output.stderr) 9307 ); 9308 9309 cleanup_ovs_dpdk(); 9310 } 9311 9312 panic!("Test failed: {message}") 9313 } 9314 9315 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 9316 // same host. It ensures the following behaviors: 9317 // 1. 
The source VM is up and functional (including various virtio-devices are working properly); 9318 // 2. The 'send-migration' and 'receive-migration' command finished successfully; 9319 // 3. The source VM terminated gracefully after live migration; 9320 // 4. The destination VM is functional (including various virtio-devices are working properly) after 9321 // live migration; 9322 // Note: This test does not use vsock as we can't create two identical vsock on the same host. 9323 fn _test_live_migration(upgrade_test: bool, local: bool) { 9324 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9325 let guest = Guest::new(Box::new(focal)); 9326 let kernel_path = direct_kernel_boot_path(); 9327 let console_text = String::from("On a branch floating down river a cricket, singing."); 9328 let net_id = "net123"; 9329 let net_params = format!( 9330 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9331 net_id, guest.network.guest_mac, guest.network.host_ip 9332 ); 9333 9334 let memory_param: &[&str] = if local { 9335 &["--memory", "size=4G,shared=on"] 9336 } else { 9337 &["--memory", "size=4G"] 9338 }; 9339 9340 let boot_vcpus = 2; 9341 let max_vcpus = 4; 9342 9343 let pmem_temp_file = TempFile::new().unwrap(); 9344 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9345 std::process::Command::new("mkfs.ext4") 9346 .arg(pmem_temp_file.as_path()) 9347 .output() 9348 .expect("Expect creating disk image to succeed"); 9349 let pmem_path = String::from("/dev/pmem0"); 9350 9351 // Start the source VM 9352 let src_vm_path = if !upgrade_test { 9353 clh_command("cloud-hypervisor") 9354 } else { 9355 cloud_hypervisor_release_path() 9356 }; 9357 let src_api_socket = temp_api_path(&guest.tmp_dir); 9358 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9359 src_vm_cmd 9360 .args([ 9361 "--cpus", 9362 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9363 ]) 9364 .args(memory_param) 9365 .args(["--kernel", kernel_path.to_str().unwrap()]) 9366 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9367 .default_disks() 9368 .args(["--net", net_params.as_str()]) 9369 .args(["--api-socket", &src_api_socket]) 9370 .args([ 9371 "--pmem", 9372 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9373 ]); 9374 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9375 9376 // Start the destination VM 9377 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9378 dest_api_socket.push_str(".dest"); 9379 let mut dest_child = GuestCommand::new(&guest) 9380 .args(["--api-socket", &dest_api_socket]) 9381 .capture_output() 9382 .spawn() 9383 .unwrap(); 9384 9385 let r = std::panic::catch_unwind(|| { 9386 guest.wait_vm_boot(None).unwrap(); 9387 9388 // Make sure the source VM is functional 9389 // Check the number of vCPUs 9390 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9391 9392 // Check the guest RAM 9393 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9394 9395 // Check the guest virtio-devices, e.g. block, rng, console, and net 9396 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9397 9398 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9399 // to make sure that removing and adding back the virtio-net device does 9400 // not break the live-migration support for virtio-pci. 
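            // For reference, the two remote_command() calls in the block below map to
            // ch-remote invocations roughly like these (socket path abbreviated, net
            // parameters as built into net_params above):
            //   ch-remote --api-socket <src_api_socket> remove-device net123
            //   ch-remote --api-socket <src_api_socket> add-net "id=net123,tap=,mac=...,ip=...,mask=255.255.255.0"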
9401 #[cfg(target_arch = "x86_64")] 9402 { 9403 assert!(remote_command( 9404 &src_api_socket, 9405 "remove-device", 9406 Some(net_id), 9407 )); 9408 thread::sleep(std::time::Duration::new(10, 0)); 9409 9410 // Plug the virtio-net device again 9411 assert!(remote_command( 9412 &src_api_socket, 9413 "add-net", 9414 Some(net_params.as_str()), 9415 )); 9416 thread::sleep(std::time::Duration::new(10, 0)); 9417 } 9418 9419 // Start the live-migration 9420 let migration_socket = String::from( 9421 guest 9422 .tmp_dir 9423 .as_path() 9424 .join("live-migration.sock") 9425 .to_str() 9426 .unwrap(), 9427 ); 9428 9429 assert!( 9430 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9431 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9432 ); 9433 }); 9434 9435 // Check and report any errors occurred during the live-migration 9436 if r.is_err() { 9437 print_and_panic( 9438 src_child, 9439 dest_child, 9440 None, 9441 "Error occurred during live-migration", 9442 ); 9443 } 9444 9445 // Check the source vm has been terminated successful (give it '3s' to settle) 9446 thread::sleep(std::time::Duration::new(3, 0)); 9447 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 9448 print_and_panic( 9449 src_child, 9450 dest_child, 9451 None, 9452 "source VM was not terminated successfully.", 9453 ); 9454 }; 9455 9456 // Post live-migration check to make sure the destination VM is functional 9457 let r = std::panic::catch_unwind(|| { 9458 // Perform same checks to validate VM has been properly migrated 9459 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9460 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9461 9462 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9463 }); 9464 9465 // Clean-up the destination VM and make sure it terminated correctly 9466 let _ = dest_child.kill(); 9467 let dest_output = dest_child.wait_with_output().unwrap(); 9468 handle_child_output(r, &dest_output); 9469 9470 // Check the destination VM has the expected 'console_text' from its output 9471 let r = std::panic::catch_unwind(|| { 9472 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9473 }); 9474 handle_child_output(r, &dest_output); 9475 } 9476 9477 fn _test_live_migration_balloon(upgrade_test: bool, local: bool) { 9478 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9479 let guest = Guest::new(Box::new(focal)); 9480 let kernel_path = direct_kernel_boot_path(); 9481 let console_text = String::from("On a branch floating down river a cricket, singing."); 9482 let net_id = "net123"; 9483 let net_params = format!( 9484 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9485 net_id, guest.network.guest_mac, guest.network.host_ip 9486 ); 9487 9488 let memory_param: &[&str] = if local { 9489 &[ 9490 "--memory", 9491 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on", 9492 "--balloon", 9493 "size=0", 9494 ] 9495 } else { 9496 &[ 9497 "--memory", 9498 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G", 9499 "--balloon", 9500 "size=0", 9501 ] 9502 }; 9503 9504 let boot_vcpus = 2; 9505 let max_vcpus = 4; 9506 9507 let pmem_temp_file = TempFile::new().unwrap(); 9508 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9509 std::process::Command::new("mkfs.ext4") 9510 .arg(pmem_temp_file.as_path()) 9511 .output() 9512 .expect("Expect creating disk image to succeed"); 9513 let pmem_path = String::from("/dev/pmem0"); 9514 9515 // Start the source VM 9516 let src_vm_path 
= if !upgrade_test { 9517 clh_command("cloud-hypervisor") 9518 } else { 9519 cloud_hypervisor_release_path() 9520 }; 9521 let src_api_socket = temp_api_path(&guest.tmp_dir); 9522 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9523 src_vm_cmd 9524 .args([ 9525 "--cpus", 9526 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9527 ]) 9528 .args(memory_param) 9529 .args(["--kernel", kernel_path.to_str().unwrap()]) 9530 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9531 .default_disks() 9532 .args(["--net", net_params.as_str()]) 9533 .args(["--api-socket", &src_api_socket]) 9534 .args([ 9535 "--pmem", 9536 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9537 ]); 9538 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9539 9540 // Start the destination VM 9541 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9542 dest_api_socket.push_str(".dest"); 9543 let mut dest_child = GuestCommand::new(&guest) 9544 .args(["--api-socket", &dest_api_socket]) 9545 .capture_output() 9546 .spawn() 9547 .unwrap(); 9548 9549 let r = std::panic::catch_unwind(|| { 9550 guest.wait_vm_boot(None).unwrap(); 9551 9552 // Make sure the source VM is functional 9553 // Check the number of vCPUs 9554 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9555 9556 // Check the guest RAM 9557 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9558 // Increase the guest RAM 9559 resize_command(&src_api_socket, None, Some(6 << 30), None, None); 9560 thread::sleep(std::time::Duration::new(5, 0)); 9561 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9562 // Use balloon to remove RAM from the VM 9563 resize_command(&src_api_socket, None, None, Some(1 << 30), None); 9564 thread::sleep(std::time::Duration::new(5, 0)); 9565 let total_memory = guest.get_total_memory().unwrap_or_default(); 9566 assert!(total_memory > 4_800_000); 9567 assert!(total_memory < 5_760_000); 9568 9569 // Check the guest virtio-devices, e.g. block, rng, console, and net 9570 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9571 9572 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9573 // to make sure that removing and adding back the virtio-net device does 9574 // not break the live-migration support for virtio-pci. 9575 #[cfg(target_arch = "x86_64")] 9576 { 9577 assert!(remote_command( 9578 &src_api_socket, 9579 "remove-device", 9580 Some(net_id), 9581 )); 9582 thread::sleep(std::time::Duration::new(10, 0)); 9583 9584 // Plug the virtio-net device again 9585 assert!(remote_command( 9586 &src_api_socket, 9587 "add-net", 9588 Some(net_params.as_str()), 9589 )); 9590 thread::sleep(std::time::Duration::new(10, 0)); 9591 } 9592 9593 // Start the live-migration 9594 let migration_socket = String::from( 9595 guest 9596 .tmp_dir 9597 .as_path() 9598 .join("live-migration.sock") 9599 .to_str() 9600 .unwrap(), 9601 ); 9602 9603 assert!( 9604 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9605 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
9606 ); 9607 }); 9608 9609 // Check and report any errors occurred during the live-migration 9610 if r.is_err() { 9611 print_and_panic( 9612 src_child, 9613 dest_child, 9614 None, 9615 "Error occurred during live-migration", 9616 ); 9617 } 9618 9619 // Check the source vm has been terminated successful (give it '3s' to settle) 9620 thread::sleep(std::time::Duration::new(3, 0)); 9621 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 9622 print_and_panic( 9623 src_child, 9624 dest_child, 9625 None, 9626 "source VM was not terminated successfully.", 9627 ); 9628 }; 9629 9630 // Post live-migration check to make sure the destination VM is functional 9631 let r = std::panic::catch_unwind(|| { 9632 // Perform same checks to validate VM has been properly migrated 9633 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9634 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9635 9636 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9637 9638 // Perform checks on guest RAM using balloon 9639 let total_memory = guest.get_total_memory().unwrap_or_default(); 9640 assert!(total_memory > 4_800_000); 9641 assert!(total_memory < 5_760_000); 9642 // Deflate balloon to restore entire RAM to the VM 9643 resize_command(&dest_api_socket, None, None, Some(0), None); 9644 thread::sleep(std::time::Duration::new(5, 0)); 9645 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000); 9646 // Decrease guest RAM with virtio-mem 9647 resize_command(&dest_api_socket, None, Some(5 << 30), None, None); 9648 thread::sleep(std::time::Duration::new(5, 0)); 9649 let total_memory = guest.get_total_memory().unwrap_or_default(); 9650 assert!(total_memory > 4_800_000); 9651 assert!(total_memory < 5_760_000); 9652 }); 9653 9654 // Clean-up the destination VM and make sure it terminated correctly 9655 let _ = dest_child.kill(); 9656 let dest_output = dest_child.wait_with_output().unwrap(); 9657 handle_child_output(r, &dest_output); 9658 9659 // Check the destination VM has the expected 'console_text' from its output 9660 let r = std::panic::catch_unwind(|| { 9661 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9662 }); 9663 handle_child_output(r, &dest_output); 9664 } 9665 9666 fn _test_live_migration_numa(upgrade_test: bool, local: bool) { 9667 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9668 let guest = Guest::new(Box::new(focal)); 9669 let kernel_path = direct_kernel_boot_path(); 9670 let console_text = String::from("On a branch floating down river a cricket, singing."); 9671 let net_id = "net123"; 9672 let net_params = format!( 9673 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9674 net_id, guest.network.guest_mac, guest.network.host_ip 9675 ); 9676 9677 let memory_param: &[&str] = if local { 9678 &[ 9679 "--memory", 9680 "size=0,hotplug_method=virtio-mem,shared=on", 9681 "--memory-zone", 9682 "id=mem0,size=1G,hotplug_size=4G,shared=on", 9683 "id=mem1,size=1G,hotplug_size=4G,shared=on", 9684 "id=mem2,size=2G,hotplug_size=4G,shared=on", 9685 "--numa", 9686 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9687 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9688 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9689 ] 9690 } else { 9691 &[ 9692 "--memory", 9693 "size=0,hotplug_method=virtio-mem", 9694 "--memory-zone", 9695 "id=mem0,size=1G,hotplug_size=4G", 9696 "id=mem1,size=1G,hotplug_size=4G", 9697 
"id=mem2,size=2G,hotplug_size=4G", 9698 "--numa", 9699 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0", 9700 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1", 9701 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2", 9702 ] 9703 }; 9704 9705 let boot_vcpus = 6; 9706 let max_vcpus = 12; 9707 9708 let pmem_temp_file = TempFile::new().unwrap(); 9709 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9710 std::process::Command::new("mkfs.ext4") 9711 .arg(pmem_temp_file.as_path()) 9712 .output() 9713 .expect("Expect creating disk image to succeed"); 9714 let pmem_path = String::from("/dev/pmem0"); 9715 9716 // Start the source VM 9717 let src_vm_path = if !upgrade_test { 9718 clh_command("cloud-hypervisor") 9719 } else { 9720 cloud_hypervisor_release_path() 9721 }; 9722 let src_api_socket = temp_api_path(&guest.tmp_dir); 9723 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9724 src_vm_cmd 9725 .args([ 9726 "--cpus", 9727 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9728 ]) 9729 .args(memory_param) 9730 .args(["--kernel", kernel_path.to_str().unwrap()]) 9731 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9732 .default_disks() 9733 .args(["--net", net_params.as_str()]) 9734 .args(["--api-socket", &src_api_socket]) 9735 .args([ 9736 "--pmem", 9737 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9738 ]); 9739 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9740 9741 // Start the destination VM 9742 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9743 dest_api_socket.push_str(".dest"); 9744 let mut dest_child = GuestCommand::new(&guest) 9745 .args(["--api-socket", &dest_api_socket]) 9746 .capture_output() 9747 .spawn() 9748 .unwrap(); 9749 9750 let r = std::panic::catch_unwind(|| { 9751 guest.wait_vm_boot(None).unwrap(); 9752 9753 // Make sure the source VM is functional 9754 // Check the number of vCPUs 9755 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9756 9757 // Check the guest RAM 9758 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000); 9759 9760 // Check the guest virtio-devices, e.g. block, rng, console, and net 9761 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9762 9763 // Check the NUMA parameters are applied correctly and resize 9764 // each zone to test the case where we migrate a VM with the 9765 // virtio-mem regions being used. 9766 { 9767 guest.check_numa_common( 9768 Some(&[960_000, 960_000, 1_920_000]), 9769 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9770 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9771 ); 9772 9773 // AArch64 currently does not support hotplug, and therefore we only 9774 // test hotplug-related function on x86_64 here. 9775 #[cfg(target_arch = "x86_64")] 9776 { 9777 guest.enable_memory_hotplug(); 9778 9779 // Resize every memory zone and check each associated NUMA node 9780 // has been assigned the right amount of memory. 
9781 resize_zone_command(&src_api_socket, "mem0", "2G"); 9782 resize_zone_command(&src_api_socket, "mem1", "2G"); 9783 resize_zone_command(&src_api_socket, "mem2", "3G"); 9784 thread::sleep(std::time::Duration::new(5, 0)); 9785 9786 guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None); 9787 } 9788 } 9789 9790 // x86_64: Following what's done in the `test_snapshot_restore`, we need 9791 // to make sure that removing and adding back the virtio-net device does 9792 // not break the live-migration support for virtio-pci. 9793 #[cfg(target_arch = "x86_64")] 9794 { 9795 assert!(remote_command( 9796 &src_api_socket, 9797 "remove-device", 9798 Some(net_id), 9799 )); 9800 thread::sleep(std::time::Duration::new(10, 0)); 9801 9802 // Plug the virtio-net device again 9803 assert!(remote_command( 9804 &src_api_socket, 9805 "add-net", 9806 Some(net_params.as_str()), 9807 )); 9808 thread::sleep(std::time::Duration::new(10, 0)); 9809 } 9810 9811 // Start the live-migration 9812 let migration_socket = String::from( 9813 guest 9814 .tmp_dir 9815 .as_path() 9816 .join("live-migration.sock") 9817 .to_str() 9818 .unwrap(), 9819 ); 9820 9821 assert!( 9822 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 9823 "Unsuccessful command: 'send-migration' or 'receive-migration'." 9824 ); 9825 }); 9826 9827 // Check and report any errors occurred during the live-migration 9828 if r.is_err() { 9829 print_and_panic( 9830 src_child, 9831 dest_child, 9832 None, 9833 "Error occurred during live-migration", 9834 ); 9835 } 9836 9837 // Check the source vm has been terminated successful (give it '3s' to settle) 9838 thread::sleep(std::time::Duration::new(3, 0)); 9839 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 9840 print_and_panic( 9841 src_child, 9842 dest_child, 9843 None, 9844 "source VM was not terminated successfully.", 9845 ); 9846 }; 9847 9848 // Post live-migration check to make sure the destination VM is functional 9849 let r = std::panic::catch_unwind(|| { 9850 // Perform same checks to validate VM has been properly migrated 9851 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9852 #[cfg(target_arch = "x86_64")] 9853 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000); 9854 #[cfg(target_arch = "aarch64")] 9855 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 9856 9857 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 9858 9859 // Perform NUMA related checks 9860 { 9861 #[cfg(target_arch = "aarch64")] 9862 { 9863 guest.check_numa_common( 9864 Some(&[960_000, 960_000, 1_920_000]), 9865 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9866 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9867 ); 9868 } 9869 9870 // AArch64 currently does not support hotplug, and therefore we only 9871 // test hotplug-related function on x86_64 here. 9872 #[cfg(target_arch = "x86_64")] 9873 { 9874 guest.check_numa_common( 9875 Some(&[1_920_000, 1_920_000, 2_880_000]), 9876 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]), 9877 Some(&["10 15 20", "20 10 25", "25 30 10"]), 9878 ); 9879 9880 guest.enable_memory_hotplug(); 9881 9882 // Resize every memory zone and check each associated NUMA node 9883 // has been assigned the right amount of memory. 
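                // (Each zone is grown to its full 4G hotplug_size and the vCPUs are resized
                // to max_vcpus, so every NUMA node should report close to 4 GiB and pick up
                // the remaining CPUs from the --numa topology: node 0 gains CPU 9, node 1
                // gains CPUs 6-8, and node 2 gains CPUs 10-11.)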
9884 resize_zone_command(&dest_api_socket, "mem0", "4G"); 9885 resize_zone_command(&dest_api_socket, "mem1", "4G"); 9886 resize_zone_command(&dest_api_socket, "mem2", "4G"); 9887 // Resize to the maximum amount of CPUs and check each NUMA 9888 // node has been assigned the right CPUs set. 9889 resize_command(&dest_api_socket, Some(max_vcpus), None, None, None); 9890 thread::sleep(std::time::Duration::new(5, 0)); 9891 9892 guest.check_numa_common( 9893 Some(&[3_840_000, 3_840_000, 3_840_000]), 9894 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]), 9895 None, 9896 ); 9897 } 9898 } 9899 }); 9900 9901 // Clean-up the destination VM and make sure it terminated correctly 9902 let _ = dest_child.kill(); 9903 let dest_output = dest_child.wait_with_output().unwrap(); 9904 handle_child_output(r, &dest_output); 9905 9906 // Check the destination VM has the expected 'console_text' from its output 9907 let r = std::panic::catch_unwind(|| { 9908 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 9909 }); 9910 handle_child_output(r, &dest_output); 9911 } 9912 9913 fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) { 9914 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 9915 let guest = Guest::new(Box::new(focal)); 9916 let kernel_path = direct_kernel_boot_path(); 9917 let console_text = String::from("On a branch floating down river a cricket, singing."); 9918 let net_id = "net123"; 9919 let net_params = format!( 9920 "id={},tap=,mac={},ip={},mask=255.255.255.0", 9921 net_id, guest.network.guest_mac, guest.network.host_ip 9922 ); 9923 9924 let memory_param: &[&str] = if local { 9925 &["--memory", "size=4G,shared=on"] 9926 } else { 9927 &["--memory", "size=4G"] 9928 }; 9929 9930 let boot_vcpus = 2; 9931 let max_vcpus = 4; 9932 9933 let pmem_temp_file = TempFile::new().unwrap(); 9934 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 9935 std::process::Command::new("mkfs.ext4") 9936 .arg(pmem_temp_file.as_path()) 9937 .output() 9938 .expect("Expect creating disk image to succeed"); 9939 let pmem_path = String::from("/dev/pmem0"); 9940 9941 // Start the source VM 9942 let src_vm_path = if !upgrade_test { 9943 clh_command("cloud-hypervisor") 9944 } else { 9945 cloud_hypervisor_release_path() 9946 }; 9947 let src_api_socket = temp_api_path(&guest.tmp_dir); 9948 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 9949 src_vm_cmd 9950 .args([ 9951 "--cpus", 9952 format!("boot={boot_vcpus},max={max_vcpus}").as_str(), 9953 ]) 9954 .args(memory_param) 9955 .args(["--kernel", kernel_path.to_str().unwrap()]) 9956 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 9957 .default_disks() 9958 .args(["--net", net_params.as_str()]) 9959 .args(["--api-socket", &src_api_socket]) 9960 .args([ 9961 "--pmem", 9962 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(), 9963 ]) 9964 .args(["--watchdog"]); 9965 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap(); 9966 9967 // Start the destination VM 9968 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 9969 dest_api_socket.push_str(".dest"); 9970 let mut dest_child = GuestCommand::new(&guest) 9971 .args(["--api-socket", &dest_api_socket]) 9972 .capture_output() 9973 .spawn() 9974 .unwrap(); 9975 9976 let r = std::panic::catch_unwind(|| { 9977 guest.wait_vm_boot(None).unwrap(); 9978 9979 // Make sure the source VM is functional 9980 // Check the number of vCPUs 9981 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 9982 // 
Check the guest RAM
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
            // Check the guest virtio-devices, e.g. block, rng, console, and net
            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
            // x86_64: Following what's done in the `test_snapshot_restore`, we need
            // to make sure that removing and adding back the virtio-net device does
            // not break the live-migration support for virtio-pci.
            #[cfg(target_arch = "x86_64")]
            {
                assert!(remote_command(
                    &src_api_socket,
                    "remove-device",
                    Some(net_id),
                ));
                thread::sleep(std::time::Duration::new(10, 0));

                // Plug the virtio-net device again
                assert!(remote_command(
                    &src_api_socket,
                    "add-net",
                    Some(net_params.as_str()),
                ));
                thread::sleep(std::time::Duration::new(10, 0));
            }

            // Enable the watchdog and ensure it's functional
            let expected_reboot_count = 1;
            // Enable the watchdog with a 15s timeout
            enable_guest_watchdog(&guest, 15);

            assert_eq!(get_reboot_count(&guest), expected_reboot_count);
            assert_eq!(
                guest
                    .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"")
                    .unwrap()
                    .trim()
                    .parse::<u32>()
                    .unwrap_or_default(),
                1
            );
            // Allow some normal time to elapse to check we don't get spurious reboots
            thread::sleep(std::time::Duration::new(40, 0));
            // Check no reboot
            assert_eq!(get_reboot_count(&guest), expected_reboot_count);

            // Start the live-migration
            let migration_socket = String::from(
                guest
                    .tmp_dir
                    .as_path()
                    .join("live-migration.sock")
                    .to_str()
                    .unwrap(),
            );

            assert!(
                start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
                "Unsuccessful command: 'send-migration' or 'receive-migration'."
            );
        });

        // Check and report any errors that occurred during the live-migration
        if r.is_err() {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "Error occurred during live-migration",
            );
        }

        // Check the source VM has terminated successfully (give it '3s' to settle)
        thread::sleep(std::time::Duration::new(3, 0));
        if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "source VM was not terminated successfully.",
            );
        };

        // Post live-migration check to make sure the destination VM is functional
        let r = std::panic::catch_unwind(|| {
            // Perform the same checks to validate the VM has been properly migrated
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

            guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));

            // Perform checks on the watchdog
            let mut expected_reboot_count = 1;

            // Allow some normal time to elapse to check we don't get spurious reboots
            thread::sleep(std::time::Duration::new(40, 0));
            // Check no reboot
            assert_eq!(get_reboot_count(&guest), expected_reboot_count);

            // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns.
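            // ('echo s' asks the kernel to sync, 'echo c' then forces a crash via sysrq;
            // with the guest hung, the 15s watchdog armed earlier is expected to reset the
            // VM, which the following wait_vm_boot() call observes as a reboot.)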
10081 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap(); 10082 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen 10083 guest.wait_vm_boot(Some(50)).unwrap(); 10084 // Check a reboot is triggered by the watchdog 10085 expected_reboot_count += 1; 10086 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 10087 10088 #[cfg(target_arch = "x86_64")] 10089 { 10090 // Now pause the VM and remain offline for 30s 10091 assert!(remote_command(&dest_api_socket, "pause", None)); 10092 thread::sleep(std::time::Duration::new(30, 0)); 10093 assert!(remote_command(&dest_api_socket, "resume", None)); 10094 10095 // Check no reboot 10096 assert_eq!(get_reboot_count(&guest), expected_reboot_count); 10097 } 10098 }); 10099 10100 // Clean-up the destination VM and make sure it terminated correctly 10101 let _ = dest_child.kill(); 10102 let dest_output = dest_child.wait_with_output().unwrap(); 10103 handle_child_output(r, &dest_output); 10104 10105 // Check the destination VM has the expected 'console_text' from its output 10106 let r = std::panic::catch_unwind(|| { 10107 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 10108 }); 10109 handle_child_output(r, &dest_output); 10110 } 10111 10112 fn _test_live_migration_ovs_dpdk(upgrade_test: bool, local: bool) { 10113 let ovs_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10114 let ovs_guest = Guest::new(Box::new(ovs_focal)); 10115 10116 let migration_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10117 let migration_guest = Guest::new(Box::new(migration_focal)); 10118 let src_api_socket = temp_api_path(&migration_guest.tmp_dir); 10119 10120 // Start two VMs that are connected through ovs-dpdk and one of the VMs is the source VM for live-migration 10121 let (mut ovs_child, mut src_child) = 10122 setup_ovs_dpdk_guests(&ovs_guest, &migration_guest, &src_api_socket, upgrade_test); 10123 10124 // Start the destination VM 10125 let mut dest_api_socket = temp_api_path(&migration_guest.tmp_dir); 10126 dest_api_socket.push_str(".dest"); 10127 let mut dest_child = GuestCommand::new(&migration_guest) 10128 .args(["--api-socket", &dest_api_socket]) 10129 .capture_output() 10130 .spawn() 10131 .unwrap(); 10132 10133 let r = std::panic::catch_unwind(|| { 10134 // Give it '1s' to make sure the 'dest_api_socket' file is properly created 10135 thread::sleep(std::time::Duration::new(1, 0)); 10136 10137 // Start the live-migration 10138 let migration_socket = String::from( 10139 migration_guest 10140 .tmp_dir 10141 .as_path() 10142 .join("live-migration.sock") 10143 .to_str() 10144 .unwrap(), 10145 ); 10146 10147 assert!( 10148 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local), 10149 "Unsuccessful command: 'send-migration' or 'receive-migration'." 
10150 ); 10151 }); 10152 10153 // Check and report any errors occurred during the live-migration 10154 if r.is_err() { 10155 print_and_panic( 10156 src_child, 10157 dest_child, 10158 Some(ovs_child), 10159 "Error occurred during live-migration", 10160 ); 10161 } 10162 10163 // Check the source vm has been terminated successful (give it '3s' to settle) 10164 thread::sleep(std::time::Duration::new(3, 0)); 10165 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 10166 print_and_panic( 10167 src_child, 10168 dest_child, 10169 Some(ovs_child), 10170 "source VM was not terminated successfully.", 10171 ); 10172 }; 10173 10174 // Post live-migration check to make sure the destination VM is functional 10175 let r = std::panic::catch_unwind(|| { 10176 // Perform same checks to validate VM has been properly migrated 10177 // Spawn a new netcat listener in the OVS VM 10178 let guest_ip = ovs_guest.network.guest_ip.clone(); 10179 thread::spawn(move || { 10180 ssh_command_ip( 10181 "nc -l 12345", 10182 &guest_ip, 10183 DEFAULT_SSH_RETRIES, 10184 DEFAULT_SSH_TIMEOUT, 10185 ) 10186 .unwrap(); 10187 }); 10188 10189 // Wait for the server to be listening 10190 thread::sleep(std::time::Duration::new(5, 0)); 10191 10192 // And check the connection is still functional after live-migration 10193 migration_guest 10194 .ssh_command("nc -vz 172.100.0.1 12345") 10195 .unwrap(); 10196 }); 10197 10198 // Clean-up the destination VM and OVS VM, and make sure they terminated correctly 10199 let _ = dest_child.kill(); 10200 let _ = ovs_child.kill(); 10201 let dest_output = dest_child.wait_with_output().unwrap(); 10202 let ovs_output = ovs_child.wait_with_output().unwrap(); 10203 10204 cleanup_ovs_dpdk(); 10205 10206 handle_child_output(r, &dest_output); 10207 handle_child_output(Ok(()), &ovs_output); 10208 } 10209 10210 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the 10211 // same host with Landlock enabled on both VMs. The test validates the following: 10212 // 1. The source VM is up and functional 10213 // 2. Ensure Landlock is enabled on source VM by hotplugging a disk. As the path for this 10214 // disk is not known to the source VM this step will fail. 10215 // 3. The 'send-migration' and 'receive-migration' command finished successfully; 10216 // 4. The source VM terminated gracefully after live migration; 10217 // 5. The destination VM is functional after live migration; 10218 // 6. Ensure Landlock is enabled on destination VM by hotplugging a disk. As the path for 10219 // this disk is not known to the destination VM this step will fail. 
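    //
    // For illustration, the hot-plug attempts in steps 2 and 6 boil down to something like
    // `ch-remote --api-socket=<socket> add-disk path=$HOME/workloads/blk.img,id=test0`.
    // Because only the guest's tmp_dir is allow-listed via --landlock-rules, Landlock is
    // expected to reject the disk path, so the test asserts that remote_command() fails.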
    fn _test_live_migration_with_landlock() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let kernel_path = direct_kernel_boot_path();
        let net_id = "net123";
        let net_params = format!(
            "id={},tap=,mac={},ip={},mask=255.255.255.0",
            net_id, guest.network.guest_mac, guest.network.host_ip
        );

        let boot_vcpus = 2;
        let max_vcpus = 4;

        let mut blk_file_path = dirs::home_dir().unwrap();
        blk_file_path.push("workloads");
        blk_file_path.push("blk.img");

        let src_api_socket = temp_api_path(&guest.tmp_dir);
        let mut src_child = GuestCommand::new(&guest)
            .args([
                "--cpus",
                format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
            ])
            .args(["--memory", "size=4G,shared=on"])
            .args(["--kernel", kernel_path.to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--api-socket", &src_api_socket])
            .args(["--landlock"])
            .args(["--net", net_params.as_str()])
            .args([
                "--landlock-rules",
                format!("path={:?},access=rw", guest.tmp_dir.as_path()).as_str(),
            ])
            .capture_output()
            .spawn()
            .unwrap();

        // Start the destination VM
        let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
        dest_api_socket.push_str(".dest");
        let mut dest_child = GuestCommand::new(&guest)
            .args(["--api-socket", &dest_api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            // Make sure the source VM is functional
            // Check the number of vCPUs
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);

            // Check the guest RAM
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);

            // Check Landlock is enabled by hot-plugging a disk.
            assert!(!remote_command(
                &src_api_socket,
                "add-disk",
                Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()),
            ));

            // Start the live-migration
            let migration_socket = String::from(
                guest
                    .tmp_dir
                    .as_path()
                    .join("live-migration.sock")
                    .to_str()
                    .unwrap(),
            );

            assert!(
                start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, true),
                "Unsuccessful command: 'send-migration' or 'receive-migration'."
            );
        });

        // Check and report any errors that occurred during the live-migration
        if r.is_err() {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "Error occurred during live-migration",
            );
        }

        // Check the source VM has terminated successfully (give it '3s' to settle)
        thread::sleep(std::time::Duration::new(3, 0));
        if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) {
            print_and_panic(
                src_child,
                dest_child,
                None,
                "source VM was not terminated successfully.",
            );
        };

        // Post live-migration check to make sure the destination VM is functional
        let r = std::panic::catch_unwind(|| {
            // Perform the same checks to validate the VM has been properly migrated
            assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
            assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
        });

        // Check Landlock is enabled on the destination VM by hot-plugging a disk.
        assert!(!remote_command(
            &dest_api_socket,
            "add-disk",
            Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()),
        ));

        // Clean-up the destination VM and make sure it terminated correctly
        let _ = dest_child.kill();
        let dest_output = dest_child.wait_with_output().unwrap();
        handle_child_output(r, &dest_output);
    }

    // Function to get an available port
    fn get_available_port() -> u16 {
        TcpListener::bind("127.0.0.1:0")
            .expect("Failed to bind to address")
            .local_addr()
            .unwrap()
            .port()
    }

    fn start_live_migration_tcp(src_api_socket: &str, dest_api_socket: &str) -> bool {
        // Get an available TCP port
        let migration_port = get_available_port();
        let host_ip = "127.0.0.1";

        // Start the 'receive-migration' command on the destination
        let mut receive_migration = Command::new(clh_command("ch-remote"))
            .args([
                &format!("--api-socket={}", dest_api_socket),
                "receive-migration",
                &format!("tcp:0.0.0.0:{}", migration_port),
            ])
            .stdin(Stdio::null())
            .stderr(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()
            .unwrap();

        // Give the destination some time to start listening
        thread::sleep(Duration::from_secs(1));

        // Start the 'send-migration' command on the source
        let mut send_migration = Command::new(clh_command("ch-remote"))
            .args([
                &format!("--api-socket={}", src_api_socket),
                "send-migration",
                &format!("tcp:{}:{}", host_ip, migration_port),
            ])
            .stdin(Stdio::null())
            .stderr(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()
            .unwrap();

        // Check if the 'send-migration' command executed successfully
        let send_success = if let Some(status) = send_migration
            .wait_timeout(Duration::from_secs(60))
            .unwrap()
        {
            status.success()
        } else {
            false
        };

        if !send_success {
            let _ = send_migration.kill();
            let output = send_migration.wait_with_output().unwrap();
            eprintln!(
                "\n\n==== Start 'send_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'send_migration' output ====\n\n",
                String::from_utf8_lossy(&output.stdout),
                String::from_utf8_lossy(&output.stderr)
            );
        }

        // Check if the 'receive-migration' command executed
successfully 10405 let receive_success = if let Some(status) = receive_migration 10406 .wait_timeout(Duration::from_secs(60)) 10407 .unwrap() 10408 { 10409 status.success() 10410 } else { 10411 false 10412 }; 10413 10414 if !receive_success { 10415 let _ = receive_migration.kill(); 10416 let output = receive_migration.wait_with_output().unwrap(); 10417 eprintln!( 10418 "\n\n==== Start 'receive_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'receive_migration' output ====\n\n", 10419 String::from_utf8_lossy(&output.stdout), 10420 String::from_utf8_lossy(&output.stderr) 10421 ); 10422 } 10423 10424 send_success && receive_success 10425 } 10426 10427 fn _test_live_migration_tcp() { 10428 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10429 let guest = Guest::new(Box::new(focal)); 10430 let kernel_path = direct_kernel_boot_path(); 10431 let console_text = String::from("On a branch floating down river a cricket, singing."); 10432 let net_id = "net123"; 10433 let net_params = format!( 10434 "id={},tap=,mac={},ip={},mask=255.255.255.0", 10435 net_id, guest.network.guest_mac, guest.network.host_ip 10436 ); 10437 let memory_param: &[&str] = &["--memory", "size=4G,shared=on"]; 10438 let boot_vcpus = 2; 10439 let max_vcpus = 4; 10440 let pmem_temp_file = TempFile::new().unwrap(); 10441 pmem_temp_file.as_file().set_len(128 << 20).unwrap(); 10442 std::process::Command::new("mkfs.ext4") 10443 .arg(pmem_temp_file.as_path()) 10444 .output() 10445 .expect("Expect creating disk image to succeed"); 10446 let pmem_path = String::from("/dev/pmem0"); 10447 10448 // Start the source VM 10449 let src_vm_path = clh_command("cloud-hypervisor"); 10450 let src_api_socket = temp_api_path(&guest.tmp_dir); 10451 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path); 10452 src_vm_cmd 10453 .args([ 10454 "--cpus", 10455 format!("boot={},max={}", boot_vcpus, max_vcpus).as_str(), 10456 ]) 10457 .args(memory_param) 10458 .args(["--kernel", kernel_path.to_str().unwrap()]) 10459 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10460 .default_disks() 10461 .args(["--net", net_params.as_str()]) 10462 .args(["--api-socket", &src_api_socket]) 10463 .args([ 10464 "--pmem", 10465 format!( 10466 "file={},discard_writes=on", 10467 pmem_temp_file.as_path().to_str().unwrap(), 10468 ) 10469 .as_str(), 10470 ]) 10471 .capture_output(); 10472 let mut src_child = src_vm_cmd.spawn().unwrap(); 10473 10474 // Start the destination VM 10475 let mut dest_api_socket = temp_api_path(&guest.tmp_dir); 10476 dest_api_socket.push_str(".dest"); 10477 let mut dest_child = GuestCommand::new(&guest) 10478 .args(["--api-socket", &dest_api_socket]) 10479 .capture_output() 10480 .spawn() 10481 .unwrap(); 10482 10483 let r = std::panic::catch_unwind(|| { 10484 guest.wait_vm_boot(None).unwrap(); 10485 // Ensure the source VM is running normally 10486 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 10487 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 10488 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 10489 10490 // On x86_64 architecture, remove and re-add the virtio-net device 10491 #[cfg(target_arch = "x86_64")] 10492 { 10493 assert!(remote_command( 10494 &src_api_socket, 10495 "remove-device", 10496 Some(net_id), 10497 )); 10498 thread::sleep(Duration::new(10, 0)); 10499 // Re-add the virtio-net device 10500 assert!(remote_command( 10501 &src_api_socket, 10502 "add-net", 10503 Some(net_params.as_str()), 10504 )); 10505 
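            // Give the re-plugged virtio-net device some time to settle before starting
            // the TCP migration below.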
thread::sleep(Duration::new(10, 0)); 10506 } 10507 // Start TCP live migration 10508 assert!( 10509 start_live_migration_tcp(&src_api_socket, &dest_api_socket), 10510 "Unsuccessful command: 'send-migration' or 'receive-migration'." 10511 ); 10512 }); 10513 10514 // Check and report any errors that occurred during live migration 10515 if r.is_err() { 10516 print_and_panic( 10517 src_child, 10518 dest_child, 10519 None, 10520 "Error occurred during live-migration", 10521 ); 10522 } 10523 10524 // Check the source vm has been terminated successful (give it '3s' to settle) 10525 thread::sleep(std::time::Duration::new(3, 0)); 10526 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) { 10527 print_and_panic( 10528 src_child, 10529 dest_child, 10530 None, 10531 "Source VM was not terminated successfully.", 10532 ); 10533 }; 10534 10535 // After live migration, ensure the destination VM is running normally 10536 let r = std::panic::catch_unwind(|| { 10537 // Perform the same checks to ensure the VM has migrated correctly 10538 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus); 10539 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000); 10540 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path)); 10541 }); 10542 10543 // Clean up the destination VM and ensure it terminates properly 10544 let _ = dest_child.kill(); 10545 let dest_output = dest_child.wait_with_output().unwrap(); 10546 handle_child_output(r, &dest_output); 10547 10548 // Check if the expected `console_text` is present in the destination VM's output 10549 let r = std::panic::catch_unwind(|| { 10550 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text)); 10551 }); 10552 handle_child_output(r, &dest_output); 10553 } 10554 10555 mod live_migration_parallel { 10556 use super::*; 10557 #[test] 10558 fn test_live_migration_basic() { 10559 _test_live_migration(false, false) 10560 } 10561 10562 #[test] 10563 fn test_live_migration_local() { 10564 _test_live_migration(false, true) 10565 } 10566 10567 #[test] 10568 fn test_live_migration_tcp() { 10569 _test_live_migration_tcp(); 10570 } 10571 10572 #[test] 10573 fn test_live_migration_watchdog() { 10574 _test_live_migration_watchdog(false, false) 10575 } 10576 10577 #[test] 10578 fn test_live_migration_watchdog_local() { 10579 _test_live_migration_watchdog(false, true) 10580 } 10581 10582 #[test] 10583 fn test_live_upgrade_basic() { 10584 _test_live_migration(true, false) 10585 } 10586 10587 #[test] 10588 fn test_live_upgrade_local() { 10589 _test_live_migration(true, true) 10590 } 10591 10592 #[test] 10593 fn test_live_upgrade_watchdog() { 10594 _test_live_migration_watchdog(true, false) 10595 } 10596 10597 #[test] 10598 fn test_live_upgrade_watchdog_local() { 10599 _test_live_migration_watchdog(true, true) 10600 } 10601 #[test] 10602 #[cfg(target_arch = "x86_64")] 10603 fn test_live_migration_with_landlock() { 10604 _test_live_migration_with_landlock() 10605 } 10606 } 10607 10608 mod live_migration_sequential { 10609 use super::*; 10610 10611 // NUMA & balloon live migration tests are large so run sequentially 10612 10613 #[test] 10614 fn test_live_migration_balloon() { 10615 _test_live_migration_balloon(false, false) 10616 } 10617 10618 #[test] 10619 fn test_live_migration_balloon_local() { 10620 _test_live_migration_balloon(false, true) 10621 } 10622 10623 #[test] 10624 fn test_live_upgrade_balloon() { 10625 _test_live_migration_balloon(true, false) 10626 } 10627 10628 #[test] 10629 fn 
test_live_upgrade_balloon_local() { 10630 _test_live_migration_balloon(true, true) 10631 } 10632 10633 #[test] 10634 #[cfg(not(feature = "mshv"))] 10635 fn test_live_migration_numa() { 10636 _test_live_migration_numa(false, false) 10637 } 10638 10639 #[test] 10640 #[cfg(not(feature = "mshv"))] 10641 fn test_live_migration_numa_local() { 10642 _test_live_migration_numa(false, true) 10643 } 10644 10645 #[test] 10646 #[cfg(not(feature = "mshv"))] 10647 fn test_live_upgrade_numa() { 10648 _test_live_migration_numa(true, false) 10649 } 10650 10651 #[test] 10652 #[cfg(not(feature = "mshv"))] 10653 fn test_live_upgrade_numa_local() { 10654 _test_live_migration_numa(true, true) 10655 } 10656 10657 // Require to run ovs-dpdk tests sequentially because they rely on the same ovs-dpdk setup 10658 #[test] 10659 #[ignore = "See #5532"] 10660 #[cfg(target_arch = "x86_64")] 10661 #[cfg(not(feature = "mshv"))] 10662 fn test_live_migration_ovs_dpdk() { 10663 _test_live_migration_ovs_dpdk(false, false); 10664 } 10665 10666 #[test] 10667 #[cfg(target_arch = "x86_64")] 10668 #[cfg(not(feature = "mshv"))] 10669 fn test_live_migration_ovs_dpdk_local() { 10670 _test_live_migration_ovs_dpdk(false, true); 10671 } 10672 10673 #[test] 10674 #[ignore = "See #5532"] 10675 #[cfg(target_arch = "x86_64")] 10676 #[cfg(not(feature = "mshv"))] 10677 fn test_live_upgrade_ovs_dpdk() { 10678 _test_live_migration_ovs_dpdk(true, false); 10679 } 10680 10681 #[test] 10682 #[ignore = "See #5532"] 10683 #[cfg(target_arch = "x86_64")] 10684 #[cfg(not(feature = "mshv"))] 10685 fn test_live_upgrade_ovs_dpdk_local() { 10686 _test_live_migration_ovs_dpdk(true, true); 10687 } 10688 } 10689 } 10690 10691 #[cfg(target_arch = "aarch64")] 10692 mod aarch64_acpi { 10693 use crate::*; 10694 10695 #[test] 10696 fn test_simple_launch_acpi() { 10697 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10698 10699 vec![Box::new(focal)].drain(..).for_each(|disk_config| { 10700 let guest = Guest::new(disk_config); 10701 10702 let mut child = GuestCommand::new(&guest) 10703 .args(["--cpus", "boot=1"]) 10704 .args(["--memory", "size=512M"]) 10705 .args(["--kernel", edk2_path().to_str().unwrap()]) 10706 .default_disks() 10707 .default_net() 10708 .args(["--serial", "tty", "--console", "off"]) 10709 .capture_output() 10710 .spawn() 10711 .unwrap(); 10712 10713 let r = std::panic::catch_unwind(|| { 10714 guest.wait_vm_boot(Some(120)).unwrap(); 10715 10716 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1); 10717 assert!(guest.get_total_memory().unwrap_or_default() > 400_000); 10718 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000"); 10719 }); 10720 10721 let _ = child.kill(); 10722 let output = child.wait_with_output().unwrap(); 10723 10724 handle_child_output(r, &output); 10725 }); 10726 } 10727 10728 #[test] 10729 fn test_guest_numa_nodes_acpi() { 10730 _test_guest_numa_nodes(true); 10731 } 10732 10733 #[test] 10734 fn test_cpu_topology_421_acpi() { 10735 test_cpu_topology(4, 2, 1, true); 10736 } 10737 10738 #[test] 10739 fn test_cpu_topology_142_acpi() { 10740 test_cpu_topology(1, 4, 2, true); 10741 } 10742 10743 #[test] 10744 fn test_cpu_topology_262_acpi() { 10745 test_cpu_topology(2, 6, 2, true); 10746 } 10747 10748 #[test] 10749 fn test_power_button_acpi() { 10750 _test_power_button(true); 10751 } 10752 10753 #[test] 10754 fn test_virtio_iommu() { 10755 _test_virtio_iommu(true) 10756 } 10757 } 10758 10759 mod rate_limiter { 10760 use super::*; 10761 10762 // Check if the 'measured' rate is within the expected 
'difference' (in percentage)
    // compared to the given 'limit' rate.
    fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool {
        let upper_limit = limit * (1_f64 + difference);
        let lower_limit = limit * (1_f64 - difference);

        if measured > lower_limit && measured < upper_limit {
            return true;
        }

        eprintln!(
            "\n\n==== Start 'check_rate_limit' failed ==== \
            \n\nmeasured={measured}, lower_limit={lower_limit}, upper_limit={upper_limit} \
            \n\n==== End 'check_rate_limit' failed ====\n\n"
        );

        false
    }

    fn _test_rate_limiter_net(rx: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));

        let test_timeout = 10;
        let num_queues = 2;
        let queue_size = 256;
        let bw_size = 10485760_u64; // bytes
        let bw_refill_time = 100; // ms
        // Expected rate in bits/s: bw_size bytes per refill, 8 bits per byte,
        // (1000 / bw_refill_time) refills per second.
        let limit_bps = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64;

        let net_params = format!(
            "tap=,mac={},ip={},mask=255.255.255.0,num_queues={},queue_size={},bw_size={},bw_refill_time={}",
            guest.network.guest_mac,
            guest.network.host_ip,
            num_queues,
            queue_size,
            bw_size,
            bw_refill_time,
        );

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={}", num_queues / 2)])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();
            let measured_bps =
                measure_virtio_net_throughput(test_timeout, num_queues / 2, &guest, rx, true)
                    .unwrap();
            assert!(check_rate_limit(measured_bps, limit_bps, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_net_rx() {
        _test_rate_limiter_net(true);
    }

    #[test]
    fn test_rate_limiter_net_tx() {
        _test_rate_limiter_net(false);
    }

    fn _test_rate_limiter_block(bandwidth: bool, num_queues: u32) {
        let test_timeout = 10;
        let fio_ops = FioOps::RandRW;

        let bw_size = if bandwidth {
            10485760_u64 // bytes
        } else {
            100_u64 // I/O
        };
        let bw_refill_time = 100; // ms
        let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);
        let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();
        let blk_rate_limiter_test_img =
            String::from(test_img_dir.as_path().join("blk.img").to_str().unwrap());

        // Create the test block image
        assert!(exec_host_command_output(&format!(
            "dd if=/dev/zero of={blk_rate_limiter_test_img} bs=1M count=1024"
        ))
        .status
        .success());

        let test_blk_params = if bandwidth {
            format!(
                "path={blk_rate_limiter_test_img},num_queues={num_queues},bw_size={bw_size},bw_refill_time={bw_refill_time}"
            )
        } else {
            format!(
"path={blk_rate_limiter_test_img},num_queues={num_queues},ops_size={bw_size},ops_refill_time={bw_refill_time}" 10869 ) 10870 }; 10871 10872 let mut child = GuestCommand::new(&guest) 10873 .args(["--cpus", &format!("boot={num_queues}")]) 10874 .args(["--memory", "size=4G"]) 10875 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 10876 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10877 .args([ 10878 "--disk", 10879 format!( 10880 "path={}", 10881 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 10882 ) 10883 .as_str(), 10884 format!( 10885 "path={}", 10886 guest.disk_config.disk(DiskType::CloudInit).unwrap() 10887 ) 10888 .as_str(), 10889 test_blk_params.as_str(), 10890 ]) 10891 .default_net() 10892 .args(["--api-socket", &api_socket]) 10893 .capture_output() 10894 .spawn() 10895 .unwrap(); 10896 10897 let r = std::panic::catch_unwind(|| { 10898 guest.wait_vm_boot(None).unwrap(); 10899 10900 let fio_command = format!( 10901 "sudo fio --filename=/dev/vdc --name=test --output-format=json \ 10902 --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \ 10903 --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}" 10904 ); 10905 let output = guest.ssh_command(&fio_command).unwrap(); 10906 10907 // Parse fio output 10908 let measured_rate = if bandwidth { 10909 parse_fio_output(&output, &fio_ops, num_queues).unwrap() 10910 } else { 10911 parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap() 10912 }; 10913 assert!(check_rate_limit(measured_rate, limit_rate, 0.1)); 10914 }); 10915 10916 let _ = child.kill(); 10917 let output = child.wait_with_output().unwrap(); 10918 handle_child_output(r, &output); 10919 } 10920 10921 fn _test_rate_limiter_group_block(bandwidth: bool, num_queues: u32, num_disks: u32) { 10922 let test_timeout = 10; 10923 let fio_ops = FioOps::RandRW; 10924 10925 let bw_size = if bandwidth { 10926 10485760_u64 // bytes 10927 } else { 10928 100_u64 // I/O 10929 }; 10930 let bw_refill_time = 100; // ms 10931 let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64; 10932 10933 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string()); 10934 let guest = Guest::new(Box::new(focal)); 10935 let api_socket = temp_api_path(&guest.tmp_dir); 10936 let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap(); 10937 10938 let rate_limit_group_arg = if bandwidth { 10939 format!("id=group0,bw_size={bw_size},bw_refill_time={bw_refill_time}") 10940 } else { 10941 format!("id=group0,ops_size={bw_size},ops_refill_time={bw_refill_time}") 10942 }; 10943 10944 let mut disk_args = vec![ 10945 "--disk".to_string(), 10946 format!( 10947 "path={}", 10948 guest.disk_config.disk(DiskType::OperatingSystem).unwrap() 10949 ), 10950 format!( 10951 "path={}", 10952 guest.disk_config.disk(DiskType::CloudInit).unwrap() 10953 ), 10954 ]; 10955 10956 for i in 0..num_disks { 10957 let test_img_path = String::from( 10958 test_img_dir 10959 .as_path() 10960 .join(format!("blk{}.img", i)) 10961 .to_str() 10962 .unwrap(), 10963 ); 10964 10965 assert!(exec_host_command_output(&format!( 10966 "dd if=/dev/zero of={test_img_path} bs=1M count=1024" 10967 )) 10968 .status 10969 .success()); 10970 10971 disk_args.push(format!( 10972 "path={test_img_path},num_queues={num_queues},rate_limit_group=group0" 10973 )); 10974 } 10975 10976 let mut child = GuestCommand::new(&guest) 10977 .args(["--cpus", &format!("boot={}", num_queues * num_disks)]) 10978 .args(["--memory", "size=4G"]) 10979 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()]) 10980 
.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]) 10981 .args(["--rate-limit-group", &rate_limit_group_arg]) 10982 .args(disk_args) 10983 .default_net() 10984 .args(["--api-socket", &api_socket]) 10985 .capture_output() 10986 .spawn() 10987 .unwrap(); 10988 10989 let r = std::panic::catch_unwind(|| { 10990 guest.wait_vm_boot(None).unwrap(); 10991 10992 let mut fio_command = format!( 10993 "sudo fio --name=global --output-format=json \ 10994 --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \ 10995 --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}" 10996 ); 10997 10998 // Generate additional argument for each disk: 10999 // --name=job0 --filename=/dev/vdc \ 11000 // --name=job1 --filename=/dev/vdd \ 11001 // --name=job2 --filename=/dev/vde \ 11002 // ... 11003 for i in 0..num_disks { 11004 let c: char = 'c'; 11005 let arg = format!( 11006 " --name=job{i} --filename=/dev/vd{}", 11007 char::from_u32((c as u32) + i).unwrap() 11008 ); 11009 fio_command += &arg; 11010 } 11011 let output = guest.ssh_command(&fio_command).unwrap(); 11012 11013 // Parse fio output 11014 let measured_rate = if bandwidth { 11015 parse_fio_output(&output, &fio_ops, num_queues * num_disks).unwrap() 11016 } else { 11017 parse_fio_output_iops(&output, &fio_ops, num_queues * num_disks).unwrap() 11018 }; 11019 assert!(check_rate_limit(measured_rate, limit_rate, 0.2)); 11020 }); 11021 11022 let _ = child.kill(); 11023 let output = child.wait_with_output().unwrap(); 11024 handle_child_output(r, &output); 11025 } 11026 11027 #[test] 11028 fn test_rate_limiter_block_bandwidth() { 11029 _test_rate_limiter_block(true, 1); 11030 _test_rate_limiter_block(true, 2) 11031 } 11032 11033 #[test] 11034 fn test_rate_limiter_group_block_bandwidth() { 11035 _test_rate_limiter_group_block(true, 1, 1); 11036 _test_rate_limiter_group_block(true, 2, 1); 11037 _test_rate_limiter_group_block(true, 1, 2); 11038 _test_rate_limiter_group_block(true, 2, 2); 11039 } 11040 11041 #[test] 11042 fn test_rate_limiter_block_iops() { 11043 _test_rate_limiter_block(false, 1); 11044 _test_rate_limiter_block(false, 2); 11045 } 11046 11047 #[test] 11048 fn test_rate_limiter_group_block_iops() { 11049 _test_rate_limiter_group_block(false, 1, 1); 11050 _test_rate_limiter_group_block(false, 2, 1); 11051 _test_rate_limiter_group_block(false, 1, 2); 11052 _test_rate_limiter_group_block(false, 2, 2); 11053 } 11054 } 11055