1 // Copyright © 2020 Intel Corporation
2 //
3 // SPDX-License-Identifier: Apache-2.0
4 //
5 #![allow(clippy::undocumented_unsafe_blocks)]
// When enabling the `mshv` feature, we skip quite a few tests and
// hence have known dead code. This annotation silences dead-code
// related warnings so that our quality workflow passes.
9 #![allow(dead_code)]
10
11 extern crate test_infra;
12
13 use std::collections::HashMap;
14 use std::io::{BufRead, Read, Seek, Write};
15 use std::net::TcpListener;
16 use std::os::unix::io::AsRawFd;
17 use std::path::PathBuf;
18 use std::process::{Child, Command, Stdio};
19 use std::string::String;
20 use std::sync::mpsc::Receiver;
21 use std::sync::{mpsc, Mutex};
22 use std::time::Duration;
23 use std::{fs, io, thread};
24
25 use net_util::MacAddr;
26 use test_infra::*;
27 use vmm_sys_util::tempdir::TempDir;
28 use vmm_sys_util::tempfile::TempFile;
29 use wait_timeout::ChildExt;
30
31 // Constant taken from the VMM crate.
32 const MAX_NUM_PCI_SEGMENTS: u16 = 96;
33
34 #[cfg(target_arch = "x86_64")]
35 mod x86_64 {
36 pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-amd64-custom-20210609-0.raw";
37 pub const JAMMY_VFIO_IMAGE_NAME: &str =
38 "jammy-server-cloudimg-amd64-custom-vfio-20241012-0.raw";
39 pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-amd64-custom-20210609-0.qcow2";
40 pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
41 "focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2";
42 pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhd";
43 pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-amd64-custom-20210609-0.vhdx";
44 pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-amd64-custom-20241017-0.raw";
45 pub const WINDOWS_IMAGE_NAME: &str = "windows-server-2022-amd64-2.raw";
46 pub const OVMF_NAME: &str = "CLOUDHV.fd";
47 pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'IO-APIC.*ttyS0' /proc/interrupts || true";
48 }
49
50 #[cfg(target_arch = "x86_64")]
51 use x86_64::*;
52
53 #[cfg(target_arch = "aarch64")]
54 mod aarch64 {
55 pub const FOCAL_IMAGE_NAME: &str = "focal-server-cloudimg-arm64-custom-20210929-0.raw";
56 pub const FOCAL_IMAGE_UPDATE_KERNEL_NAME: &str =
57 "focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw";
58 pub const FOCAL_IMAGE_NAME_QCOW2: &str = "focal-server-cloudimg-arm64-custom-20210929-0.qcow2";
59 pub const FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE: &str =
60 "focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2";
61 pub const FOCAL_IMAGE_NAME_VHD: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhd";
62 pub const FOCAL_IMAGE_NAME_VHDX: &str = "focal-server-cloudimg-arm64-custom-20210929-0.vhdx";
63 pub const JAMMY_IMAGE_NAME: &str = "jammy-server-cloudimg-arm64-custom-20220329-0.raw";
64 pub const WINDOWS_IMAGE_NAME: &str = "windows-11-iot-enterprise-aarch64.raw";
65 pub const OVMF_NAME: &str = "CLOUDHV_EFI.fd";
66 pub const GREP_SERIAL_IRQ_CMD: &str = "grep -c 'GICv3.*uart-pl011' /proc/interrupts || true";
67 pub const GREP_PMU_IRQ_CMD: &str = "grep -c 'GICv3.*arm-pmu' /proc/interrupts || true";
68 }
69
70 #[cfg(target_arch = "aarch64")]
71 use aarch64::*;
72
73 const DIRECT_KERNEL_BOOT_CMDLINE: &str =
74 "root=/dev/vda1 console=hvc0 rw systemd.journald.forward_to_console=1";
75
76 const CONSOLE_TEST_STRING: &str = "Started OpenBSD Secure Shell server";
77
// This enum exists to make it more convenient to
// implement tests for both the D-Bus and REST APIs.
80 enum TargetApi {
81 // API socket
82 HttpApi(String),
83 // well known service name, object path
84 DBusApi(String, String),
85 }
86
87 impl TargetApi {
    fn new_http_api(tmp_dir: &TempDir) -> Self {
89 Self::HttpApi(temp_api_path(tmp_dir))
90 }
91
    fn new_dbus_api(tmp_dir: &TempDir) -> Self {
93 // `tmp_dir` is in the form of "/tmp/chXXXXXX"
94 // and we take the `chXXXXXX` part as a unique identifier for the guest
95 let id = tmp_dir.as_path().file_name().unwrap().to_str().unwrap();
96
97 Self::DBusApi(
98 format!("org.cloudhypervisor.{id}"),
99 format!("/org/cloudhypervisor/{id}"),
100 )
101 }
102
    fn guest_args(&self) -> Vec<String> {
104 match self {
105 TargetApi::HttpApi(api_socket) => {
106 vec![format!("--api-socket={}", api_socket.as_str())]
107 }
108 TargetApi::DBusApi(service_name, object_path) => {
109 vec![
110 format!("--dbus-service-name={}", service_name.as_str()),
111 format!("--dbus-object-path={}", object_path.as_str()),
112 ]
113 }
114 }
115 }
116
    fn remote_args(&self) -> Vec<String> {
118 // `guest_args` and `remote_args` are consistent with each other
119 self.guest_args()
120 }
121
    fn remote_command(&self, command: &str, arg: Option<&str>) -> bool {
123 let mut cmd = Command::new(clh_command("ch-remote"));
124 cmd.args(self.remote_args());
125 cmd.arg(command);
126
127 if let Some(arg) = arg {
128 cmd.arg(arg);
129 }
130
131 let output = cmd.output().unwrap();
132 if output.status.success() {
133 true
134 } else {
135 eprintln!("Error running ch-remote command: {:?}", &cmd);
136 let stderr = String::from_utf8_lossy(&output.stderr);
137 eprintln!("stderr: {stderr}");
138 false
139 }
140 }
141 }
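
// A minimal illustrative sketch (not called by any test; the helper name is
// hypothetical): both API targets built from the same TempDir and the guest
// arguments they generate.
fn _example_target_api_args(tmp_dir: &TempDir) -> (Vec<String>, Vec<String>) {
    // The HTTP variant yields something like
    //   ["--api-socket=/tmp/chXXXXXX/cloud-hypervisor.sock"]
    // while the D-Bus variant yields
    //   ["--dbus-service-name=org.cloudhypervisor.chXXXXXX",
    //    "--dbus-object-path=/org/cloudhypervisor/chXXXXXX"].
    (
        TargetApi::new_http_api(tmp_dir).guest_args(),
        TargetApi::new_dbus_api(tmp_dir).guest_args(),
    )
}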
142
143 // Start cloud-hypervisor with no VM parameters, only the API server running.
144 // From the API: Create a VM, boot it and check that it looks as expected.
fn _test_api_create_boot(target_api: TargetApi, guest: Guest) {
146 let mut child = GuestCommand::new(&guest)
147 .args(target_api.guest_args())
148 .capture_output()
149 .spawn()
150 .unwrap();
151
152 thread::sleep(std::time::Duration::new(1, 0));
153
154 // Verify API server is running
155 assert!(target_api.remote_command("ping", None));
156
157 // Create the VM first
158 let cpu_count: u8 = 4;
159 let request_body = guest.api_create_body(
160 cpu_count,
161 direct_kernel_boot_path().to_str().unwrap(),
162 DIRECT_KERNEL_BOOT_CMDLINE,
163 );
164
165 let temp_config_path = guest.tmp_dir.as_path().join("config");
166 std::fs::write(&temp_config_path, request_body).unwrap();
167 let create_config = temp_config_path.as_os_str().to_str().unwrap();
168
169 assert!(target_api.remote_command("create", Some(create_config),));
170
171 // Then boot it
172 assert!(target_api.remote_command("boot", None));
173 thread::sleep(std::time::Duration::new(20, 0));
174
175 let r = std::panic::catch_unwind(|| {
176 // Check that the VM booted as expected
177 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
178 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
179 });
180
181 kill_child(&mut child);
182 let output = child.wait_with_output().unwrap();
183
184 handle_child_output(r, &output);
185 }
186
187 // Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be shut down and then
// booted again.
fn _test_api_shutdown(target_api: TargetApi, guest: Guest) {
191 let mut child = GuestCommand::new(&guest)
192 .args(target_api.guest_args())
193 .capture_output()
194 .spawn()
195 .unwrap();
196
197 thread::sleep(std::time::Duration::new(1, 0));
198
199 // Verify API server is running
200 assert!(target_api.remote_command("ping", None));
201
202 // Create the VM first
203 let cpu_count: u8 = 4;
204 let request_body = guest.api_create_body(
205 cpu_count,
206 direct_kernel_boot_path().to_str().unwrap(),
207 DIRECT_KERNEL_BOOT_CMDLINE,
208 );
209
210 let temp_config_path = guest.tmp_dir.as_path().join("config");
211 std::fs::write(&temp_config_path, request_body).unwrap();
212 let create_config = temp_config_path.as_os_str().to_str().unwrap();
213
214 let r = std::panic::catch_unwind(|| {
215 assert!(target_api.remote_command("create", Some(create_config)));
216
217 // Then boot it
218 assert!(target_api.remote_command("boot", None));
219
220 guest.wait_vm_boot(None).unwrap();
221
222 // Check that the VM booted as expected
223 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
224 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
225
226 // Sync and shutdown without powering off to prevent filesystem
227 // corruption.
228 guest.ssh_command("sync").unwrap();
229 guest.ssh_command("sudo shutdown -H now").unwrap();
230
231 // Wait for the guest to be fully shutdown
232 thread::sleep(std::time::Duration::new(20, 0));
233
234 // Then shut it down
235 assert!(target_api.remote_command("shutdown", None));
236
237 // Then boot it again
238 assert!(target_api.remote_command("boot", None));
239
240 guest.wait_vm_boot(None).unwrap();
241
242 // Check that the VM booted as expected
243 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
244 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
245 });
246
247 kill_child(&mut child);
248 let output = child.wait_with_output().unwrap();
249
250 handle_child_output(r, &output);
251 }
252
253 // Start cloud-hypervisor with no VM parameters, only the API server running.
// From the API: Create a VM, boot it and check it can be deleted and then
// recreated and booted again.
fn _test_api_delete(target_api: TargetApi, guest: Guest) {
257 let mut child = GuestCommand::new(&guest)
258 .args(target_api.guest_args())
259 .capture_output()
260 .spawn()
261 .unwrap();
262
263 thread::sleep(std::time::Duration::new(1, 0));
264
265 // Verify API server is running
266 assert!(target_api.remote_command("ping", None));
267
268 // Create the VM first
269 let cpu_count: u8 = 4;
270 let request_body = guest.api_create_body(
271 cpu_count,
272 direct_kernel_boot_path().to_str().unwrap(),
273 DIRECT_KERNEL_BOOT_CMDLINE,
274 );
275 let temp_config_path = guest.tmp_dir.as_path().join("config");
276 std::fs::write(&temp_config_path, request_body).unwrap();
277 let create_config = temp_config_path.as_os_str().to_str().unwrap();
278
279 let r = std::panic::catch_unwind(|| {
280 assert!(target_api.remote_command("create", Some(create_config)));
281
282 // Then boot it
283 assert!(target_api.remote_command("boot", None));
284
285 guest.wait_vm_boot(None).unwrap();
286
287 // Check that the VM booted as expected
288 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
289 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
290
291 // Sync and shutdown without powering off to prevent filesystem
292 // corruption.
293 guest.ssh_command("sync").unwrap();
294 guest.ssh_command("sudo shutdown -H now").unwrap();
295
296 // Wait for the guest to be fully shutdown
297 thread::sleep(std::time::Duration::new(20, 0));
298
299 // Then delete it
300 assert!(target_api.remote_command("delete", None));
301
302 assert!(target_api.remote_command("create", Some(create_config)));
303
304 // Then boot it again
305 assert!(target_api.remote_command("boot", None));
306
307 guest.wait_vm_boot(None).unwrap();
308
309 // Check that the VM booted as expected
310 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
311 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
312 });
313
314 kill_child(&mut child);
315 let output = child.wait_with_output().unwrap();
316
317 handle_child_output(r, &output);
318 }
319
320 // Start cloud-hypervisor with no VM parameters, only the API server running.
321 // From the API: Create a VM, boot it and check that it looks as expected.
// Then we pause the VM and check that it's no longer reachable over SSH.
// Finally we resume the VM and check that it's reachable again.
fn _test_api_pause_resume(target_api: TargetApi, guest: Guest) {
325 let mut child = GuestCommand::new(&guest)
326 .args(target_api.guest_args())
327 .capture_output()
328 .spawn()
329 .unwrap();
330
331 thread::sleep(std::time::Duration::new(1, 0));
332
333 // Verify API server is running
334 assert!(target_api.remote_command("ping", None));
335
336 // Create the VM first
337 let cpu_count: u8 = 4;
338 let request_body = guest.api_create_body(
339 cpu_count,
340 direct_kernel_boot_path().to_str().unwrap(),
341 DIRECT_KERNEL_BOOT_CMDLINE,
342 );
343
344 let temp_config_path = guest.tmp_dir.as_path().join("config");
345 std::fs::write(&temp_config_path, request_body).unwrap();
346 let create_config = temp_config_path.as_os_str().to_str().unwrap();
347
348 assert!(target_api.remote_command("create", Some(create_config)));
349
350 // Then boot it
351 assert!(target_api.remote_command("boot", None));
352 thread::sleep(std::time::Duration::new(20, 0));
353
354 let r = std::panic::catch_unwind(|| {
355 // Check that the VM booted as expected
356 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
357 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
358
359 // We now pause the VM
360 assert!(target_api.remote_command("pause", None));
361
362 // Check pausing again fails
363 assert!(!target_api.remote_command("pause", None));
364
365 thread::sleep(std::time::Duration::new(2, 0));
366
367 // SSH into the VM should fail
368 ssh_command_ip(
369 "grep -c processor /proc/cpuinfo",
370 &guest.network.guest_ip,
371 2,
372 5,
373 )
374 .unwrap_err();
375
376 // Resume the VM
377 assert!(target_api.remote_command("resume", None));
378
379 // Check resuming again fails
380 assert!(!target_api.remote_command("resume", None));
381
382 thread::sleep(std::time::Duration::new(2, 0));
383
384 // Now we should be able to SSH back in and get the right number of CPUs
385 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
386 });
387
388 kill_child(&mut child);
389 let output = child.wait_with_output().unwrap();
390
391 handle_child_output(r, &output);
392 }
393
fn _test_pty_interaction(pty_path: PathBuf) {
395 let mut cf = std::fs::OpenOptions::new()
396 .write(true)
397 .read(true)
398 .open(pty_path)
399 .unwrap();
400
401 // Some dumb sleeps but we don't want to write
402 // before the console is up and we don't want
403 // to try and write the next line before the
404 // login process is ready.
405 thread::sleep(std::time::Duration::new(5, 0));
406 assert_eq!(cf.write(b"cloud\n").unwrap(), 6);
407 thread::sleep(std::time::Duration::new(2, 0));
408 assert_eq!(cf.write(b"cloud123\n").unwrap(), 9);
409 thread::sleep(std::time::Duration::new(2, 0));
410 assert_eq!(cf.write(b"echo test_pty_console\n").unwrap(), 22);
411 thread::sleep(std::time::Duration::new(2, 0));
412
    // Read the pty and ensure we get a login shell, with some fairly hacky
    // workarounds to avoid looping forever in case the channel is blocked
    // getting output.
416 let ptyc = pty_read(cf);
417 let mut empty = 0;
418 let mut prev = String::new();
419 loop {
420 thread::sleep(std::time::Duration::new(2, 0));
421 match ptyc.try_recv() {
422 Ok(line) => {
423 empty = 0;
424 prev = prev + &line;
425 if prev.contains("test_pty_console") {
426 break;
427 }
428 }
429 Err(mpsc::TryRecvError::Empty) => {
430 empty += 1;
431 assert!(empty <= 5, "No login on pty");
432 }
433 _ => {
434 panic!("No login on pty")
435 }
436 }
437 }
438 }
439
fn prepare_virtiofsd(tmp_dir: &TempDir, shared_dir: &str) -> (std::process::Child, String) {
441 let mut workload_path = dirs::home_dir().unwrap();
442 workload_path.push("workloads");
443
444 let mut virtiofsd_path = workload_path;
445 virtiofsd_path.push("virtiofsd");
446 let virtiofsd_path = String::from(virtiofsd_path.to_str().unwrap());
447
448 let virtiofsd_socket_path =
449 String::from(tmp_dir.as_path().join("virtiofs.sock").to_str().unwrap());
450
451 // Start the daemon
452 let child = Command::new(virtiofsd_path.as_str())
453 .args(["--shared-dir", shared_dir])
454 .args(["--socket-path", virtiofsd_socket_path.as_str()])
455 .args(["--cache", "never"])
456 .spawn()
457 .unwrap();
458
459 thread::sleep(std::time::Duration::new(10, 0));
460
461 (child, virtiofsd_socket_path)
462 }
463
fn prepare_vubd(
465 tmp_dir: &TempDir,
466 blk_img: &str,
467 num_queues: usize,
468 rdonly: bool,
469 direct: bool,
470 ) -> (std::process::Child, String) {
471 let mut workload_path = dirs::home_dir().unwrap();
472 workload_path.push("workloads");
473
474 let mut blk_file_path = workload_path;
475 blk_file_path.push(blk_img);
476 let blk_file_path = String::from(blk_file_path.to_str().unwrap());
477
478 let vubd_socket_path = String::from(tmp_dir.as_path().join("vub.sock").to_str().unwrap());
479
480 // Start the daemon
481 let child = Command::new(clh_command("vhost_user_block"))
482 .args([
483 "--block-backend",
484 format!(
485 "path={blk_file_path},socket={vubd_socket_path},num_queues={num_queues},readonly={rdonly},direct={direct}"
486 )
487 .as_str(),
488 ])
489 .spawn()
490 .unwrap();
491
492 thread::sleep(std::time::Duration::new(10, 0));
493
494 (child, vubd_socket_path)
495 }
496
fn temp_vsock_path(tmp_dir: &TempDir) -> String {
498 String::from(tmp_dir.as_path().join("vsock").to_str().unwrap())
499 }
500
fn temp_api_path(tmp_dir: &TempDir) -> String {
502 String::from(
503 tmp_dir
504 .as_path()
505 .join("cloud-hypervisor.sock")
506 .to_str()
507 .unwrap(),
508 )
509 }
510
fn temp_event_monitor_path(tmp_dir: &TempDir) -> String {
512 String::from(tmp_dir.as_path().join("event.json").to_str().unwrap())
513 }
514
515 // Creates the directory and returns the path.
fn temp_snapshot_dir_path(tmp_dir: &TempDir) -> String {
517 let snapshot_dir = String::from(tmp_dir.as_path().join("snapshot").to_str().unwrap());
518 std::fs::create_dir(&snapshot_dir).unwrap();
519 snapshot_dir
520 }
521
fn temp_vmcore_file_path(tmp_dir: &TempDir) -> String {
523 let vmcore_file = String::from(tmp_dir.as_path().join("vmcore").to_str().unwrap());
524 vmcore_file
525 }
526
// Creates the path for direct kernel boot and returns it.
528 // For x86_64, this function returns the vmlinux kernel path.
529 // For AArch64, this function returns the PE kernel path.
fn direct_kernel_boot_path() -> PathBuf {
531 let mut workload_path = dirs::home_dir().unwrap();
532 workload_path.push("workloads");
533
534 let mut kernel_path = workload_path;
535 #[cfg(target_arch = "x86_64")]
536 kernel_path.push("vmlinux-x86_64");
537 #[cfg(target_arch = "aarch64")]
538 kernel_path.push("Image-arm64");
539
540 kernel_path
541 }
542
fn edk2_path() -> PathBuf {
544 let mut workload_path = dirs::home_dir().unwrap();
545 workload_path.push("workloads");
546 let mut edk2_path = workload_path;
547 edk2_path.push(OVMF_NAME);
548
549 edk2_path
550 }
551
fn cloud_hypervisor_release_path() -> String {
553 let mut workload_path = dirs::home_dir().unwrap();
554 workload_path.push("workloads");
555
556 let mut ch_release_path = workload_path;
557 #[cfg(target_arch = "x86_64")]
558 ch_release_path.push("cloud-hypervisor-static");
559 #[cfg(target_arch = "aarch64")]
560 ch_release_path.push("cloud-hypervisor-static-aarch64");
561
562 ch_release_path.into_os_string().into_string().unwrap()
563 }
564
fn prepare_vhost_user_net_daemon(
566 tmp_dir: &TempDir,
567 ip: &str,
568 tap: Option<&str>,
569 mtu: Option<u16>,
570 num_queues: usize,
571 client_mode: bool,
572 ) -> (std::process::Command, String) {
573 let vunet_socket_path = String::from(tmp_dir.as_path().join("vunet.sock").to_str().unwrap());
574
575 // Start the daemon
576 let mut net_params = format!(
577 "ip={ip},mask=255.255.255.0,socket={vunet_socket_path},num_queues={num_queues},queue_size=1024,client={client_mode}"
578 );
579
580 if let Some(tap) = tap {
581 net_params.push_str(format!(",tap={tap}").as_str());
582 }
583
584 if let Some(mtu) = mtu {
585 net_params.push_str(format!(",mtu={mtu}").as_str());
586 }
587
588 let mut command = Command::new(clh_command("vhost_user_net"));
589 command.args(["--net-backend", net_params.as_str()]);
590
591 (command, vunet_socket_path)
592 }
593
fn prepare_swtpm_daemon(tmp_dir: &TempDir) -> (std::process::Command, String) {
595 let swtpm_tpm_dir = String::from(tmp_dir.as_path().join("swtpm").to_str().unwrap());
596 let swtpm_socket_path = String::from(
597 tmp_dir
598 .as_path()
599 .join("swtpm")
600 .join("swtpm.sock")
601 .to_str()
602 .unwrap(),
603 );
604 std::fs::create_dir(&swtpm_tpm_dir).unwrap();
605
606 let mut swtpm_command = Command::new("swtpm");
607 let swtpm_args = [
608 "socket",
609 "--tpmstate",
610 &format!("dir={swtpm_tpm_dir}"),
611 "--ctrl",
612 &format!("type=unixio,path={swtpm_socket_path}"),
613 "--flags",
614 "startup-clear",
615 "--tpm2",
616 ];
617 swtpm_command.args(swtpm_args);
618
619 (swtpm_command, swtpm_socket_path)
620 }
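
// For reference, the command assembled above is roughly:
//   swtpm socket --tpmstate dir=<tmp>/swtpm \
//         --ctrl type=unixio,path=<tmp>/swtpm/swtpm.sock --flags startup-clear --tpm2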
621
fn remote_command(api_socket: &str, command: &str, arg: Option<&str>) -> bool {
623 let mut cmd = Command::new(clh_command("ch-remote"));
624 cmd.args([&format!("--api-socket={api_socket}"), command]);
625
626 if let Some(arg) = arg {
627 cmd.arg(arg);
628 }
629 let output = cmd.output().unwrap();
630 if output.status.success() {
631 true
632 } else {
633 eprintln!("Error running ch-remote command: {:?}", &cmd);
634 let stderr = String::from_utf8_lossy(&output.stderr);
635 eprintln!("stderr: {stderr}");
636 false
637 }
638 }
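
// The command assembled above is roughly equivalent to running, e.g.:
//   ch-remote --api-socket=<api_socket> resume
// when called as `remote_command(&api_socket, "resume", None)`.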
639
fn remote_command_w_output(api_socket: &str, command: &str, arg: Option<&str>) -> (bool, Vec<u8>) {
641 let mut cmd = Command::new(clh_command("ch-remote"));
642 cmd.args([&format!("--api-socket={api_socket}"), command]);
643
644 if let Some(arg) = arg {
645 cmd.arg(arg);
646 }
647
648 let output = cmd.output().expect("Failed to launch ch-remote");
649
650 (output.status.success(), output.stdout)
651 }
652
fn resize_command(
654 api_socket: &str,
655 desired_vcpus: Option<u8>,
656 desired_ram: Option<usize>,
657 desired_balloon: Option<usize>,
658 event_file: Option<&str>,
659 ) -> bool {
660 let mut cmd = Command::new(clh_command("ch-remote"));
661 cmd.args([&format!("--api-socket={api_socket}"), "resize"]);
662
663 if let Some(desired_vcpus) = desired_vcpus {
664 cmd.arg(format!("--cpus={desired_vcpus}"));
665 }
666
667 if let Some(desired_ram) = desired_ram {
668 cmd.arg(format!("--memory={desired_ram}"));
669 }
670
671 if let Some(desired_balloon) = desired_balloon {
672 cmd.arg(format!("--balloon={desired_balloon}"));
673 }
674
675 let ret = cmd.status().expect("Failed to launch ch-remote").success();
676
677 if let Some(event_path) = event_file {
678 let latest_events = [
679 &MetaEvent {
680 event: "resizing".to_string(),
681 device_id: None,
682 },
683 &MetaEvent {
684 event: "resized".to_string(),
685 device_id: None,
686 },
687 ];
688 // See: #5938
689 thread::sleep(std::time::Duration::new(1, 0));
690 assert!(check_latest_events_exact(&latest_events, event_path));
691 }
692
693 ret
694 }
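
// As an illustration, `resize_command(&api_socket, Some(8), Some(1024 << 20), None, None)`
// runs roughly `ch-remote --api-socket=<path> resize --cpus=8 --memory=1073741824`,
// and passing an event file additionally asserts that the latest recorded
// events are "resizing" followed by "resized".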
695
fn resize_zone_command(api_socket: &str, id: &str, desired_size: &str) -> bool {
697 let mut cmd = Command::new(clh_command("ch-remote"));
698 cmd.args([
699 &format!("--api-socket={api_socket}"),
700 "resize-zone",
701 &format!("--id={id}"),
702 &format!("--size={desired_size}"),
703 ]);
704
705 cmd.status().expect("Failed to launch ch-remote").success()
706 }
707
708 // setup OVS-DPDK bridge and ports
fn setup_ovs_dpdk() {
710 // setup OVS-DPDK
711 assert!(exec_host_command_status("service openvswitch-switch start").success());
712 assert!(exec_host_command_status("ovs-vsctl init").success());
713 assert!(
714 exec_host_command_status("ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true")
715 .success()
716 );
717 assert!(exec_host_command_status("service openvswitch-switch restart").success());
718
719 // Create OVS-DPDK bridge and ports
720 assert!(exec_host_command_status(
721 "ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev",
722 )
723 .success());
724 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success());
725 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient2").success());
726 assert!(exec_host_command_status("ip link set up dev ovsbr0").success());
727 assert!(exec_host_command_status("service openvswitch-switch restart").success());
728 }
fn cleanup_ovs_dpdk() {
730 assert!(exec_host_command_status("ovs-vsctl del-br ovsbr0").success());
731 exec_host_command_status("rm -f ovs-vsctl /tmp/dpdkvhostclient1 /tmp/dpdkvhostclient2");
732 }
733 // Setup two guests and ensure they are connected through ovs-dpdk
fn setup_ovs_dpdk_guests(
735 guest1: &Guest,
736 guest2: &Guest,
737 api_socket: &str,
738 release_binary: bool,
739 ) -> (Child, Child) {
740 setup_ovs_dpdk();
741
742 let clh_path = if !release_binary {
743 clh_command("cloud-hypervisor")
744 } else {
745 cloud_hypervisor_release_path()
746 };
747
748 let mut child1 = GuestCommand::new_with_binary_path(guest1, &clh_path)
749 .args(["--cpus", "boot=2"])
750 .args(["--memory", "size=0,shared=on"])
751 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
752 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
753 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
754 .default_disks()
755 .args(["--net", guest1.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient1,num_queues=2,queue_size=256,vhost_mode=server"])
756 .capture_output()
757 .spawn()
758 .unwrap();
759
760 #[cfg(target_arch = "x86_64")]
761 let guest_net_iface = "ens5";
762 #[cfg(target_arch = "aarch64")]
763 let guest_net_iface = "enp0s5";
764
765 let r = std::panic::catch_unwind(|| {
766 guest1.wait_vm_boot(None).unwrap();
767
768 guest1
769 .ssh_command(&format!(
770 "sudo ip addr add 172.100.0.1/24 dev {guest_net_iface}"
771 ))
772 .unwrap();
773 guest1
774 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
775 .unwrap();
776
777 let guest_ip = guest1.network.guest_ip.clone();
778 thread::spawn(move || {
779 ssh_command_ip(
780 "nc -l 12345",
781 &guest_ip,
782 DEFAULT_SSH_RETRIES,
783 DEFAULT_SSH_TIMEOUT,
784 )
785 .unwrap();
786 });
787 });
788 if r.is_err() {
789 cleanup_ovs_dpdk();
790
791 let _ = child1.kill();
792 let output = child1.wait_with_output().unwrap();
793 handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark that this block never returns
795 }
796
797 let mut child2 = GuestCommand::new_with_binary_path(guest2, &clh_path)
798 .args(["--api-socket", api_socket])
799 .args(["--cpus", "boot=2"])
800 .args(["--memory", "size=0,shared=on"])
801 .args(["--memory-zone", "id=mem0,size=1G,shared=on,host_numa_node=0"])
802 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
803 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
804 .default_disks()
805 .args(["--net", guest2.default_net_string().as_str(), "vhost_user=true,socket=/tmp/dpdkvhostclient2,num_queues=2,queue_size=256,vhost_mode=server"])
806 .capture_output()
807 .spawn()
808 .unwrap();
809
810 let r = std::panic::catch_unwind(|| {
811 guest2.wait_vm_boot(None).unwrap();
812
813 guest2
814 .ssh_command(&format!(
815 "sudo ip addr add 172.100.0.2/24 dev {guest_net_iface}"
816 ))
817 .unwrap();
818 guest2
819 .ssh_command(&format!("sudo ip link set up dev {guest_net_iface}"))
820 .unwrap();
821
822 // Check the connection works properly between the two VMs
823 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap();
824 });
825 if r.is_err() {
826 cleanup_ovs_dpdk();
827
828 let _ = child1.kill();
829 let _ = child2.kill();
830 let output = child2.wait_with_output().unwrap();
831 handle_child_output(r, &output);
        panic!("Test should already be failed/panicked"); // To explicitly mark that this block never returns
833 }
834
835 (child1, child2)
836 }
837
838 enum FwType {
839 Ovmf,
840 RustHypervisorFirmware,
841 }
842
fn fw_path(_fw_type: FwType) -> String {
844 let mut workload_path = dirs::home_dir().unwrap();
845 workload_path.push("workloads");
846
847 let mut fw_path = workload_path;
848 #[cfg(target_arch = "aarch64")]
849 fw_path.push("CLOUDHV_EFI.fd");
850 #[cfg(target_arch = "x86_64")]
851 {
852 match _fw_type {
853 FwType::Ovmf => fw_path.push(OVMF_NAME),
854 FwType::RustHypervisorFirmware => fw_path.push("hypervisor-fw"),
855 }
856 }
857
858 fw_path.to_str().unwrap().to_string()
859 }
860
861 #[derive(Debug)]
862 struct MetaEvent {
863 event: String,
864 device_id: Option<String>,
865 }
866
867 impl MetaEvent {
    pub fn match_with_json_event(&self, v: &serde_json::Value) -> bool {
869 let mut matched = false;
870 if v["event"].as_str().unwrap() == self.event {
871 if let Some(device_id) = &self.device_id {
872 if v["properties"]["id"].as_str().unwrap() == device_id {
873 matched = true
874 }
875 } else {
876 matched = true;
877 }
878 }
879 matched
880 }
881 }
882
// Parse the event_monitor file, where each event is followed by a double
// newline.
fn parse_event_file(event_file: &str) -> Vec<serde_json::Value> {
886 let content = fs::read(event_file).unwrap();
887 let mut ret = Vec::new();
888 for entry in String::from_utf8_lossy(&content)
889 .trim()
890 .split("\n\n")
891 .collect::<Vec<&str>>()
892 {
893 ret.push(serde_json::from_str(entry).unwrap());
894 }
895
896 ret
897 }
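
// Illustrative sketch (hypothetical helper, not called by any test): checking
// whether a given event, e.g. "resized", shows up anywhere in the parsed file.
fn _example_event_was_recorded(event_file: &str) -> bool {
    let expected = MetaEvent {
        event: "resized".to_string(),
        device_id: None,
    };
    parse_event_file(event_file)
        .iter()
        .any(|e| expected.match_with_json_event(e))
}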
898
899 // Return true if all events from the input 'expected_events' are matched sequentially
900 // with events from the 'event_file'
fn check_sequential_events(expected_events: &[&MetaEvent], event_file: &str) -> bool {
902 let json_events = parse_event_file(event_file);
903 let len = expected_events.len();
904 let mut idx = 0;
905 for e in &json_events {
906 if idx == len {
907 break;
908 }
909 if expected_events[idx].match_with_json_event(e) {
910 idx += 1;
911 }
912 }
913
914 let ret = idx == len;
915
916 if !ret {
917 eprintln!(
918 "\n\n==== Start 'check_sequential_events' failed ==== \
919 \n\nexpected_events={expected_events:?}\nactual_events={json_events:?} \
920 \n\n==== End 'check_sequential_events' failed ====",
921 );
922 }
923
924 ret
925 }
926
927 // Return true if all events from the input 'expected_events' are matched exactly
928 // with events from the 'event_file'
fn check_sequential_events_exact(expected_events: &[&MetaEvent], event_file: &str) -> bool {
930 let json_events = parse_event_file(event_file);
931 assert!(expected_events.len() <= json_events.len());
932 let json_events = &json_events[..expected_events.len()];
933
934 for (idx, e) in json_events.iter().enumerate() {
935 if !expected_events[idx].match_with_json_event(e) {
936 eprintln!(
937 "\n\n==== Start 'check_sequential_events_exact' failed ==== \
938 \n\nexpected_events={expected_events:?}\nactual_events={json_events:?} \
939 \n\n==== End 'check_sequential_events_exact' failed ====",
940 );
941
942 return false;
943 }
944 }
945
946 true
947 }
948
949 // Return true if events from the input 'latest_events' are matched exactly
950 // with the most recent events from the 'event_file'
fn check_latest_events_exact(latest_events: &[&MetaEvent], event_file: &str) -> bool {
952 let json_events = parse_event_file(event_file);
953 assert!(latest_events.len() <= json_events.len());
954 let json_events = &json_events[(json_events.len() - latest_events.len())..];
955
956 for (idx, e) in json_events.iter().enumerate() {
957 if !latest_events[idx].match_with_json_event(e) {
958 eprintln!(
959 "\n\n==== Start 'check_latest_events_exact' failed ==== \
960 \n\nexpected_events={latest_events:?}\nactual_events={json_events:?} \
961 \n\n==== End 'check_latest_events_exact' failed ====",
962 );
963
964 return false;
965 }
966 }
967
968 true
969 }
970
fn test_cpu_topology(threads_per_core: u8, cores_per_package: u8, packages: u8, use_fw: bool) {
972 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
973 let guest = Guest::new(Box::new(focal));
974 let total_vcpus = threads_per_core * cores_per_package * packages;
975 let direct_kernel_boot_path = direct_kernel_boot_path();
976 let mut kernel_path = direct_kernel_boot_path.to_str().unwrap();
977 let fw_path = fw_path(FwType::RustHypervisorFirmware);
978 if use_fw {
979 kernel_path = fw_path.as_str();
980 }
981
982 let mut child = GuestCommand::new(&guest)
983 .args([
984 "--cpus",
985 &format!(
986 "boot={total_vcpus},topology={threads_per_core}:{cores_per_package}:1:{packages}"
987 ),
988 ])
989 .args(["--memory", "size=512M"])
990 .args(["--kernel", kernel_path])
991 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
992 .default_disks()
993 .default_net()
994 .capture_output()
995 .spawn()
996 .unwrap();
997
998 let r = std::panic::catch_unwind(|| {
999 guest.wait_vm_boot(None).unwrap();
1000 assert_eq!(
1001 guest.get_cpu_count().unwrap_or_default(),
1002 u32::from(total_vcpus)
1003 );
1004 assert_eq!(
1005 guest
1006 .ssh_command("lscpu | grep \"per core\" | cut -f 2 -d \":\" | sed \"s# *##\"")
1007 .unwrap()
1008 .trim()
1009 .parse::<u8>()
1010 .unwrap_or(0),
1011 threads_per_core
1012 );
1013
1014 assert_eq!(
1015 guest
1016 .ssh_command("lscpu | grep \"per socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
1017 .unwrap()
1018 .trim()
1019 .parse::<u8>()
1020 .unwrap_or(0),
1021 cores_per_package
1022 );
1023
1024 assert_eq!(
1025 guest
1026 .ssh_command("lscpu | grep \"Socket\" | cut -f 2 -d \":\" | sed \"s# *##\"")
1027 .unwrap()
1028 .trim()
1029 .parse::<u8>()
1030 .unwrap_or(0),
1031 packages
1032 );
1033
1034 #[cfg(target_arch = "x86_64")]
1035 {
1036 let mut cpu_id = 0;
1037 for package_id in 0..packages {
1038 for core_id in 0..cores_per_package {
1039 for _ in 0..threads_per_core {
1040 assert_eq!(
1041 guest
1042 .ssh_command(&format!("cat /sys/devices/system/cpu/cpu{cpu_id}/topology/physical_package_id"))
1043 .unwrap()
1044 .trim()
1045 .parse::<u8>()
1046 .unwrap_or(0),
1047 package_id
1048 );
1049
1050 assert_eq!(
1051 guest
1052 .ssh_command(&format!(
1053 "cat /sys/devices/system/cpu/cpu{cpu_id}/topology/core_id"
1054 ))
1055 .unwrap()
1056 .trim()
1057 .parse::<u8>()
1058 .unwrap_or(0),
1059 core_id
1060 );
1061
1062 cpu_id += 1;
1063 }
1064 }
1065 }
1066 }
1067 });
1068
1069 kill_child(&mut child);
1070 let output = child.wait_with_output().unwrap();
1071
1072 handle_child_output(r, &output);
1073 }
1074
1075 #[allow(unused_variables)]
fn _test_guest_numa_nodes(acpi: bool) {
1077 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
1078 let guest = Guest::new(Box::new(focal));
1079 let api_socket = temp_api_path(&guest.tmp_dir);
1080 #[cfg(target_arch = "x86_64")]
1081 let kernel_path = direct_kernel_boot_path();
1082 #[cfg(target_arch = "aarch64")]
1083 let kernel_path = if acpi {
1084 edk2_path()
1085 } else {
1086 direct_kernel_boot_path()
1087 };
1088
1089 let mut child = GuestCommand::new(&guest)
1090 .args(["--cpus", "boot=6,max=12"])
1091 .args(["--memory", "size=0,hotplug_method=virtio-mem"])
1092 .args([
1093 "--memory-zone",
1094 "id=mem0,size=1G,hotplug_size=3G",
1095 "id=mem1,size=2G,hotplug_size=3G",
1096 "id=mem2,size=3G,hotplug_size=3G",
1097 ])
1098 .args([
1099 "--numa",
1100 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
1101 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
1102 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
1103 ])
1104 .args(["--kernel", kernel_path.to_str().unwrap()])
1105 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
1106 .args(["--api-socket", &api_socket])
1107 .capture_output()
1108 .default_disks()
1109 .default_net()
1110 .spawn()
1111 .unwrap();
1112
1113 let r = std::panic::catch_unwind(|| {
1114 guest.wait_vm_boot(None).unwrap();
1115
1116 guest.check_numa_common(
1117 Some(&[960_000, 1_920_000, 2_880_000]),
1118 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
1119 Some(&["10 15 20", "20 10 25", "25 30 10"]),
1120 );
1121
        // AArch64 currently does not support hotplug, and therefore we only
        // test hotplug-related functionality on x86_64 here.
1124 #[cfg(target_arch = "x86_64")]
1125 {
1126 guest.enable_memory_hotplug();
1127
1128 // Resize every memory zone and check each associated NUMA node
1129 // has been assigned the right amount of memory.
1130 resize_zone_command(&api_socket, "mem0", "4G");
1131 resize_zone_command(&api_socket, "mem1", "4G");
1132 resize_zone_command(&api_socket, "mem2", "4G");
1133 // Resize to the maximum amount of CPUs and check each NUMA
1134 // node has been assigned the right CPUs set.
1135 resize_command(&api_socket, Some(12), None, None, None);
1136 thread::sleep(std::time::Duration::new(5, 0));
1137
1138 guest.check_numa_common(
1139 Some(&[3_840_000, 3_840_000, 3_840_000]),
1140 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]),
1141 None,
1142 );
1143 }
1144 });
1145
1146 kill_child(&mut child);
1147 let output = child.wait_with_output().unwrap();
1148
1149 handle_child_output(r, &output);
1150 }
1151
1152 #[allow(unused_variables)]
fn _test_power_button(acpi: bool) {
1154 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
1155 let guest = Guest::new(Box::new(focal));
1156 let mut cmd = GuestCommand::new(&guest);
1157 let api_socket = temp_api_path(&guest.tmp_dir);
1158
1159 #[cfg(target_arch = "x86_64")]
1160 let kernel_path = direct_kernel_boot_path();
1161 #[cfg(target_arch = "aarch64")]
1162 let kernel_path = if acpi {
1163 edk2_path()
1164 } else {
1165 direct_kernel_boot_path()
1166 };
1167
1168 cmd.args(["--cpus", "boot=1"])
1169 .args(["--memory", "size=512M"])
1170 .args(["--kernel", kernel_path.to_str().unwrap()])
1171 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
1172 .capture_output()
1173 .default_disks()
1174 .default_net()
1175 .args(["--api-socket", &api_socket]);
1176
1177 let child = cmd.spawn().unwrap();
1178
1179 let r = std::panic::catch_unwind(|| {
1180 guest.wait_vm_boot(None).unwrap();
1181 assert!(remote_command(&api_socket, "power-button", None));
1182 });
1183
1184 let output = child.wait_with_output().unwrap();
1185 assert!(output.status.success());
1186 handle_child_output(r, &output);
1187 }
1188
1189 type PrepareNetDaemon = dyn Fn(
1190 &TempDir,
1191 &str,
1192 Option<&str>,
1193 Option<u16>,
1194 usize,
1195 bool,
1196 ) -> (std::process::Command, String);
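
// `prepare_vhost_user_net_daemon` above matches this signature, so a test can
// simply pass `&prepare_vhost_user_net_daemon` as the `prepare_daemon` argument.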
1197
fn test_vhost_user_net(
1199 tap: Option<&str>,
1200 num_queues: usize,
1201 prepare_daemon: &PrepareNetDaemon,
1202 generate_host_mac: bool,
1203 client_mode_daemon: bool,
1204 ) {
1205 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
1206 let guest = Guest::new(Box::new(focal));
1207 let api_socket = temp_api_path(&guest.tmp_dir);
1208
1209 let kernel_path = direct_kernel_boot_path();
1210
1211 let host_mac = if generate_host_mac {
1212 Some(MacAddr::local_random())
1213 } else {
1214 None
1215 };
1216
1217 let mtu = Some(3000);
1218
1219 let (mut daemon_command, vunet_socket_path) = prepare_daemon(
1220 &guest.tmp_dir,
1221 &guest.network.host_ip,
1222 tap,
1223 mtu,
1224 num_queues,
1225 client_mode_daemon,
1226 );
1227
1228 let net_params = format!(
1229 "vhost_user=true,mac={},socket={},num_queues={},queue_size=1024{},vhost_mode={},mtu=3000",
1230 guest.network.guest_mac,
1231 vunet_socket_path,
1232 num_queues,
1233 if let Some(host_mac) = host_mac {
1234 format!(",host_mac={host_mac}")
1235 } else {
1236 "".to_owned()
1237 },
1238 if client_mode_daemon {
1239 "server"
1240 } else {
1241 "client"
1242 },
1243 );
1244
1245 let mut ch_command = GuestCommand::new(&guest);
1246 ch_command
1247 .args(["--cpus", format!("boot={}", num_queues / 2).as_str()])
1248 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
1249 .args(["--kernel", kernel_path.to_str().unwrap()])
1250 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
1251 .default_disks()
1252 .args(["--net", net_params.as_str()])
1253 .args(["--api-socket", &api_socket])
1254 .capture_output();
1255
1256 let mut daemon_child: std::process::Child;
1257 let mut child: std::process::Child;
1258
1259 if client_mode_daemon {
1260 child = ch_command.spawn().unwrap();
1261 // Make sure the VMM is waiting for the backend to connect
1262 thread::sleep(std::time::Duration::new(10, 0));
1263 daemon_child = daemon_command.spawn().unwrap();
1264 } else {
1265 daemon_child = daemon_command.spawn().unwrap();
1266 // Make sure the backend is waiting for the VMM to connect
1267 thread::sleep(std::time::Duration::new(10, 0));
1268 child = ch_command.spawn().unwrap();
1269 }
1270
1271 let r = std::panic::catch_unwind(|| {
1272 guest.wait_vm_boot(None).unwrap();
1273
1274 if let Some(tap_name) = tap {
1275 let tap_count = exec_host_command_output(&format!("ip link | grep -c {tap_name}"));
1276 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
1277 }
1278
        if let Some(host_mac) = host_mac {
1280 let mac_count = exec_host_command_output(&format!("ip link | grep -c {host_mac}"));
1281 assert_eq!(String::from_utf8_lossy(&mac_count.stdout).trim(), "1");
1282 }
1283
1284 #[cfg(target_arch = "aarch64")]
1285 let iface = "enp0s4";
1286 #[cfg(target_arch = "x86_64")]
1287 let iface = "ens4";
1288
1289 assert_eq!(
1290 guest
1291 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
1292 .unwrap()
1293 .trim(),
1294 "3000"
1295 );
1296
1297 // 1 network interface + default localhost ==> 2 interfaces
1298 // It's important to note that this test is fully exercising the
1299 // vhost-user-net implementation and the associated backend since
1300 // it does not define any --net network interface. That means all
1301 // the ssh communication in that test happens through the network
1302 // interface backed by vhost-user-net.
1303 assert_eq!(
1304 guest
1305 .ssh_command("ip -o link | wc -l")
1306 .unwrap()
1307 .trim()
1308 .parse::<u32>()
1309 .unwrap_or_default(),
1310 2
1311 );
1312
        // The following PCI devices will appear on the guest with PCI-MSI
        // interrupt vectors assigned.
        // 1 virtio-console with 3 vectors: config, Rx, Tx
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-blk with 2 vectors: config, Request
        // 1 virtio-rng with 2 vectors: config, Request
        // Since virtio-net has 2 queue pairs, its vectors are as follows:
        // 1 virtio-net with 5 vectors: config, Rx (2), Tx (2)
        // Based on the above, the total number of vectors should be 14.
1322 #[cfg(target_arch = "x86_64")]
1323 let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
1324 #[cfg(target_arch = "aarch64")]
1325 let grep_cmd = "grep -c ITS-PCI-MSIX /proc/interrupts";
1326 assert_eq!(
1327 guest
1328 .ssh_command(grep_cmd)
1329 .unwrap()
1330 .trim()
1331 .parse::<u32>()
1332 .unwrap_or_default(),
1333 10 + (num_queues as u32)
1334 );
1335
1336 // ACPI feature is needed.
1337 #[cfg(target_arch = "x86_64")]
1338 {
1339 guest.enable_memory_hotplug();
1340
1341 // Add RAM to the VM
1342 let desired_ram = 1024 << 20;
1343 resize_command(&api_socket, None, Some(desired_ram), None, None);
1344
1345 thread::sleep(std::time::Duration::new(10, 0));
1346
1347 // Here by simply checking the size (through ssh), we validate
1348 // the connection is still working, which means vhost-user-net
1349 // keeps working after the resize.
1350 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
1351 }
1352 });
1353
1354 kill_child(&mut child);
1355 let output = child.wait_with_output().unwrap();
1356
1357 thread::sleep(std::time::Duration::new(5, 0));
1358 let _ = daemon_child.kill();
1359 let _ = daemon_child.wait();
1360
1361 handle_child_output(r, &output);
1362 }
1363
1364 type PrepareBlkDaemon = dyn Fn(&TempDir, &str, usize, bool, bool) -> (std::process::Child, String);
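
// Likewise, `prepare_vubd` matches this signature and can be passed as
// `Some(&prepare_vubd)` for `prepare_vhost_user_blk_daemon`.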
1365
fn test_vhost_user_blk(
1367 num_queues: usize,
1368 readonly: bool,
1369 direct: bool,
1370 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
1371 ) {
1372 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
1373 let guest = Guest::new(Box::new(focal));
1374 let api_socket = temp_api_path(&guest.tmp_dir);
1375
1376 let kernel_path = direct_kernel_boot_path();
1377
1378 let (blk_params, daemon_child) = {
1379 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
1380 // Start the daemon
1381 let (daemon_child, vubd_socket_path) =
1382 prepare_daemon(&guest.tmp_dir, "blk.img", num_queues, readonly, direct);
1383
1384 (
1385 format!(
1386 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128",
1387 ),
1388 Some(daemon_child),
1389 )
1390 };
1391
1392 let mut child = GuestCommand::new(&guest)
1393 .args(["--cpus", format!("boot={num_queues}").as_str()])
1394 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
1395 .args(["--kernel", kernel_path.to_str().unwrap()])
1396 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
1397 .args([
1398 "--disk",
1399 format!(
1400 "path={}",
1401 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
1402 )
1403 .as_str(),
1404 format!(
1405 "path={}",
1406 guest.disk_config.disk(DiskType::CloudInit).unwrap()
1407 )
1408 .as_str(),
1409 blk_params.as_str(),
1410 ])
1411 .default_net()
1412 .args(["--api-socket", &api_socket])
1413 .capture_output()
1414 .spawn()
1415 .unwrap();
1416
1417 let r = std::panic::catch_unwind(|| {
1418 guest.wait_vm_boot(None).unwrap();
1419
1420 // Check both if /dev/vdc exists and if the block size is 16M.
1421 assert_eq!(
1422 guest
1423 .ssh_command("lsblk | grep vdc | grep -c 16M")
1424 .unwrap()
1425 .trim()
1426 .parse::<u32>()
1427 .unwrap_or_default(),
1428 1
1429 );
1430
1431 // Check if this block is RO or RW.
1432 assert_eq!(
1433 guest
1434 .ssh_command("lsblk | grep vdc | awk '{print $5}'")
1435 .unwrap()
1436 .trim()
1437 .parse::<u32>()
1438 .unwrap_or_default(),
1439 readonly as u32
1440 );
1441
1442 // Check if the number of queues in /sys/block/vdc/mq matches the
1443 // expected num_queues.
1444 assert_eq!(
1445 guest
1446 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l")
1447 .unwrap()
1448 .trim()
1449 .parse::<u32>()
1450 .unwrap_or_default(),
1451 num_queues as u32
1452 );
1453
1454 // Mount the device
1455 let mount_ro_rw_flag = if readonly { "ro,noload" } else { "rw" };
1456 guest.ssh_command("mkdir mount_image").unwrap();
1457 guest
1458 .ssh_command(
1459 format!("sudo mount -o {mount_ro_rw_flag} -t ext4 /dev/vdc mount_image/").as_str(),
1460 )
1461 .unwrap();
1462
1463 // Check the content of the block device. The file "foo" should
1464 // contain "bar".
1465 assert_eq!(
1466 guest.ssh_command("cat mount_image/foo").unwrap().trim(),
1467 "bar"
1468 );
1469
1470 // ACPI feature is needed.
1471 #[cfg(target_arch = "x86_64")]
1472 {
1473 guest.enable_memory_hotplug();
1474
1475 // Add RAM to the VM
1476 let desired_ram = 1024 << 20;
1477 resize_command(&api_socket, None, Some(desired_ram), None, None);
1478
1479 thread::sleep(std::time::Duration::new(10, 0));
1480
1481 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
1482
1483 // Check again the content of the block device after the resize
1484 // has been performed.
1485 assert_eq!(
1486 guest.ssh_command("cat mount_image/foo").unwrap().trim(),
1487 "bar"
1488 );
1489 }
1490
1491 // Unmount the device
1492 guest.ssh_command("sudo umount /dev/vdc").unwrap();
1493 guest.ssh_command("rm -r mount_image").unwrap();
1494 });
1495
1496 kill_child(&mut child);
1497 let output = child.wait_with_output().unwrap();
1498
1499 if let Some(mut daemon_child) = daemon_child {
1500 thread::sleep(std::time::Duration::new(5, 0));
1501 let _ = daemon_child.kill();
1502 let _ = daemon_child.wait();
1503 }
1504
1505 handle_child_output(r, &output);
1506 }
1507
fn test_boot_from_vhost_user_blk(
1509 num_queues: usize,
1510 readonly: bool,
1511 direct: bool,
1512 prepare_vhost_user_blk_daemon: Option<&PrepareBlkDaemon>,
1513 ) {
1514 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
1515 let guest = Guest::new(Box::new(focal));
1516
1517 let kernel_path = direct_kernel_boot_path();
1518
1519 let disk_path = guest.disk_config.disk(DiskType::OperatingSystem).unwrap();
1520
1521 let (blk_boot_params, daemon_child) = {
1522 let prepare_daemon = prepare_vhost_user_blk_daemon.unwrap();
1523 // Start the daemon
1524 let (daemon_child, vubd_socket_path) = prepare_daemon(
1525 &guest.tmp_dir,
1526 disk_path.as_str(),
1527 num_queues,
1528 readonly,
1529 direct,
1530 );
1531
1532 (
1533 format!(
1534 "vhost_user=true,socket={vubd_socket_path},num_queues={num_queues},queue_size=128",
1535 ),
1536 Some(daemon_child),
1537 )
1538 };
1539
1540 let mut child = GuestCommand::new(&guest)
1541 .args(["--cpus", format!("boot={num_queues}").as_str()])
1542 .args(["--memory", "size=512M,shared=on"])
1543 .args(["--kernel", kernel_path.to_str().unwrap()])
1544 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
1545 .args([
1546 "--disk",
1547 blk_boot_params.as_str(),
1548 format!(
1549 "path={}",
1550 guest.disk_config.disk(DiskType::CloudInit).unwrap()
1551 )
1552 .as_str(),
1553 ])
1554 .default_net()
1555 .capture_output()
1556 .spawn()
1557 .unwrap();
1558
1559 let r = std::panic::catch_unwind(|| {
1560 guest.wait_vm_boot(None).unwrap();
1561
1562 // Just check the VM booted correctly.
1563 assert_eq!(guest.get_cpu_count().unwrap_or_default(), num_queues as u32);
1564 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
1565 });
1566 kill_child(&mut child);
1567 let output = child.wait_with_output().unwrap();
1568
1569 if let Some(mut daemon_child) = daemon_child {
1570 thread::sleep(std::time::Duration::new(5, 0));
1571 let _ = daemon_child.kill();
1572 let _ = daemon_child.wait();
1573 }
1574
1575 handle_child_output(r, &output);
1576 }
1577
fn _test_virtio_fs(
1579 prepare_daemon: &dyn Fn(&TempDir, &str) -> (std::process::Child, String),
1580 hotplug: bool,
1581 pci_segment: Option<u16>,
1582 ) {
1583 #[cfg(target_arch = "aarch64")]
1584 let focal_image = if hotplug {
1585 FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string()
1586 } else {
1587 FOCAL_IMAGE_NAME.to_string()
1588 };
1589 #[cfg(target_arch = "x86_64")]
1590 let focal_image = FOCAL_IMAGE_NAME.to_string();
1591 let focal = UbuntuDiskConfig::new(focal_image);
1592 let guest = Guest::new(Box::new(focal));
1593 let api_socket = temp_api_path(&guest.tmp_dir);
1594
1595 let mut workload_path = dirs::home_dir().unwrap();
1596 workload_path.push("workloads");
1597
1598 let mut shared_dir = workload_path;
1599 shared_dir.push("shared_dir");
1600
1601 #[cfg(target_arch = "x86_64")]
1602 let kernel_path = direct_kernel_boot_path();
1603 #[cfg(target_arch = "aarch64")]
1604 let kernel_path = if hotplug {
1605 edk2_path()
1606 } else {
1607 direct_kernel_boot_path()
1608 };
1609
1610 let (mut daemon_child, virtiofsd_socket_path) =
1611 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());
1612
1613 let mut guest_command = GuestCommand::new(&guest);
1614 guest_command
1615 .args(["--cpus", "boot=1"])
1616 .args(["--memory", "size=512M,hotplug_size=2048M,shared=on"])
1617 .args(["--kernel", kernel_path.to_str().unwrap()])
1618 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
1619 .default_disks()
1620 .default_net()
1621 .args(["--api-socket", &api_socket]);
1622 if pci_segment.is_some() {
1623 guest_command.args([
1624 "--platform",
1625 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
1626 ]);
1627 }
1628
1629 let fs_params = format!(
1630 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
1631 virtiofsd_socket_path,
1632 if let Some(pci_segment) = pci_segment {
1633 format!(",pci_segment={pci_segment}")
1634 } else {
1635 "".to_owned()
1636 }
1637 );
1638
1639 if !hotplug {
1640 guest_command.args(["--fs", fs_params.as_str()]);
1641 }
1642
1643 let mut child = guest_command.capture_output().spawn().unwrap();
1644
1645 let r = std::panic::catch_unwind(|| {
1646 guest.wait_vm_boot(None).unwrap();
1647
1648 if hotplug {
1649 // Add fs to the VM
1650 let (cmd_success, cmd_output) =
1651 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
1652 assert!(cmd_success);
1653
1654 if let Some(pci_segment) = pci_segment {
1655 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
1656 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
1657 )));
1658 } else {
1659 assert!(String::from_utf8_lossy(&cmd_output)
1660 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
1661 }
1662
1663 thread::sleep(std::time::Duration::new(10, 0));
1664 }
1665
1666 // Mount shared directory through virtio_fs filesystem
1667 guest
1668 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
1669 .unwrap();
1670
1671 // Check file1 exists and its content is "foo"
1672 assert_eq!(
1673 guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
1674 "foo"
1675 );
1676 // Check file2 does not exist
1677 guest
1678 .ssh_command("[ ! -f 'mount_dir/file2' ] || true")
1679 .unwrap();
1680
1681 // Check file3 exists and its content is "bar"
1682 assert_eq!(
1683 guest.ssh_command("cat mount_dir/file3").unwrap().trim(),
1684 "bar"
1685 );
1686
1687 // ACPI feature is needed.
1688 #[cfg(target_arch = "x86_64")]
1689 {
1690 guest.enable_memory_hotplug();
1691
1692 // Add RAM to the VM
1693 let desired_ram = 1024 << 20;
1694 resize_command(&api_socket, None, Some(desired_ram), None, None);
1695
1696 thread::sleep(std::time::Duration::new(30, 0));
1697 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
1698
1699 // After the resize, check again that file1 exists and its
1700 // content is "foo".
1701 assert_eq!(
1702 guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
1703 "foo"
1704 );
1705 }
1706
1707 if hotplug {
1708 // Remove from VM
1709 guest.ssh_command("sudo umount mount_dir").unwrap();
1710 assert!(remote_command(&api_socket, "remove-device", Some("myfs0")));
1711 }
1712 });
1713
1714 let (r, hotplug_daemon_child) = if r.is_ok() && hotplug {
1715 thread::sleep(std::time::Duration::new(10, 0));
1716 let (daemon_child, virtiofsd_socket_path) =
1717 prepare_daemon(&guest.tmp_dir, shared_dir.to_str().unwrap());
1718
1719 let r = std::panic::catch_unwind(|| {
1720 thread::sleep(std::time::Duration::new(10, 0));
1721 let fs_params = format!(
1722 "id=myfs0,tag=myfs,socket={},num_queues=1,queue_size=1024{}",
1723 virtiofsd_socket_path,
1724 if let Some(pci_segment) = pci_segment {
1725 format!(",pci_segment={pci_segment}")
1726 } else {
1727 "".to_owned()
1728 }
1729 );
1730
1731 // Add back and check it works
1732 let (cmd_success, cmd_output) =
1733 remote_command_w_output(&api_socket, "add-fs", Some(&fs_params));
1734 assert!(cmd_success);
1735 if let Some(pci_segment) = pci_segment {
1736 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
1737 "{{\"id\":\"myfs0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
1738 )));
1739 } else {
1740 assert!(String::from_utf8_lossy(&cmd_output)
1741 .contains("{\"id\":\"myfs0\",\"bdf\":\"0000:00:06.0\"}"));
1742 }
1743
1744 thread::sleep(std::time::Duration::new(10, 0));
1745 // Mount shared directory through virtio_fs filesystem
1746 guest
1747 .ssh_command("mkdir -p mount_dir && sudo mount -t virtiofs myfs mount_dir/")
1748 .unwrap();
1749
1750 // Check file1 exists and its content is "foo"
1751 assert_eq!(
1752 guest.ssh_command("cat mount_dir/file1").unwrap().trim(),
1753 "foo"
1754 );
1755 });
1756
1757 (r, Some(daemon_child))
1758 } else {
1759 (r, None)
1760 };
1761
1762 kill_child(&mut child);
1763 let output = child.wait_with_output().unwrap();
1764
1765 let _ = daemon_child.kill();
1766 let _ = daemon_child.wait();
1767
1768 if let Some(mut daemon_child) = hotplug_daemon_child {
1769 let _ = daemon_child.kill();
1770 let _ = daemon_child.wait();
1771 }
1772
1773 handle_child_output(r, &output);
1774 }
1775
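// Boot a guest with a virtio-pmem device backed by a temporary ext4 image and
// check that /dev/pmem0 shows up, that it can be mounted and written to, and
// that the written data survives a reboot unless `discard_writes` is enabled.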
1776 fn test_virtio_pmem(discard_writes: bool, specify_size: bool) {
1777 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
1778 let guest = Guest::new(Box::new(focal));
1779
1780 let kernel_path = direct_kernel_boot_path();
1781
1782 let pmem_temp_file = TempFile::new().unwrap();
1783 pmem_temp_file.as_file().set_len(128 << 20).unwrap();
1784
1785 std::process::Command::new("mkfs.ext4")
1786 .arg(pmem_temp_file.as_path())
1787 .output()
1788 .expect("Expect creating disk image to succeed");
1789
1790 let mut child = GuestCommand::new(&guest)
1791 .args(["--cpus", "boot=1"])
1792 .args(["--memory", "size=512M"])
1793 .args(["--kernel", kernel_path.to_str().unwrap()])
1794 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
1795 .default_disks()
1796 .default_net()
1797 .args([
1798 "--pmem",
1799 format!(
1800 "file={}{}{}",
1801 pmem_temp_file.as_path().to_str().unwrap(),
1802 if specify_size { ",size=128M" } else { "" },
1803 if discard_writes {
1804 ",discard_writes=on"
1805 } else {
1806 ""
1807 }
1808 )
1809 .as_str(),
1810 ])
1811 .capture_output()
1812 .spawn()
1813 .unwrap();
1814
1815 let r = std::panic::catch_unwind(|| {
1816 guest.wait_vm_boot(None).unwrap();
1817
1818 // Check for the presence of /dev/pmem0
1819 assert_eq!(
1820 guest.ssh_command("ls /dev/pmem0").unwrap().trim(),
1821 "/dev/pmem0"
1822 );
1823
1824 // Check changes persist after reboot
1825 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), "");
1826 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n");
1827 guest
1828 .ssh_command("echo test123 | sudo tee /mnt/test")
1829 .unwrap();
1830 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), "");
1831 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "");
1832
1833 guest.reboot_linux(0, None);
1834 assert_eq!(guest.ssh_command("sudo mount /dev/pmem0 /mnt").unwrap(), "");
1835 assert_eq!(
1836 guest
1837 .ssh_command("sudo cat /mnt/test || true")
1838 .unwrap()
1839 .trim(),
1840 if discard_writes { "" } else { "test123" }
1841 );
1842 });
1843
1844 kill_child(&mut child);
1845 let output = child.wait_with_output().unwrap();
1846
1847 handle_child_output(r, &output);
1848 }
1849
1850 fn get_fd_count(pid: u32) -> usize {
1851 fs::read_dir(format!("/proc/{pid}/fd")).unwrap().count()
1852 }
1853
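// Boot a guest with a vsock device, either cold-plugged on the command line or
// hot-plugged through the API depending on `hotplug`, and check that vsock
// communication keeps working across a guest reboot.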
1854 fn _test_virtio_vsock(hotplug: bool) {
1855 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
1856 let guest = Guest::new(Box::new(focal));
1857
1858 #[cfg(target_arch = "x86_64")]
1859 let kernel_path = direct_kernel_boot_path();
1860 #[cfg(target_arch = "aarch64")]
1861 let kernel_path = if hotplug {
1862 edk2_path()
1863 } else {
1864 direct_kernel_boot_path()
1865 };
1866
1867 let socket = temp_vsock_path(&guest.tmp_dir);
1868 let api_socket = temp_api_path(&guest.tmp_dir);
1869
1870 let mut cmd = GuestCommand::new(&guest);
1871 cmd.args(["--api-socket", &api_socket]);
1872 cmd.args(["--cpus", "boot=1"]);
1873 cmd.args(["--memory", "size=512M"]);
1874 cmd.args(["--kernel", kernel_path.to_str().unwrap()]);
1875 cmd.args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE]);
1876 cmd.default_disks();
1877 cmd.default_net();
1878
1879 if !hotplug {
1880 cmd.args(["--vsock", format!("cid=3,socket={socket}").as_str()]);
1881 }
1882
1883 let mut child = cmd.capture_output().spawn().unwrap();
1884
1885 let r = std::panic::catch_unwind(|| {
1886 guest.wait_vm_boot(None).unwrap();
1887
1888 if hotplug {
1889 let (cmd_success, cmd_output) = remote_command_w_output(
1890 &api_socket,
1891 "add-vsock",
1892 Some(format!("cid=3,socket={socket},id=test0").as_str()),
1893 );
1894 assert!(cmd_success);
1895 assert!(String::from_utf8_lossy(&cmd_output)
1896 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}"));
1897 thread::sleep(std::time::Duration::new(10, 0));
1898 // Check adding a second one fails
1899 assert!(!remote_command(
1900 &api_socket,
1901 "add-vsock",
1902 Some("cid=1234,socket=/tmp/fail")
1903 ));
1904 }
1905
1906 // Validate vsock works as expected.
1907 guest.check_vsock(socket.as_str());
1908 guest.reboot_linux(0, None);
1909 // Validate vsock still works after a reboot.
1910 guest.check_vsock(socket.as_str());
1911
1912 if hotplug {
1913 assert!(remote_command(&api_socket, "remove-device", Some("test0")));
1914 }
1915 });
1916
1917 kill_child(&mut child);
1918 let output = child.wait_with_output().unwrap();
1919
1920 handle_child_output(r, &output);
1921 }
1922
1923 fn get_ksm_pages_shared() -> u32 {
1924 fs::read_to_string("/sys/kernel/mm/ksm/pages_shared")
1925 .unwrap()
1926 .trim()
1927 .parse::<u32>()
1928 .unwrap()
1929 }
1930
1931 fn test_memory_mergeable(mergeable: bool) {
1932 let memory_param = if mergeable {
1933 "mergeable=on"
1934 } else {
1935 "mergeable=off"
1936 };
1937
1938 // We assume that the rest of the CI system is not using mergeable memory
1939 let ksm_ps_init = get_ksm_pages_shared();
1940 assert!(ksm_ps_init == 0);
1941
1942 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
1943 let guest1 = Guest::new(Box::new(focal1));
1944 let mut child1 = GuestCommand::new(&guest1)
1945 .args(["--cpus", "boot=1"])
1946 .args(["--memory", format!("size=512M,{memory_param}").as_str()])
1947 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
1948 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
1949 .default_disks()
1950 .args(["--net", guest1.default_net_string().as_str()])
1951 .args(["--serial", "tty", "--console", "off"])
1952 .capture_output()
1953 .spawn()
1954 .unwrap();
1955
1956 let r = std::panic::catch_unwind(|| {
1957 guest1.wait_vm_boot(None).unwrap();
1958 });
1959 if r.is_err() {
1960 kill_child(&mut child1);
1961 let output = child1.wait_with_output().unwrap();
1962 handle_child_output(r, &output);
1963 panic!("Test should already have failed/panicked"); // Explicitly mark that this block never returns
1964 }
1965
1966 let ksm_ps_guest1 = get_ksm_pages_shared();
1967
1968 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
1969 let guest2 = Guest::new(Box::new(focal2));
1970 let mut child2 = GuestCommand::new(&guest2)
1971 .args(["--cpus", "boot=1"])
1972 .args(["--memory", format!("size=512M,{memory_param}").as_str()])
1973 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
1974 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
1975 .default_disks()
1976 .args(["--net", guest2.default_net_string().as_str()])
1977 .args(["--serial", "tty", "--console", "off"])
1978 .capture_output()
1979 .spawn()
1980 .unwrap();
1981
1982 let r = std::panic::catch_unwind(|| {
1983 guest2.wait_vm_boot(None).unwrap();
1984 let ksm_ps_guest2 = get_ksm_pages_shared();
1985
1986 if mergeable {
1987 println!(
1988 "ksm pages_shared after vm1 booted '{ksm_ps_guest1}', ksm pages_shared after vm2 booted '{ksm_ps_guest2}'"
1989 );
1990 // We expect the number of shared pages to increase as the number of VMs increases
1991 assert!(ksm_ps_guest1 < ksm_ps_guest2);
1992 } else {
1993 assert!(ksm_ps_guest1 == 0);
1994 assert!(ksm_ps_guest2 == 0);
1995 }
1996 });
1997
1998 kill_child(&mut child1);
1999 kill_child(&mut child2);
2000
2001 let output = child1.wait_with_output().unwrap();
2002 child2.wait().unwrap();
2003
2004 handle_child_output(r, &output);
2005 }
2006
2007 fn _get_vmm_overhead(pid: u32, guest_memory_size: u32) -> HashMap<String, u32> {
2008 let smaps = fs::File::open(format!("/proc/{pid}/smaps")).unwrap();
2009 let reader = io::BufReader::new(smaps);
2010
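// For reference, each mapping in /proc/<pid>/smaps looks roughly like the
// following (hypothetical excerpt; the exact fields vary with the kernel):
//
//   7f2b4c021000-7f2b4c042000 rw-p 00000000 00:00 0      [heap]
//   Size:               132 kB
//   Rss:                 24 kB
//   ...
//
// The parsed RSS values are keyed on the region name found at the end of the
// header line (or "anonymous" when there is none).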
2011 let mut skip_map: bool = false;
2012 let mut region_name: String = "".to_string();
2013 let mut region_maps = HashMap::new();
2014 for line in reader.lines() {
2015 let l = line.unwrap();
2016
2017 if l.contains('-') {
2018 let values: Vec<&str> = l.split_whitespace().collect();
2019 region_name = values.last().unwrap().trim().to_string();
2020 if region_name == "0" {
2021 region_name = "anonymous".to_string()
2022 }
2023 }
2024
2025 // The field block of each mapping begins with something that looks like:
2026 // Size:              2184 kB
2027 if l.starts_with("Size:") {
2028 let values: Vec<&str> = l.split_whitespace().collect();
2029 let map_size = values[1].parse::<u32>().unwrap();
2030 // We skip the assigned guest RAM map, since its RSS only depends
2031 // on the guest's actual memory usage.
2032 // Everything else counts toward the VMM overhead.
2033 skip_map = map_size >= guest_memory_size;
2034 continue;
2035 }
2036
2037 // If this is a map we're taking into account, then we only
2038 // count the RSS. The sum of all counted RSS is the VMM overhead.
2039 if !skip_map && l.starts_with("Rss:") {
2040 let values: Vec<&str> = l.split_whitespace().collect();
2041 let value = values[1].trim().parse::<u32>().unwrap();
2042 *region_maps.entry(region_name.clone()).or_insert(0) += value;
2043 }
2044 }
2045
2046 region_maps
2047 }
2048
2049 fn get_vmm_overhead(pid: u32, guest_memory_size: u32) -> u32 {
2050 let mut total = 0;
2051
2052 for (region_name, value) in &_get_vmm_overhead(pid, guest_memory_size) {
2053 eprintln!("{region_name}: {value}");
2054 total += value;
2055 }
2056
2057 total
2058 }
2059
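// Resident set size of a process in KiB, as reported by `ps -o rss=`.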
2060 fn process_rss_kib(pid: u32) -> usize {
2061 let command = format!("ps -q {pid} -o rss=");
2062 let rss = exec_host_command_output(&command);
2063 String::from_utf8_lossy(&rss.stdout).trim().parse().unwrap()
2064 }
2065
2066 // 10 MiB is our maximum accepted VMM overhead.
2067 const MAXIMUM_VMM_OVERHEAD_KB: u32 = 10 * 1024;
2068
2069 #[derive(PartialEq, Eq, PartialOrd)]
2070 struct Counters {
2071 rx_bytes: u64,
2072 rx_frames: u64,
2073 tx_bytes: u64,
2074 tx_frames: u64,
2075 read_bytes: u64,
2076 write_bytes: u64,
2077 read_ops: u64,
2078 write_ops: u64,
2079 }
2080
2081 fn get_counters(api_socket: &str) -> Counters {
2082 // Get counters
2083 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "counters", None);
2084 assert!(cmd_success);
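// The "counters" command returns a map of device id to counter values; for
// illustration it looks roughly like this (hypothetical values):
//   {"_net2": {"rx_bytes": 1024, "rx_frames": 8, ...},
//    "_disk0": {"read_bytes": 4096, "read_ops": 2, ...}}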
2085
2086 let counters: HashMap<&str, HashMap<&str, u64>> =
2087 serde_json::from_slice(&cmd_output).unwrap_or_default();
2088
2089 let rx_bytes = *counters.get("_net2").unwrap().get("rx_bytes").unwrap();
2090 let rx_frames = *counters.get("_net2").unwrap().get("rx_frames").unwrap();
2091 let tx_bytes = *counters.get("_net2").unwrap().get("tx_bytes").unwrap();
2092 let tx_frames = *counters.get("_net2").unwrap().get("tx_frames").unwrap();
2093
2094 let read_bytes = *counters.get("_disk0").unwrap().get("read_bytes").unwrap();
2095 let write_bytes = *counters.get("_disk0").unwrap().get("write_bytes").unwrap();
2096 let read_ops = *counters.get("_disk0").unwrap().get("read_ops").unwrap();
2097 let write_ops = *counters.get("_disk0").unwrap().get("write_ops").unwrap();
2098
2099 Counters {
2100 rx_bytes,
2101 rx_frames,
2102 tx_bytes,
2103 tx_frames,
2104 read_bytes,
2105 write_bytes,
2106 read_ops,
2107 write_ops,
2108 }
2109 }
2110
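// Spawn a reader thread that polls the given PTY once per second and forwards
// whatever it reads to the returned channel. The thread exits as soon as a
// read fails or the receiving end of the channel has been dropped.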
2111 fn pty_read(mut pty: std::fs::File) -> Receiver<String> {
2112 let (tx, rx) = mpsc::channel::<String>();
2113 thread::spawn(move || loop {
2114 thread::sleep(std::time::Duration::new(1, 0));
2115 let mut buf = [0; 512];
2116 match pty.read(&mut buf) {
2117 Ok(_bytes) => {
2118 let output = std::str::from_utf8(&buf).unwrap().to_string();
2119 match tx.send(output) {
2120 Ok(_) => (),
2121 Err(_) => break,
2122 }
2123 }
2124 Err(_) => break,
2125 }
2126 });
2127 rx
2128 }
2129
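// Extract the host path of a PTY-backed console device ("serial" or "console")
// from the "info" command output. The relevant part of the JSON looks roughly
// like this hypothetical excerpt: "serial": {"mode": "Pty", "file": "/dev/pts/3"}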
2130 fn get_pty_path(api_socket: &str, pty_type: &str) -> PathBuf {
2131 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None);
2132 assert!(cmd_success);
2133 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default();
2134 assert_eq!("Pty", info["config"][pty_type]["mode"]);
2135 PathBuf::from(
2136 info["config"][pty_type]["file"]
2137 .as_str()
2138 .expect("Missing pty path"),
2139 )
2140 }
2141
2142 // VFIO test network setup.
2143 // We reserve a dedicated subnet for it: 172.18.0.0/24.
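// The resulting host topology is a single bridge with four tap devices
// enslaved to it:
//
//   vfio-br0 (172.18.0.1/24)
//    |- vfio-tap0
//    |- vfio-tap1
//    |- vfio-tap2
//    `- vfio-tap3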
2144 #[cfg(target_arch = "x86_64")]
2145 fn setup_vfio_network_interfaces() {
2146 // 'vfio-br0'
2147 assert!(exec_host_command_status("sudo ip link add name vfio-br0 type bridge").success());
2148 assert!(exec_host_command_status("sudo ip link set vfio-br0 up").success());
2149 assert!(exec_host_command_status("sudo ip addr add 172.18.0.1/24 dev vfio-br0").success());
2150 // 'vfio-tap0'
2151 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap0 mode tap").success());
2152 assert!(exec_host_command_status("sudo ip link set vfio-tap0 master vfio-br0").success());
2153 assert!(exec_host_command_status("sudo ip link set vfio-tap0 up").success());
2154 // 'vfio-tap1'
2155 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap1 mode tap").success());
2156 assert!(exec_host_command_status("sudo ip link set vfio-tap1 master vfio-br0").success());
2157 assert!(exec_host_command_status("sudo ip link set vfio-tap1 up").success());
2158 // 'vfio-tap2'
2159 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap2 mode tap").success());
2160 assert!(exec_host_command_status("sudo ip link set vfio-tap2 master vfio-br0").success());
2161 assert!(exec_host_command_status("sudo ip link set vfio-tap2 up").success());
2162 // 'vfio-tap3'
2163 assert!(exec_host_command_status("sudo ip tuntap add vfio-tap3 mode tap").success());
2164 assert!(exec_host_command_status("sudo ip link set vfio-tap3 master vfio-br0").success());
2165 assert!(exec_host_command_status("sudo ip link set vfio-tap3 up").success());
2166 }
2167
2168 // Tear the VFIO test network down
2169 #[cfg(target_arch = "x86_64")]
2170 fn cleanup_vfio_network_interfaces() {
2171 assert!(exec_host_command_status("sudo ip link del vfio-br0").success());
2172 assert!(exec_host_command_status("sudo ip link del vfio-tap0").success());
2173 assert!(exec_host_command_status("sudo ip link del vfio-tap1").success());
2174 assert!(exec_host_command_status("sudo ip link del vfio-tap2").success());
2175 assert!(exec_host_command_status("sudo ip link del vfio-tap3").success());
2176 }
2177
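// Current balloon size, i.e. the amount of memory reclaimed from the guest:
// the configured memory size minus the actual memory size reported by the VMM.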
2178 fn balloon_size(api_socket: &str) -> u64 {
2179 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None);
2180 assert!(cmd_success);
2181
2182 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default();
2183 let total_mem = &info["config"]["memory"]["size"]
2184 .to_string()
2185 .parse::<u64>()
2186 .unwrap();
2187 let actual_mem = &info["memory_actual_size"]
2188 .to_string()
2189 .parse::<u64>()
2190 .unwrap();
2191 total_mem - actual_mem
2192 }
2193
2194 fn vm_state(api_socket: &str) -> String {
2195 let (cmd_success, cmd_output) = remote_command_w_output(api_socket, "info", None);
2196 assert!(cmd_success);
2197
2198 let info: serde_json::Value = serde_json::from_slice(&cmd_output).unwrap_or_default();
2199 let state = &info["state"].as_str().unwrap();
2200
2201 state.to_string()
2202 }
2203
2204 // This test first validates that the virtio-iommu device can be found.
2205 // It also verifies that both disks and the network card are attached to
2206 // the virtual IOMMU by looking at the /sys/kernel/iommu_groups directory.
2207 // The last interesting part of this test is that it exercises the network
2208 // interface attached to the virtual IOMMU, since this is the one used to
2209 // send all commands through SSH.
2210 fn _test_virtio_iommu(acpi: bool) {
2211 // Virtio-iommu support is available in recent kernels (v5.14+), but the
2212 // kernel in the Focal image is older.
2213 // So if ACPI is enabled on AArch64, we use a modified Focal image in which
2214 // the kernel binary has been updated.
2215 #[cfg(target_arch = "aarch64")]
2216 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string();
2217 #[cfg(target_arch = "x86_64")]
2218 let focal_image = FOCAL_IMAGE_NAME.to_string();
2219 let focal = UbuntuDiskConfig::new(focal_image);
2220 let guest = Guest::new(Box::new(focal));
2221
2222 #[cfg(target_arch = "x86_64")]
2223 let kernel_path = direct_kernel_boot_path();
2224 #[cfg(target_arch = "aarch64")]
2225 let kernel_path = if acpi {
2226 edk2_path()
2227 } else {
2228 direct_kernel_boot_path()
2229 };
2230
2231 let mut child = GuestCommand::new(&guest)
2232 .args(["--cpus", "boot=1"])
2233 .args(["--memory", "size=512M"])
2234 .args(["--kernel", kernel_path.to_str().unwrap()])
2235 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2236 .args([
2237 "--disk",
2238 format!(
2239 "path={},iommu=on",
2240 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
2241 )
2242 .as_str(),
2243 format!(
2244 "path={},iommu=on",
2245 guest.disk_config.disk(DiskType::CloudInit).unwrap()
2246 )
2247 .as_str(),
2248 ])
2249 .args(["--net", guest.default_net_string_w_iommu().as_str()])
2250 .capture_output()
2251 .spawn()
2252 .unwrap();
2253
2254 let r = std::panic::catch_unwind(|| {
2255 guest.wait_vm_boot(None).unwrap();
2256
2257 // Verify the virtio-iommu device is present.
2258 assert!(guest
2259 .does_device_vendor_pair_match("0x1057", "0x1af4")
2260 .unwrap_or_default());
2261
2262 // On AArch64, if the guest system boots from FDT, the IOMMU behavior differs
2263 // slightly from the ACPI case.
2264 // All devices on the PCI bus are attached to the virtual IOMMU, except the
2265 // virtio-iommu device itself, so they are all added to IOMMU groups and
2266 // appear under '/sys/kernel/iommu_groups/'.
2267 // As a result, with FDT, IOMMU group '0' contains "0000:00:01.0", which is
2268 // the console, while the first disk "0000:00:02.0" is in group '1'.
2269 // With ACPI, the console device is not attached to the IOMMU, so IOMMU
2270 // group '0' contains "0000:00:02.0", which is the first disk.
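// To summarize, the group assignment checked below is:
//
//   device                FDT group   ACPI group
//   console 0000:00:01.0      0        (none)
//   disk1   0000:00:02.0      1           0
//   disk2   0000:00:03.0      2           1
//   net     0000:00:04.0      3           2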
2271 //
2272 // Verify the iommu group of the first disk.
2273 let iommu_group = !acpi as i32;
2274 assert_eq!(
2275 guest
2276 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str())
2277 .unwrap()
2278 .trim(),
2279 "0000:00:02.0"
2280 );
2281
2282 // Verify the iommu group of the second disk.
2283 let iommu_group = if acpi { 1 } else { 2 };
2284 assert_eq!(
2285 guest
2286 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str())
2287 .unwrap()
2288 .trim(),
2289 "0000:00:03.0"
2290 );
2291
2292 // Verify the iommu group of the network card.
2293 let iommu_group = if acpi { 2 } else { 3 };
2294 assert_eq!(
2295 guest
2296 .ssh_command(format!("ls /sys/kernel/iommu_groups/{iommu_group}/devices").as_str())
2297 .unwrap()
2298 .trim(),
2299 "0000:00:04.0"
2300 );
2301 });
2302
2303 kill_child(&mut child);
2304 let output = child.wait_with_output().unwrap();
2305
2306 handle_child_output(r, &output);
2307 }
2308
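// Number of reboots recorded by the guest, counted from the output of `last`.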
2309 fn get_reboot_count(guest: &Guest) -> u32 {
2310 guest
2311 .ssh_command("sudo last | grep -c reboot")
2312 .unwrap()
2313 .trim()
2314 .parse::<u32>()
2315 .unwrap_or_default()
2316 }
2317
2318 fn enable_guest_watchdog(guest: &Guest, watchdog_sec: u32) {
2319 // Check for PCI device
2320 assert!(guest
2321 .does_device_vendor_pair_match("0x1063", "0x1af4")
2322 .unwrap_or_default());
2323
2324 // Enable systemd watchdog
2325 guest
2326 .ssh_command(&format!(
2327 "echo RuntimeWatchdogSec={watchdog_sec}s | sudo tee -a /etc/systemd/system.conf"
2328 ))
2329 .unwrap();
2330
2331 guest.ssh_command("sudo systemctl daemon-reexec").unwrap();
2332 }
2333
2334 fn make_guest_panic(guest: &Guest) {
2335 // Check for pvpanic device
2336 assert!(guest
2337 .does_device_vendor_pair_match("0x0011", "0x1b36")
2338 .unwrap_or_default());
2339
2340 // Trigger a guest panic: sync filesystems ('s'), then crash the kernel ('c') via sysrq.
2341 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap();
2342 }
2343
2344 mod common_parallel {
2345 use std::fs::OpenOptions;
2346 use std::io::SeekFrom;
2347
2348 use crate::*;
2349
2350 #[test]
2351 #[cfg(target_arch = "x86_64")]
2352 fn test_focal_hypervisor_fw() {
2353 test_simple_launch(fw_path(FwType::RustHypervisorFirmware), FOCAL_IMAGE_NAME)
2354 }
2355
2356 #[test]
2357 #[cfg(target_arch = "x86_64")]
2358 fn test_focal_ovmf() {
2359 test_simple_launch(fw_path(FwType::Ovmf), FOCAL_IMAGE_NAME)
2360 }
2361
2362 #[cfg(target_arch = "x86_64")]
2363 fn test_simple_launch(fw_path: String, disk_path: &str) {
2364 let disk_config = Box::new(UbuntuDiskConfig::new(disk_path.to_string()));
2365 let guest = Guest::new(disk_config);
2366 let event_path = temp_event_monitor_path(&guest.tmp_dir);
2367
2368 let mut child = GuestCommand::new(&guest)
2369 .args(["--cpus", "boot=1"])
2370 .args(["--memory", "size=512M"])
2371 .args(["--kernel", fw_path.as_str()])
2372 .default_disks()
2373 .default_net()
2374 .args(["--serial", "tty", "--console", "off"])
2375 .args(["--event-monitor", format!("path={event_path}").as_str()])
2376 .capture_output()
2377 .spawn()
2378 .unwrap();
2379
2380 let r = std::panic::catch_unwind(|| {
2381 guest.wait_vm_boot(Some(120)).unwrap();
2382
2383 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
2384 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
2385 assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000");
2386
2387 let expected_sequential_events = [
2388 &MetaEvent {
2389 event: "starting".to_string(),
2390 device_id: None,
2391 },
2392 &MetaEvent {
2393 event: "booting".to_string(),
2394 device_id: None,
2395 },
2396 &MetaEvent {
2397 event: "booted".to_string(),
2398 device_id: None,
2399 },
2400 &MetaEvent {
2401 event: "activated".to_string(),
2402 device_id: Some("_disk0".to_string()),
2403 },
2404 &MetaEvent {
2405 event: "reset".to_string(),
2406 device_id: Some("_disk0".to_string()),
2407 },
2408 ];
2409 assert!(check_sequential_events(
2410 &expected_sequential_events,
2411 &event_path
2412 ));
2413
2414 // It's been observed on the Bionic image that udev and snapd
2415 // services can cause some delay in the VM's shutdown. Disabling
2416 // them improves the reliability of this test.
2417 let _ = guest.ssh_command("sudo systemctl disable udev");
2418 let _ = guest.ssh_command("sudo systemctl stop udev");
2419 let _ = guest.ssh_command("sudo systemctl disable snapd");
2420 let _ = guest.ssh_command("sudo systemctl stop snapd");
2421
2422 guest.ssh_command("sudo poweroff").unwrap();
2423 thread::sleep(std::time::Duration::new(20, 0));
2424 let latest_events = [
2425 &MetaEvent {
2426 event: "shutdown".to_string(),
2427 device_id: None,
2428 },
2429 &MetaEvent {
2430 event: "deleted".to_string(),
2431 device_id: None,
2432 },
2433 &MetaEvent {
2434 event: "shutdown".to_string(),
2435 device_id: None,
2436 },
2437 ];
2438 assert!(check_latest_events_exact(&latest_events, &event_path));
2439 });
2440
2441 kill_child(&mut child);
2442 let output = child.wait_with_output().unwrap();
2443
2444 handle_child_output(r, &output);
2445 }
2446
2447 #[test]
2448 fn test_multi_cpu() {
2449 let jammy_image = JAMMY_IMAGE_NAME.to_string();
2450 let jammy = UbuntuDiskConfig::new(jammy_image);
2451 let guest = Guest::new(Box::new(jammy));
2452
2453 let mut cmd = GuestCommand::new(&guest);
2454 cmd.args(["--cpus", "boot=2,max=4"])
2455 .args(["--memory", "size=512M"])
2456 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
2457 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2458 .capture_output()
2459 .default_disks()
2460 .default_net();
2461
2462 let mut child = cmd.spawn().unwrap();
2463
2464 let r = std::panic::catch_unwind(|| {
2465 guest.wait_vm_boot(Some(120)).unwrap();
2466
2467 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2);
2468
2469 assert_eq!(
2470 guest
2471 .ssh_command(
2472 r#"sudo dmesg | grep "smp: Brought up" | sed "s/\[\ *[0-9.]*\] //""#
2473 )
2474 .unwrap()
2475 .trim(),
2476 "smp: Brought up 1 node, 2 CPUs"
2477 );
2478 });
2479
2480 kill_child(&mut child);
2481 let output = child.wait_with_output().unwrap();
2482
2483 handle_child_output(r, &output);
2484 }
2485
2486 #[test]
2487 fn test_cpu_topology_421() {
2488 test_cpu_topology(4, 2, 1, false);
2489 }
2490
2491 #[test]
2492 fn test_cpu_topology_142() {
2493 test_cpu_topology(1, 4, 2, false);
2494 }
2495
2496 #[test]
2497 fn test_cpu_topology_262() {
2498 test_cpu_topology(2, 6, 2, false);
2499 }
2500
2501 #[test]
2502 #[cfg(target_arch = "x86_64")]
2503 #[cfg(not(feature = "mshv"))]
2504 fn test_cpu_physical_bits() {
2505 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
2506 let guest = Guest::new(Box::new(focal));
2507 let max_phys_bits: u8 = 36;
2508 let mut child = GuestCommand::new(&guest)
2509 .args(["--cpus", &format!("max_phys_bits={max_phys_bits}")])
2510 .args(["--memory", "size=512M"])
2511 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
2512 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2513 .default_disks()
2514 .default_net()
2515 .capture_output()
2516 .spawn()
2517 .unwrap();
2518
2519 let r = std::panic::catch_unwind(|| {
2520 guest.wait_vm_boot(None).unwrap();
2521
2522 assert!(
2523 guest
2524 .ssh_command("lscpu | grep \"Address sizes:\" | cut -f 2 -d \":\" | sed \"s# *##\" | cut -f 1 -d \" \"")
2525 .unwrap()
2526 .trim()
2527 .parse::<u8>()
2528 .unwrap_or(max_phys_bits + 1) <= max_phys_bits,
2529 );
2530 });
2531
2532 kill_child(&mut child);
2533 let output = child.wait_with_output().unwrap();
2534
2535 handle_child_output(r, &output);
2536 }
2537
2538 #[test]
2539 fn test_cpu_affinity() {
2540 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
2541 let guest = Guest::new(Box::new(focal));
2542
2543 // We need the host to have at least 4 CPUs if we want to be able
2544 // to run this test.
2545 let host_cpus_count = exec_host_command_output("nproc");
2546 assert!(
2547 String::from_utf8_lossy(&host_cpus_count.stdout)
2548 .trim()
2549 .parse::<u16>()
2550 .unwrap_or(0)
2551 >= 4
2552 );
2553
2554 let mut child = GuestCommand::new(&guest)
2555 .args(["--cpus", "boot=2,affinity=[0@[0,2],1@[1,3]]"])
2556 .args(["--memory", "size=512M"])
2557 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
2558 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2559 .default_disks()
2560 .default_net()
2561 .capture_output()
2562 .spawn()
2563 .unwrap();
2564
2565 let r = std::panic::catch_unwind(|| {
2566 guest.wait_vm_boot(None).unwrap();
2567 let pid = child.id();
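// The shell pipeline below looks up the TID of each vCPU thread with
// `ps -T` and then asks `taskset -pc` for its CPU affinity list; the
// sixth space-separated field of the taskset output is that list.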
2568 let taskset_vcpu0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
2569 assert_eq!(String::from_utf8_lossy(&taskset_vcpu0.stdout).trim(), "0,2");
2570 let taskset_vcpu1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep vcpu1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
2571 assert_eq!(String::from_utf8_lossy(&taskset_vcpu1.stdout).trim(), "1,3");
2572 });
2573
2574 kill_child(&mut child);
2575 let output = child.wait_with_output().unwrap();
2576 handle_child_output(r, &output);
2577 }
2578
2579 #[test]
2580 fn test_virtio_queue_affinity() {
2581 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
2582 let guest = Guest::new(Box::new(focal));
2583
2584 // We need the host to have at least 4 CPUs if we want to be able
2585 // to run this test.
2586 let host_cpus_count = exec_host_command_output("nproc");
2587 assert!(
2588 String::from_utf8_lossy(&host_cpus_count.stdout)
2589 .trim()
2590 .parse::<u16>()
2591 .unwrap_or(0)
2592 >= 4
2593 );
2594
2595 let mut child = GuestCommand::new(&guest)
2596 .args(["--cpus", "boot=4"])
2597 .args(["--memory", "size=512M"])
2598 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
2599 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2600 .args([
2601 "--disk",
2602 format!(
2603 "path={}",
2604 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
2605 )
2606 .as_str(),
2607 format!(
2608 "path={},num_queues=4,queue_affinity=[0@[0,2],1@[1,3],2@[1],3@[3]]",
2609 guest.disk_config.disk(DiskType::CloudInit).unwrap()
2610 )
2611 .as_str(),
2612 ])
2613 .default_net()
2614 .capture_output()
2615 .spawn()
2616 .unwrap();
2617
2618 let r = std::panic::catch_unwind(|| {
2619 guest.wait_vm_boot(None).unwrap();
2620 let pid = child.id();
2621 let taskset_q0 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q0 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
2622 assert_eq!(String::from_utf8_lossy(&taskset_q0.stdout).trim(), "0,2");
2623 let taskset_q1 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q1 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
2624 assert_eq!(String::from_utf8_lossy(&taskset_q1.stdout).trim(), "1,3");
2625 let taskset_q2 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q2 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
2626 assert_eq!(String::from_utf8_lossy(&taskset_q2.stdout).trim(), "1");
2627 let taskset_q3 = exec_host_command_output(format!("taskset -pc $(ps -T -p {pid} | grep disk1_q3 | xargs | cut -f 2 -d \" \") | cut -f 6 -d \" \"").as_str());
2628 assert_eq!(String::from_utf8_lossy(&taskset_q3.stdout).trim(), "3");
2629 });
2630
2631 kill_child(&mut child);
2632 let output = child.wait_with_output().unwrap();
2633 handle_child_output(r, &output);
2634 }
2635
2636 #[test]
2637 #[cfg(not(feature = "mshv"))]
2638 fn test_large_vm() {
2639 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
2640 let guest = Guest::new(Box::new(focal));
2641 let mut cmd = GuestCommand::new(&guest);
2642 cmd.args(["--cpus", "boot=48"])
2643 .args(["--memory", "size=5120M"])
2644 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
2645 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2646 .args(["--serial", "tty"])
2647 .args(["--console", "off"])
2648 .capture_output()
2649 .default_disks()
2650 .default_net();
2651
2652 let mut child = cmd.spawn().unwrap();
2653
2654 guest.wait_vm_boot(None).unwrap();
2655
2656 let r = std::panic::catch_unwind(|| {
2657 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 48);
2658 assert_eq!(
2659 guest
2660 .ssh_command("lscpu | grep \"On-line\" | cut -f 2 -d \":\" | sed \"s# *##\"")
2661 .unwrap()
2662 .trim(),
2663 "0-47"
2664 );
2665
2666 assert!(guest.get_total_memory().unwrap_or_default() > 5_000_000);
2667 });
2668
2669 kill_child(&mut child);
2670 let output = child.wait_with_output().unwrap();
2671
2672 handle_child_output(r, &output);
2673 }
2674
2675 #[test]
2676 #[cfg(not(feature = "mshv"))]
2677 fn test_huge_memory() {
2678 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
2679 let guest = Guest::new(Box::new(focal));
2680 let mut cmd = GuestCommand::new(&guest);
2681 cmd.args(["--cpus", "boot=1"])
2682 .args(["--memory", "size=128G"])
2683 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
2684 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2685 .capture_output()
2686 .default_disks()
2687 .default_net();
2688
2689 let mut child = cmd.spawn().unwrap();
2690
2691 guest.wait_vm_boot(Some(120)).unwrap();
2692
2693 let r = std::panic::catch_unwind(|| {
2694 assert!(guest.get_total_memory().unwrap_or_default() > 128_000_000);
2695 });
2696
2697 kill_child(&mut child);
2698 let output = child.wait_with_output().unwrap();
2699
2700 handle_child_output(r, &output);
2701 }
2702
2703 #[test]
2704 fn test_power_button() {
2705 _test_power_button(false);
2706 }
2707
2708 #[test]
2709 #[cfg(not(feature = "mshv"))]
2710 fn test_user_defined_memory_regions() {
2711 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
2712 let guest = Guest::new(Box::new(focal));
2713 let api_socket = temp_api_path(&guest.tmp_dir);
2714
2715 let kernel_path = direct_kernel_boot_path();
2716
2717 let mut child = GuestCommand::new(&guest)
2718 .args(["--cpus", "boot=1"])
2719 .args(["--memory", "size=0,hotplug_method=virtio-mem"])
2720 .args([
2721 "--memory-zone",
2722 "id=mem0,size=1G,hotplug_size=2G",
2723 "id=mem1,size=1G,shared=on",
2724 "id=mem2,size=1G,host_numa_node=0,hotplug_size=2G",
2725 ])
2726 .args(["--kernel", kernel_path.to_str().unwrap()])
2727 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2728 .args(["--api-socket", &api_socket])
2729 .capture_output()
2730 .default_disks()
2731 .default_net()
2732 .spawn()
2733 .unwrap();
2734
2735 let r = std::panic::catch_unwind(|| {
2736 guest.wait_vm_boot(None).unwrap();
2737
2738 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000);
2739
2740 guest.enable_memory_hotplug();
2741
2742 resize_zone_command(&api_socket, "mem0", "3G");
2743 thread::sleep(std::time::Duration::new(5, 0));
2744 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000);
2745 resize_zone_command(&api_socket, "mem2", "3G");
2746 thread::sleep(std::time::Duration::new(5, 0));
2747 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000);
2748 resize_zone_command(&api_socket, "mem0", "2G");
2749 thread::sleep(std::time::Duration::new(5, 0));
2750 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
2751 resize_zone_command(&api_socket, "mem2", "2G");
2752 thread::sleep(std::time::Duration::new(5, 0));
2753 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000);
2754
2755 guest.reboot_linux(0, None);
2756
2757 // Check the amount of RAM after reboot
2758 assert!(guest.get_total_memory().unwrap_or_default() > 4_800_000);
2759 assert!(guest.get_total_memory().unwrap_or_default() < 5_760_000);
2760
2761 // Check if we can still resize down to the initial 'boot' size
2762 resize_zone_command(&api_socket, "mem0", "1G");
2763 thread::sleep(std::time::Duration::new(5, 0));
2764 assert!(guest.get_total_memory().unwrap_or_default() < 4_800_000);
2765 resize_zone_command(&api_socket, "mem2", "1G");
2766 thread::sleep(std::time::Duration::new(5, 0));
2767 assert!(guest.get_total_memory().unwrap_or_default() < 3_840_000);
2768 });
2769
2770 kill_child(&mut child);
2771 let output = child.wait_with_output().unwrap();
2772
2773 handle_child_output(r, &output);
2774 }
2775
2776 #[test]
2777 #[cfg(not(feature = "mshv"))]
2778 fn test_guest_numa_nodes() {
2779 _test_guest_numa_nodes(false);
2780 }
2781
2782 #[test]
2783 #[cfg(target_arch = "x86_64")]
2784 fn test_iommu_segments() {
2785 let focal_image = FOCAL_IMAGE_NAME.to_string();
2786 let focal = UbuntuDiskConfig::new(focal_image);
2787 let guest = Guest::new(Box::new(focal));
2788
2789 // Prepare another disk file for the virtio-disk device
2790 let test_disk_path = String::from(
2791 guest
2792 .tmp_dir
2793 .as_path()
2794 .join("test-disk.raw")
2795 .to_str()
2796 .unwrap(),
2797 );
2798 assert!(
2799 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success()
2800 );
2801 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success());
2802
2803 let api_socket = temp_api_path(&guest.tmp_dir);
2804 let mut cmd = GuestCommand::new(&guest);
2805
2806 cmd.args(["--cpus", "boot=1"])
2807 .args(["--api-socket", &api_socket])
2808 .args(["--memory", "size=512M"])
2809 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
2810 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2811 .args([
2812 "--platform",
2813 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS},iommu_segments=[1]"),
2814 ])
2815 .default_disks()
2816 .capture_output()
2817 .default_net();
2818
2819 let mut child = cmd.spawn().unwrap();
2820
2821 guest.wait_vm_boot(None).unwrap();
2822
2823 let r = std::panic::catch_unwind(|| {
2824 let (cmd_success, cmd_output) = remote_command_w_output(
2825 &api_socket,
2826 "add-disk",
2827 Some(
2828 format!(
2829 "path={},id=test0,pci_segment=1,iommu=on",
2830 test_disk_path.as_str()
2831 )
2832 .as_str(),
2833 ),
2834 );
2835 assert!(cmd_success);
2836 assert!(String::from_utf8_lossy(&cmd_output)
2837 .contains("{\"id\":\"test0\",\"bdf\":\"0001:00:01.0\"}"));
2838
2839 // Check IOMMU setup
2840 assert!(guest
2841 .does_device_vendor_pair_match("0x1057", "0x1af4")
2842 .unwrap_or_default());
2843 assert_eq!(
2844 guest
2845 .ssh_command("ls /sys/kernel/iommu_groups/0/devices")
2846 .unwrap()
2847 .trim(),
2848 "0001:00:01.0"
2849 );
2850 });
2851
2852 kill_child(&mut child);
2853 let output = child.wait_with_output().unwrap();
2854
2855 handle_child_output(r, &output);
2856 }
2857
2858 #[test]
2859 fn test_pci_msi() {
2860 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
2861 let guest = Guest::new(Box::new(focal));
2862 let mut cmd = GuestCommand::new(&guest);
2863 cmd.args(["--cpus", "boot=1"])
2864 .args(["--memory", "size=512M"])
2865 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
2866 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2867 .capture_output()
2868 .default_disks()
2869 .default_net();
2870
2871 let mut child = cmd.spawn().unwrap();
2872
2873 guest.wait_vm_boot(None).unwrap();
2874
2875 #[cfg(target_arch = "x86_64")]
2876 let grep_cmd = "grep -c PCI-MSI /proc/interrupts";
2877 #[cfg(target_arch = "aarch64")]
2878 let grep_cmd = "grep -c ITS-PCI-MSIX /proc/interrupts";
2879
2880 let r = std::panic::catch_unwind(|| {
2881 assert_eq!(
2882 guest
2883 .ssh_command(grep_cmd)
2884 .unwrap()
2885 .trim()
2886 .parse::<u32>()
2887 .unwrap_or_default(),
2888 12
2889 );
2890 });
2891
2892 kill_child(&mut child);
2893 let output = child.wait_with_output().unwrap();
2894
2895 handle_child_output(r, &output);
2896 }
2897
2898 #[test]
2899 fn test_virtio_net_ctrl_queue() {
2900 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
2901 let guest = Guest::new(Box::new(focal));
2902 let mut cmd = GuestCommand::new(&guest);
2903 cmd.args(["--cpus", "boot=1"])
2904 .args(["--memory", "size=512M"])
2905 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
2906 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2907 .args(["--net", guest.default_net_string_w_mtu(3000).as_str()])
2908 .capture_output()
2909 .default_disks();
2910
2911 let mut child = cmd.spawn().unwrap();
2912
2913 guest.wait_vm_boot(None).unwrap();
2914
2915 #[cfg(target_arch = "aarch64")]
2916 let iface = "enp0s4";
2917 #[cfg(target_arch = "x86_64")]
2918 let iface = "ens4";
2919
2920 let r = std::panic::catch_unwind(|| {
2921 assert_eq!(
2922 guest
2923 .ssh_command(
2924 format!("sudo ethtool -K {iface} rx-gro-hw off && echo success").as_str()
2925 )
2926 .unwrap()
2927 .trim(),
2928 "success"
2929 );
2930 assert_eq!(
2931 guest
2932 .ssh_command(format!("cat /sys/class/net/{iface}/mtu").as_str())
2933 .unwrap()
2934 .trim(),
2935 "3000"
2936 );
2937 });
2938
2939 kill_child(&mut child);
2940 let output = child.wait_with_output().unwrap();
2941
2942 handle_child_output(r, &output);
2943 }
2944
2945 #[test]
2946 fn test_pci_multiple_segments() {
2947 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
2948 let guest = Guest::new(Box::new(focal));
2949
2950 // Prepare another disk file for the virtio-disk device
2951 let test_disk_path = String::from(
2952 guest
2953 .tmp_dir
2954 .as_path()
2955 .join("test-disk.raw")
2956 .to_str()
2957 .unwrap(),
2958 );
2959 assert!(
2960 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success()
2961 );
2962 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success());
2963
2964 let mut cmd = GuestCommand::new(&guest);
2965 cmd.args(["--cpus", "boot=1"])
2966 .args(["--memory", "size=512M"])
2967 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
2968 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
2969 .args([
2970 "--platform",
2971 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
2972 ])
2973 .args([
2974 "--disk",
2975 format!(
2976 "path={}",
2977 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
2978 )
2979 .as_str(),
2980 format!(
2981 "path={}",
2982 guest.disk_config.disk(DiskType::CloudInit).unwrap()
2983 )
2984 .as_str(),
2985 format!("path={test_disk_path},pci_segment=15").as_str(),
2986 ])
2987 .capture_output()
2988 .default_net();
2989
2990 let mut child = cmd.spawn().unwrap();
2991
2992 guest.wait_vm_boot(None).unwrap();
2993
2994 let grep_cmd = "lspci | grep \"Host bridge\" | wc -l";
2995
2996 let r = std::panic::catch_unwind(|| {
2997 // There should be MAX_NUM_PCI_SEGMENTS PCI host bridges in the guest.
2998 assert_eq!(
2999 guest
3000 .ssh_command(grep_cmd)
3001 .unwrap()
3002 .trim()
3003 .parse::<u16>()
3004 .unwrap_or_default(),
3005 MAX_NUM_PCI_SEGMENTS
3006 );
3007
3008 // Check both that /dev/vdc exists and that its size is 4M.
3009 assert_eq!(
3010 guest
3011 .ssh_command("lsblk | grep vdc | grep -c 4M")
3012 .unwrap()
3013 .trim()
3014 .parse::<u32>()
3015 .unwrap_or_default(),
3016 1
3017 );
3018
3019 // Mount the device.
3020 guest.ssh_command("mkdir mount_image").unwrap();
3021 guest
3022 .ssh_command("sudo mount -o rw -t ext4 /dev/vdc mount_image/")
3023 .unwrap();
3024 // Grant all users write permission.
3025 guest.ssh_command("sudo chmod a+w mount_image/").unwrap();
3026
3027 // Write something to the device.
3028 guest
3029 .ssh_command("sudo echo \"bar\" >> mount_image/foo")
3030 .unwrap();
3031
3032 // Check the content of the block device. The file "foo" should
3033 // contain "bar".
3034 assert_eq!(
3035 guest
3036 .ssh_command("sudo cat mount_image/foo")
3037 .unwrap()
3038 .trim(),
3039 "bar"
3040 );
3041 });
3042
3043 kill_child(&mut child);
3044 let output = child.wait_with_output().unwrap();
3045
3046 handle_child_output(r, &output);
3047 }
3048
3049 #[test]
3050 fn test_pci_multiple_segments_numa_node() {
3051 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3052 let guest = Guest::new(Box::new(focal));
3053 let api_socket = temp_api_path(&guest.tmp_dir);
3054 #[cfg(target_arch = "x86_64")]
3055 let kernel_path = direct_kernel_boot_path();
3056 #[cfg(target_arch = "aarch64")]
3057 let kernel_path = edk2_path();
3058
3059 // Prepare another disk file for the virtio-disk device
3060 let test_disk_path = String::from(
3061 guest
3062 .tmp_dir
3063 .as_path()
3064 .join("test-disk.raw")
3065 .to_str()
3066 .unwrap(),
3067 );
3068 assert!(
3069 exec_host_command_status(format!("truncate {test_disk_path} -s 4M").as_str()).success()
3070 );
3071 assert!(exec_host_command_status(format!("mkfs.ext4 {test_disk_path}").as_str()).success());
3072 const TEST_DISK_NODE: u16 = 1;
3073
3074 let mut child = GuestCommand::new(&guest)
3075 .args(["--platform", "num_pci_segments=2"])
3076 .args(["--cpus", "boot=2"])
3077 .args(["--memory", "size=0"])
3078 .args(["--memory-zone", "id=mem0,size=256M", "id=mem1,size=256M"])
3079 .args([
3080 "--numa",
3081 "guest_numa_id=0,cpus=[0],distances=[1@20],memory_zones=mem0,pci_segments=[0]",
3082 "guest_numa_id=1,cpus=[1],distances=[0@20],memory_zones=mem1,pci_segments=[1]",
3083 ])
3084 .args(["--kernel", kernel_path.to_str().unwrap()])
3085 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3086 .args(["--api-socket", &api_socket])
3087 .capture_output()
3088 .args([
3089 "--disk",
3090 format!(
3091 "path={}",
3092 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
3093 )
3094 .as_str(),
3095 format!(
3096 "path={}",
3097 guest.disk_config.disk(DiskType::CloudInit).unwrap()
3098 )
3099 .as_str(),
3100 format!("path={test_disk_path},pci_segment={TEST_DISK_NODE}").as_str(),
3101 ])
3102 .default_net()
3103 .spawn()
3104 .unwrap();
3105
3106 let cmd = "cat /sys/block/vdc/device/../numa_node";
3107
3108 let r = std::panic::catch_unwind(|| {
3109 guest.wait_vm_boot(None).unwrap();
3110
3111 assert_eq!(
3112 guest
3113 .ssh_command(cmd)
3114 .unwrap()
3115 .trim()
3116 .parse::<u16>()
3117 .unwrap_or_default(),
3118 TEST_DISK_NODE
3119 );
3120 });
3121
3122 kill_child(&mut child);
3123 let output = child.wait_with_output().unwrap();
3124
3125 handle_child_output(r, &output);
3126 }
3127
3128 #[test]
3129 fn test_direct_kernel_boot() {
3130 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3131 let guest = Guest::new(Box::new(focal));
3132
3133 let kernel_path = direct_kernel_boot_path();
3134
3135 let mut child = GuestCommand::new(&guest)
3136 .args(["--cpus", "boot=1"])
3137 .args(["--memory", "size=512M"])
3138 .args(["--kernel", kernel_path.to_str().unwrap()])
3139 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3140 .default_disks()
3141 .default_net()
3142 .capture_output()
3143 .spawn()
3144 .unwrap();
3145
3146 let r = std::panic::catch_unwind(|| {
3147 guest.wait_vm_boot(None).unwrap();
3148
3149 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
3150 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
3151
3152 let grep_cmd = if cfg!(target_arch = "x86_64") {
3153 "grep -c PCI-MSI /proc/interrupts"
3154 } else {
3155 "grep -c ITS-PCI-MSIX /proc/interrupts"
3156 };
3157 assert_eq!(
3158 guest
3159 .ssh_command(grep_cmd)
3160 .unwrap()
3161 .trim()
3162 .parse::<u32>()
3163 .unwrap_or_default(),
3164 12
3165 );
3166 });
3167
3168 kill_child(&mut child);
3169 let output = child.wait_with_output().unwrap();
3170
3171 handle_child_output(r, &output);
3172 }
3173
3174 #[test]
3175 #[cfg(target_arch = "x86_64")]
3176 fn test_direct_kernel_boot_bzimage() {
3177 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3178 let guest = Guest::new(Box::new(focal));
3179
3180 let mut kernel_path = direct_kernel_boot_path();
3181 // Replace the default kernel with the bzImage.
3182 kernel_path.pop();
3183 kernel_path.push("bzImage-x86_64");
3184
3185 let mut child = GuestCommand::new(&guest)
3186 .args(["--cpus", "boot=1"])
3187 .args(["--memory", "size=512M"])
3188 .args(["--kernel", kernel_path.to_str().unwrap()])
3189 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3190 .default_disks()
3191 .default_net()
3192 .capture_output()
3193 .spawn()
3194 .unwrap();
3195
3196 let r = std::panic::catch_unwind(|| {
3197 guest.wait_vm_boot(None).unwrap();
3198
3199 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
3200 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
3201
3202 let grep_cmd = if cfg!(target_arch = "x86_64") {
3203 "grep -c PCI-MSI /proc/interrupts"
3204 } else {
3205 "grep -c ITS-PCI-MSIX /proc/interrupts"
3206 };
3207 assert_eq!(
3208 guest
3209 .ssh_command(grep_cmd)
3210 .unwrap()
3211 .trim()
3212 .parse::<u32>()
3213 .unwrap_or_default(),
3214 12
3215 );
3216 });
3217
3218 kill_child(&mut child);
3219 let output = child.wait_with_output().unwrap();
3220
3221 handle_child_output(r, &output);
3222 }
3223
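// Boot a guest with an extra virtio-block device backed by the read-only
// `blk.img` workload and check its size, read-only flag and queue count.
// The `_disable_io_uring` and `_disable_aio` knobs control which host I/O
// engines the block device is allowed to use.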
3224 fn _test_virtio_block(image_name: &str, disable_io_uring: bool, disable_aio: bool) {
3225 let focal = UbuntuDiskConfig::new(image_name.to_string());
3226 let guest = Guest::new(Box::new(focal));
3227
3228 let mut workload_path = dirs::home_dir().unwrap();
3229 workload_path.push("workloads");
3230
3231 let mut blk_file_path = workload_path;
3232 blk_file_path.push("blk.img");
3233
3234 let kernel_path = direct_kernel_boot_path();
3235
3236 let mut cloud_child = GuestCommand::new(&guest)
3237 .args(["--cpus", "boot=4"])
3238 .args(["--memory", "size=512M,shared=on"])
3239 .args(["--kernel", kernel_path.to_str().unwrap()])
3240 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3241 .args([
3242 "--disk",
3243 format!(
3244 "path={}",
3245 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
3246 )
3247 .as_str(),
3248 format!(
3249 "path={}",
3250 guest.disk_config.disk(DiskType::CloudInit).unwrap()
3251 )
3252 .as_str(),
3253 format!(
3254 "path={},readonly=on,direct=on,num_queues=4,_disable_io_uring={},_disable_aio={}",
3255 blk_file_path.to_str().unwrap(),
3256 disable_io_uring,
3257 disable_aio,
3258 )
3259 .as_str(),
3260 ])
3261 .default_net()
3262 .capture_output()
3263 .spawn()
3264 .unwrap();
3265
3266 let r = std::panic::catch_unwind(|| {
3267 guest.wait_vm_boot(None).unwrap();
3268
3269 // Check both that /dev/vdc exists and that its size is 16M.
3270 assert_eq!(
3271 guest
3272 .ssh_command("lsblk | grep vdc | grep -c 16M")
3273 .unwrap()
3274 .trim()
3275 .parse::<u32>()
3276 .unwrap_or_default(),
3277 1
3278 );
3279
3280 // Check both that /dev/vdc exists and that the device is read-only.
3281 assert_eq!(
3282 guest
3283 .ssh_command("lsblk | grep vdc | awk '{print $5}'")
3284 .unwrap()
3285 .trim()
3286 .parse::<u32>()
3287 .unwrap_or_default(),
3288 1
3289 );
3290
3291 // Check if the number of queues is 4.
3292 assert_eq!(
3293 guest
3294 .ssh_command("ls -ll /sys/block/vdc/mq | grep ^d | wc -l")
3295 .unwrap()
3296 .trim()
3297 .parse::<u32>()
3298 .unwrap_or_default(),
3299 4
3300 );
3301 });
3302
3303 let _ = cloud_child.kill();
3304 let output = cloud_child.wait_with_output().unwrap();
3305
3306 handle_child_output(r, &output);
3307 }
3308
3309 #[test]
3310 fn test_virtio_block_io_uring() {
3311 _test_virtio_block(FOCAL_IMAGE_NAME, false, true)
3312 }
3313
3314 #[test]
3315 fn test_virtio_block_aio() {
3316 _test_virtio_block(FOCAL_IMAGE_NAME, true, false)
3317 }
3318
3319 #[test]
3320 fn test_virtio_block_sync() {
3321 _test_virtio_block(FOCAL_IMAGE_NAME, true, true)
3322 }
3323
3324 #[test]
3325 fn test_virtio_block_qcow2() {
3326 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2, false, false)
3327 }
3328
3329 #[test]
3330 fn test_virtio_block_qcow2_backing_file() {
3331 _test_virtio_block(FOCAL_IMAGE_NAME_QCOW2_BACKING_FILE, false, false)
3332 }
3333
3334 #[test]
3335 fn test_virtio_block_vhd() {
3336 let mut workload_path = dirs::home_dir().unwrap();
3337 workload_path.push("workloads");
3338
3339 let mut raw_file_path = workload_path.clone();
3340 let mut vhd_file_path = workload_path;
3341 raw_file_path.push(FOCAL_IMAGE_NAME);
3342 vhd_file_path.push(FOCAL_IMAGE_NAME_VHD);
3343
3344 // Generate VHD file from RAW file
3345 std::process::Command::new("qemu-img")
3346 .arg("convert")
3347 .arg("-p")
3348 .args(["-f", "raw"])
3349 .args(["-O", "vpc"])
3350 .args(["-o", "subformat=fixed"])
3351 .arg(raw_file_path.to_str().unwrap())
3352 .arg(vhd_file_path.to_str().unwrap())
3353 .output()
3354 .expect("Expect generating VHD image from RAW image");
3355
3356 _test_virtio_block(FOCAL_IMAGE_NAME_VHD, false, false)
3357 }
3358
3359 #[test]
3360 fn test_virtio_block_vhdx() {
3361 let mut workload_path = dirs::home_dir().unwrap();
3362 workload_path.push("workloads");
3363
3364 let mut raw_file_path = workload_path.clone();
3365 let mut vhdx_file_path = workload_path;
3366 raw_file_path.push(FOCAL_IMAGE_NAME);
3367 vhdx_file_path.push(FOCAL_IMAGE_NAME_VHDX);
3368
3369 // Generate dynamic VHDX file from RAW file
3370 std::process::Command::new("qemu-img")
3371 .arg("convert")
3372 .arg("-p")
3373 .args(["-f", "raw"])
3374 .args(["-O", "vhdx"])
3375 .arg(raw_file_path.to_str().unwrap())
3376 .arg(vhdx_file_path.to_str().unwrap())
3377 .output()
3378 .expect("Expect generating dynamic VHDx image from RAW image");
3379
3380 _test_virtio_block(FOCAL_IMAGE_NAME_VHDX, false, false)
3381 }
3382
3383 #[test]
3384 fn test_virtio_block_dynamic_vhdx_expand() {
3385 const VIRTUAL_DISK_SIZE: u64 = 100 << 20;
3386 const EMPTY_VHDX_FILE_SIZE: u64 = 8 << 20;
3387 const FULL_VHDX_FILE_SIZE: u64 = 112 << 20;
3388 const DYNAMIC_VHDX_NAME: &str = "dynamic.vhdx";
3389
3390 let mut workload_path = dirs::home_dir().unwrap();
3391 workload_path.push("workloads");
3392
3393 let mut vhdx_file_path = workload_path;
3394 vhdx_file_path.push(DYNAMIC_VHDX_NAME);
3395 let vhdx_path = vhdx_file_path.to_str().unwrap();
3396
3397 // Generate a 100 MiB dynamic VHDX file
3398 std::process::Command::new("qemu-img")
3399 .arg("create")
3400 .args(["-f", "vhdx"])
3401 .arg(vhdx_path)
3402 .arg(VIRTUAL_DISK_SIZE.to_string())
3403 .output()
3404 .expect("Expect generating dynamic VHDx image from RAW image");
3405
3406 // Check that the file size matches the expected empty VHDx file size
3407 assert_eq!(vhdx_image_size(vhdx_path), EMPTY_VHDX_FILE_SIZE);
3408
3409 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3410 let guest = Guest::new(Box::new(focal));
3411 let kernel_path = direct_kernel_boot_path();
3412
3413 let mut cloud_child = GuestCommand::new(&guest)
3414 .args(["--cpus", "boot=1"])
3415 .args(["--memory", "size=512M"])
3416 .args(["--kernel", kernel_path.to_str().unwrap()])
3417 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3418 .args([
3419 "--disk",
3420 format!(
3421 "path={}",
3422 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
3423 )
3424 .as_str(),
3425 format!(
3426 "path={}",
3427 guest.disk_config.disk(DiskType::CloudInit).unwrap()
3428 )
3429 .as_str(),
3430 format!("path={vhdx_path}").as_str(),
3431 ])
3432 .default_net()
3433 .capture_output()
3434 .spawn()
3435 .unwrap();
3436
3437 let r = std::panic::catch_unwind(|| {
3438 guest.wait_vm_boot(None).unwrap();
3439
3440 // Check both that /dev/vdc exists and that its size is 100 MiB.
3441 assert_eq!(
3442 guest
3443 .ssh_command("lsblk | grep vdc | grep -c 100M")
3444 .unwrap()
3445 .trim()
3446 .parse::<u32>()
3447 .unwrap_or_default(),
3448 1
3449 );
3450
3451 // Write 100 MB of data to the VHDx disk
3452 guest
3453 .ssh_command("sudo dd if=/dev/urandom of=/dev/vdc bs=1M count=100")
3454 .unwrap();
3455 });
3456
3457 // Check that the file size matches the expected expanded VHDx file size
3458 assert_eq!(vhdx_image_size(vhdx_path), FULL_VHDX_FILE_SIZE);
3459
3460 kill_child(&mut cloud_child);
3461 let output = cloud_child.wait_with_output().unwrap();
3462
3463 handle_child_output(r, &output);
3464 }
3465
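// On-disk size of a VHDX image, obtained by seeking to the end of the file.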
3466 fn vhdx_image_size(disk_name: &str) -> u64 {
3467 std::fs::File::open(disk_name)
3468 .unwrap()
3469 .seek(SeekFrom::End(0))
3470 .unwrap()
3471 }
3472
3473 #[test]
3474 fn test_virtio_block_direct_and_firmware() {
3475 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3476 let guest = Guest::new(Box::new(focal));
3477
3478 // The OS disk must be copied to a location that is not backed by
3479 // tmpfs, otherwise the syscall openat(2) with O_DIRECT simply fails
3480 // with EINVAL because tmpfs doesn't support this flag.
3481 let mut workloads_path = dirs::home_dir().unwrap();
3482 workloads_path.push("workloads");
3483 let os_dir = TempDir::new_in(workloads_path.as_path()).unwrap();
3484 let mut os_path = os_dir.as_path().to_path_buf();
3485 os_path.push("osdisk.img");
3486 rate_limited_copy(
3487 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(),
3488 os_path.as_path(),
3489 )
3490 .expect("copying of OS disk failed");
3491
3492 let mut child = GuestCommand::new(&guest)
3493 .args(["--cpus", "boot=1"])
3494 .args(["--memory", "size=512M"])
3495 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
3496 .args([
3497 "--disk",
3498 format!("path={},direct=on", os_path.as_path().to_str().unwrap()).as_str(),
3499 format!(
3500 "path={}",
3501 guest.disk_config.disk(DiskType::CloudInit).unwrap()
3502 )
3503 .as_str(),
3504 ])
3505 .default_net()
3506 .capture_output()
3507 .spawn()
3508 .unwrap();
3509
3510 let r = std::panic::catch_unwind(|| {
3511 guest.wait_vm_boot(Some(120)).unwrap();
3512 });
3513
3514 kill_child(&mut child);
3515 let output = child.wait_with_output().unwrap();
3516
3517 handle_child_output(r, &output);
3518 }
3519
3520 #[test]
3521 fn test_vhost_user_net_default() {
3522 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, false)
3523 }
3524
3525 #[test]
3526 fn test_vhost_user_net_named_tap() {
3527 test_vhost_user_net(
3528 Some("mytap0"),
3529 2,
3530 &prepare_vhost_user_net_daemon,
3531 false,
3532 false,
3533 )
3534 }
3535
3536 #[test]
3537 fn test_vhost_user_net_existing_tap() {
3538 test_vhost_user_net(
3539 Some("vunet-tap0"),
3540 2,
3541 &prepare_vhost_user_net_daemon,
3542 false,
3543 false,
3544 )
3545 }
3546
3547 #[test]
3548 fn test_vhost_user_net_multiple_queues() {
3549 test_vhost_user_net(None, 4, &prepare_vhost_user_net_daemon, false, false)
3550 }
3551
3552 #[test]
3553 fn test_vhost_user_net_tap_multiple_queues() {
3554 test_vhost_user_net(
3555 Some("vunet-tap1"),
3556 4,
3557 &prepare_vhost_user_net_daemon,
3558 false,
3559 false,
3560 )
3561 }
3562
3563 #[test]
3564 fn test_vhost_user_net_host_mac() {
3565 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, true, false)
3566 }
3567
3568 #[test]
3569 fn test_vhost_user_net_client_mode() {
3570 test_vhost_user_net(None, 2, &prepare_vhost_user_net_daemon, false, true)
3571 }
3572
3573 #[test]
3574 #[cfg(not(target_arch = "aarch64"))]
3575 fn test_vhost_user_blk_default() {
3576 test_vhost_user_blk(2, false, false, Some(&prepare_vubd))
3577 }
3578
3579 #[test]
3580 #[cfg(not(target_arch = "aarch64"))]
3581 fn test_vhost_user_blk_readonly() {
3582 test_vhost_user_blk(1, true, false, Some(&prepare_vubd))
3583 }
3584
3585 #[test]
3586 #[cfg(not(target_arch = "aarch64"))]
3587 fn test_vhost_user_blk_direct() {
3588 test_vhost_user_blk(1, false, true, Some(&prepare_vubd))
3589 }
3590
3591 #[test]
3592 fn test_boot_from_vhost_user_blk_default() {
3593 test_boot_from_vhost_user_blk(1, false, false, Some(&prepare_vubd))
3594 }
3595
3596 #[test]
3597 #[cfg(target_arch = "x86_64")]
3598 fn test_split_irqchip() {
3599 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3600 let guest = Guest::new(Box::new(focal));
3601
3602 let mut child = GuestCommand::new(&guest)
3603 .args(["--cpus", "boot=1"])
3604 .args(["--memory", "size=512M"])
3605 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
3606 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3607 .default_disks()
3608 .default_net()
3609 .capture_output()
3610 .spawn()
3611 .unwrap();
3612
3613 let r = std::panic::catch_unwind(|| {
3614 guest.wait_vm_boot(None).unwrap();
3615
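        // With a split IRQ chip, the legacy PIT timer and cascade interrupts
        // should not be routed through the IO-APIC, so neither entry should
        // appear in /proc/interrupts. The unwrap_or(1) fallback turns an
        // unparsable grep count into a test failure.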
3616 assert_eq!(
3617 guest
3618 .ssh_command("grep -c IO-APIC.*timer /proc/interrupts || true")
3619 .unwrap()
3620 .trim()
3621 .parse::<u32>()
3622 .unwrap_or(1),
3623 0
3624 );
3625 assert_eq!(
3626 guest
3627 .ssh_command("grep -c IO-APIC.*cascade /proc/interrupts || true")
3628 .unwrap()
3629 .trim()
3630 .parse::<u32>()
3631 .unwrap_or(1),
3632 0
3633 );
3634 });
3635
3636 kill_child(&mut child);
3637 let output = child.wait_with_output().unwrap();
3638
3639 handle_child_output(r, &output);
3640 }
3641
3642 #[test]
3643 #[cfg(target_arch = "x86_64")]
3644 fn test_dmi_serial_number() {
3645 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3646 let guest = Guest::new(Box::new(focal));
3647
3648 let mut child = GuestCommand::new(&guest)
3649 .args(["--cpus", "boot=1"])
3650 .args(["--memory", "size=512M"])
3651 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
3652 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3653 .args(["--platform", "serial_number=a=b;c=d"])
3654 .default_disks()
3655 .default_net()
3656 .capture_output()
3657 .spawn()
3658 .unwrap();
3659
3660 let r = std::panic::catch_unwind(|| {
3661 guest.wait_vm_boot(None).unwrap();
3662
3663 assert_eq!(
3664 guest
3665 .ssh_command("sudo cat /sys/class/dmi/id/product_serial")
3666 .unwrap()
3667 .trim(),
3668 "a=b;c=d"
3669 );
3670 });
3671
3672 kill_child(&mut child);
3673 let output = child.wait_with_output().unwrap();
3674
3675 handle_child_output(r, &output);
3676 }
3677
3678 #[test]
3679 #[cfg(target_arch = "x86_64")]
3680 fn test_dmi_uuid() {
3681 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3682 let guest = Guest::new(Box::new(focal));
3683
3684 let mut child = GuestCommand::new(&guest)
3685 .args(["--cpus", "boot=1"])
3686 .args(["--memory", "size=512M"])
3687 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
3688 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3689 .args(["--platform", "uuid=1e8aa28a-435d-4027-87f4-40dceff1fa0a"])
3690 .default_disks()
3691 .default_net()
3692 .capture_output()
3693 .spawn()
3694 .unwrap();
3695
3696 let r = std::panic::catch_unwind(|| {
3697 guest.wait_vm_boot(None).unwrap();
3698
3699 assert_eq!(
3700 guest
3701 .ssh_command("sudo cat /sys/class/dmi/id/product_uuid")
3702 .unwrap()
3703 .trim(),
3704 "1e8aa28a-435d-4027-87f4-40dceff1fa0a"
3705 );
3706 });
3707
3708 kill_child(&mut child);
3709 let output = child.wait_with_output().unwrap();
3710
3711 handle_child_output(r, &output);
3712 }
3713
3714 #[test]
3715 #[cfg(target_arch = "x86_64")]
3716 fn test_dmi_oem_strings() {
3717 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3718 let guest = Guest::new(Box::new(focal));
3719
3720 let s1 = "io.systemd.credential:xx=yy";
3721 let s2 = "This is a test string";
3722
3723 let oem_strings = format!("oem_strings=[{s1},{s2}]");
3724
3725 let mut child = GuestCommand::new(&guest)
3726 .args(["--cpus", "boot=1"])
3727 .args(["--memory", "size=512M"])
3728 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
3729 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3730 .args(["--platform", &oem_strings])
3731 .default_disks()
3732 .default_net()
3733 .capture_output()
3734 .spawn()
3735 .unwrap();
3736
3737 let r = std::panic::catch_unwind(|| {
3738 guest.wait_vm_boot(None).unwrap();
3739
3740 assert_eq!(
3741 guest
3742 .ssh_command("sudo dmidecode --oem-string count")
3743 .unwrap()
3744 .trim(),
3745 "2"
3746 );
3747
3748 assert_eq!(
3749 guest
3750 .ssh_command("sudo dmidecode --oem-string 1")
3751 .unwrap()
3752 .trim(),
3753 s1
3754 );
3755
3756 assert_eq!(
3757 guest
3758 .ssh_command("sudo dmidecode --oem-string 2")
3759 .unwrap()
3760 .trim(),
3761 s2
3762 );
3763 });
3764
3765 kill_child(&mut child);
3766 let output = child.wait_with_output().unwrap();
3767
3768 handle_child_output(r, &output);
3769 }
3770
3771 #[test]
3772 fn test_virtio_fs() {
3773 _test_virtio_fs(&prepare_virtiofsd, false, None)
3774 }
3775
3776 #[test]
3777 fn test_virtio_fs_hotplug() {
3778 _test_virtio_fs(&prepare_virtiofsd, true, None)
3779 }
3780
3781 #[test]
3782 #[cfg(not(feature = "mshv"))]
3783 fn test_virtio_fs_multi_segment_hotplug() {
3784 _test_virtio_fs(&prepare_virtiofsd, true, Some(15))
3785 }
3786
3787 #[test]
3788 #[cfg(not(feature = "mshv"))]
3789 fn test_virtio_fs_multi_segment() {
3790 _test_virtio_fs(&prepare_virtiofsd, false, Some(15))
3791 }
3792
3793 #[test]
3794 fn test_virtio_pmem_discard_writes() {
3795 test_virtio_pmem(true, false)
3796 }
3797
3798 #[test]
3799 fn test_virtio_pmem_with_size() {
3800 test_virtio_pmem(true, true)
3801 }
3802
3803 #[test]
3804 fn test_boot_from_virtio_pmem() {
3805 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3806 let guest = Guest::new(Box::new(focal));
3807
3808 let kernel_path = direct_kernel_boot_path();
3809
3810 let mut child = GuestCommand::new(&guest)
3811 .args(["--cpus", "boot=1"])
3812 .args(["--memory", "size=512M"])
3813 .args(["--kernel", kernel_path.to_str().unwrap()])
3814 .args([
3815 "--disk",
3816 format!(
3817 "path={}",
3818 guest.disk_config.disk(DiskType::CloudInit).unwrap()
3819 )
3820 .as_str(),
3821 ])
3822 .default_net()
3823 .args([
3824 "--pmem",
3825 format!(
3826 "file={},size={}",
3827 guest.disk_config.disk(DiskType::OperatingSystem).unwrap(),
3828 fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap())
3829 .unwrap()
3830 .len()
3831 )
3832 .as_str(),
3833 ])
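        // Instead of booting from the virtio-blk disk, the OS image is exposed
        // as a virtio-pmem device sized to the image file, and root= is pointed
        // at the first partition of that pmem device.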
3834 .args([
3835 "--cmdline",
3836 DIRECT_KERNEL_BOOT_CMDLINE
3837 .replace("vda1", "pmem0p1")
3838 .as_str(),
3839 ])
3840 .capture_output()
3841 .spawn()
3842 .unwrap();
3843
3844 let r = std::panic::catch_unwind(|| {
3845 guest.wait_vm_boot(None).unwrap();
3846
3847 // Simple checks to validate the VM booted properly
3848 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
3849 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
3850 });
3851
3852 kill_child(&mut child);
3853 let output = child.wait_with_output().unwrap();
3854
3855 handle_child_output(r, &output);
3856 }
3857
3858 #[test]
3859 fn test_multiple_network_interfaces() {
3860 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3861 let guest = Guest::new(Box::new(focal));
3862
3863 let kernel_path = direct_kernel_boot_path();
3864
3865 let mut child = GuestCommand::new(&guest)
3866 .args(["--cpus", "boot=1"])
3867 .args(["--memory", "size=512M"])
3868 .args(["--kernel", kernel_path.to_str().unwrap()])
3869 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3870 .default_disks()
3871 .args([
3872 "--net",
3873 guest.default_net_string().as_str(),
3874 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
3875 "tap=mytap1,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0",
3876 ])
3877 .capture_output()
3878 .spawn()
3879 .unwrap();
3880
3881 let r = std::panic::catch_unwind(|| {
3882 guest.wait_vm_boot(None).unwrap();
3883
3884 let tap_count = exec_host_command_output("ip link | grep -c mytap1");
3885 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
3886
3887 // 3 network interfaces + default localhost ==> 4 interfaces
3888 assert_eq!(
3889 guest
3890 .ssh_command("ip -o link | wc -l")
3891 .unwrap()
3892 .trim()
3893 .parse::<u32>()
3894 .unwrap_or_default(),
3895 4
3896 );
3897 });
3898
3899 kill_child(&mut child);
3900 let output = child.wait_with_output().unwrap();
3901
3902 handle_child_output(r, &output);
3903 }
3904
3905 #[test]
3906 #[cfg(target_arch = "aarch64")]
3907 fn test_pmu_on() {
3908 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3909 let guest = Guest::new(Box::new(focal));
3910 let mut child = GuestCommand::new(&guest)
3911 .args(["--cpus", "boot=1"])
3912 .args(["--memory", "size=512M"])
3913 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
3914 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3915 .default_disks()
3916 .default_net()
3917 .capture_output()
3918 .spawn()
3919 .unwrap();
3920
3921 let r = std::panic::catch_unwind(|| {
3922 guest.wait_vm_boot(None).unwrap();
3923
3924 // Test that PMU exists.
3925 assert_eq!(
3926 guest
3927 .ssh_command(GREP_PMU_IRQ_CMD)
3928 .unwrap()
3929 .trim()
3930 .parse::<u32>()
3931 .unwrap_or_default(),
3932 1
3933 );
3934 });
3935
3936 kill_child(&mut child);
3937 let output = child.wait_with_output().unwrap();
3938
3939 handle_child_output(r, &output);
3940 }
3941
3942 #[test]
3943 fn test_serial_off() {
3944 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3945 let guest = Guest::new(Box::new(focal));
3946 let mut child = GuestCommand::new(&guest)
3947 .args(["--cpus", "boot=1"])
3948 .args(["--memory", "size=512M"])
3949 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
3950 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
3951 .default_disks()
3952 .default_net()
3953 .args(["--serial", "off"])
3954 .capture_output()
3955 .spawn()
3956 .unwrap();
3957
3958 let r = std::panic::catch_unwind(|| {
3959 guest.wait_vm_boot(None).unwrap();
3960
3961 // Test that there is no ttyS0
3962 assert_eq!(
3963 guest
3964 .ssh_command(GREP_SERIAL_IRQ_CMD)
3965 .unwrap()
3966 .trim()
3967 .parse::<u32>()
3968 .unwrap_or(1),
3969 0
3970 );
3971 });
3972
3973 kill_child(&mut child);
3974 let output = child.wait_with_output().unwrap();
3975
3976 handle_child_output(r, &output);
3977 }
3978
3979 #[test]
3980 fn test_serial_null() {
3981 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
3982 let guest = Guest::new(Box::new(focal));
3983 let mut cmd = GuestCommand::new(&guest);
3984 #[cfg(target_arch = "x86_64")]
3985 let console_str: &str = "console=ttyS0";
3986 #[cfg(target_arch = "aarch64")]
3987 let console_str: &str = "console=ttyAMA0";
3988
3989 cmd.args(["--cpus", "boot=1"])
3990 .args(["--memory", "size=512M"])
3991 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
3992 .args([
3993 "--cmdline",
3994 DIRECT_KERNEL_BOOT_CMDLINE
3995                 .replace("console=hvc0", console_str)
3996 .as_str(),
3997 ])
3998 .default_disks()
3999 .default_net()
4000 .args(["--serial", "null"])
4001 .args(["--console", "off"])
4002 .capture_output();
4003
4004 let mut child = cmd.spawn().unwrap();
4005
4006 let r = std::panic::catch_unwind(|| {
4007 guest.wait_vm_boot(None).unwrap();
4008
4009 // Test that there is a ttyS0
4010 assert_eq!(
4011 guest
4012 .ssh_command(GREP_SERIAL_IRQ_CMD)
4013 .unwrap()
4014 .trim()
4015 .parse::<u32>()
4016 .unwrap_or_default(),
4017 1
4018 );
4019 });
4020
4021 kill_child(&mut child);
4022 let output = child.wait_with_output().unwrap();
4023 handle_child_output(r, &output);
4024
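    // With --serial null and --console off, guest output should never reach
    // the VMM's stdout, hence the negative check below.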
4025 let r = std::panic::catch_unwind(|| {
4026 assert!(!String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING));
4027 });
4028
4029 handle_child_output(r, &output);
4030 }
4031
4032 #[test]
4033 fn test_serial_tty() {
4034 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4035 let guest = Guest::new(Box::new(focal));
4036
4037 let kernel_path = direct_kernel_boot_path();
4038
4039 #[cfg(target_arch = "x86_64")]
4040 let console_str: &str = "console=ttyS0";
4041 #[cfg(target_arch = "aarch64")]
4042 let console_str: &str = "console=ttyAMA0";
4043
4044 let mut child = GuestCommand::new(&guest)
4045 .args(["--cpus", "boot=1"])
4046 .args(["--memory", "size=512M"])
4047 .args(["--kernel", kernel_path.to_str().unwrap()])
4048 .args([
4049 "--cmdline",
4050 DIRECT_KERNEL_BOOT_CMDLINE
4051                 .replace("console=hvc0", console_str)
4052 .as_str(),
4053 ])
4054 .default_disks()
4055 .default_net()
4056 .args(["--serial", "tty"])
4057 .args(["--console", "off"])
4058 .capture_output()
4059 .spawn()
4060 .unwrap();
4061
4062 let r = std::panic::catch_unwind(|| {
4063 guest.wait_vm_boot(None).unwrap();
4064
4065 // Test that there is a ttyS0
4066 assert_eq!(
4067 guest
4068 .ssh_command(GREP_SERIAL_IRQ_CMD)
4069 .unwrap()
4070 .trim()
4071 .parse::<u32>()
4072 .unwrap_or_default(),
4073 1
4074 );
4075 });
4076
4077 // This sleep is needed to wait for the login prompt
4078 thread::sleep(std::time::Duration::new(2, 0));
4079
4080 kill_child(&mut child);
4081 let output = child.wait_with_output().unwrap();
4082 handle_child_output(r, &output);
4083
4084 let r = std::panic::catch_unwind(|| {
4085 assert!(String::from_utf8_lossy(&output.stdout).contains(CONSOLE_TEST_STRING));
4086 });
4087
4088 handle_child_output(r, &output);
4089 }
4090
4091 #[test]
4092 fn test_serial_file() {
4093 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4094 let guest = Guest::new(Box::new(focal));
4095
4096 let serial_path = guest.tmp_dir.as_path().join("serial-output");
4097 #[cfg(target_arch = "x86_64")]
4098 let console_str: &str = "console=ttyS0";
4099 #[cfg(target_arch = "aarch64")]
4100 let console_str: &str = "console=ttyAMA0";
4101
4102 let mut child = GuestCommand::new(&guest)
4103 .args(["--cpus", "boot=1"])
4104 .args(["--memory", "size=512M"])
4105 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
4106 .args([
4107 "--cmdline",
4108 DIRECT_KERNEL_BOOT_CMDLINE
4109                 .replace("console=hvc0", console_str)
4110 .as_str(),
4111 ])
4112 .default_disks()
4113 .default_net()
4114 .args([
4115 "--serial",
4116 format!("file={}", serial_path.to_str().unwrap()).as_str(),
4117 ])
4118 .capture_output()
4119 .spawn()
4120 .unwrap();
4121
4122 let r = std::panic::catch_unwind(|| {
4123 guest.wait_vm_boot(None).unwrap();
4124
4125 // Test that there is a ttyS0
4126 assert_eq!(
4127 guest
4128 .ssh_command(GREP_SERIAL_IRQ_CMD)
4129 .unwrap()
4130 .trim()
4131 .parse::<u32>()
4132 .unwrap_or_default(),
4133 1
4134 );
4135
4136 guest.ssh_command("sudo shutdown -h now").unwrap();
4137 });
4138
4139 let _ = child.wait_timeout(std::time::Duration::from_secs(20));
4140 kill_child(&mut child);
4141 let output = child.wait_with_output().unwrap();
4142 handle_child_output(r, &output);
4143
4144 let r = std::panic::catch_unwind(|| {
4145 // Check that the cloud-hypervisor binary actually terminated
4146 assert!(output.status.success());
4147
4148 // Do this check after shutdown of the VM as an easy way to ensure
4149 // all writes are flushed to disk
4150 let mut f = std::fs::File::open(serial_path).unwrap();
4151 let mut buf = String::new();
4152 f.read_to_string(&mut buf).unwrap();
4153 assert!(buf.contains(CONSOLE_TEST_STRING));
4154 });
4155
4156 handle_child_output(r, &output);
4157 }
4158
4159 #[test]
4160 fn test_pty_interaction() {
4161 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4162 let guest = Guest::new(Box::new(focal));
4163 let api_socket = temp_api_path(&guest.tmp_dir);
4164 let serial_option = if cfg!(target_arch = "x86_64") {
4165 " console=ttyS0"
4166 } else {
4167 " console=ttyAMA0"
4168 };
4169 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option;
4170
4171 let mut child = GuestCommand::new(&guest)
4172 .args(["--cpus", "boot=1"])
4173 .args(["--memory", "size=512M"])
4174 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
4175 .args(["--cmdline", &cmdline])
4176 .default_disks()
4177 .default_net()
4178 .args(["--serial", "null"])
4179 .args(["--console", "pty"])
4180 .args(["--api-socket", &api_socket])
4181 .spawn()
4182 .unwrap();
4183
4184 let r = std::panic::catch_unwind(|| {
4185 guest.wait_vm_boot(None).unwrap();
4186 // Get pty fd for console
4187 let console_path = get_pty_path(&api_socket, "console");
4188 _test_pty_interaction(console_path);
4189
4190 guest.ssh_command("sudo shutdown -h now").unwrap();
4191 });
4192
4193 let _ = child.wait_timeout(std::time::Duration::from_secs(20));
4194 let _ = child.kill();
4195 let output = child.wait_with_output().unwrap();
4196 handle_child_output(r, &output);
4197
4198 let r = std::panic::catch_unwind(|| {
4199 // Check that the cloud-hypervisor binary actually terminated
4200 assert!(output.status.success())
4201 });
4202 handle_child_output(r, &output);
4203 }
4204
4205 #[test]
4206 fn test_serial_socket_interaction() {
4207 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4208 let guest = Guest::new(Box::new(focal));
4209 let serial_socket = guest.tmp_dir.as_path().join("serial.socket");
4210 let serial_socket_pty = guest.tmp_dir.as_path().join("serial.pty");
4211 let serial_option = if cfg!(target_arch = "x86_64") {
4212 " console=ttyS0"
4213 } else {
4214 " console=ttyAMA0"
4215 };
4216 let cmdline = DIRECT_KERNEL_BOOT_CMDLINE.to_owned() + serial_option;
4217
4218 let mut child = GuestCommand::new(&guest)
4219 .args(["--cpus", "boot=1"])
4220 .args(["--memory", "size=512M"])
4221 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
4222 .args(["--cmdline", &cmdline])
4223 .default_disks()
4224 .default_net()
4225 .args(["--console", "null"])
4226 .args([
4227 "--serial",
4228 format!("socket={}", serial_socket.to_str().unwrap()).as_str(),
4229 ])
4230 .spawn()
4231 .unwrap();
4232
4233 let _ = std::panic::catch_unwind(|| {
4234 guest.wait_vm_boot(None).unwrap();
4235 });
4236
4237 let mut socat_command = Command::new("socat");
4238 let socat_args = [
4239 &format!("pty,link={},raw", serial_socket_pty.display()),
4240 &format!("UNIX-CONNECT:{}", serial_socket.display()),
4241 ];
4242 socat_command.args(socat_args);
4243
4244 let mut socat_child = socat_command.spawn().unwrap();
4245 thread::sleep(std::time::Duration::new(1, 0));
4246
4247 let _ = std::panic::catch_unwind(|| {
4248 _test_pty_interaction(serial_socket_pty);
4249 });
4250
4251 let _ = socat_child.kill();
4252 let _ = socat_child.wait();
4253
4254 let r = std::panic::catch_unwind(|| {
4255 guest.ssh_command("sudo shutdown -h now").unwrap();
4256 });
4257
4258 let _ = child.wait_timeout(std::time::Duration::from_secs(20));
4259 kill_child(&mut child);
4260 let output = child.wait_with_output().unwrap();
4261 handle_child_output(r, &output);
4262
4263 let r = std::panic::catch_unwind(|| {
4264 // Check that the cloud-hypervisor binary actually terminated
4265 if !output.status.success() {
4266 panic!(
4267 "Cloud Hypervisor process failed to terminate gracefully: {:?}",
4268 output.status
4269 );
4270 }
4271 });
4272 handle_child_output(r, &output);
4273 }
4274
4275 #[test]
4276 fn test_virtio_console() {
4277 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4278 let guest = Guest::new(Box::new(focal));
4279
4280 let kernel_path = direct_kernel_boot_path();
4281
4282 let mut child = GuestCommand::new(&guest)
4283 .args(["--cpus", "boot=1"])
4284 .args(["--memory", "size=512M"])
4285 .args(["--kernel", kernel_path.to_str().unwrap()])
4286 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
4287 .default_disks()
4288 .default_net()
4289 .args(["--console", "tty"])
4290 .args(["--serial", "null"])
4291 .capture_output()
4292 .spawn()
4293 .unwrap();
4294
4295 let text = String::from("On a branch floating down river a cricket, singing.");
4296 let cmd = format!("echo {text} | sudo tee /dev/hvc0");
4297
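    // Anything written to /dev/hvc0 inside the guest should show up on the
    // VMM's captured stdout, since --console tty attaches the virtio-console
    // output to this test process.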
4298 let r = std::panic::catch_unwind(|| {
4299 guest.wait_vm_boot(None).unwrap();
4300
4301 assert!(guest
4302 .does_device_vendor_pair_match("0x1043", "0x1af4")
4303 .unwrap_or_default());
4304
4305 guest.ssh_command(&cmd).unwrap();
4306 });
4307
4308 kill_child(&mut child);
4309 let output = child.wait_with_output().unwrap();
4310 handle_child_output(r, &output);
4311
4312 let r = std::panic::catch_unwind(|| {
4313 assert!(String::from_utf8_lossy(&output.stdout).contains(&text));
4314 });
4315
4316 handle_child_output(r, &output);
4317 }
4318
4319 #[test]
4320 fn test_console_file() {
4321 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4322 let guest = Guest::new(Box::new(focal));
4323
4324 let console_path = guest.tmp_dir.as_path().join("console-output");
4325 let mut child = GuestCommand::new(&guest)
4326 .args(["--cpus", "boot=1"])
4327 .args(["--memory", "size=512M"])
4328 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
4329 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
4330 .default_disks()
4331 .default_net()
4332 .args([
4333 "--console",
4334 format!("file={}", console_path.to_str().unwrap()).as_str(),
4335 ])
4336 .capture_output()
4337 .spawn()
4338 .unwrap();
4339
4340 guest.wait_vm_boot(None).unwrap();
4341
4342 guest.ssh_command("sudo shutdown -h now").unwrap();
4343
4344 let _ = child.wait_timeout(std::time::Duration::from_secs(20));
4345 kill_child(&mut child);
4346 let output = child.wait_with_output().unwrap();
4347
4348 let r = std::panic::catch_unwind(|| {
4349 // Check that the cloud-hypervisor binary actually terminated
4350 assert!(output.status.success());
4351
4352 // Do this check after shutdown of the VM as an easy way to ensure
4353 // all writes are flushed to disk
4354 let mut f = std::fs::File::open(console_path).unwrap();
4355 let mut buf = String::new();
4356 f.read_to_string(&mut buf).unwrap();
4357
4358 if !buf.contains(CONSOLE_TEST_STRING) {
4359 eprintln!(
4360 "\n\n==== Console file output ====\n\n{buf}\n\n==== End console file output ===="
4361 );
4362 }
4363 assert!(buf.contains(CONSOLE_TEST_STRING));
4364 });
4365
4366 handle_child_output(r, &output);
4367 }
4368
4369 #[test]
4370 #[cfg(target_arch = "x86_64")]
4371 #[cfg(not(feature = "mshv"))]
4372 // The VFIO integration test starts a cloud-hypervisor guest with 3
4373 // TAP-backed networking interfaces, bound through a simple bridge on the
4374 // host. If the nested cloud-hypervisor succeeds in getting a directly
4375 // assigned interface from its cloud-hypervisor host, we should be able to
4376 // ssh into it and verify that it is running with the right kernel command
4377 // line (we tag the command line from cloud-hypervisor for that purpose).
4378 // The third device validates that hotplug works correctly, since it is
4379 // added to the L2 VM through the hotplug mechanism.
4380 // We also pass through a virtio-blk device to the L2 VM to test 32-bit
4381 // VFIO device support.
4382 fn test_vfio() {
4383 setup_vfio_network_interfaces();
4384
4385 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string());
4386 let guest = Guest::new_from_ip_range(Box::new(jammy), "172.18", 0);
4387
4388 let mut workload_path = dirs::home_dir().unwrap();
4389 workload_path.push("workloads");
4390
4391 let kernel_path = direct_kernel_boot_path();
4392
4393 let mut vfio_path = workload_path.clone();
4394 vfio_path.push("vfio");
4395
4396 let mut cloud_init_vfio_base_path = vfio_path.clone();
4397 cloud_init_vfio_base_path.push("cloudinit.img");
4398
4399 // We copy our cloudinit into the vfio mount point, for the nested
4400 // cloud-hypervisor guest to use.
4401 rate_limited_copy(
4402 guest.disk_config.disk(DiskType::CloudInit).unwrap(),
4403 &cloud_init_vfio_base_path,
4404 )
4405 .expect("copying of cloud-init disk failed");
4406
4407 let mut vfio_disk_path = workload_path.clone();
4408 vfio_disk_path.push("vfio.img");
4409
4410 // Create the vfio disk image
4411 let output = Command::new("mkfs.ext4")
4412 .arg("-d")
4413 .arg(vfio_path.to_str().unwrap())
4414 .arg(vfio_disk_path.to_str().unwrap())
4415 .arg("2g")
4416 .output()
4417 .unwrap();
4418 if !output.status.success() {
4419 eprintln!("{}", String::from_utf8_lossy(&output.stderr));
4420 panic!("mkfs.ext4 command generated an error");
4421 }
4422
4423 let mut blk_file_path = workload_path;
4424 blk_file_path.push("blk.img");
4425
4426 let vfio_tap0 = "vfio-tap0";
4427 let vfio_tap1 = "vfio-tap1";
4428 let vfio_tap2 = "vfio-tap2";
4429 let vfio_tap3 = "vfio-tap3";
4430
4431 let mut child = GuestCommand::new(&guest)
4432 .args(["--cpus", "boot=4"])
4433 .args(["--memory", "size=2G,hugepages=on,shared=on"])
4434 .args(["--kernel", kernel_path.to_str().unwrap()])
4435 .args([
4436 "--disk",
4437 format!(
4438 "path={}",
4439 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
4440 )
4441 .as_str(),
4442 format!(
4443 "path={}",
4444 guest.disk_config.disk(DiskType::CloudInit).unwrap()
4445 )
4446 .as_str(),
4447 format!("path={}", vfio_disk_path.to_str().unwrap()).as_str(),
4448 format!("path={},iommu=on,readonly=true", blk_file_path.to_str().unwrap()).as_str(),
4449 ])
4450 .args([
4451 "--cmdline",
4452 format!(
4453 "{DIRECT_KERNEL_BOOT_CMDLINE} kvm-intel.nested=1 vfio_iommu_type1.allow_unsafe_interrupts"
4454 )
4455 .as_str(),
4456 ])
4457 .args([
4458 "--net",
4459 format!("tap={},mac={}", vfio_tap0, guest.network.guest_mac).as_str(),
4460 format!(
4461 "tap={},mac={},iommu=on",
4462 vfio_tap1, guest.network.l2_guest_mac1
4463 )
4464 .as_str(),
4465 format!(
4466 "tap={},mac={},iommu=on",
4467 vfio_tap2, guest.network.l2_guest_mac2
4468 )
4469 .as_str(),
4470 format!(
4471 "tap={},mac={},iommu=on",
4472 vfio_tap3, guest.network.l2_guest_mac3
4473 )
4474 .as_str(),
4475 ])
4476 .capture_output()
4477 .spawn()
4478 .unwrap();
4479
4480 thread::sleep(std::time::Duration::new(30, 0));
4481
4482 let r = std::panic::catch_unwind(|| {
4483 guest.ssh_command_l1("sudo systemctl start vfio").unwrap();
4484 thread::sleep(std::time::Duration::new(120, 0));
4485
4486         // We booted our cloud-hypervisor L2 guest with a "VFIOTAG" tag
4487         // added to its kernel command line.
4488         // Let's ssh into it and verify that the tag is there. If it is, we
4489         // are in the right guest (the L2 one), since the L1 guest's command
4490         // line does not carry this tag.
4491 assert!(check_matched_lines_count(
4492 guest.ssh_command_l2_1("cat /proc/cmdline").unwrap().trim(),
4493 vec!["VFIOTAG"],
4494 1
4495 ));
4496
4497 // Let's also verify from the second virtio-net device passed to
4498 // the L2 VM.
4499 assert!(check_matched_lines_count(
4500 guest.ssh_command_l2_2("cat /proc/cmdline").unwrap().trim(),
4501 vec!["VFIOTAG"],
4502 1
4503 ));
4504
4505         // Check the number of PCI devices appearing in the L2 VM.
4506 assert!(check_lines_count(
4507 guest
4508 .ssh_command_l2_1("ls /sys/bus/pci/devices")
4509 .unwrap()
4510 .trim(),
4511 8
4512 ));
4513
4514         // Check both that /dev/vdc exists and that the block size is 16M in the L2 VM
4515 assert!(check_matched_lines_count(
4516 guest.ssh_command_l2_1("lsblk").unwrap().trim(),
4517 vec!["vdc", "16M"],
4518 1
4519 ));
4520
4521         // Hotplug an extra virtio-net device into the L2 VM.
4522 guest
4523 .ssh_command_l1(
4524 "echo 0000:00:09.0 | sudo tee /sys/bus/pci/devices/0000:00:09.0/driver/unbind",
4525 )
4526 .unwrap();
4527 guest
4528 .ssh_command_l1("echo 0000:00:09.0 | sudo tee /sys/bus/pci/drivers/vfio-pci/bind")
4529 .unwrap();
4530 let vfio_hotplug_output = guest
4531 .ssh_command_l1(
4532 "sudo /mnt/ch-remote \
4533 --api-socket=/tmp/ch_api.sock \
4534 add-device path=/sys/bus/pci/devices/0000:00:09.0,id=vfio123",
4535 )
4536 .unwrap();
4537 assert!(check_matched_lines_count(
4538 vfio_hotplug_output.trim(),
4539 vec!["{\"id\":\"vfio123\",\"bdf\":\"0000:00:08.0\"}"],
4540 1
4541 ));
4542
4543 thread::sleep(std::time::Duration::new(10, 0));
4544
4545         // Let's also verify from the third virtio-net device passed to
4546         // the L2 VM. This third device has been hotplugged into the L2
4547         // VM, so this is our way to validate hotplug works for VFIO PCI.
4548 assert!(check_matched_lines_count(
4549 guest.ssh_command_l2_3("cat /proc/cmdline").unwrap().trim(),
4550 vec!["VFIOTAG"],
4551 1
4552 ));
4553
4554         // Check the number of PCI devices appearing in the L2 VM.
4555         // There should be one more device than before, raising the count
4556         // up to 9 PCI devices.
4557 assert!(check_lines_count(
4558 guest
4559 .ssh_command_l2_1("ls /sys/bus/pci/devices")
4560 .unwrap()
4561 .trim(),
4562 9
4563 ));
4564
4565 // Let's now verify that we can correctly remove the virtio-net
4566 // device through the "remove-device" command responsible for
4567 // unplugging VFIO devices.
4568 guest
4569 .ssh_command_l1(
4570 "sudo /mnt/ch-remote \
4571 --api-socket=/tmp/ch_api.sock \
4572 remove-device vfio123",
4573 )
4574 .unwrap();
4575 thread::sleep(std::time::Duration::new(10, 0));
4576
4577         // Check that the number of PCI devices appearing in the L2 VM is
4578         // back down to 8 devices.
4579 assert!(check_lines_count(
4580 guest
4581 .ssh_command_l2_1("ls /sys/bus/pci/devices")
4582 .unwrap()
4583 .trim(),
4584 8
4585 ));
4586
4587         // Perform memory hotplug in L2 and validate the memory shows up
4588         // as expected. To check this, we use the virtio-net device already
4589         // passed through to L2 as a VFIO device; this verifies that VFIO
4590         // devices remain functional with memory hotplug.
4591 assert!(guest.get_total_memory_l2().unwrap_or_default() > 480_000);
4592 guest
4593 .ssh_command_l2_1(
4594 "sudo bash -c 'echo online > /sys/devices/system/memory/auto_online_blocks'",
4595 )
4596 .unwrap();
4597 guest
4598 .ssh_command_l1(
4599 "sudo /mnt/ch-remote \
4600 --api-socket=/tmp/ch_api.sock \
4601 resize --memory=1073741824",
4602 )
4603 .unwrap();
4604 assert!(guest.get_total_memory_l2().unwrap_or_default() > 960_000);
4605 });
4606
4607 kill_child(&mut child);
4608 let output = child.wait_with_output().unwrap();
4609
4610 cleanup_vfio_network_interfaces();
4611
4612 handle_child_output(r, &output);
4613 }
4614
4615 #[test]
4616 fn test_direct_kernel_boot_noacpi() {
4617 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4618 let guest = Guest::new(Box::new(focal));
4619
4620 let kernel_path = direct_kernel_boot_path();
4621
4622 let mut child = GuestCommand::new(&guest)
4623 .args(["--cpus", "boot=1"])
4624 .args(["--memory", "size=512M"])
4625 .args(["--kernel", kernel_path.to_str().unwrap()])
4626 .args([
4627 "--cmdline",
4628 format!("{DIRECT_KERNEL_BOOT_CMDLINE} acpi=off").as_str(),
4629 ])
4630 .default_disks()
4631 .default_net()
4632 .capture_output()
4633 .spawn()
4634 .unwrap();
4635
4636 let r = std::panic::catch_unwind(|| {
4637 guest.wait_vm_boot(None).unwrap();
4638
4639 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
4640 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
4641 });
4642
4643 kill_child(&mut child);
4644 let output = child.wait_with_output().unwrap();
4645
4646 handle_child_output(r, &output);
4647 }
4648
4649 #[test]
4650 fn test_virtio_vsock() {
4651 _test_virtio_vsock(false)
4652 }
4653
4654 #[test]
4655 fn test_virtio_vsock_hotplug() {
4656 _test_virtio_vsock(true);
4657 }
4658
4659 #[test]
4660 fn test_api_http_shutdown() {
4661 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4662 let guest = Guest::new(Box::new(focal));
4663
4664 _test_api_shutdown(TargetApi::new_http_api(&guest.tmp_dir), guest)
4665 }
4666
4667 #[test]
4668 fn test_api_http_delete() {
4669 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4670 let guest = Guest::new(Box::new(focal));
4671
4672 _test_api_delete(TargetApi::new_http_api(&guest.tmp_dir), guest);
4673 }
4674
4675 #[test]
4676 fn test_api_http_pause_resume() {
4677 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4678 let guest = Guest::new(Box::new(focal));
4679
4680 _test_api_pause_resume(TargetApi::new_http_api(&guest.tmp_dir), guest)
4681 }
4682
4683 #[test]
4684 fn test_api_http_create_boot() {
4685 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4686 let guest = Guest::new(Box::new(focal));
4687
4688 _test_api_create_boot(TargetApi::new_http_api(&guest.tmp_dir), guest)
4689 }
4690
4691 #[test]
4692 fn test_virtio_iommu() {
4693 _test_virtio_iommu(cfg!(target_arch = "x86_64"))
4694 }
4695
4696 #[test]
4697 // We cannot force the software running in the guest to reprogram the BAR
4698 // with some different addresses, but we have a reliable way of testing it
4699 // with a standard Linux kernel.
4700 // By removing a device from the PCI tree, and then rescanning the tree,
4701 // Linux consistently chooses to reorganize the PCI device BARs to other
4702 // locations in the guest address space.
4703 // This test creates a dedicated PCI network device, checks that it is
4704 // properly probed, removes it, and then adds it back by rescanning the
4705 // PCI bus.
4706 fn test_pci_bar_reprogramming() {
4707 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4708 let guest = Guest::new(Box::new(focal));
4709
4710 #[cfg(target_arch = "x86_64")]
4711 let kernel_path = direct_kernel_boot_path();
4712 #[cfg(target_arch = "aarch64")]
4713 let kernel_path = edk2_path();
4714
4715 let mut child = GuestCommand::new(&guest)
4716 .args(["--cpus", "boot=1"])
4717 .args(["--memory", "size=512M"])
4718 .args(["--kernel", kernel_path.to_str().unwrap()])
4719 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
4720 .default_disks()
4721 .args([
4722 "--net",
4723 guest.default_net_string().as_str(),
4724 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
4725 ])
4726 .capture_output()
4727 .spawn()
4728 .unwrap();
4729
4730 let r = std::panic::catch_unwind(|| {
4731 guest.wait_vm_boot(None).unwrap();
4732
4733 // 2 network interfaces + default localhost ==> 3 interfaces
4734 assert_eq!(
4735 guest
4736 .ssh_command("ip -o link | wc -l")
4737 .unwrap()
4738 .trim()
4739 .parse::<u32>()
4740 .unwrap_or_default(),
4741 3
4742 );
4743
4744 let init_bar_addr = guest
4745 .ssh_command(
4746 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
4747 )
4748 .unwrap();
4749
4750 // Remove the PCI device
4751 guest
4752 .ssh_command("echo 1 | sudo tee /sys/bus/pci/devices/0000:00:05.0/remove")
4753 .unwrap();
4754
4755 // Only 1 network interface left + default localhost ==> 2 interfaces
4756 assert_eq!(
4757 guest
4758 .ssh_command("ip -o link | wc -l")
4759 .unwrap()
4760 .trim()
4761 .parse::<u32>()
4762 .unwrap_or_default(),
4763 2
4764 );
4765
4766         // Rescan the PCI bus
4767 guest
4768 .ssh_command("echo 1 | sudo tee /sys/bus/pci/rescan")
4769 .unwrap();
4770
4771         // Back to 2 network interfaces + default localhost ==> 3 interfaces
4772 assert_eq!(
4773 guest
4774 .ssh_command("ip -o link | wc -l")
4775 .unwrap()
4776 .trim()
4777 .parse::<u32>()
4778 .unwrap_or_default(),
4779 3
4780 );
4781
4782 let new_bar_addr = guest
4783 .ssh_command(
4784 "sudo awk '{print $1; exit}' /sys/bus/pci/devices/0000:00:05.0/resource",
4785 )
4786 .unwrap();
4787
4788 // Let's compare the BAR addresses for our virtio-net device.
4789 // They should be different as we expect the BAR reprogramming
4790 // to have happened.
4791 assert_ne!(init_bar_addr, new_bar_addr);
4792 });
4793
4794 kill_child(&mut child);
4795 let output = child.wait_with_output().unwrap();
4796
4797 handle_child_output(r, &output);
4798 }
4799
4800 #[test]
4801 fn test_memory_mergeable_off() {
4802 test_memory_mergeable(false)
4803 }
4804
4805 #[test]
4806 #[cfg(target_arch = "x86_64")]
4807 fn test_cpu_hotplug() {
4808 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4809 let guest = Guest::new(Box::new(focal));
4810 let api_socket = temp_api_path(&guest.tmp_dir);
4811
4812 let kernel_path = direct_kernel_boot_path();
4813
4814 let mut child = GuestCommand::new(&guest)
4815 .args(["--cpus", "boot=2,max=4"])
4816 .args(["--memory", "size=512M"])
4817 .args(["--kernel", kernel_path.to_str().unwrap()])
4818 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
4819 .default_disks()
4820 .default_net()
4821 .args(["--api-socket", &api_socket])
4822 .capture_output()
4823 .spawn()
4824 .unwrap();
4825
4826 let r = std::panic::catch_unwind(|| {
4827 guest.wait_vm_boot(None).unwrap();
4828
4829 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2);
4830
4831 // Resize the VM
4832 let desired_vcpus = 4;
4833 resize_command(&api_socket, Some(desired_vcpus), None, None, None);
4834
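        // The hotplugged vCPUs must still be onlined from inside the guest
        // before they show up in the reported CPU count.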
4835 guest
4836 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online")
4837 .unwrap();
4838 guest
4839 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online")
4840 .unwrap();
4841 thread::sleep(std::time::Duration::new(10, 0));
4842 assert_eq!(
4843 guest.get_cpu_count().unwrap_or_default(),
4844 u32::from(desired_vcpus)
4845 );
4846
4847 guest.reboot_linux(0, None);
4848
4849 assert_eq!(
4850 guest.get_cpu_count().unwrap_or_default(),
4851 u32::from(desired_vcpus)
4852 );
4853
4854 // Resize the VM
4855 let desired_vcpus = 2;
4856 resize_command(&api_socket, Some(desired_vcpus), None, None, None);
4857
4858 thread::sleep(std::time::Duration::new(10, 0));
4859 assert_eq!(
4860 guest.get_cpu_count().unwrap_or_default(),
4861 u32::from(desired_vcpus)
4862 );
4863
4864 // Resize the VM back up to 4
4865 let desired_vcpus = 4;
4866 resize_command(&api_socket, Some(desired_vcpus), None, None, None);
4867
4868 guest
4869 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online")
4870 .unwrap();
4871 guest
4872 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online")
4873 .unwrap();
4874 thread::sleep(std::time::Duration::new(10, 0));
4875 assert_eq!(
4876 guest.get_cpu_count().unwrap_or_default(),
4877 u32::from(desired_vcpus)
4878 );
4879 });
4880
4881 kill_child(&mut child);
4882 let output = child.wait_with_output().unwrap();
4883
4884 handle_child_output(r, &output);
4885 }
4886
4887 #[test]
4888 fn test_memory_hotplug() {
4889 #[cfg(target_arch = "aarch64")]
4890 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string();
4891 #[cfg(target_arch = "x86_64")]
4892 let focal_image = FOCAL_IMAGE_NAME.to_string();
4893 let focal = UbuntuDiskConfig::new(focal_image);
4894 let guest = Guest::new(Box::new(focal));
4895 let api_socket = temp_api_path(&guest.tmp_dir);
4896
4897 #[cfg(target_arch = "aarch64")]
4898 let kernel_path = edk2_path();
4899 #[cfg(target_arch = "x86_64")]
4900 let kernel_path = direct_kernel_boot_path();
4901
4902 let mut child = GuestCommand::new(&guest)
4903 .args(["--cpus", "boot=2,max=4"])
4904 .args(["--memory", "size=512M,hotplug_size=8192M"])
4905 .args(["--kernel", kernel_path.to_str().unwrap()])
4906 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
4907 .default_disks()
4908 .default_net()
4909 .args(["--balloon", "size=0"])
4910 .args(["--api-socket", &api_socket])
4911 .capture_output()
4912 .spawn()
4913 .unwrap();
4914
4915 let r = std::panic::catch_unwind(|| {
4916 guest.wait_vm_boot(None).unwrap();
4917
4918 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
4919
4920 guest.enable_memory_hotplug();
4921
4922 // Add RAM to the VM
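        // The resize target is expressed in bytes: 1024 << 20 is 1 GiB.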
4923 let desired_ram = 1024 << 20;
4924 resize_command(&api_socket, None, Some(desired_ram), None, None);
4925
4926 thread::sleep(std::time::Duration::new(10, 0));
4927 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
4928
4929 // Use balloon to remove RAM from the VM
4930 let desired_balloon = 512 << 20;
4931 resize_command(&api_socket, None, None, Some(desired_balloon), None);
4932
4933 thread::sleep(std::time::Duration::new(10, 0));
4934 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
4935 assert!(guest.get_total_memory().unwrap_or_default() < 960_000);
4936
4937 guest.reboot_linux(0, None);
4938
4939 assert!(guest.get_total_memory().unwrap_or_default() < 960_000);
4940
4941         // Use balloon to add RAM back to the VM
4942 let desired_balloon = 0;
4943 resize_command(&api_socket, None, None, Some(desired_balloon), None);
4944
4945 thread::sleep(std::time::Duration::new(10, 0));
4946
4947 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
4948
4949 guest.enable_memory_hotplug();
4950
4951 // Add RAM to the VM
4952 let desired_ram = 2048 << 20;
4953 resize_command(&api_socket, None, Some(desired_ram), None, None);
4954
4955 thread::sleep(std::time::Duration::new(10, 0));
4956 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000);
4957
4958         // Remove RAM from the VM (only applies after reboot)
4959 let desired_ram = 1024 << 20;
4960 resize_command(&api_socket, None, Some(desired_ram), None, None);
4961
4962 guest.reboot_linux(1, None);
4963
4964 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
4965 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000);
4966 });
4967
4968 kill_child(&mut child);
4969 let output = child.wait_with_output().unwrap();
4970
4971 handle_child_output(r, &output);
4972 }
4973
4974 #[test]
4975 #[cfg(not(feature = "mshv"))]
4976 fn test_virtio_mem() {
4977 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
4978 let guest = Guest::new(Box::new(focal));
4979 let api_socket = temp_api_path(&guest.tmp_dir);
4980
4981 let kernel_path = direct_kernel_boot_path();
4982
4983 let mut child = GuestCommand::new(&guest)
4984 .args(["--cpus", "boot=2,max=4"])
4985 .args([
4986 "--memory",
4987 "size=512M,hotplug_method=virtio-mem,hotplug_size=8192M",
4988 ])
4989 .args(["--kernel", kernel_path.to_str().unwrap()])
4990 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
4991 .default_disks()
4992 .default_net()
4993 .args(["--api-socket", &api_socket])
4994 .capture_output()
4995 .spawn()
4996 .unwrap();
4997
4998 let r = std::panic::catch_unwind(|| {
4999 guest.wait_vm_boot(None).unwrap();
5000
5001 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
5002
5003 guest.enable_memory_hotplug();
5004
5005 // Add RAM to the VM
5006 let desired_ram = 1024 << 20;
5007 resize_command(&api_socket, None, Some(desired_ram), None, None);
5008
5009 thread::sleep(std::time::Duration::new(10, 0));
5010 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
5011
5012 // Add RAM to the VM
5013 let desired_ram = 2048 << 20;
5014 resize_command(&api_socket, None, Some(desired_ram), None, None);
5015
5016 thread::sleep(std::time::Duration::new(10, 0));
5017 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000);
5018
5019 // Remove RAM from the VM
5020 let desired_ram = 1024 << 20;
5021 resize_command(&api_socket, None, Some(desired_ram), None, None);
5022
5023 thread::sleep(std::time::Duration::new(10, 0));
5024 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
5025 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000);
5026
5027 guest.reboot_linux(0, None);
5028
5029 // Check the amount of memory after reboot is 1GiB
5030 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
5031 assert!(guest.get_total_memory().unwrap_or_default() < 1_920_000);
5032
5033 // Check we can still resize to 512MiB
5034 let desired_ram = 512 << 20;
5035 resize_command(&api_socket, None, Some(desired_ram), None, None);
5036 thread::sleep(std::time::Duration::new(10, 0));
5037 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
5038 assert!(guest.get_total_memory().unwrap_or_default() < 960_000);
5039 });
5040
5041 kill_child(&mut child);
5042 let output = child.wait_with_output().unwrap();
5043
5044 handle_child_output(r, &output);
5045 }
5046
5047 #[test]
5048 #[cfg(target_arch = "x86_64")]
5049 #[cfg(not(feature = "mshv"))]
5050 // Test both vCPU and memory resizing together
5051 fn test_resize() {
5052 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
5053 let guest = Guest::new(Box::new(focal));
5054 let api_socket = temp_api_path(&guest.tmp_dir);
5055
5056 let kernel_path = direct_kernel_boot_path();
5057
5058 let mut child = GuestCommand::new(&guest)
5059 .args(["--cpus", "boot=2,max=4"])
5060 .args(["--memory", "size=512M,hotplug_size=8192M"])
5061 .args(["--kernel", kernel_path.to_str().unwrap()])
5062 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5063 .default_disks()
5064 .default_net()
5065 .args(["--api-socket", &api_socket])
5066 .capture_output()
5067 .spawn()
5068 .unwrap();
5069
5070 let r = std::panic::catch_unwind(|| {
5071 guest.wait_vm_boot(None).unwrap();
5072
5073 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2);
5074 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
5075
5076 guest.enable_memory_hotplug();
5077
5078 // Resize the VM
5079 let desired_vcpus = 4;
5080 let desired_ram = 1024 << 20;
5081 resize_command(
5082 &api_socket,
5083 Some(desired_vcpus),
5084 Some(desired_ram),
5085 None,
5086 None,
5087 );
5088
5089 guest
5090 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu2/online")
5091 .unwrap();
5092 guest
5093 .ssh_command("echo 1 | sudo tee /sys/bus/cpu/devices/cpu3/online")
5094 .unwrap();
5095 thread::sleep(std::time::Duration::new(10, 0));
5096 assert_eq!(
5097 guest.get_cpu_count().unwrap_or_default(),
5098 u32::from(desired_vcpus)
5099 );
5100
5101 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
5102 });
5103
5104 kill_child(&mut child);
5105 let output = child.wait_with_output().unwrap();
5106
5107 handle_child_output(r, &output);
5108 }
5109
5110 #[test]
5111 fn test_memory_overhead() {
5112 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
5113 let guest = Guest::new(Box::new(focal));
5114
5115 let kernel_path = direct_kernel_boot_path();
5116
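    // The guest boots with exactly 512 MiB of RAM; the additional host memory
    // consumed by the VMM process (as reported by get_vmm_overhead) must stay
    // below MAXIMUM_VMM_OVERHEAD_KB.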
5117 let guest_memory_size_kb = 512 * 1024;
5118
5119 let mut child = GuestCommand::new(&guest)
5120 .args(["--cpus", "boot=1"])
5121 .args(["--memory", format!("size={guest_memory_size_kb}K").as_str()])
5122 .args(["--kernel", kernel_path.to_str().unwrap()])
5123 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5124 .default_net()
5125 .default_disks()
5126 .capture_output()
5127 .spawn()
5128 .unwrap();
5129
5130 guest.wait_vm_boot(None).unwrap();
5131
5132 let r = std::panic::catch_unwind(|| {
5133 let overhead = get_vmm_overhead(child.id(), guest_memory_size_kb);
5134 eprintln!("Guest memory overhead: {overhead} vs {MAXIMUM_VMM_OVERHEAD_KB}");
5135 assert!(overhead <= MAXIMUM_VMM_OVERHEAD_KB);
5136 });
5137
5138 kill_child(&mut child);
5139 let output = child.wait_with_output().unwrap();
5140
5141 handle_child_output(r, &output);
5142 }
5143
5144 #[test]
5145 #[cfg(target_arch = "x86_64")]
5146 // This test runs a guest with Landlock enabled and hotplugs a new disk. As
5147 // the path for the hotplug disk is not pre-added to the Landlock rules, the
5148 // hotplug request is expected to fail.
5149 fn test_landlock() {
5150 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
5151 let guest = Guest::new(Box::new(focal));
5152
5153 #[cfg(target_arch = "x86_64")]
5154 let kernel_path = direct_kernel_boot_path();
5155 #[cfg(target_arch = "aarch64")]
5156 let kernel_path = edk2_path();
5157
5158 let api_socket = temp_api_path(&guest.tmp_dir);
5159
5160 let mut child = GuestCommand::new(&guest)
5161 .args(["--api-socket", &api_socket])
5162 .args(["--cpus", "boot=1"])
5163 .args(["--memory", "size=512M"])
5164 .args(["--kernel", kernel_path.to_str().unwrap()])
5165 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5166 .args(["--landlock"])
5167 .default_disks()
5168 .default_net()
5169 .capture_output()
5170 .spawn()
5171 .unwrap();
5172
5173 let r = std::panic::catch_unwind(|| {
5174 guest.wait_vm_boot(None).unwrap();
5175
5176 // Check /dev/vdc is not there
5177 assert_eq!(
5178 guest
5179 .ssh_command("lsblk | grep -c vdc.*16M || true")
5180 .unwrap()
5181 .trim()
5182 .parse::<u32>()
5183 .unwrap_or(1),
5184 0
5185 );
5186
5187 // Now let's add the extra disk.
5188 let mut blk_file_path = dirs::home_dir().unwrap();
5189 blk_file_path.push("workloads");
5190 blk_file_path.push("blk.img");
5191 // As the path to the hotplug disk is not pre-added, this remote
5192 // command will fail.
5193 assert!(!remote_command(
5194 &api_socket,
5195 "add-disk",
5196 Some(
5197 format!(
5198 "path={},id=test0,readonly=true",
5199 blk_file_path.to_str().unwrap()
5200 )
5201 .as_str()
5202 ),
5203 ));
5204 });
5205
5206 let _ = child.kill();
5207 let output = child.wait_with_output().unwrap();
5208
5209 handle_child_output(r, &output);
5210 }
5211
5212 fn _test_disk_hotplug(landlock_enabled: bool) {
5213 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
5214 let guest = Guest::new(Box::new(focal));
5215
5216 #[cfg(target_arch = "x86_64")]
5217 let kernel_path = direct_kernel_boot_path();
5218 #[cfg(target_arch = "aarch64")]
5219 let kernel_path = edk2_path();
5220
5221 let api_socket = temp_api_path(&guest.tmp_dir);
5222
5223 let mut blk_file_path = dirs::home_dir().unwrap();
5224 blk_file_path.push("workloads");
5225 blk_file_path.push("blk.img");
5226
5227 let mut cmd = GuestCommand::new(&guest);
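    // When Landlock is enabled, the hotplug disk path must be pre-declared via
    // --landlock-rules, otherwise the add-disk request below would be rejected
    // (see test_landlock above).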
5228 if landlock_enabled {
5229 cmd.args(["--landlock"]).args([
5230 "--landlock-rules",
5231 format!("path={blk_file_path:?},access=rw").as_str(),
5232 ]);
5233 }
5234
5235 cmd.args(["--api-socket", &api_socket])
5236 .args(["--cpus", "boot=1"])
5237 .args(["--memory", "size=512M"])
5238 .args(["--kernel", kernel_path.to_str().unwrap()])
5239 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5240 .default_disks()
5241 .default_net()
5242 .capture_output();
5243
5244 let mut child = cmd.spawn().unwrap();
5245
5246 let r = std::panic::catch_unwind(|| {
5247 guest.wait_vm_boot(None).unwrap();
5248
5249 // Check /dev/vdc is not there
5250 assert_eq!(
5251 guest
5252 .ssh_command("lsblk | grep -c vdc.*16M || true")
5253 .unwrap()
5254 .trim()
5255 .parse::<u32>()
5256 .unwrap_or(1),
5257 0
5258 );
5259
5260 // Now let's add the extra disk.
5261 let (cmd_success, cmd_output) = remote_command_w_output(
5262 &api_socket,
5263 "add-disk",
5264 Some(
5265 format!(
5266 "path={},id=test0,readonly=true",
5267 blk_file_path.to_str().unwrap()
5268 )
5269 .as_str(),
5270 ),
5271 );
5272 assert!(cmd_success);
5273 assert!(String::from_utf8_lossy(&cmd_output)
5274 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}"));
5275
5276 thread::sleep(std::time::Duration::new(10, 0));
5277
5278 // Check that /dev/vdc exists and the block size is 16M.
5279 assert_eq!(
5280 guest
5281 .ssh_command("lsblk | grep vdc | grep -c 16M")
5282 .unwrap()
5283 .trim()
5284 .parse::<u32>()
5285 .unwrap_or_default(),
5286 1
5287 );
5288 // And check the block device can be read.
5289 guest
5290 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16")
5291 .unwrap();
5292
5293         // Let's remove the extra disk.
5294 assert!(remote_command(&api_socket, "remove-device", Some("test0")));
5295 thread::sleep(std::time::Duration::new(5, 0));
5296 // And check /dev/vdc is not there
5297 assert_eq!(
5298 guest
5299 .ssh_command("lsblk | grep -c vdc.*16M || true")
5300 .unwrap()
5301 .trim()
5302 .parse::<u32>()
5303 .unwrap_or(1),
5304 0
5305 );
5306
5307 // And add it back to validate unplug did work correctly.
5308 let (cmd_success, cmd_output) = remote_command_w_output(
5309 &api_socket,
5310 "add-disk",
5311 Some(
5312 format!(
5313 "path={},id=test0,readonly=true",
5314 blk_file_path.to_str().unwrap()
5315 )
5316 .as_str(),
5317 ),
5318 );
5319 assert!(cmd_success);
5320 assert!(String::from_utf8_lossy(&cmd_output)
5321 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}"));
5322
5323 thread::sleep(std::time::Duration::new(10, 0));
5324
5325 // Check that /dev/vdc exists and the block size is 16M.
5326 assert_eq!(
5327 guest
5328 .ssh_command("lsblk | grep vdc | grep -c 16M")
5329 .unwrap()
5330 .trim()
5331 .parse::<u32>()
5332 .unwrap_or_default(),
5333 1
5334 );
5335 // And check the block device can be read.
5336 guest
5337 .ssh_command("sudo dd if=/dev/vdc of=/dev/null bs=1M iflag=direct count=16")
5338 .unwrap();
5339
5340 // Reboot the VM.
5341 guest.reboot_linux(0, None);
5342
5343 // Check still there after reboot
5344 assert_eq!(
5345 guest
5346 .ssh_command("lsblk | grep vdc | grep -c 16M")
5347 .unwrap()
5348 .trim()
5349 .parse::<u32>()
5350 .unwrap_or_default(),
5351 1
5352 );
5353
5354 assert!(remote_command(&api_socket, "remove-device", Some("test0")));
5355
5356 thread::sleep(std::time::Duration::new(20, 0));
5357
5358 // Check device has gone away
5359 assert_eq!(
5360 guest
5361 .ssh_command("lsblk | grep -c vdc.*16M || true")
5362 .unwrap()
5363 .trim()
5364 .parse::<u32>()
5365 .unwrap_or(1),
5366 0
5367 );
5368
5369 guest.reboot_linux(1, None);
5370
5371 // Check device still absent
5372 assert_eq!(
5373 guest
5374 .ssh_command("lsblk | grep -c vdc.*16M || true")
5375 .unwrap()
5376 .trim()
5377 .parse::<u32>()
5378 .unwrap_or(1),
5379 0
5380 );
5381 });
5382
5383 kill_child(&mut child);
5384 let output = child.wait_with_output().unwrap();
5385
5386 handle_child_output(r, &output);
5387 }
5388
5389 #[test]
5390 fn test_disk_hotplug() {
5391 _test_disk_hotplug(false)
5392 }
5393
5394 #[test]
5395 #[cfg(target_arch = "x86_64")]
5396 fn test_disk_hotplug_with_landlock() {
5397 _test_disk_hotplug(true)
5398 }
5399
5400 fn create_loop_device(backing_file_path: &str, block_size: u32, num_retries: usize) -> String {
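    // The constants below mirror the Linux loop driver UAPI (<linux/loop.h>):
    // LOOP_CONFIGURE and LOOP_CTL_GET_FREE are ioctl request numbers, and
    // /dev/loop-control is the control node that hands out free /dev/loopN
    // devices.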
5401 const LOOP_CONFIGURE: u64 = 0x4c0a;
5402 const LOOP_CTL_GET_FREE: u64 = 0x4c82;
5403 const LOOP_CTL_PATH: &str = "/dev/loop-control";
5404 const LOOP_DEVICE_PREFIX: &str = "/dev/loop";
5405
5406 #[repr(C)]
5407 struct LoopInfo64 {
5408 lo_device: u64,
5409 lo_inode: u64,
5410 lo_rdevice: u64,
5411 lo_offset: u64,
5412 lo_sizelimit: u64,
5413 lo_number: u32,
5414 lo_encrypt_type: u32,
5415 lo_encrypt_key_size: u32,
5416 lo_flags: u32,
5417 lo_file_name: [u8; 64],
5418 lo_crypt_name: [u8; 64],
5419 lo_encrypt_key: [u8; 32],
5420 lo_init: [u64; 2],
5421 }
5422
5423 impl Default for LoopInfo64 {
5424 fn default() -> Self {
5425 LoopInfo64 {
5426 lo_device: 0,
5427 lo_inode: 0,
5428 lo_rdevice: 0,
5429 lo_offset: 0,
5430 lo_sizelimit: 0,
5431 lo_number: 0,
5432 lo_encrypt_type: 0,
5433 lo_encrypt_key_size: 0,
5434 lo_flags: 0,
5435 lo_file_name: [0; 64],
5436 lo_crypt_name: [0; 64],
5437 lo_encrypt_key: [0; 32],
5438 lo_init: [0; 2],
5439 }
5440 }
5441 }
5442
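        // This layout is meant to mirror the kernel's `struct loop_config`
        // (linux/loop.h), which is what the LOOP_CONFIGURE ioctl expects.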
5443 #[derive(Default)]
5444 #[repr(C)]
5445 struct LoopConfig {
5446 fd: u32,
5447 block_size: u32,
5448 info: LoopInfo64,
5449 _reserved: [u64; 8],
5450 }
5451
5452 // Open loop-control device
5453 let loop_ctl_file = OpenOptions::new()
5454 .read(true)
5455 .write(true)
5456 .open(LOOP_CTL_PATH)
5457 .unwrap();
5458
5459 // Request a free loop device
5460 let loop_device_number =
5461 unsafe { libc::ioctl(loop_ctl_file.as_raw_fd(), LOOP_CTL_GET_FREE as _) };
5462
5463 if loop_device_number < 0 {
5464 panic!("Couldn't find a free loop device");
5465 }
5466
5467 // Create loop device path
5468 let loop_device_path = format!("{LOOP_DEVICE_PREFIX}{loop_device_number}");
5469
5470 // Open loop device
5471 let loop_device_file = OpenOptions::new()
5472 .read(true)
5473 .write(true)
5474 .open(&loop_device_path)
5475 .unwrap();
5476
5477 // Open backing file
5478 let backing_file = OpenOptions::new()
5479 .read(true)
5480 .write(true)
5481 .open(backing_file_path)
5482 .unwrap();
5483
5484 let loop_config = LoopConfig {
5485 fd: backing_file.as_raw_fd() as u32,
5486 block_size,
5487 ..Default::default()
5488 };
5489
5490 for i in 0..num_retries {
5491 let ret = unsafe {
5492 libc::ioctl(
5493 loop_device_file.as_raw_fd(),
5494 LOOP_CONFIGURE as _,
5495 &loop_config,
5496 )
5497 };
5498 if ret != 0 {
5499 if i < num_retries - 1 {
5500 println!(
5501 "Iteration {}: Failed to configure the loop device {}: {}",
5502 i,
5503 loop_device_path,
5504 std::io::Error::last_os_error()
5505 );
5506 } else {
5507 panic!(
5508 "Failed {} times trying to configure the loop device {}: {}",
5509 num_retries,
5510 loop_device_path,
5511 std::io::Error::last_os_error()
5512 );
5513 }
5514 } else {
5515 break;
5516 }
5517
5518 // Wait for a bit before retrying
5519 thread::sleep(std::time::Duration::new(5, 0));
5520 }
5521
5522 loop_device_path
5523 }
5524
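    // Create a 16M raw disk, attach it to a loop device with a 4096-byte block
    // size, and check that the guest sees the same topology (the MIN-IO,
    // PHY-SEC and LOG-SEC columns reported by `lsblk -t`) through virtio-block.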
5525 #[test]
5526     fn test_virtio_block_topology() {
5527 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
5528 let guest = Guest::new(Box::new(focal));
5529
5530 let kernel_path = direct_kernel_boot_path();
5531 let test_disk_path = guest.tmp_dir.as_path().join("test.img");
5532
5533 let output = exec_host_command_output(
5534 format!(
5535 "qemu-img create -f raw {} 16M",
5536 test_disk_path.to_str().unwrap()
5537 )
5538 .as_str(),
5539 );
5540 if !output.status.success() {
5541 let stdout = String::from_utf8_lossy(&output.stdout);
5542 let stderr = String::from_utf8_lossy(&output.stderr);
5543 panic!("qemu-img command failed\nstdout\n{stdout}\nstderr\n{stderr}");
5544 }
5545
5546 let loop_dev = create_loop_device(test_disk_path.to_str().unwrap(), 4096, 5);
5547
5548 let mut child = GuestCommand::new(&guest)
5549 .args(["--cpus", "boot=1"])
5550 .args(["--memory", "size=512M"])
5551 .args(["--kernel", kernel_path.to_str().unwrap()])
5552 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5553 .args([
5554 "--disk",
5555 format!(
5556 "path={}",
5557 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
5558 )
5559 .as_str(),
5560 format!(
5561 "path={}",
5562 guest.disk_config.disk(DiskType::CloudInit).unwrap()
5563 )
5564 .as_str(),
5565 format!("path={}", &loop_dev).as_str(),
5566 ])
5567 .default_net()
5568 .capture_output()
5569 .spawn()
5570 .unwrap();
5571
5572 let r = std::panic::catch_unwind(|| {
5573 guest.wait_vm_boot(None).unwrap();
5574
5575 // MIN-IO column
5576 assert_eq!(
5577 guest
5578 .ssh_command("lsblk -t| grep vdc | awk '{print $3}'")
5579 .unwrap()
5580 .trim()
5581 .parse::<u32>()
5582 .unwrap_or_default(),
5583 4096
5584 );
5585 // PHY-SEC column
5586 assert_eq!(
5587 guest
5588 .ssh_command("lsblk -t| grep vdc | awk '{print $5}'")
5589 .unwrap()
5590 .trim()
5591 .parse::<u32>()
5592 .unwrap_or_default(),
5593 4096
5594 );
5595 // LOG-SEC column
5596 assert_eq!(
5597 guest
5598 .ssh_command("lsblk -t| grep vdc | awk '{print $6}'")
5599 .unwrap()
5600 .trim()
5601 .parse::<u32>()
5602 .unwrap_or_default(),
5603 4096
5604 );
5605 });
5606
5607 kill_child(&mut child);
5608 let output = child.wait_with_output().unwrap();
5609
5610 handle_child_output(r, &output);
5611
5612 Command::new("losetup")
5613 .args(["-d", &loop_dev])
5614 .output()
5615 .expect("loop device not found");
5616 }
5617
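    // With `deflate_on_oom=on`, the guest balloon driver may give pages back to
    // the guest under memory pressure, so triggering an OOM via sysrq should
    // shrink the balloon size reported by the VMM.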
5618 #[test]
5619     fn test_virtio_balloon_deflate_on_oom() {
5620 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
5621 let guest = Guest::new(Box::new(focal));
5622
5623 let kernel_path = direct_kernel_boot_path();
5624
5625 let api_socket = temp_api_path(&guest.tmp_dir);
5626
5627         // Start a 4G guest with a balloon device occupying 2G of memory
5628 let mut child = GuestCommand::new(&guest)
5629 .args(["--api-socket", &api_socket])
5630 .args(["--cpus", "boot=1"])
5631 .args(["--memory", "size=4G"])
5632 .args(["--kernel", kernel_path.to_str().unwrap()])
5633 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5634 .args(["--balloon", "size=2G,deflate_on_oom=on"])
5635 .default_disks()
5636 .default_net()
5637 .capture_output()
5638 .spawn()
5639 .unwrap();
5640
5641 let r = std::panic::catch_unwind(|| {
5642 guest.wait_vm_boot(None).unwrap();
5643
5644 // Wait for balloon memory's initialization and check its size.
5645 // The virtio-balloon driver might take a few seconds to report the
5646 // balloon effective size back to the VMM.
5647 thread::sleep(std::time::Duration::new(20, 0));
5648
5649 let orig_balloon = balloon_size(&api_socket);
5650 println!("The original balloon memory size is {orig_balloon} bytes");
5651             assert_eq!(orig_balloon, 2147483648);
5652
5653 // Two steps to verify if the 'deflate_on_oom' parameter works.
5654 // 1st: run a command to trigger an OOM in the guest.
5655 guest
5656 .ssh_command("echo f | sudo tee /proc/sysrq-trigger")
5657 .unwrap();
5658
5659 // Give some time for the OOM to happen in the guest and be reported
5660 // back to the host.
5661 thread::sleep(std::time::Duration::new(20, 0));
5662
5663             // 2nd: check the balloon size to verify the balloon has been automatically deflated
5664 let deflated_balloon = balloon_size(&api_socket);
5665 println!("After deflating, balloon memory size is {deflated_balloon} bytes");
5666 // Verify the balloon size deflated
5667 assert!(deflated_balloon < 2147483648);
5668 });
5669
5670 kill_child(&mut child);
5671 let output = child.wait_with_output().unwrap();
5672
5673 handle_child_output(r, &output);
5674 }
5675
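    // With `free_page_reporting=on`, the guest reports pages it has freed so
    // the VMM can release the corresponding host memory. The test checks this
    // indirectly by watching the VMM's RSS before, during and after a `stress`
    // run inside the guest.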
5676 #[test]
5677 #[cfg(not(feature = "mshv"))]
5678     fn test_virtio_balloon_free_page_reporting() {
5679 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
5680 let guest = Guest::new(Box::new(focal));
5681
5682         // Start a 4G guest with a balloon device configured for free page reporting
5683 let mut child = GuestCommand::new(&guest)
5684 .args(["--cpus", "boot=1"])
5685 .args(["--memory", "size=4G"])
5686 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
5687 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5688 .args(["--balloon", "size=0,free_page_reporting=on"])
5689 .default_disks()
5690 .default_net()
5691 .capture_output()
5692 .spawn()
5693 .unwrap();
5694
5695 let pid = child.id();
5696 let r = std::panic::catch_unwind(|| {
5697 guest.wait_vm_boot(None).unwrap();
5698
5699 // Check the initial RSS is less than 1GiB
5700 let rss = process_rss_kib(pid);
5701 println!("RSS {rss} < 1048576");
5702 assert!(rss < 1048576);
5703
5704 // Spawn a command inside the guest to consume 2GiB of RAM for 60
5705 // seconds
5706 let guest_ip = guest.network.guest_ip.clone();
5707 thread::spawn(move || {
5708 ssh_command_ip(
5709 "stress --vm 1 --vm-bytes 2G --vm-keep --timeout 60",
5710 &guest_ip,
5711 DEFAULT_SSH_RETRIES,
5712 DEFAULT_SSH_TIMEOUT,
5713 )
5714 .unwrap();
5715 });
5716
5717 // Wait for 50 seconds to make sure the stress command is consuming
5718 // the expected amount of memory.
5719 thread::sleep(std::time::Duration::new(50, 0));
5720 let rss = process_rss_kib(pid);
5721 println!("RSS {rss} >= 2097152");
5722 assert!(rss >= 2097152);
5723
5724 // Wait for an extra minute to make sure the stress command has
5725 // completed and that the guest reported the free pages to the VMM
5726 // through the virtio-balloon device. We expect the RSS to be under
5727 // 2GiB.
5728 thread::sleep(std::time::Duration::new(60, 0));
5729 let rss = process_rss_kib(pid);
5730 println!("RSS {rss} < 2097152");
5731 assert!(rss < 2097152);
5732 });
5733
5734 kill_child(&mut child);
5735 let output = child.wait_with_output().unwrap();
5736
5737 handle_child_output(r, &output);
5738 }
5739
5740 #[test]
5741     fn test_pmem_hotplug() {
5742 _test_pmem_hotplug(None)
5743 }
5744
5745 #[test]
5746     fn test_pmem_multi_segment_hotplug() {
5747 _test_pmem_hotplug(Some(15))
5748 }
5749
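    // Hotplug a virtio-pmem device backed by a 128M temporary file (optionally
    // on a non-default PCI segment), check it shows up as /dev/pmem0 and
    // survives a reboot, then unplug it and verify it stays gone.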
5750     fn _test_pmem_hotplug(pci_segment: Option<u16>) {
5751 #[cfg(target_arch = "aarch64")]
5752 let focal_image = FOCAL_IMAGE_UPDATE_KERNEL_NAME.to_string();
5753 #[cfg(target_arch = "x86_64")]
5754 let focal_image = FOCAL_IMAGE_NAME.to_string();
5755 let focal = UbuntuDiskConfig::new(focal_image);
5756 let guest = Guest::new(Box::new(focal));
5757
5758 #[cfg(target_arch = "x86_64")]
5759 let kernel_path = direct_kernel_boot_path();
5760 #[cfg(target_arch = "aarch64")]
5761 let kernel_path = edk2_path();
5762
5763 let api_socket = temp_api_path(&guest.tmp_dir);
5764
5765 let mut cmd = GuestCommand::new(&guest);
5766
5767 cmd.args(["--api-socket", &api_socket])
5768 .args(["--cpus", "boot=1"])
5769 .args(["--memory", "size=512M"])
5770 .args(["--kernel", kernel_path.to_str().unwrap()])
5771 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5772 .default_disks()
5773 .default_net()
5774 .capture_output();
5775
5776 if pci_segment.is_some() {
5777 cmd.args([
5778 "--platform",
5779 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
5780 ]);
5781 }
5782
5783 let mut child = cmd.spawn().unwrap();
5784
5785 let r = std::panic::catch_unwind(|| {
5786 guest.wait_vm_boot(None).unwrap();
5787
5788 // Check /dev/pmem0 is not there
5789 assert_eq!(
5790 guest
5791 .ssh_command("lsblk | grep -c pmem0 || true")
5792 .unwrap()
5793 .trim()
5794 .parse::<u32>()
5795 .unwrap_or(1),
5796 0
5797 );
5798
5799 let pmem_temp_file = TempFile::new().unwrap();
5800 pmem_temp_file.as_file().set_len(128 << 20).unwrap();
5801 let (cmd_success, cmd_output) = remote_command_w_output(
5802 &api_socket,
5803 "add-pmem",
5804 Some(&format!(
5805 "file={},id=test0{}",
5806 pmem_temp_file.as_path().to_str().unwrap(),
5807 if let Some(pci_segment) = pci_segment {
5808 format!(",pci_segment={pci_segment}")
5809 } else {
5810 "".to_owned()
5811 }
5812 )),
5813 );
5814 assert!(cmd_success);
5815 if let Some(pci_segment) = pci_segment {
5816 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
5817 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
5818 )));
5819 } else {
5820 assert!(String::from_utf8_lossy(&cmd_output)
5821 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:06.0\"}"));
5822 }
5823
5824             // Check that /dev/pmem0 exists and that the device size is 128M
5825 assert_eq!(
5826 guest
5827 .ssh_command("lsblk | grep pmem0 | grep -c 128M")
5828 .unwrap()
5829 .trim()
5830 .parse::<u32>()
5831 .unwrap_or_default(),
5832 1
5833 );
5834
5835 guest.reboot_linux(0, None);
5836
5837 // Check still there after reboot
5838 assert_eq!(
5839 guest
5840 .ssh_command("lsblk | grep pmem0 | grep -c 128M")
5841 .unwrap()
5842 .trim()
5843 .parse::<u32>()
5844 .unwrap_or_default(),
5845 1
5846 );
5847
5848 assert!(remote_command(&api_socket, "remove-device", Some("test0")));
5849
5850 thread::sleep(std::time::Duration::new(20, 0));
5851
5852 // Check device has gone away
5853 assert_eq!(
5854 guest
5855 .ssh_command("lsblk | grep -c pmem0.*128M || true")
5856 .unwrap()
5857 .trim()
5858 .parse::<u32>()
5859 .unwrap_or(1),
5860 0
5861 );
5862
5863 guest.reboot_linux(1, None);
5864
5865 // Check still absent after reboot
5866 assert_eq!(
5867 guest
5868 .ssh_command("lsblk | grep -c pmem0.*128M || true")
5869 .unwrap()
5870 .trim()
5871 .parse::<u32>()
5872 .unwrap_or(1),
5873 0
5874 );
5875 });
5876
5877 kill_child(&mut child);
5878 let output = child.wait_with_output().unwrap();
5879
5880 handle_child_output(r, &output);
5881 }
5882
5883 #[test]
5884     fn test_net_hotplug() {
5885 _test_net_hotplug(None)
5886 }
5887
5888 #[test]
5889     fn test_net_multi_segment_hotplug() {
5890 _test_net_hotplug(Some(15))
5891 }
5892
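    // Boot without any network device, then hotplug a virtio-net device
    // (optionally on a non-default PCI segment), remove it, add another one,
    // and verify the guest interface count over SSH, including after a reboot.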
5893     fn _test_net_hotplug(pci_segment: Option<u16>) {
5894 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
5895 let guest = Guest::new(Box::new(focal));
5896
5897 #[cfg(target_arch = "x86_64")]
5898 let kernel_path = direct_kernel_boot_path();
5899 #[cfg(target_arch = "aarch64")]
5900 let kernel_path = edk2_path();
5901
5902 let api_socket = temp_api_path(&guest.tmp_dir);
5903
5904 // Boot without network
5905 let mut cmd = GuestCommand::new(&guest);
5906
5907 cmd.args(["--api-socket", &api_socket])
5908 .args(["--cpus", "boot=1"])
5909 .args(["--memory", "size=512M"])
5910 .args(["--kernel", kernel_path.to_str().unwrap()])
5911 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
5912 .default_disks()
5913 .capture_output();
5914
5915 if pci_segment.is_some() {
5916 cmd.args([
5917 "--platform",
5918 &format!("num_pci_segments={MAX_NUM_PCI_SEGMENTS}"),
5919 ]);
5920 }
5921
5922 let mut child = cmd.spawn().unwrap();
5923
5924 thread::sleep(std::time::Duration::new(20, 0));
5925
5926 let r = std::panic::catch_unwind(|| {
5927 // Add network
5928 let (cmd_success, cmd_output) = remote_command_w_output(
5929 &api_socket,
5930 "add-net",
5931 Some(
5932 format!(
5933 "{}{},id=test0",
5934 guest.default_net_string(),
5935 if let Some(pci_segment) = pci_segment {
5936 format!(",pci_segment={pci_segment}")
5937 } else {
5938 "".to_owned()
5939 }
5940 )
5941 .as_str(),
5942 ),
5943 );
5944 assert!(cmd_success);
5945
5946 if let Some(pci_segment) = pci_segment {
5947 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
5948 "{{\"id\":\"test0\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
5949 )));
5950 } else {
5951 assert!(String::from_utf8_lossy(&cmd_output)
5952 .contains("{\"id\":\"test0\",\"bdf\":\"0000:00:05.0\"}"));
5953 }
5954
5955 thread::sleep(std::time::Duration::new(5, 0));
5956
5957             // 1 hotplugged network interface + the loopback interface ==> 2 interfaces
5958 assert_eq!(
5959 guest
5960 .ssh_command("ip -o link | wc -l")
5961 .unwrap()
5962 .trim()
5963 .parse::<u32>()
5964 .unwrap_or_default(),
5965 2
5966 );
5967
5968 // Remove network
5969 assert!(remote_command(&api_socket, "remove-device", Some("test0"),));
5970 thread::sleep(std::time::Duration::new(5, 0));
5971
5972 let (cmd_success, cmd_output) = remote_command_w_output(
5973 &api_socket,
5974 "add-net",
5975 Some(
5976 format!(
5977 "{}{},id=test1",
5978 guest.default_net_string(),
5979 if let Some(pci_segment) = pci_segment {
5980 format!(",pci_segment={pci_segment}")
5981 } else {
5982 "".to_owned()
5983 }
5984 )
5985 .as_str(),
5986 ),
5987 );
5988 assert!(cmd_success);
5989
5990 if let Some(pci_segment) = pci_segment {
5991 assert!(String::from_utf8_lossy(&cmd_output).contains(&format!(
5992 "{{\"id\":\"test1\",\"bdf\":\"{pci_segment:04x}:00:01.0\"}}"
5993 )));
5994 } else {
5995 assert!(String::from_utf8_lossy(&cmd_output)
5996 .contains("{\"id\":\"test1\",\"bdf\":\"0000:00:05.0\"}"));
5997 }
5998
5999 thread::sleep(std::time::Duration::new(5, 0));
6000
6001             // 1 hotplugged network interface + the loopback interface ==> 2 interfaces
6002 assert_eq!(
6003 guest
6004 .ssh_command("ip -o link | wc -l")
6005 .unwrap()
6006 .trim()
6007 .parse::<u32>()
6008 .unwrap_or_default(),
6009 2
6010 );
6011
6012 guest.reboot_linux(0, None);
6013
6014 // Check still there after reboot
6015             // 1 hotplugged network interface + the loopback interface ==> 2 interfaces
6016 assert_eq!(
6017 guest
6018 .ssh_command("ip -o link | wc -l")
6019 .unwrap()
6020 .trim()
6021 .parse::<u32>()
6022 .unwrap_or_default(),
6023 2
6024 );
6025 });
6026
6027 kill_child(&mut child);
6028 let output = child.wait_with_output().unwrap();
6029
6030 handle_child_output(r, &output);
6031 }
6032
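    // Boot each kernel with an Alpine initramfs and a TEST_STRING on the
    // command line, then check the string shows up on the console output,
    // presumably echoed back by the initramfs init script.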
6033 #[test]
6034     fn test_initramfs() {
6035 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6036 let guest = Guest::new(Box::new(focal));
6037 let mut workload_path = dirs::home_dir().unwrap();
6038 workload_path.push("workloads");
6039
6040 #[cfg(target_arch = "x86_64")]
6041 let mut kernels = vec![direct_kernel_boot_path()];
6042 #[cfg(target_arch = "aarch64")]
6043 let kernels = [direct_kernel_boot_path()];
6044
6045 #[cfg(target_arch = "x86_64")]
6046 {
6047 let mut pvh_kernel_path = workload_path.clone();
6048 pvh_kernel_path.push("vmlinux-x86_64");
6049 kernels.push(pvh_kernel_path);
6050 }
6051
6052 let mut initramfs_path = workload_path;
6053 initramfs_path.push("alpine_initramfs.img");
6054
6055 let test_string = String::from("axz34i9rylotd8n50wbv6kcj7f2qushme1pg");
6056 let cmdline = format!("console=hvc0 quiet TEST_STRING={test_string}");
6057
6058 kernels.iter().for_each(|k_path| {
6059 let mut child = GuestCommand::new(&guest)
6060 .args(["--kernel", k_path.to_str().unwrap()])
6061 .args(["--initramfs", initramfs_path.to_str().unwrap()])
6062 .args(["--cmdline", &cmdline])
6063 .capture_output()
6064 .spawn()
6065 .unwrap();
6066
6067 thread::sleep(std::time::Duration::new(20, 0));
6068
6069 kill_child(&mut child);
6070 let output = child.wait_with_output().unwrap();
6071
6072 let r = std::panic::catch_unwind(|| {
6073 let s = String::from_utf8_lossy(&output.stdout);
6074
6075                 assert!(s.lines().any(|line| line == test_string));
6076 });
6077
6078 handle_child_output(r, &output);
6079 });
6080 }
6081
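    // The `counters` API exposes device statistics (e.g. virtio block/net
    // counters). Generating some disk I/O over SSH, which also exercises the
    // network device, is expected to make the aggregated counters increase.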
6082 #[test]
6083     fn test_counters() {
6084 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6085 let guest = Guest::new(Box::new(focal));
6086 let api_socket = temp_api_path(&guest.tmp_dir);
6087
6088 let mut cmd = GuestCommand::new(&guest);
6089 cmd.args(["--cpus", "boot=1"])
6090 .args(["--memory", "size=512M"])
6091 .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
6092 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
6093 .default_disks()
6094 .args(["--net", guest.default_net_string().as_str()])
6095 .args(["--api-socket", &api_socket])
6096 .capture_output();
6097
6098 let mut child = cmd.spawn().unwrap();
6099
6100 let r = std::panic::catch_unwind(|| {
6101 guest.wait_vm_boot(None).unwrap();
6102
6103 let orig_counters = get_counters(&api_socket);
6104 guest
6105 .ssh_command("dd if=/dev/zero of=test count=8 bs=1M")
6106 .unwrap();
6107
6108 let new_counters = get_counters(&api_socket);
6109
6110 // Check that all the counters have increased
6111 assert!(new_counters > orig_counters);
6112 });
6113
6114 kill_child(&mut child);
6115 let output = child.wait_with_output().unwrap();
6116
6117 handle_child_output(r, &output);
6118 }
6119
6120 #[test]
6121 #[cfg(feature = "guest_debug")]
6122     fn test_coredump() {
6123 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6124 let guest = Guest::new(Box::new(focal));
6125 let api_socket = temp_api_path(&guest.tmp_dir);
6126
6127 let mut cmd = GuestCommand::new(&guest);
6128 cmd.args(["--cpus", "boot=4"])
6129 .args(["--memory", "size=4G"])
6130 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
6131 .default_disks()
6132 .args(["--net", guest.default_net_string().as_str()])
6133 .args(["--api-socket", &api_socket])
6134 .capture_output();
6135
6136 let mut child = cmd.spawn().unwrap();
6137 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir);
6138
6139 let r = std::panic::catch_unwind(|| {
6140 guest.wait_vm_boot(None).unwrap();
6141
6142 assert!(remote_command(&api_socket, "pause", None));
6143
6144 assert!(remote_command(
6145 &api_socket,
6146 "coredump",
6147 Some(format!("file://{vmcore_file}").as_str()),
6148 ));
6149
6150             // The number of CORE notes should equal the number of vCPUs
6151 let readelf_core_num_cmd =
6152 format!("readelf --all {vmcore_file} |grep CORE |grep -v Type |wc -l");
6153 let core_num_in_elf = exec_host_command_output(&readelf_core_num_cmd);
6154 assert_eq!(String::from_utf8_lossy(&core_num_in_elf.stdout).trim(), "4");
6155
6156             // The number of QEMU notes should equal the number of vCPUs
6157 let readelf_vmm_num_cmd = format!("readelf --all {vmcore_file} |grep QEMU |wc -l");
6158 let vmm_num_in_elf = exec_host_command_output(&readelf_vmm_num_cmd);
6159 assert_eq!(String::from_utf8_lossy(&vmm_num_in_elf.stdout).trim(), "4");
6160 });
6161
6162 kill_child(&mut child);
6163 let output = child.wait_with_output().unwrap();
6164
6165 handle_child_output(r, &output);
6166 }
6167
6168 #[test]
6169 #[cfg(feature = "guest_debug")]
6170     fn test_coredump_no_pause() {
6171 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6172 let guest = Guest::new(Box::new(focal));
6173 let api_socket = temp_api_path(&guest.tmp_dir);
6174
6175 let mut cmd = GuestCommand::new(&guest);
6176 cmd.args(["--cpus", "boot=4"])
6177 .args(["--memory", "size=4G"])
6178 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
6179 .default_disks()
6180 .args(["--net", guest.default_net_string().as_str()])
6181 .args(["--api-socket", &api_socket])
6182 .capture_output();
6183
6184 let mut child = cmd.spawn().unwrap();
6185 let vmcore_file = temp_vmcore_file_path(&guest.tmp_dir);
6186
6187 let r = std::panic::catch_unwind(|| {
6188 guest.wait_vm_boot(None).unwrap();
6189
6190 assert!(remote_command(
6191 &api_socket,
6192 "coredump",
6193 Some(format!("file://{vmcore_file}").as_str()),
6194 ));
6195
6196 assert_eq!(vm_state(&api_socket), "Running");
6197 });
6198
6199 kill_child(&mut child);
6200 let output = child.wait_with_output().unwrap();
6201
6202 handle_child_output(r, &output);
6203 }
6204
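    // Once the watchdog is armed in the guest, crashing the kernel via sysrq
    // stops it from being refreshed, so the VMM is expected to reset the VM.
    // Pausing the VM, on the other hand, must not cause a spurious reboot.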
6205 #[test]
6206     fn test_watchdog() {
6207 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6208 let guest = Guest::new(Box::new(focal));
6209 let api_socket = temp_api_path(&guest.tmp_dir);
6210
6211 let kernel_path = direct_kernel_boot_path();
6212 let event_path = temp_event_monitor_path(&guest.tmp_dir);
6213
6214 let mut cmd = GuestCommand::new(&guest);
6215 cmd.args(["--cpus", "boot=1"])
6216 .args(["--memory", "size=512M"])
6217 .args(["--kernel", kernel_path.to_str().unwrap()])
6218 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
6219 .default_disks()
6220 .args(["--net", guest.default_net_string().as_str()])
6221 .args(["--watchdog"])
6222 .args(["--api-socket", &api_socket])
6223 .args(["--event-monitor", format!("path={event_path}").as_str()])
6224 .capture_output();
6225
6226 let mut child = cmd.spawn().unwrap();
6227
6228 let r = std::panic::catch_unwind(|| {
6229 guest.wait_vm_boot(None).unwrap();
6230
6231 let mut expected_reboot_count = 1;
6232
6233 // Enable the watchdog with a 15s timeout
6234 enable_guest_watchdog(&guest, 15);
6235
6236 assert_eq!(get_reboot_count(&guest), expected_reboot_count);
6237 assert_eq!(
6238 guest
6239 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"")
6240 .unwrap()
6241 .trim()
6242 .parse::<u32>()
6243 .unwrap_or_default(),
6244 1
6245 );
6246
6247 // Allow some normal time to elapse to check we don't get spurious reboots
6248 thread::sleep(std::time::Duration::new(40, 0));
6249 // Check no reboot
6250 assert_eq!(get_reboot_count(&guest), expected_reboot_count);
6251
6252 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns.
6253 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap();
6254 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen
6255 guest.wait_vm_boot(Some(50)).unwrap();
6256 // Check a reboot is triggered by the watchdog
6257 expected_reboot_count += 1;
6258 assert_eq!(get_reboot_count(&guest), expected_reboot_count);
6259
6260 #[cfg(target_arch = "x86_64")]
6261 {
6262                 // Now pause the VM and make sure the watchdog does not fire while it is paused
6263 assert!(remote_command(&api_socket, "pause", None));
6264 let latest_events = [
6265 &MetaEvent {
6266 event: "pausing".to_string(),
6267 device_id: None,
6268 },
6269 &MetaEvent {
6270 event: "paused".to_string(),
6271 device_id: None,
6272 },
6273 ];
6274 assert!(check_latest_events_exact(&latest_events, &event_path));
6275 assert!(remote_command(&api_socket, "resume", None));
6276
6277 // Check no reboot
6278 assert_eq!(get_reboot_count(&guest), expected_reboot_count);
6279 }
6280 });
6281
6282 kill_child(&mut child);
6283 let output = child.wait_with_output().unwrap();
6284
6285 handle_child_output(r, &output);
6286 }
6287
6288 #[test]
6289     fn test_pvpanic() {
6290 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string());
6291 let guest = Guest::new(Box::new(jammy));
6292 let api_socket = temp_api_path(&guest.tmp_dir);
6293 let event_path = temp_event_monitor_path(&guest.tmp_dir);
6294
6295 let kernel_path = direct_kernel_boot_path();
6296
6297 let mut cmd = GuestCommand::new(&guest);
6298 cmd.args(["--cpus", "boot=1"])
6299 .args(["--memory", "size=512M"])
6300 .args(["--kernel", kernel_path.to_str().unwrap()])
6301 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
6302 .default_disks()
6303 .args(["--net", guest.default_net_string().as_str()])
6304 .args(["--pvpanic"])
6305 .args(["--api-socket", &api_socket])
6306 .args(["--event-monitor", format!("path={event_path}").as_str()])
6307 .capture_output();
6308
6309 let mut child = cmd.spawn().unwrap();
6310
6311 let r = std::panic::catch_unwind(|| {
6312 guest.wait_vm_boot(None).unwrap();
6313
6314             // Trigger a panic in the guest
6315 make_guest_panic(&guest);
6316
6317             // Give the guest some time to panic and report the event
6318 thread::sleep(std::time::Duration::new(10, 0));
6319
6320 let expected_sequential_events = [&MetaEvent {
6321 event: "panic".to_string(),
6322 device_id: None,
6323 }];
6324 assert!(check_latest_events_exact(
6325 &expected_sequential_events,
6326 &event_path
6327 ));
6328 });
6329
6330 kill_child(&mut child);
6331 let output = child.wait_with_output().unwrap();
6332
6333 handle_child_output(r, &output);
6334 }
6335
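    // Instead of letting the VMM create the TAP interface, open a multi-queue
    // TAP device on the host and pass its file descriptors with `fd=[...]`,
    // one fd per queue pair.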
6336 #[test]
6337     fn test_tap_from_fd() {
6338 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6339 let guest = Guest::new(Box::new(focal));
6340 let kernel_path = direct_kernel_boot_path();
6341
6342 // Create a TAP interface with multi-queue enabled
6343 let num_queue_pairs: usize = 2;
6344
6345 use std::str::FromStr;
6346 let taps = net_util::open_tap(
6347 Some("chtap0"),
6348 Some(std::net::IpAddr::V4(
6349 std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap(),
6350 )),
6351 None,
6352 &mut None,
6353 None,
6354 num_queue_pairs,
6355 Some(libc::O_RDWR | libc::O_NONBLOCK),
6356 )
6357 .unwrap();
6358
6359 let mut child = GuestCommand::new(&guest)
6360 .args(["--cpus", &format!("boot={num_queue_pairs}")])
6361 .args(["--memory", "size=512M"])
6362 .args(["--kernel", kernel_path.to_str().unwrap()])
6363 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
6364 .default_disks()
6365 .args([
6366 "--net",
6367 &format!(
6368 "fd=[{},{}],mac={},num_queues={}",
6369 taps[0].as_raw_fd(),
6370 taps[1].as_raw_fd(),
6371 guest.network.guest_mac,
6372 num_queue_pairs * 2
6373 ),
6374 ])
6375 .capture_output()
6376 .spawn()
6377 .unwrap();
6378
6379 let r = std::panic::catch_unwind(|| {
6380 guest.wait_vm_boot(None).unwrap();
6381
6382 assert_eq!(
6383 guest
6384 .ssh_command("ip -o link | wc -l")
6385 .unwrap()
6386 .trim()
6387 .parse::<u32>()
6388 .unwrap_or_default(),
6389 2
6390 );
6391
6392 guest.reboot_linux(0, None);
6393
6394 assert_eq!(
6395 guest
6396 .ssh_command("ip -o link | wc -l")
6397 .unwrap()
6398 .trim()
6399 .parse::<u32>()
6400 .unwrap_or_default(),
6401 2
6402 );
6403 });
6404
6405 kill_child(&mut child);
6406 let output = child.wait_with_output().unwrap();
6407
6408 handle_child_output(r, &output);
6409 }
6410
6411 // By design, a guest VM won't be able to connect to the host
6412 // machine when using a macvtap network interface (while it can
6413 // communicate externally). As a workaround, this integration
6414 // test creates two macvtap interfaces in 'bridge' mode on the
6415 // same physical net interface, one for the guest and one for
6416 // the host. With additional setup on the IP address and the
6417     // routing table, it enables communication between the
6418 // guest VM and the host machine.
6419 // Details: https://wiki.libvirt.org/page/TroubleshootMacvtapHostFail
6420     fn _test_macvtap(hotplug: bool, guest_macvtap_name: &str, host_macvtap_name: &str) {
6421 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6422 let guest = Guest::new(Box::new(focal));
6423 let api_socket = temp_api_path(&guest.tmp_dir);
6424
6425 #[cfg(target_arch = "x86_64")]
6426 let kernel_path = direct_kernel_boot_path();
6427 #[cfg(target_arch = "aarch64")]
6428 let kernel_path = edk2_path();
6429
6430 let phy_net = "eth0";
6431
6432 // Create a macvtap interface for the guest VM to use
6433 assert!(exec_host_command_status(&format!(
6434             "sudo ip link add link {phy_net} name {guest_macvtap_name} type macvtap mode bridge"
6435 ))
6436 .success());
6437 assert!(exec_host_command_status(&format!(
6438 "sudo ip link set {} address {} up",
6439 guest_macvtap_name, guest.network.guest_mac
6440 ))
6441 .success());
6442 assert!(
6443 exec_host_command_status(&format!("sudo ip link show {guest_macvtap_name}")).success()
6444 );
6445
6446 let tap_index =
6447 fs::read_to_string(format!("/sys/class/net/{guest_macvtap_name}/ifindex")).unwrap();
6448 let tap_device = format!("/dev/tap{}", tap_index.trim());
6449
6450 assert!(exec_host_command_status(&format!("sudo chown $UID.$UID {tap_device}")).success());
6451
6452 let cstr_tap_device = std::ffi::CString::new(tap_device).unwrap();
6453 let tap_fd1 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) };
6454 assert!(tap_fd1 > 0);
6455 let tap_fd2 = unsafe { libc::open(cstr_tap_device.as_ptr(), libc::O_RDWR) };
6456 assert!(tap_fd2 > 0);
6457
6458 // Create a macvtap on the same physical net interface for
6459 // the host machine to use
6460 assert!(exec_host_command_status(&format!(
6461             "sudo ip link add link {phy_net} name {host_macvtap_name} type macvtap mode bridge"
6462 ))
6463 .success());
6464 // Use default mask "255.255.255.0"
6465 assert!(exec_host_command_status(&format!(
6466 "sudo ip address add {}/24 dev {}",
6467 guest.network.host_ip, host_macvtap_name
6468 ))
6469 .success());
6470 assert!(
6471 exec_host_command_status(&format!("sudo ip link set dev {host_macvtap_name} up"))
6472 .success()
6473 );
6474
6475 let mut guest_command = GuestCommand::new(&guest);
6476 guest_command
6477 .args(["--cpus", "boot=2"])
6478 .args(["--memory", "size=512M"])
6479 .args(["--kernel", kernel_path.to_str().unwrap()])
6480 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
6481 .default_disks()
6482 .args(["--api-socket", &api_socket]);
6483
6484 let net_params = format!(
6485 "fd=[{},{}],mac={},num_queues=4",
6486 tap_fd1, tap_fd2, guest.network.guest_mac
6487 );
6488
6489 if !hotplug {
6490 guest_command.args(["--net", &net_params]);
6491 }
6492
6493 let mut child = guest_command.capture_output().spawn().unwrap();
6494
6495 if hotplug {
6496 // Give some time to the VMM process to listen to the API
6497             // socket. This is needed to prevent the following ch-remote
6498             // call from failing.
6499 thread::sleep(std::time::Duration::new(10, 0));
6500 // Hotplug the virtio-net device
6501 let (cmd_success, cmd_output) =
6502 remote_command_w_output(&api_socket, "add-net", Some(&net_params));
6503 assert!(cmd_success);
6504 #[cfg(target_arch = "x86_64")]
6505 assert!(String::from_utf8_lossy(&cmd_output)
6506 .contains("{\"id\":\"_net2\",\"bdf\":\"0000:00:05.0\"}"));
6507 #[cfg(target_arch = "aarch64")]
6508 assert!(String::from_utf8_lossy(&cmd_output)
6509 .contains("{\"id\":\"_net0\",\"bdf\":\"0000:00:05.0\"}"));
6510 }
6511
6512 // The functional connectivity provided by the virtio-net device
6513         // gets tested through wait_vm_boot() as it expects to receive an
6514         // HTTP request, and through the SSH command as well.
6515 let r = std::panic::catch_unwind(|| {
6516 guest.wait_vm_boot(None).unwrap();
6517
6518 assert_eq!(
6519 guest
6520 .ssh_command("ip -o link | wc -l")
6521 .unwrap()
6522 .trim()
6523 .parse::<u32>()
6524 .unwrap_or_default(),
6525 2
6526 );
6527
6528 guest.reboot_linux(0, None);
6529
6530 assert_eq!(
6531 guest
6532 .ssh_command("ip -o link | wc -l")
6533 .unwrap()
6534 .trim()
6535 .parse::<u32>()
6536 .unwrap_or_default(),
6537 2
6538 );
6539 });
6540
6541 kill_child(&mut child);
6542
6543 exec_host_command_status(&format!("sudo ip link del {guest_macvtap_name}"));
6544 exec_host_command_status(&format!("sudo ip link del {host_macvtap_name}"));
6545
6546 let output = child.wait_with_output().unwrap();
6547
6548 handle_child_output(r, &output);
6549 }
6550
6551 #[test]
6552 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")]
6553     fn test_macvtap() {
6554 _test_macvtap(false, "guestmacvtap0", "hostmacvtap0")
6555 }
6556
6557 #[test]
6558 #[cfg_attr(target_arch = "aarch64", ignore = "See #5443")]
6559     fn test_macvtap_hotplug() {
6560 _test_macvtap(true, "guestmacvtap1", "hostmacvtap1")
6561 }
6562
6563 #[test]
6564 #[cfg(not(feature = "mshv"))]
6565     fn test_ovs_dpdk() {
6566 let focal1 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6567 let guest1 = Guest::new(Box::new(focal1));
6568
6569 let focal2 = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6570 let guest2 = Guest::new(Box::new(focal2));
6571 let api_socket_source = format!("{}.1", temp_api_path(&guest2.tmp_dir));
6572
6573 let (mut child1, mut child2) =
6574 setup_ovs_dpdk_guests(&guest1, &guest2, &api_socket_source, false);
6575
6576 // Create the snapshot directory
6577 let snapshot_dir = temp_snapshot_dir_path(&guest2.tmp_dir);
6578
6579 let r = std::panic::catch_unwind(|| {
6580 // Remove one of the two ports from the OVS bridge
6581 assert!(exec_host_command_status("ovs-vsctl del-port vhost-user1").success());
6582
6583 // Spawn a new netcat listener in the first VM
6584 let guest_ip = guest1.network.guest_ip.clone();
6585 thread::spawn(move || {
6586 ssh_command_ip(
6587 "nc -l 12345",
6588 &guest_ip,
6589 DEFAULT_SSH_RETRIES,
6590 DEFAULT_SSH_TIMEOUT,
6591 )
6592 .unwrap();
6593 });
6594
6595 // Wait for the server to be listening
6596 thread::sleep(std::time::Duration::new(5, 0));
6597
6598 // Check the connection fails this time
6599 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap_err();
6600
6601 // Add the OVS port back
6602 assert!(exec_host_command_status("ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuserclient options:vhost-server-path=/tmp/dpdkvhostclient1").success());
6603
6604 // And finally check the connection is functional again
6605 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap();
6606
6607 // Pause the VM
6608 assert!(remote_command(&api_socket_source, "pause", None));
6609
6610 // Take a snapshot from the VM
6611 assert!(remote_command(
6612 &api_socket_source,
6613 "snapshot",
6614 Some(format!("file://{snapshot_dir}").as_str()),
6615 ));
6616
6617 // Wait to make sure the snapshot is completed
6618 thread::sleep(std::time::Duration::new(10, 0));
6619 });
6620
6621 // Shutdown the source VM
6622 kill_child(&mut child2);
6623 let output = child2.wait_with_output().unwrap();
6624 handle_child_output(r, &output);
6625
6626 // Remove the vhost-user socket file.
6627 Command::new("rm")
6628 .arg("-f")
6629 .arg("/tmp/dpdkvhostclient2")
6630 .output()
6631 .unwrap();
6632
6633 let api_socket_restored = format!("{}.2", temp_api_path(&guest2.tmp_dir));
6634 // Restore the VM from the snapshot
6635 let mut child2 = GuestCommand::new(&guest2)
6636 .args(["--api-socket", &api_socket_restored])
6637 .args([
6638 "--restore",
6639 format!("source_url=file://{snapshot_dir}").as_str(),
6640 ])
6641 .capture_output()
6642 .spawn()
6643 .unwrap();
6644
6645 // Wait for the VM to be restored
6646 thread::sleep(std::time::Duration::new(10, 0));
6647
6648 let r = std::panic::catch_unwind(|| {
6649 // Resume the VM
6650 assert!(remote_command(&api_socket_restored, "resume", None));
6651
6652 // Spawn a new netcat listener in the first VM
6653 let guest_ip = guest1.network.guest_ip.clone();
6654 thread::spawn(move || {
6655 ssh_command_ip(
6656 "nc -l 12345",
6657 &guest_ip,
6658 DEFAULT_SSH_RETRIES,
6659 DEFAULT_SSH_TIMEOUT,
6660 )
6661 .unwrap();
6662 });
6663
6664 // Wait for the server to be listening
6665 thread::sleep(std::time::Duration::new(5, 0));
6666
6667 // And check the connection is still functional after restore
6668 guest2.ssh_command("nc -vz 172.100.0.1 12345").unwrap();
6669 });
6670
6671 kill_child(&mut child1);
6672 kill_child(&mut child2);
6673
6674 let output = child1.wait_with_output().unwrap();
6675 child2.wait().unwrap();
6676
6677 cleanup_ovs_dpdk();
6678
6679 handle_child_output(r, &output);
6680 }
6681
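    // Expose a 128M ext4-formatted raw file as an NVMe controller over the
    // vfio-user protocol using SPDK's nvmf_tgt, so the VM can attach it through
    // `add-user-device`. The rpc.py invocations below assume an SPDK build
    // installed under /usr/local/bin/spdk-nvme.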
6682     fn setup_spdk_nvme(nvme_dir: &std::path::Path) -> Child {
6683 cleanup_spdk_nvme();
6684
6685 assert!(exec_host_command_status(&format!(
6686 "mkdir -p {}",
6687 nvme_dir.join("nvme-vfio-user").to_str().unwrap()
6688 ))
6689 .success());
6690 assert!(exec_host_command_status(&format!(
6691 "truncate {} -s 128M",
6692 nvme_dir.join("test-disk.raw").to_str().unwrap()
6693 ))
6694 .success());
6695 assert!(exec_host_command_status(&format!(
6696 "mkfs.ext4 {}",
6697 nvme_dir.join("test-disk.raw").to_str().unwrap()
6698 ))
6699 .success());
6700
6701 // Start the SPDK nvmf_tgt daemon to present NVMe device as a VFIO user device
6702 let child = Command::new("/usr/local/bin/spdk-nvme/nvmf_tgt")
6703 .args(["-i", "0", "-m", "0x1"])
6704 .spawn()
6705 .unwrap();
6706 thread::sleep(std::time::Duration::new(2, 0));
6707
6708 assert!(exec_host_command_with_retries(
6709 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_transport -t VFIOUSER",
6710 3,
6711 std::time::Duration::new(5, 0),
6712 ));
6713 assert!(exec_host_command_status(&format!(
6714 "/usr/local/bin/spdk-nvme/rpc.py bdev_aio_create {} test 512",
6715 nvme_dir.join("test-disk.raw").to_str().unwrap()
6716 ))
6717 .success());
6718 assert!(exec_host_command_status(
6719 "/usr/local/bin/spdk-nvme/rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode -a -s test"
6720 )
6721 .success());
6722 assert!(exec_host_command_status(
6723 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode test"
6724 )
6725 .success());
6726 assert!(exec_host_command_status(&format!(
6727 "/usr/local/bin/spdk-nvme/rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode -t VFIOUSER -a {} -s 0",
6728 nvme_dir.join("nvme-vfio-user").to_str().unwrap()
6729 ))
6730 .success());
6731
6732 child
6733 }
6734
6735     fn cleanup_spdk_nvme() {
6736 exec_host_command_status("pkill -f nvmf_tgt");
6737 }
6738
6739 #[test]
6740     fn test_vfio_user() {
6741 let jammy_image = JAMMY_IMAGE_NAME.to_string();
6742 let jammy = UbuntuDiskConfig::new(jammy_image);
6743 let guest = Guest::new(Box::new(jammy));
6744
6745 let spdk_nvme_dir = guest.tmp_dir.as_path().join("test-vfio-user");
6746 let mut spdk_child = setup_spdk_nvme(spdk_nvme_dir.as_path());
6747
6748 let api_socket = temp_api_path(&guest.tmp_dir);
6749 let mut child = GuestCommand::new(&guest)
6750 .args(["--api-socket", &api_socket])
6751 .args(["--cpus", "boot=1"])
6752 .args(["--memory", "size=1G,shared=on,hugepages=on"])
6753 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
6754 .args(["--serial", "tty", "--console", "off"])
6755 .default_disks()
6756 .default_net()
6757 .capture_output()
6758 .spawn()
6759 .unwrap();
6760
6761 let r = std::panic::catch_unwind(|| {
6762 guest.wait_vm_boot(None).unwrap();
6763
6764 // Hotplug the SPDK-NVMe device to the VM
6765 let (cmd_success, cmd_output) = remote_command_w_output(
6766 &api_socket,
6767 "add-user-device",
6768 Some(&format!(
6769 "socket={},id=vfio_user0",
6770 spdk_nvme_dir
6771 .as_path()
6772 .join("nvme-vfio-user/cntrl")
6773 .to_str()
6774 .unwrap(),
6775 )),
6776 );
6777 assert!(cmd_success);
6778 assert!(String::from_utf8_lossy(&cmd_output)
6779 .contains("{\"id\":\"vfio_user0\",\"bdf\":\"0000:00:05.0\"}"));
6780
6781 thread::sleep(std::time::Duration::new(10, 0));
6782
6783             // Check that /dev/nvme0n1 exists and that the device size is 128M.
6784 assert_eq!(
6785 guest
6786 .ssh_command("lsblk | grep nvme0n1 | grep -c 128M")
6787 .unwrap()
6788 .trim()
6789 .parse::<u32>()
6790 .unwrap_or_default(),
6791 1
6792 );
6793
6794             // Mount the device, write a file and check the change persists after reboot
6795 assert_eq!(
6796 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(),
6797 ""
6798 );
6799 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "lost+found\n");
6800 guest
6801 .ssh_command("echo test123 | sudo tee /mnt/test")
6802 .unwrap();
6803 assert_eq!(guest.ssh_command("sudo umount /mnt").unwrap(), "");
6804 assert_eq!(guest.ssh_command("ls /mnt").unwrap(), "");
6805
6806 guest.reboot_linux(0, None);
6807 assert_eq!(
6808 guest.ssh_command("sudo mount /dev/nvme0n1 /mnt").unwrap(),
6809 ""
6810 );
6811 assert_eq!(
6812 guest.ssh_command("sudo cat /mnt/test").unwrap().trim(),
6813 "test123"
6814 );
6815 });
6816
6817 let _ = spdk_child.kill();
6818 let _ = spdk_child.wait();
6819
6820 kill_child(&mut child);
6821 let output = child.wait_with_output().unwrap();
6822
6823 handle_child_output(r, &output);
6824 }
6825
6826 #[test]
6827 #[cfg(target_arch = "x86_64")]
6828     fn test_vdpa_block() {
6829 // Before trying to run the test, verify the vdpa_sim_blk module is correctly loaded.
6830 assert!(exec_host_command_status("lsmod | grep vdpa_sim_blk").success());
6831
6832 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6833 let guest = Guest::new(Box::new(focal));
6834 let api_socket = temp_api_path(&guest.tmp_dir);
6835
6836 let kernel_path = direct_kernel_boot_path();
6837
6838 let mut child = GuestCommand::new(&guest)
6839 .args(["--cpus", "boot=2"])
6840 .args(["--memory", "size=512M,hugepages=on"])
6841 .args(["--kernel", kernel_path.to_str().unwrap()])
6842 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
6843 .default_disks()
6844 .default_net()
6845 .args(["--vdpa", "path=/dev/vhost-vdpa-0,num_queues=1"])
6846 .args(["--platform", "num_pci_segments=2,iommu_segments=1"])
6847 .args(["--api-socket", &api_socket])
6848 .capture_output()
6849 .spawn()
6850 .unwrap();
6851
6852 let r = std::panic::catch_unwind(|| {
6853 guest.wait_vm_boot(None).unwrap();
6854
6855             // Check that /dev/vdc exists and that the device size is 128M.
6856 assert_eq!(
6857 guest
6858 .ssh_command("lsblk | grep vdc | grep -c 128M")
6859 .unwrap()
6860 .trim()
6861 .parse::<u32>()
6862 .unwrap_or_default(),
6863 1
6864 );
6865
6866 // Check the content of the block device after we wrote to it.
6867             // The vdpa-sim-blk should let us read what we previously wrote.
6868 guest
6869 .ssh_command("sudo bash -c 'echo foobar > /dev/vdc'")
6870 .unwrap();
6871 assert_eq!(
6872 guest.ssh_command("sudo head -1 /dev/vdc").unwrap().trim(),
6873 "foobar"
6874 );
6875
6876 // Hotplug an extra vDPA block device behind the vIOMMU
6877 // Add a new vDPA device to the VM
6878 let (cmd_success, cmd_output) = remote_command_w_output(
6879 &api_socket,
6880 "add-vdpa",
6881 Some("id=myvdpa0,path=/dev/vhost-vdpa-1,num_queues=1,pci_segment=1,iommu=on"),
6882 );
6883 assert!(cmd_success);
6884 assert!(String::from_utf8_lossy(&cmd_output)
6885 .contains("{\"id\":\"myvdpa0\",\"bdf\":\"0001:00:01.0\"}"));
6886
6887 thread::sleep(std::time::Duration::new(10, 0));
6888
6889 // Check IOMMU setup
6890 assert!(guest
6891 .does_device_vendor_pair_match("0x1057", "0x1af4")
6892 .unwrap_or_default());
6893 assert_eq!(
6894 guest
6895 .ssh_command("ls /sys/kernel/iommu_groups/0/devices")
6896 .unwrap()
6897 .trim(),
6898 "0001:00:01.0"
6899 );
6900
6901             // Check that /dev/vdd exists and that the device size is 128M.
6902 assert_eq!(
6903 guest
6904 .ssh_command("lsblk | grep vdd | grep -c 128M")
6905 .unwrap()
6906 .trim()
6907 .parse::<u32>()
6908 .unwrap_or_default(),
6909 1
6910 );
6911
6912 // Write some content to the block device we've just plugged.
6913 guest
6914 .ssh_command("sudo bash -c 'echo foobar > /dev/vdd'")
6915 .unwrap();
6916
6917 // Check we can read the content back.
6918 assert_eq!(
6919 guest.ssh_command("sudo head -1 /dev/vdd").unwrap().trim(),
6920 "foobar"
6921 );
6922
6923 // Unplug the device
6924 let cmd_success = remote_command(&api_socket, "remove-device", Some("myvdpa0"));
6925 assert!(cmd_success);
6926 thread::sleep(std::time::Duration::new(10, 0));
6927
6928 // Check /dev/vdd doesn't exist anymore
6929 assert_eq!(
6930 guest
6931 .ssh_command("lsblk | grep -c vdd || true")
6932 .unwrap()
6933 .trim()
6934 .parse::<u32>()
6935 .unwrap_or(1),
6936 0
6937 );
6938 });
6939
6940 kill_child(&mut child);
6941 let output = child.wait_with_output().unwrap();
6942
6943 handle_child_output(r, &output);
6944 }
6945
6946 #[test]
6947 #[cfg(target_arch = "x86_64")]
6948 #[ignore = "See #5756"]
6949     fn test_vdpa_net() {
6950 // Before trying to run the test, verify the vdpa_sim_net module is correctly loaded.
6951 if !exec_host_command_status("lsmod | grep vdpa_sim_net").success() {
6952 return;
6953 }
6954
6955 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
6956 let guest = Guest::new(Box::new(focal));
6957
6958 let kernel_path = direct_kernel_boot_path();
6959
6960 let mut child = GuestCommand::new(&guest)
6961 .args(["--cpus", "boot=2"])
6962 .args(["--memory", "size=512M,hugepages=on"])
6963 .args(["--kernel", kernel_path.to_str().unwrap()])
6964 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
6965 .default_disks()
6966 .default_net()
6967 .args(["--vdpa", "path=/dev/vhost-vdpa-2,num_queues=2"])
6968 .capture_output()
6969 .spawn()
6970 .unwrap();
6971
6972 let r = std::panic::catch_unwind(|| {
6973 guest.wait_vm_boot(None).unwrap();
6974
6975 // Check we can find network interface related to vDPA device
6976 assert_eq!(
6977 guest
6978 .ssh_command("ip -o link | grep -c ens6")
6979 .unwrap()
6980 .trim()
6981 .parse::<u32>()
6982 .unwrap_or(0),
6983 1
6984 );
6985
6986 guest
6987 .ssh_command("sudo ip addr add 172.16.1.2/24 dev ens6")
6988 .unwrap();
6989 guest.ssh_command("sudo ip link set up dev ens6").unwrap();
6990
6991 // Check there is no packet yet on both TX/RX of the network interface
6992 assert_eq!(
6993 guest
6994 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 0'")
6995 .unwrap()
6996 .trim()
6997 .parse::<u32>()
6998 .unwrap_or(0),
6999 2
7000 );
7001
7002 // Send 6 packets with ping command
7003 guest.ssh_command("ping 172.16.1.10 -c 6 || true").unwrap();
7004
7005 // Check we can find 6 packets on both TX/RX of the network interface
7006 assert_eq!(
7007 guest
7008 .ssh_command("ip -j -p -s link show ens6 | grep -c '\"packets\": 6'")
7009 .unwrap()
7010 .trim()
7011 .parse::<u32>()
7012 .unwrap_or(0),
7013 2
7014 );
7015
7016 // No need to check for hotplug as we already tested it through
7017 // test_vdpa_block()
7018 });
7019
7020 kill_child(&mut child);
7021 let output = child.wait_with_output().unwrap();
7022
7023 handle_child_output(r, &output);
7024 }
7025
7026 #[test]
7027 #[cfg(target_arch = "x86_64")]
7028     fn test_tpm() {
7029 let focal = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string());
7030 let guest = Guest::new(Box::new(focal));
7031
7032 let (mut swtpm_command, swtpm_socket_path) = prepare_swtpm_daemon(&guest.tmp_dir);
7033
7034 let mut guest_cmd = GuestCommand::new(&guest);
7035 guest_cmd
7036 .args(["--cpus", "boot=1"])
7037 .args(["--memory", "size=1G"])
7038 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
7039 .args(["--tpm", &format!("socket={swtpm_socket_path}")])
7040 .capture_output()
7041 .default_disks()
7042 .default_net();
7043
7044 // Start swtpm daemon
7045 let mut swtpm_child = swtpm_command.spawn().unwrap();
7046 thread::sleep(std::time::Duration::new(10, 0));
7047 let mut child = guest_cmd.spawn().unwrap();
7048 let r = std::panic::catch_unwind(|| {
7049 guest.wait_vm_boot(None).unwrap();
7050 assert_eq!(
7051 guest.ssh_command("ls /dev/tpm0").unwrap().trim(),
7052 "/dev/tpm0"
7053 );
7054 guest.ssh_command("sudo tpm2_selftest -f").unwrap();
7055 guest
7056 .ssh_command("echo 'hello' > /tmp/checksum_test; ")
7057 .unwrap();
7058 guest.ssh_command("cmp <(sudo tpm2_pcrevent /tmp/checksum_test | grep sha256 | awk '{print $2}') <(sha256sum /tmp/checksum_test| awk '{print $1}')").unwrap();
7059 });
7060
7061 let _ = swtpm_child.kill();
7062 let _d_out = swtpm_child.wait_with_output().unwrap();
7063
7064 kill_child(&mut child);
7065 let output = child.wait_with_output().unwrap();
7066
7067 handle_child_output(r, &output);
7068 }
7069
7070 #[test]
7071 #[cfg(target_arch = "x86_64")]
7072     fn test_double_tty() {
7073 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
7074 let guest = Guest::new(Box::new(focal));
7075 let mut cmd = GuestCommand::new(&guest);
7076 let api_socket = temp_api_path(&guest.tmp_dir);
7077 let tty_str: &str = "console=hvc0 earlyprintk=ttyS0 ";
7078         // Kernel printk message showing the hvc0 console was enabled.
7079         let con_enabled_str: &str = "console [hvc0] enabled";
7080         // Kernel printk message showing the early boot console was handed off.
7081         let bootcon_disabled_str: &str = "bootconsole [earlyser0] disabled";
7082
7083 let kernel_path = direct_kernel_boot_path();
7084
7085 cmd.args(["--cpus", "boot=1"])
7086 .args(["--memory", "size=512M"])
7087 .args(["--kernel", kernel_path.to_str().unwrap()])
7088 .args([
7089 "--cmdline",
7090 DIRECT_KERNEL_BOOT_CMDLINE
7091 .replace("console=hvc0 ", tty_str)
7092 .as_str(),
7093 ])
7094 .capture_output()
7095 .default_disks()
7096 .default_net()
7097 .args(["--serial", "tty"])
7098 .args(["--console", "tty"])
7099 .args(["--api-socket", &api_socket]);
7100
7101 let mut child = cmd.spawn().unwrap();
7102
7103 let mut r = std::panic::catch_unwind(|| {
7104 guest.wait_vm_boot(None).unwrap();
7105 });
7106
7107 kill_child(&mut child);
7108 let output = child.wait_with_output().unwrap();
7109
7110 if r.is_ok() {
7111 r = std::panic::catch_unwind(|| {
7112 let s = String::from_utf8_lossy(&output.stdout);
7113 assert!(s.contains(tty_str));
7114                 assert!(s.contains(con_enabled_str));
7115                 assert!(s.contains(bootcon_disabled_str));
7116 });
7117 }
7118
7119 handle_child_output(r, &output);
7120 }
7121
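    // With `unknown_nmi_panic=1` on the kernel command line, an NMI injected
    // through the `nmi` API should make the guest panic, which the pvpanic
    // device then surfaces as a "panic" event.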
7122 #[test]
7123 #[cfg(target_arch = "x86_64")]
7124     fn test_nmi() {
7125 let jammy = UbuntuDiskConfig::new(JAMMY_IMAGE_NAME.to_string());
7126 let guest = Guest::new(Box::new(jammy));
7127 let api_socket = temp_api_path(&guest.tmp_dir);
7128 let event_path = temp_event_monitor_path(&guest.tmp_dir);
7129
7130 let kernel_path = direct_kernel_boot_path();
7131 let cmd_line = format!("{} {}", DIRECT_KERNEL_BOOT_CMDLINE, "unknown_nmi_panic=1");
7132
7133 let mut cmd = GuestCommand::new(&guest);
7134 cmd.args(["--cpus", "boot=4"])
7135 .args(["--memory", "size=512M"])
7136 .args(["--kernel", kernel_path.to_str().unwrap()])
7137 .args(["--cmdline", cmd_line.as_str()])
7138 .default_disks()
7139 .args(["--net", guest.default_net_string().as_str()])
7140 .args(["--pvpanic"])
7141 .args(["--api-socket", &api_socket])
7142 .args(["--event-monitor", format!("path={event_path}").as_str()])
7143 .capture_output();
7144
7145 let mut child = cmd.spawn().unwrap();
7146
7147 let r = std::panic::catch_unwind(|| {
7148 guest.wait_vm_boot(None).unwrap();
7149
7150 assert!(remote_command(&api_socket, "nmi", None));
7151
7152             // Give the guest some time to panic and report the event
7153 thread::sleep(std::time::Duration::new(3, 0));
7154
7155 let expected_sequential_events = [&MetaEvent {
7156 event: "panic".to_string(),
7157 device_id: None,
7158 }];
7159 assert!(check_latest_events_exact(
7160 &expected_sequential_events,
7161 &event_path
7162 ));
7163 });
7164
7165 kill_child(&mut child);
7166 let output = child.wait_with_output().unwrap();
7167
7168 handle_child_output(r, &output);
7169 }
7170 }
7171
7172 mod dbus_api {
7173 use crate::*;
7174
7175 // Start cloud-hypervisor with no VM parameters, running both the HTTP
7176 // and DBus APIs. Alternate calls to the external APIs (HTTP and DBus)
7177 // to create a VM, boot it, and verify that it can be shut down and then
7178 // booted again.
7179 #[test]
7180     fn test_api_dbus_and_http_interleaved() {
7181 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
7182 let guest = Guest::new(Box::new(focal));
7183 let dbus_api = TargetApi::new_dbus_api(&guest.tmp_dir);
7184 let http_api = TargetApi::new_http_api(&guest.tmp_dir);
7185
7186 let mut child = GuestCommand::new(&guest)
7187 .args(dbus_api.guest_args())
7188 .args(http_api.guest_args())
7189 .capture_output()
7190 .spawn()
7191 .unwrap();
7192
7193 thread::sleep(std::time::Duration::new(1, 0));
7194
7195 // Verify API servers are running
7196 assert!(dbus_api.remote_command("ping", None));
7197 assert!(http_api.remote_command("ping", None));
7198
7199 // Create the VM first
7200 let cpu_count: u8 = 4;
7201 let request_body = guest.api_create_body(
7202 cpu_count,
7203 direct_kernel_boot_path().to_str().unwrap(),
7204 DIRECT_KERNEL_BOOT_CMDLINE,
7205 );
7206
7207 let temp_config_path = guest.tmp_dir.as_path().join("config");
7208 std::fs::write(&temp_config_path, request_body).unwrap();
7209 let create_config = temp_config_path.as_os_str().to_str().unwrap();
7210
7211 let r = std::panic::catch_unwind(|| {
7212 // Create the VM
7213 assert!(dbus_api.remote_command("create", Some(create_config),));
7214
7215 // Then boot it
7216 assert!(http_api.remote_command("boot", None));
7217 guest.wait_vm_boot(None).unwrap();
7218
7219 // Check that the VM booted as expected
7220 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
7221 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
7222
7223 // Sync and shutdown without powering off to prevent filesystem
7224 // corruption.
7225 guest.ssh_command("sync").unwrap();
7226 guest.ssh_command("sudo shutdown -H now").unwrap();
7227
7228 // Wait for the guest to be fully shutdown
7229 thread::sleep(std::time::Duration::new(20, 0));
7230
7231 // Then shutdown the VM
7232 assert!(dbus_api.remote_command("shutdown", None));
7233
7234 // Then boot it again
7235 assert!(http_api.remote_command("boot", None));
7236 guest.wait_vm_boot(None).unwrap();
7237
7238 // Check that the VM booted as expected
7239 assert_eq!(guest.get_cpu_count().unwrap_or_default() as u8, cpu_count);
7240 assert!(guest.get_total_memory().unwrap_or_default() > 480_000);
7241 });
7242
7243 kill_child(&mut child);
7244 let output = child.wait_with_output().unwrap();
7245
7246 handle_child_output(r, &output);
7247 }
7248
7249 #[test]
7250     fn test_api_dbus_create_boot() {
7251 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
7252 let guest = Guest::new(Box::new(focal));
7253
7254 _test_api_create_boot(TargetApi::new_dbus_api(&guest.tmp_dir), guest)
7255 }
7256
7257 #[test]
7258     fn test_api_dbus_shutdown() {
7259 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
7260 let guest = Guest::new(Box::new(focal));
7261
7262 _test_api_shutdown(TargetApi::new_dbus_api(&guest.tmp_dir), guest)
7263 }
7264
7265 #[test]
7266     fn test_api_dbus_delete() {
7267 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
7268 let guest = Guest::new(Box::new(focal));
7269
7270 _test_api_delete(TargetApi::new_dbus_api(&guest.tmp_dir), guest);
7271 }
7272
7273 #[test]
7274 fn test_api_dbus_pause_resume() {
7275 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
7276 let guest = Guest::new(Box::new(focal));
7277
7278 _test_api_pause_resume(TargetApi::new_dbus_api(&guest.tmp_dir), guest)
7279 }
7280 }
7281
7282 mod common_sequential {
7283 use std::fs::remove_dir_all;
7284
7285 use crate::*;
7286
7287 #[test]
7288 #[cfg(not(feature = "mshv"))]
7289 fn test_memory_mergeable_on() {
7290 test_memory_mergeable(true)
7291 }
7292
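// Pause the VM, verify the 'pausing'/'paused' events were emitted, then
// take a snapshot into `snapshot_dir` and verify the
// 'snapshotting'/'snapshotted' events.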
7293 fn snapshot_and_check_events(api_socket: &str, snapshot_dir: &str, event_path: &str) {
7294 // Pause the VM
7295 assert!(remote_command(api_socket, "pause", None));
7296 let latest_events: [&MetaEvent; 2] = [
7297 &MetaEvent {
7298 event: "pausing".to_string(),
7299 device_id: None,
7300 },
7301 &MetaEvent {
7302 event: "paused".to_string(),
7303 device_id: None,
7304 },
7305 ];
7306 // See: #5938
7307 thread::sleep(std::time::Duration::new(1, 0));
7308 assert!(check_latest_events_exact(&latest_events, event_path));
7309
7310 // Take a snapshot from the VM
7311 assert!(remote_command(
7312 api_socket,
7313 "snapshot",
7314 Some(format!("file://{snapshot_dir}").as_str()),
7315 ));
7316
7317 // Wait to make sure the snapshot is completed
7318 thread::sleep(std::time::Duration::new(10, 0));
7319
7320 let latest_events = [
7321 &MetaEvent {
7322 event: "snapshotting".to_string(),
7323 device_id: None,
7324 },
7325 &MetaEvent {
7326 event: "snapshotted".to_string(),
7327 device_id: None,
7328 },
7329 ];
7330 // See: #5938
7331 thread::sleep(std::time::Duration::new(1, 0));
7332 assert!(check_latest_events_exact(&latest_events, event_path));
7333 }
7334
7335 // One thing to note about this test: the virtio-net device is heavily
7336 // exercised by every ssh command, so there is no need for a dedicated
7337 // test to verify that the snapshot/restore went well for virtio-net.
7338 #[test]
7339 #[cfg(not(feature = "mshv"))]
7340 fn test_snapshot_restore_hotplug_virtiomem() {
7341 _test_snapshot_restore(true);
7342 }
7343
7344 #[test]
7345 fn test_snapshot_restore_basic() {
7346 _test_snapshot_restore(false);
7347 }
7348
7349 fn _test_snapshot_restore(use_hotplug: bool) {
7350 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
7351 let guest = Guest::new(Box::new(focal));
7352 let kernel_path = direct_kernel_boot_path();
7353
7354 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir));
7355
7356 let net_id = "net123";
7357 let net_params = format!(
7358 "id={},tap=,mac={},ip={},mask=255.255.255.0",
7359 net_id, guest.network.guest_mac, guest.network.host_ip
7360 );
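// When hotplug is exercised, back the guest RAM with virtio-mem so it
// can be resized before the snapshot is taken.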
7361 let mut mem_params = "size=2G";
7362
7363 if use_hotplug {
7364 mem_params = "size=2G,hotplug_method=virtio-mem,hotplug_size=32G"
7365 }
7366
7367 let cloudinit_params = format!(
7368 "path={},iommu=on",
7369 guest.disk_config.disk(DiskType::CloudInit).unwrap()
7370 );
7371
7372 let socket = temp_vsock_path(&guest.tmp_dir);
7373 let event_path = temp_event_monitor_path(&guest.tmp_dir);
7374
7375 let mut child = GuestCommand::new(&guest)
7376 .args(["--api-socket", &api_socket_source])
7377 .args(["--event-monitor", format!("path={event_path}").as_str()])
7378 .args(["--cpus", "boot=4"])
7379 .args(["--memory", mem_params])
7380 .args(["--balloon", "size=0"])
7381 .args(["--kernel", kernel_path.to_str().unwrap()])
7382 .args([
7383 "--disk",
7384 format!(
7385 "path={}",
7386 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
7387 )
7388 .as_str(),
7389 cloudinit_params.as_str(),
7390 ])
7391 .args(["--net", net_params.as_str()])
7392 .args(["--vsock", format!("cid=3,socket={socket}").as_str()])
7393 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
7394 .capture_output()
7395 .spawn()
7396 .unwrap();
7397
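// This marker is written to the guest console and later searched for in
// the captured child output to confirm the console device kept working.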
7398 let console_text = String::from("On a branch floating down river a cricket, singing.");
7399 // Create the snapshot directory
7400 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir);
7401
7402 let r = std::panic::catch_unwind(|| {
7403 guest.wait_vm_boot(None).unwrap();
7404
7405 // Check the number of vCPUs
7406 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4);
7407 // Check the guest RAM
7408 assert!(guest.get_total_memory().unwrap_or_default() > 1_920_000);
7409 if use_hotplug {
7410 // Increase guest RAM with virtio-mem
7411 resize_command(
7412 &api_socket_source,
7413 None,
7414 Some(6 << 30),
7415 None,
7416 Some(&event_path),
7417 );
7418 thread::sleep(std::time::Duration::new(5, 0));
7419 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
7420 // Use balloon to remove RAM from the VM
7421 resize_command(
7422 &api_socket_source,
7423 None,
7424 None,
7425 Some(1 << 30),
7426 Some(&event_path),
7427 );
7428 thread::sleep(std::time::Duration::new(5, 0));
7429 let total_memory = guest.get_total_memory().unwrap_or_default();
7430 assert!(total_memory > 4_800_000);
7431 assert!(total_memory < 5_760_000);
7432 }
7433 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net
7434 guest.check_devices_common(Some(&socket), Some(&console_text), None);
7435
7436 // x86_64: We check that removing and adding back the virtio-net device
7437 // does not break the snapshot/restore support for virtio-pci.
7438 // This is an important thing to test as the hotplug will
7439 // trigger a PCI BAR reprogramming, which is a good way of
7440 // checking if the stored resources are correctly restored.
7441 // Unplug the virtio-net device
7442 // AArch64: Device hotplug is currently not supported, skipping here.
7443 #[cfg(target_arch = "x86_64")]
7444 {
7445 assert!(remote_command(
7446 &api_socket_source,
7447 "remove-device",
7448 Some(net_id),
7449 ));
7450 thread::sleep(std::time::Duration::new(10, 0));
7451 let latest_events = [&MetaEvent {
7452 event: "device-removed".to_string(),
7453 device_id: Some(net_id.to_string()),
7454 }];
7455 // See: #5938
7456 thread::sleep(std::time::Duration::new(1, 0));
7457 assert!(check_latest_events_exact(&latest_events, &event_path));
7458
7459 // Plug the virtio-net device again
7460 assert!(remote_command(
7461 &api_socket_source,
7462 "add-net",
7463 Some(net_params.as_str()),
7464 ));
7465 thread::sleep(std::time::Duration::new(10, 0));
7466 }
7467
7468 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path);
7469 });
7470
7471 // Shutdown the source VM and check console output
7472 kill_child(&mut child);
7473 let output = child.wait_with_output().unwrap();
7474 handle_child_output(r, &output);
7475
7476 let r = std::panic::catch_unwind(|| {
7477 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text));
7478 });
7479
7480 handle_child_output(r, &output);
7481
7482 // Remove the vsock socket file.
7483 Command::new("rm")
7484 .arg("-f")
7485 .arg(socket.as_str())
7486 .output()
7487 .unwrap();
7488
7489 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir));
7490 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir));
7491
7492 // Restore the VM from the snapshot
7493 let mut child = GuestCommand::new(&guest)
7494 .args(["--api-socket", &api_socket_restored])
7495 .args([
7496 "--event-monitor",
7497 format!("path={event_path_restored}").as_str(),
7498 ])
7499 .args([
7500 "--restore",
7501 format!("source_url=file://{snapshot_dir}").as_str(),
7502 ])
7503 .capture_output()
7504 .spawn()
7505 .unwrap();
7506
7507 // Wait for the VM to be restored
7508 thread::sleep(std::time::Duration::new(20, 0));
7509 let expected_events = [
7510 &MetaEvent {
7511 event: "starting".to_string(),
7512 device_id: None,
7513 },
7514 &MetaEvent {
7515 event: "activated".to_string(),
7516 device_id: Some("__console".to_string()),
7517 },
7518 &MetaEvent {
7519 event: "activated".to_string(),
7520 device_id: Some("__rng".to_string()),
7521 },
7522 &MetaEvent {
7523 event: "restoring".to_string(),
7524 device_id: None,
7525 },
7526 ];
7527 assert!(check_sequential_events(
7528 &expected_events,
7529 &event_path_restored
7530 ));
7531 let latest_events = [&MetaEvent {
7532 event: "restored".to_string(),
7533 device_id: None,
7534 }];
7535 assert!(check_latest_events_exact(
7536 &latest_events,
7537 &event_path_restored
7538 ));
7539
7540 // Remove the snapshot dir
7541 let _ = remove_dir_all(snapshot_dir.as_str());
7542
7543 let r = std::panic::catch_unwind(|| {
7544 // Resume the VM
7545 assert!(remote_command(&api_socket_restored, "resume", None));
7546 // There is no way to ensure the 'write()' to the event
7547 // file has completed by the time the 'resume' request
7548 // returns successfully, because the write is performed
7549 // asynchronously by a different Cloud Hypervisor thread
7550 // (e.g. the event-monitor thread).
7551 thread::sleep(std::time::Duration::new(1, 0));
7552 let latest_events = [
7553 &MetaEvent {
7554 event: "resuming".to_string(),
7555 device_id: None,
7556 },
7557 &MetaEvent {
7558 event: "resumed".to_string(),
7559 device_id: None,
7560 },
7561 ];
7562 assert!(check_latest_events_exact(
7563 &latest_events,
7564 &event_path_restored
7565 ));
7566
7567 // Perform same checks to validate VM has been properly restored
7568 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 4);
7569 let total_memory = guest.get_total_memory().unwrap_or_default();
7570 if !use_hotplug {
7571 assert!(total_memory > 1_920_000);
7572 } else {
7573 assert!(total_memory > 4_800_000);
7574 assert!(total_memory < 5_760_000);
7575 // Deflate balloon to restore entire RAM to the VM
7576 resize_command(&api_socket_restored, None, None, Some(0), None);
7577 thread::sleep(std::time::Duration::new(5, 0));
7578 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
7579 // Decrease guest RAM with virtio-mem
7580 resize_command(&api_socket_restored, None, Some(5 << 30), None, None);
7581 thread::sleep(std::time::Duration::new(5, 0));
7582 let total_memory = guest.get_total_memory().unwrap_or_default();
7583 assert!(total_memory > 4_800_000);
7584 assert!(total_memory < 5_760_000);
7585 }
7586
7587 guest.check_devices_common(Some(&socket), Some(&console_text), None);
7588 });
7589 // Shutdown the target VM and check console output
7590 kill_child(&mut child);
7591 let output = child.wait_with_output().unwrap();
7592 handle_child_output(r, &output);
7593
7594 let r = std::panic::catch_unwind(|| {
7595 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text));
7596 });
7597
7598 handle_child_output(r, &output);
7599 }
7600
7601 #[test]
7602 #[cfg_attr(target_arch = "aarch64", ignore = "See #6970")]
7603 fn test_snapshot_restore_with_fd() {
7604 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
7605 let guest = Guest::new(Box::new(focal));
7606 let kernel_path = direct_kernel_boot_path();
7607
7608 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir));
7609
7610 let net_id = "net123";
7611 let num_queue_pairs: usize = 2;
7612 // Use a name that does not conflict with tap devices created by other tests
7613 let tap_name = "chtap999";
7614 use std::str::FromStr;
7615 let taps = net_util::open_tap(
7616 Some(tap_name),
7617 Some(std::net::IpAddr::V4(
7618 std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap(),
7619 )),
7620 None,
7621 &mut None,
7622 None,
7623 num_queue_pairs,
7624 Some(libc::O_RDWR | libc::O_NONBLOCK),
7625 )
7626 .unwrap();
7627 let net_params = format!(
7628 "id={},fd=[{},{}],mac={},ip={},mask=255.255.255.0,num_queues={}",
7629 net_id,
7630 taps[0].as_raw_fd(),
7631 taps[1].as_raw_fd(),
7632 guest.network.guest_mac,
7633 guest.network.host_ip,
7634 num_queue_pairs * 2
7635 );
7636
7637 let cloudinit_params = format!(
7638 "path={},iommu=on",
7639 guest.disk_config.disk(DiskType::CloudInit).unwrap()
7640 );
7641
7642 let n_cpu = 2;
7643 let event_path = temp_event_monitor_path(&guest.tmp_dir);
7644
7645 let mut child = GuestCommand::new(&guest)
7646 .args(["--api-socket", &api_socket_source])
7647 .args(["--event-monitor", format!("path={event_path}").as_str()])
7648 .args(["--cpus", format!("boot={n_cpu}").as_str()])
7649 .args(["--memory", "size=1G"])
7650 .args(["--kernel", kernel_path.to_str().unwrap()])
7651 .args([
7652 "--disk",
7653 format!(
7654 "path={}",
7655 guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
7656 )
7657 .as_str(),
7658 cloudinit_params.as_str(),
7659 ])
7660 .args(["--net", net_params.as_str()])
7661 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
7662 .capture_output()
7663 .spawn()
7664 .unwrap();
7665
7666 let console_text = String::from("On a branch floating down river a cricket, singing.");
7667 // Create the snapshot directory
7668 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir);
7669
7670 let r = std::panic::catch_unwind(|| {
7671 guest.wait_vm_boot(None).unwrap();
7672
7673 // Close the fds after the VM boots, as Cloud Hypervisor duplicates them before use
7674 for tap in taps.iter() {
7675 unsafe { libc::close(tap.as_raw_fd()) };
7676 }
7677
7678 // Check the number of vCPUs
7679 assert_eq!(guest.get_cpu_count().unwrap_or_default(), n_cpu);
7680 // Check the guest RAM
7681 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
7682
7683 // Check the guest virtio-devices, e.g. block, rng, vsock, console, and net
7684 guest.check_devices_common(None, Some(&console_text), None);
7685
7686 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path);
7687 });
7688
7689 // Shutdown the source VM and check console output
7690 kill_child(&mut child);
7691 let output = child.wait_with_output().unwrap();
7692 handle_child_output(r, &output);
7693
7694 let r = std::panic::catch_unwind(|| {
7695 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text));
7696 });
7697
7698 handle_child_output(r, &output);
7699
7700 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir));
7701 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir));
7702
7703 // Restore the VM from the snapshot
7704 let mut child = GuestCommand::new(&guest)
7705 .args(["--api-socket", &api_socket_restored])
7706 .args([
7707 "--event-monitor",
7708 format!("path={event_path_restored}").as_str(),
7709 ])
7710 .capture_output()
7711 .spawn()
7712 .unwrap();
7713 thread::sleep(std::time::Duration::new(2, 0));
7714
7715 let taps = net_util::open_tap(
7716 Some(tap_name),
7717 Some(std::net::IpAddr::V4(
7718 std::net::Ipv4Addr::from_str(&guest.network.host_ip).unwrap(),
7719 )),
7720 None,
7721 &mut None,
7722 None,
7723 num_queue_pairs,
7724 Some(libc::O_RDWR | libc::O_NONBLOCK),
7725 )
7726 .unwrap();
7727 let restore_params = format!(
7728 "source_url=file://{},net_fds=[{}@[{},{}]]",
7729 snapshot_dir,
7730 net_id,
7731 taps[0].as_raw_fd(),
7732 taps[1].as_raw_fd()
7733 );
7734 assert!(remote_command(
7735 &api_socket_restored,
7736 "restore",
7737 Some(restore_params.as_str())
7738 ));
7739
7740 // Wait for the VM to be restored
7741 thread::sleep(std::time::Duration::new(20, 0));
7742
7743 // Close the fds, as Cloud Hypervisor duplicates them before use
7744 for tap in taps.iter() {
7745 unsafe { libc::close(tap.as_raw_fd()) };
7746 }
7747
7748 let expected_events = [
7749 &MetaEvent {
7750 event: "starting".to_string(),
7751 device_id: None,
7752 },
7753 &MetaEvent {
7754 event: "activated".to_string(),
7755 device_id: Some("__console".to_string()),
7756 },
7757 &MetaEvent {
7758 event: "activated".to_string(),
7759 device_id: Some("__rng".to_string()),
7760 },
7761 &MetaEvent {
7762 event: "restoring".to_string(),
7763 device_id: None,
7764 },
7765 ];
7766 assert!(check_sequential_events(
7767 &expected_events,
7768 &event_path_restored
7769 ));
7770 let latest_events = [&MetaEvent {
7771 event: "restored".to_string(),
7772 device_id: None,
7773 }];
7774 assert!(check_latest_events_exact(
7775 &latest_events,
7776 &event_path_restored
7777 ));
7778
7779 // Remove the snapshot dir
7780 let _ = remove_dir_all(snapshot_dir.as_str());
7781
7782 let r = std::panic::catch_unwind(|| {
7783 // Resume the VM
7784 assert!(remote_command(&api_socket_restored, "resume", None));
7785 // There is no way to ensure the 'write()' to the event
7786 // file has completed by the time the 'resume' request
7787 // returns successfully, because the write is performed
7788 // asynchronously by a different Cloud Hypervisor thread
7789 // (e.g. the event-monitor thread).
7790 thread::sleep(std::time::Duration::new(1, 0));
7791 let latest_events = [
7792 &MetaEvent {
7793 event: "resuming".to_string(),
7794 device_id: None,
7795 },
7796 &MetaEvent {
7797 event: "resumed".to_string(),
7798 device_id: None,
7799 },
7800 ];
7801 assert!(check_latest_events_exact(
7802 &latest_events,
7803 &event_path_restored
7804 ));
7805
7806 // Perform same checks to validate VM has been properly restored
7807 assert_eq!(guest.get_cpu_count().unwrap_or_default(), n_cpu);
7808 assert!(guest.get_total_memory().unwrap_or_default() > 960_000);
7809
7810 guest.check_devices_common(None, Some(&console_text), None);
7811 });
7812 // Shutdown the target VM and check console output
7813 kill_child(&mut child);
7814 let output = child.wait_with_output().unwrap();
7815 handle_child_output(r, &output);
7816
7817 let r = std::panic::catch_unwind(|| {
7818 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text));
7819 });
7820
7821 handle_child_output(r, &output);
7822 }
7823
7824 #[test]
7825 #[cfg(target_arch = "x86_64")]
7826 fn test_snapshot_restore_pvpanic() {
7827 _test_snapshot_restore_devices(true);
7828 }
7829
7830 fn _test_snapshot_restore_devices(pvpanic: bool) {
7831 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
7832 let guest = Guest::new(Box::new(focal));
7833 let kernel_path = direct_kernel_boot_path();
7834
7835 let api_socket_source = format!("{}.1", temp_api_path(&guest.tmp_dir));
7836
7837 let device_params = {
7838 let mut data = vec![];
7839 if pvpanic {
7840 data.push("--pvpanic");
7841 }
7842 data
7843 };
7844
7845 let socket = temp_vsock_path(&guest.tmp_dir);
7846 let event_path = temp_event_monitor_path(&guest.tmp_dir);
7847
7848 let mut child = GuestCommand::new(&guest)
7849 .args(["--api-socket", &api_socket_source])
7850 .args(["--event-monitor", format!("path={event_path}").as_str()])
7851 .args(["--cpus", "boot=2"])
7852 .args(["--memory", "size=1G"])
7853 .args(["--kernel", kernel_path.to_str().unwrap()])
7854 .default_disks()
7855 .default_net()
7856 .args(["--vsock", format!("cid=3,socket={socket}").as_str()])
7857 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
7858 .args(device_params)
7859 .capture_output()
7860 .spawn()
7861 .unwrap();
7862
7863 let console_text = String::from("On a branch floating down river a cricket, singing.");
7864 // Create the snapshot directory
7865 let snapshot_dir = temp_snapshot_dir_path(&guest.tmp_dir);
7866
7867 let r = std::panic::catch_unwind(|| {
7868 guest.wait_vm_boot(None).unwrap();
7869
7870 // Check the number of vCPUs
7871 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2);
7872
7873 snapshot_and_check_events(&api_socket_source, &snapshot_dir, &event_path);
7874 });
7875
7876 // Shutdown the source VM and check console output
7877 kill_child(&mut child);
7878 let output = child.wait_with_output().unwrap();
7879 handle_child_output(r, &output);
7880
7881 // Remove the vsock socket file.
7882 Command::new("rm")
7883 .arg("-f")
7884 .arg(socket.as_str())
7885 .output()
7886 .unwrap();
7887
7888 let api_socket_restored = format!("{}.2", temp_api_path(&guest.tmp_dir));
7889 let event_path_restored = format!("{}.2", temp_event_monitor_path(&guest.tmp_dir));
7890
7891 // Restore the VM from the snapshot
7892 let mut child = GuestCommand::new(&guest)
7893 .args(["--api-socket", &api_socket_restored])
7894 .args([
7895 "--event-monitor",
7896 format!("path={event_path_restored}").as_str(),
7897 ])
7898 .args([
7899 "--restore",
7900 format!("source_url=file://{snapshot_dir}").as_str(),
7901 ])
7902 .capture_output()
7903 .spawn()
7904 .unwrap();
7905
7906 // Wait for the VM to be restored
7907 thread::sleep(std::time::Duration::new(20, 0));
7908
7909 let latest_events = [&MetaEvent {
7910 event: "restored".to_string(),
7911 device_id: None,
7912 }];
7913 assert!(check_latest_events_exact(
7914 &latest_events,
7915 &event_path_restored
7916 ));
7917
7918 // Remove the snapshot dir
7919 let _ = remove_dir_all(snapshot_dir.as_str());
7920
7921 let r = std::panic::catch_unwind(|| {
7922 // Resume the VM
7923 assert!(remote_command(&api_socket_restored, "resume", None));
7924 // There is no way to ensure the 'write()' to the event
7925 // file has completed by the time the 'resume' request
7926 // returns successfully, because the write is performed
7927 // asynchronously by a different Cloud Hypervisor thread
7928 // (e.g. the event-monitor thread).
7929 thread::sleep(std::time::Duration::new(1, 0));
7930 let latest_events = [
7931 &MetaEvent {
7932 event: "resuming".to_string(),
7933 device_id: None,
7934 },
7935 &MetaEvent {
7936 event: "resumed".to_string(),
7937 device_id: None,
7938 },
7939 ];
7940 assert!(check_latest_events_exact(
7941 &latest_events,
7942 &event_path_restored
7943 ));
7944
7945 // Check the number of vCPUs
7946 assert_eq!(guest.get_cpu_count().unwrap_or_default(), 2);
7947 guest.check_devices_common(Some(&socket), Some(&console_text), None);
7948
7949 if pvpanic {
7950 // Trigger a guest panic
7951 make_guest_panic(&guest);
7952 // Wait a while for the guest to panic
7953 thread::sleep(std::time::Duration::new(10, 0));
7954
7955 let expected_sequential_events = [&MetaEvent {
7956 event: "panic".to_string(),
7957 device_id: None,
7958 }];
7959 assert!(check_latest_events_exact(
7960 &expected_sequential_events,
7961 &event_path_restored
7962 ));
7963 }
7964 });
7965 // Shutdown the target VM and check console output
7966 kill_child(&mut child);
7967 let output = child.wait_with_output().unwrap();
7968 handle_child_output(r, &output);
7969
7970 let r = std::panic::catch_unwind(|| {
7971 assert!(String::from_utf8_lossy(&output.stdout).contains(&console_text));
7972 });
7973
7974 handle_child_output(r, &output);
7975 }
7976
7977 #[test]
7978 fn test_virtio_pmem_persist_writes() {
7979 test_virtio_pmem(false, false)
7980 }
7981 }
7982
7983 mod windows {
7984 use std::sync::LazyLock;
7985
7986 use crate::*;
7987
7988 static NEXT_DISK_ID: LazyLock<Mutex<u8>> = LazyLock::new(|| Mutex::new(1));
7989
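// Wraps a Windows guest image together with the password-based SSH
// credentials used to drive it from the tests.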
7990 struct WindowsGuest {
7991 guest: Guest,
7992 auth: PasswordAuth,
7993 }
7994
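// Filesystem selectors passed to `disk_new()` when formatting hotplug
// test disks.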
7995 trait FsType {
7996 const FS_FAT: u8;
7997 const FS_NTFS: u8;
7998 }
7999 impl FsType for WindowsGuest {
8000 const FS_FAT: u8 = 0;
8001 const FS_NTFS: u8 = 1;
8002 }
8003
8004 impl WindowsGuest {
8005 fn new() -> Self {
8006 let disk = WindowsDiskConfig::new(WINDOWS_IMAGE_NAME.to_string());
8007 let guest = Guest::new(Box::new(disk));
8008 let auth = PasswordAuth {
8009 username: String::from("administrator"),
8010 password: String::from("Admin123"),
8011 };
8012
8013 WindowsGuest { guest, auth }
8014 }
8015
8016 fn guest(&self) -> &Guest {
8017 &self.guest
8018 }
8019
8020 fn ssh_cmd(&self, cmd: &str) -> String {
8021 ssh_command_ip_with_auth(
8022 cmd,
8023 &self.auth,
8024 &self.guest.network.guest_ip,
8025 DEFAULT_SSH_RETRIES,
8026 DEFAULT_SSH_TIMEOUT,
8027 )
8028 .unwrap()
8029 }
8030
8031 fn cpu_count(&self) -> u8 {
8032 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).NumberOfLogicalProcessors\"")
8033 .trim()
8034 .parse::<u8>()
8035 .unwrap_or(0)
8036 }
8037
8038 fn ram_size(&self) -> usize {
8039 self.ssh_cmd("powershell -Command \"(Get-CimInstance win32_computersystem).TotalPhysicalMemory\"")
8040 .trim()
8041 .parse::<usize>()
8042 .unwrap_or(0)
8043 }
8044
8045 fn netdev_count(&self) -> u8 {
8046 self.ssh_cmd("powershell -Command \"netsh int ipv4 show interfaces | Select-String ethernet | Measure-Object -Line | Format-Table -HideTableHeaders\"")
8047 .trim()
8048 .parse::<u8>()
8049 .unwrap_or(0)
8050 }
8051
8052 fn disk_count(&self) -> u8 {
8053 self.ssh_cmd("powershell -Command \"Get-Disk | Measure-Object -Line | Format-Table -HideTableHeaders\"")
8054 .trim()
8055 .parse::<u8>()
8056 .unwrap_or(0)
8057 }
8058
8059 fn reboot(&self) {
8060 let _ = self.ssh_cmd("shutdown /r /t 0");
8061 }
8062
8063 fn shutdown(&self) {
8064 let _ = self.ssh_cmd("shutdown /s /t 0");
8065 }
8066
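// Start dnsmasq on the host side of the guest network so the Windows
// guest obtains its expected IP address (bound to its MAC) via DHCP.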
8067 fn run_dnsmasq(&self) -> std::process::Child {
8068 let listen_address = format!("--listen-address={}", self.guest.network.host_ip);
8069 let dhcp_host = format!(
8070 "--dhcp-host={},{}",
8071 self.guest.network.guest_mac, self.guest.network.guest_ip
8072 );
8073 let dhcp_range = format!(
8074 "--dhcp-range=eth,{},{}",
8075 self.guest.network.guest_ip, self.guest.network.guest_ip
8076 );
8077
8078 Command::new("dnsmasq")
8079 .arg("--no-daemon")
8080 .arg("--log-queries")
8081 .arg(listen_address.as_str())
8082 .arg("--except-interface=lo")
8083 .arg("--bind-dynamic") // Allow listening to host_ip while the interface is not ready yet.
8084 .arg("--conf-file=/dev/null")
8085 .arg(dhcp_host.as_str())
8086 .arg(dhcp_range.as_str())
8087 .spawn()
8088 .unwrap()
8089 }
8090
8091 // TODO: Clean up the image file explicitly after the test if disk space becomes an issue.
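// Create a raw image of `sz` MiB, give it a single type=7 partition,
// format that partition with the requested filesystem and return the
// image path so it can be hotplugged as a disk.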
8092 fn disk_new(&self, fs: u8, sz: usize) -> String {
8093 let mut guard = NEXT_DISK_ID.lock().unwrap();
8094 let id = *guard;
8095 *guard = id + 1;
8096
8097 let img = PathBuf::from(format!("/tmp/test-hotplug-{id}.raw"));
8098 let _ = fs::remove_file(&img);
8099
8100 // Create an image file
8101 let out = Command::new("qemu-img")
8102 .args([
8103 "create",
8104 "-f",
8105 "raw",
8106 img.to_str().unwrap(),
8107 format!("{sz}m").as_str(),
8108 ])
8109 .output()
8110 .expect("qemu-img command failed")
8111 .stdout;
8112 println!("{out:?}");
8113
8114 // Associate image to a loop device
8115 let out = Command::new("losetup")
8116 .args(["--show", "-f", img.to_str().unwrap()])
8117 .output()
8118 .expect("failed to create loop device")
8119 .stdout;
8120 let _tmp = String::from_utf8_lossy(&out);
8121 let loop_dev = _tmp.trim();
8122 println!("{out:?}");
8123
8124 // Create a partition table
8125 // echo 'type=7' | sudo sfdisk "${LOOP}"
8126 let mut child = Command::new("sfdisk")
8127 .args([loop_dev])
8128 .stdin(Stdio::piped())
8129 .spawn()
8130 .unwrap();
8131 let stdin = child.stdin.as_mut().expect("failed to open stdin");
8132 stdin
8133 .write_all("type=7".as_bytes())
8134 .expect("failed to write stdin");
8135 let out = child.wait_with_output().expect("sfdisk failed").stdout;
8136 println!("{out:?}");
8137
8138 // Disengage the loop device
8139 let out = Command::new("losetup")
8140 .args(["-d", loop_dev])
8141 .output()
8142 .expect("loop device not found")
8143 .stdout;
8144 println!("{out:?}");
8145
8146 // Re-associate loop device pointing to the partition only
8147 let out = Command::new("losetup")
8148 .args([
8149 "--show",
8150 "--offset",
8151 (512 * 2048).to_string().as_str(),
8152 "-f",
8153 img.to_str().unwrap(),
8154 ])
8155 .output()
8156 .expect("failed to create loop device")
8157 .stdout;
8158 let _tmp = String::from_utf8_lossy(&out);
8159 let loop_dev = _tmp.trim();
8160 println!("{out:?}");
8161
8162 // Create filesystem.
8163 let fs_cmd = match fs {
8164 WindowsGuest::FS_FAT => "mkfs.msdos",
8165 WindowsGuest::FS_NTFS => "mkfs.ntfs",
8166 _ => panic!("Unknown filesystem type '{fs}'"),
8167 };
8168 let out = Command::new(fs_cmd)
8169 .args([&loop_dev])
8170 .output()
8171 .unwrap_or_else(|_| panic!("{fs_cmd} failed"))
8172 .stdout;
8173 println!("{out:?}");
8174
8175 // Disengage the loop device
8176 let out = Command::new("losetup")
8177 .args(["-d", loop_dev])
8178 .output()
8179 .unwrap_or_else(|_| panic!("loop device '{loop_dev}' not found"))
8180 .stdout;
8181 println!("{out:?}");
8182
8183 img.to_str().unwrap().to_string()
8184 }
8185
8186 fn disks_set_rw(&self) {
8187 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsReadOnly $False\"");
8188 }
8189
8190 fn disks_online(&self) {
8191 let _ = self.ssh_cmd("powershell -Command \"Get-Disk | Where-Object IsOffline -eq $True | Set-Disk -IsOffline $False\"");
8192 }
8193
8194 fn disk_file_put(&self, fname: &str, data: &str) {
8195 let _ = self.ssh_cmd(&format!(
8196 "powershell -Command \"'{data}' | Set-Content -Path {fname}\""
8197 ));
8198 }
8199
8200 fn disk_file_read(&self, fname: &str) -> String {
8201 self.ssh_cmd(&format!(
8202 "powershell -Command \"Get-Content -Path {fname}\""
8203 ))
8204 }
8205
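// Repeatedly probe the guest over SSH until the Windows directory shows
// up on C:\, giving up once the accumulated retry time reaches `tmo_max`.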
8206 fn wait_for_boot(&self) -> bool {
8207 let cmd = "dir /b c:\\ | find \"Windows\"";
8208 let tmo_max = 180;
8209 // The timeout increases as n*1 + n*2 + n*3 + ..., therefore the
8210 // initial interval must be small.
8211 let tmo_int = 2;
8212 let out = ssh_command_ip_with_auth(
8213 cmd,
8214 &self.auth,
8215 &self.guest.network.guest_ip,
8216 {
8217 let mut ret = 1;
8218 let mut tmo_acc = 0;
8219 loop {
8220 tmo_acc += tmo_int * ret;
8221 if tmo_acc >= tmo_max {
8222 break;
8223 }
8224 ret += 1;
8225 }
8226 ret
8227 },
8228 tmo_int,
8229 )
8230 .unwrap();
8231
8232 if "Windows" == out.trim() {
8233 return true;
8234 }
8235
8236 false
8237 }
8238 }
8239
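// Count the vCPU threads of the cloud-hypervisor process identified by `pid`.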
8240 fn vcpu_threads_count(pid: u32) -> u8 {
8241 // ps -T -p 12345 | grep vcpu | wc -l
8242 let out = Command::new("ps")
8243 .args(["-T", "-p", format!("{pid}").as_str()])
8244 .output()
8245 .expect("ps command failed")
8246 .stdout;
8247 String::from_utf8_lossy(&out).matches("vcpu").count() as u8
8248 }
8249
8250 fn netdev_ctrl_threads_count(pid: u32) -> u8 {
8251 // ps -T -p 12345 | grep "_net[0-9]*_ctrl" | wc -l
8252 let out = Command::new("ps")
8253 .args(["-T", "-p", format!("{pid}").as_str()])
8254 .output()
8255 .expect("ps command failed")
8256 .stdout;
8257 let mut n = 0;
8258 String::from_utf8_lossy(&out)
8259 .split_whitespace()
8260 .for_each(|s| n += (s.starts_with("_net") && s.ends_with("_ctrl")) as u8); // _net1_ctrl
8261 n
8262 }
8263
8264 fn disk_ctrl_threads_count(pid: u32) -> u8 {
8265 // ps -T -p 15782 | grep "_disk[0-9]*_q0" | wc -l
8266 let out = Command::new("ps")
8267 .args(["-T", "-p", format!("{pid}").as_str()])
8268 .output()
8269 .expect("ps command failed")
8270 .stdout;
8271 let mut n = 0;
8272 String::from_utf8_lossy(&out)
8273 .split_whitespace()
8274 .for_each(|s| n += (s.starts_with("_disk") && s.ends_with("_q0")) as u8); // e.g. _disk0_q0; only queue 0 is counted, so multiple queues of the same disk are not double-counted
8275 n
8276 }
8277
8278 #[test]
8279 fn test_windows_guest() {
8280 let windows_guest = WindowsGuest::new();
8281
8282 let mut child = GuestCommand::new(windows_guest.guest())
8283 .args(["--cpus", "boot=2,kvm_hyperv=on"])
8284 .args(["--memory", "size=4G"])
8285 .args(["--kernel", edk2_path().to_str().unwrap()])
8286 .args(["--serial", "tty"])
8287 .args(["--console", "off"])
8288 .default_disks()
8289 .default_net()
8290 .capture_output()
8291 .spawn()
8292 .unwrap();
8293
8294 let fd = child.stdout.as_ref().unwrap().as_raw_fd();
8295 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
8296 let fd = child.stderr.as_ref().unwrap().as_raw_fd();
8297 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
8298
8299 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE);
8300
8301 let mut child_dnsmasq = windows_guest.run_dnsmasq();
8302
8303 let r = std::panic::catch_unwind(|| {
8304 // Wait to make sure Windows boots up
8305 assert!(windows_guest.wait_for_boot());
8306
8307 windows_guest.shutdown();
8308 });
8309
8310 let _ = child.wait_timeout(std::time::Duration::from_secs(60));
8311 let _ = child.kill();
8312 let output = child.wait_with_output().unwrap();
8313
8314 let _ = child_dnsmasq.kill();
8315 let _ = child_dnsmasq.wait();
8316
8317 handle_child_output(r, &output);
8318 }
8319
8320 #[test]
8321 fn test_windows_guest_multiple_queues() {
8322 let windows_guest = WindowsGuest::new();
8323
8324 let mut ovmf_path = dirs::home_dir().unwrap();
8325 ovmf_path.push("workloads");
8326 ovmf_path.push(OVMF_NAME);
8327
8328 let mut child = GuestCommand::new(windows_guest.guest())
8329 .args(["--cpus", "boot=4,kvm_hyperv=on"])
8330 .args(["--memory", "size=4G"])
8331 .args(["--kernel", ovmf_path.to_str().unwrap()])
8332 .args(["--serial", "tty"])
8333 .args(["--console", "off"])
8334 .args([
8335 "--disk",
8336 format!(
8337 "path={},num_queues=4",
8338 windows_guest
8339 .guest()
8340 .disk_config
8341 .disk(DiskType::OperatingSystem)
8342 .unwrap()
8343 )
8344 .as_str(),
8345 ])
8346 .args([
8347 "--net",
8348 format!(
8349 "tap=,mac={},ip={},mask=255.255.255.0,num_queues=8",
8350 windows_guest.guest().network.guest_mac,
8351 windows_guest.guest().network.host_ip
8352 )
8353 .as_str(),
8354 ])
8355 .capture_output()
8356 .spawn()
8357 .unwrap();
8358
8359 let fd = child.stdout.as_ref().unwrap().as_raw_fd();
8360 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
8361 let fd = child.stderr.as_ref().unwrap().as_raw_fd();
8362 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
8363
8364 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE);
8365
8366 let mut child_dnsmasq = windows_guest.run_dnsmasq();
8367
8368 let r = std::panic::catch_unwind(|| {
8369 // Wait to make sure Windows boots up
8370 assert!(windows_guest.wait_for_boot());
8371
8372 windows_guest.shutdown();
8373 });
8374
8375 let _ = child.wait_timeout(std::time::Duration::from_secs(60));
8376 let _ = child.kill();
8377 let output = child.wait_with_output().unwrap();
8378
8379 let _ = child_dnsmasq.kill();
8380 let _ = child_dnsmasq.wait();
8381
8382 handle_child_output(r, &output);
8383 }
8384
8385 #[test]
8386 #[cfg(not(feature = "mshv"))]
8387 #[ignore = "See #4327"]
8388 fn test_windows_guest_snapshot_restore() {
8389 let windows_guest = WindowsGuest::new();
8390
8391 let mut ovmf_path = dirs::home_dir().unwrap();
8392 ovmf_path.push("workloads");
8393 ovmf_path.push(OVMF_NAME);
8394
8395 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
8396 let api_socket_source = format!("{}.1", temp_api_path(&tmp_dir));
8397
8398 let mut child = GuestCommand::new(windows_guest.guest())
8399 .args(["--api-socket", &api_socket_source])
8400 .args(["--cpus", "boot=2,kvm_hyperv=on"])
8401 .args(["--memory", "size=4G"])
8402 .args(["--kernel", ovmf_path.to_str().unwrap()])
8403 .args(["--serial", "tty"])
8404 .args(["--console", "off"])
8405 .default_disks()
8406 .default_net()
8407 .capture_output()
8408 .spawn()
8409 .unwrap();
8410
8411 let fd = child.stdout.as_ref().unwrap().as_raw_fd();
8412 let pipesize = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
8413 let fd = child.stderr.as_ref().unwrap().as_raw_fd();
8414 let pipesize1 = unsafe { libc::fcntl(fd, libc::F_SETPIPE_SZ, PIPE_SIZE) };
8415
8416 assert!(pipesize >= PIPE_SIZE && pipesize1 >= PIPE_SIZE);
8417
8418 let mut child_dnsmasq = windows_guest.run_dnsmasq();
8419
8420 // Wait to make sure Windows boots up
8421 assert!(windows_guest.wait_for_boot());
8422
8423 let snapshot_dir = temp_snapshot_dir_path(&tmp_dir);
8424
8425 // Pause the VM
8426 assert!(remote_command(&api_socket_source, "pause", None));
8427
8428 // Take a snapshot from the VM
8429 assert!(remote_command(
8430 &api_socket_source,
8431 "snapshot",
8432 Some(format!("file://{snapshot_dir}").as_str()),
8433 ));
8434
8435 // Wait to make sure the snapshot is completed
8436 thread::sleep(std::time::Duration::new(30, 0));
8437
8438 let _ = child.kill();
8439 child.wait().unwrap();
8440
8441 let api_socket_restored = format!("{}.2", temp_api_path(&tmp_dir));
8442
8443 // Restore the VM from the snapshot
8444 let mut child = GuestCommand::new(windows_guest.guest())
8445 .args(["--api-socket", &api_socket_restored])
8446 .args([
8447 "--restore",
8448 format!("source_url=file://{snapshot_dir}").as_str(),
8449 ])
8450 .capture_output()
8451 .spawn()
8452 .unwrap();
8453
8454 // Wait for the VM to be restored
8455 thread::sleep(std::time::Duration::new(20, 0));
8456
8457 let r = std::panic::catch_unwind(|| {
8458 // Resume the VM
8459 assert!(remote_command(&api_socket_restored, "resume", None));
8460
8461 windows_guest.shutdown();
8462 });
8463
8464 let _ = child.wait_timeout(std::time::Duration::from_secs(60));
8465 let _ = child.kill();
8466 let output = child.wait_with_output().unwrap();
8467
8468 let _ = child_dnsmasq.kill();
8469 let _ = child_dnsmasq.wait();
8470
8471 handle_child_output(r, &output);
8472 }
8473
8474 #[test]
8475 #[cfg(not(feature = "mshv"))]
8476 #[cfg(not(target_arch = "aarch64"))]
8477 fn test_windows_guest_cpu_hotplug() {
8478 let windows_guest = WindowsGuest::new();
8479
8480 let mut ovmf_path = dirs::home_dir().unwrap();
8481 ovmf_path.push("workloads");
8482 ovmf_path.push(OVMF_NAME);
8483
8484 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
8485 let api_socket = temp_api_path(&tmp_dir);
8486
8487 let mut child = GuestCommand::new(windows_guest.guest())
8488 .args(["--api-socket", &api_socket])
8489 .args(["--cpus", "boot=2,max=8,kvm_hyperv=on"])
8490 .args(["--memory", "size=4G"])
8491 .args(["--kernel", ovmf_path.to_str().unwrap()])
8492 .args(["--serial", "tty"])
8493 .args(["--console", "off"])
8494 .default_disks()
8495 .default_net()
8496 .capture_output()
8497 .spawn()
8498 .unwrap();
8499
8500 let mut child_dnsmasq = windows_guest.run_dnsmasq();
8501
8502 let r = std::panic::catch_unwind(|| {
8503 // Wait to make sure Windows boots up
8504 assert!(windows_guest.wait_for_boot());
8505
8506 let vcpu_num = 2;
8507 // Check the initial number of CPUs the guest sees
8508 assert_eq!(windows_guest.cpu_count(), vcpu_num);
8509 // Check the initial number of vcpu threads in the CH process
8510 assert_eq!(vcpu_threads_count(child.id()), vcpu_num);
8511
8512 let vcpu_num = 6;
8513 // Hotplug some CPUs
8514 resize_command(&api_socket, Some(vcpu_num), None, None, None);
8515 // Wait to make sure CPUs are added
8516 thread::sleep(std::time::Duration::new(10, 0));
8517 // Check the guest sees the correct number
8518 assert_eq!(windows_guest.cpu_count(), vcpu_num);
8519 // Check the CH process has the correct number of vcpu threads
8520 assert_eq!(vcpu_threads_count(child.id()), vcpu_num);
8521
8522 let vcpu_num = 4;
8523 // Remove some CPUs. Note that Windows doesn't support hot-remove.
8524 resize_command(&api_socket, Some(vcpu_num), None, None, None);
8525 // Wait to make sure CPUs are removed
8526 thread::sleep(std::time::Duration::new(10, 0));
8527 // Reboot to let Windows catch up
8528 windows_guest.reboot();
8529 // Wait to make sure Windows completely rebooted
8530 thread::sleep(std::time::Duration::new(60, 0));
8531 // Check the guest sees the correct number
8532 assert_eq!(windows_guest.cpu_count(), vcpu_num);
8533 // Check the CH process has the correct number of vcpu threads
8534 assert_eq!(vcpu_threads_count(child.id()), vcpu_num);
8535
8536 windows_guest.shutdown();
8537 });
8538
8539 let _ = child.wait_timeout(std::time::Duration::from_secs(60));
8540 let _ = child.kill();
8541 let output = child.wait_with_output().unwrap();
8542
8543 let _ = child_dnsmasq.kill();
8544 let _ = child_dnsmasq.wait();
8545
8546 handle_child_output(r, &output);
8547 }
8548
8549 #[test]
8550 #[cfg(not(feature = "mshv"))]
8551 #[cfg(not(target_arch = "aarch64"))]
8552 fn test_windows_guest_ram_hotplug() {
8553 let windows_guest = WindowsGuest::new();
8554
8555 let mut ovmf_path = dirs::home_dir().unwrap();
8556 ovmf_path.push("workloads");
8557 ovmf_path.push(OVMF_NAME);
8558
8559 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
8560 let api_socket = temp_api_path(&tmp_dir);
8561
8562 let mut child = GuestCommand::new(windows_guest.guest())
8563 .args(["--api-socket", &api_socket])
8564 .args(["--cpus", "boot=2,kvm_hyperv=on"])
8565 .args(["--memory", "size=2G,hotplug_size=5G"])
8566 .args(["--kernel", ovmf_path.to_str().unwrap()])
8567 .args(["--serial", "tty"])
8568 .args(["--console", "off"])
8569 .default_disks()
8570 .default_net()
8571 .capture_output()
8572 .spawn()
8573 .unwrap();
8574
8575 let mut child_dnsmasq = windows_guest.run_dnsmasq();
8576
8577 let r = std::panic::catch_unwind(|| {
8578 // Wait to make sure Windows boots up
8579 assert!(windows_guest.wait_for_boot());
8580
8581 let ram_size = 2 * 1024 * 1024 * 1024;
8582 // Check the initial amount of RAM the guest sees
8583 let current_ram_size = windows_guest.ram_size();
8584 // This size seems to be reserved by the system and thus the
8585 // reported amount differs by this constant value.
8586 let reserved_ram_size = ram_size - current_ram_size;
8587 // Verify that no more than a constant 4 MiB is wasted by the
8588 // reserved RAM.
8589 assert!(reserved_ram_size < 4 * 1024 * 1024);
8590
8591 let ram_size = 4 * 1024 * 1024 * 1024;
8592 // Hotplug some RAM
8593 resize_command(&api_socket, None, Some(ram_size), None, None);
8594 // Wait to make sure RAM has been added
8595 thread::sleep(std::time::Duration::new(10, 0));
8596 // Check the guest sees the correct number
8597 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size);
8598
8599 let ram_size = 3 * 1024 * 1024 * 1024;
8600 // Unplug some RAM. Note that hot-remove most likely won't work.
8601 resize_command(&api_socket, None, Some(ram_size), None, None);
8602 // Wait to give the RAM removal a chance to be processed
8603 thread::sleep(std::time::Duration::new(10, 0));
8604 // Reboot to let Windows catch up
8605 windows_guest.reboot();
8606 // Wait to make sure guest completely rebooted
8607 thread::sleep(std::time::Duration::new(60, 0));
8608 // Check the guest sees the correct number
8609 assert_eq!(windows_guest.ram_size(), ram_size - reserved_ram_size);
8610
8611 windows_guest.shutdown();
8612 });
8613
8614 let _ = child.wait_timeout(std::time::Duration::from_secs(60));
8615 let _ = child.kill();
8616 let output = child.wait_with_output().unwrap();
8617
8618 let _ = child_dnsmasq.kill();
8619 let _ = child_dnsmasq.wait();
8620
8621 handle_child_output(r, &output);
8622 }
8623
8624 #[test]
8625 #[cfg(not(feature = "mshv"))]
8626 fn test_windows_guest_netdev_hotplug() {
8627 let windows_guest = WindowsGuest::new();
8628
8629 let mut ovmf_path = dirs::home_dir().unwrap();
8630 ovmf_path.push("workloads");
8631 ovmf_path.push(OVMF_NAME);
8632
8633 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
8634 let api_socket = temp_api_path(&tmp_dir);
8635
8636 let mut child = GuestCommand::new(windows_guest.guest())
8637 .args(["--api-socket", &api_socket])
8638 .args(["--cpus", "boot=2,kvm_hyperv=on"])
8639 .args(["--memory", "size=4G"])
8640 .args(["--kernel", ovmf_path.to_str().unwrap()])
8641 .args(["--serial", "tty"])
8642 .args(["--console", "off"])
8643 .default_disks()
8644 .default_net()
8645 .capture_output()
8646 .spawn()
8647 .unwrap();
8648
8649 let mut child_dnsmasq = windows_guest.run_dnsmasq();
8650
8651 let r = std::panic::catch_unwind(|| {
8652 // Wait to make sure Windows boots up
8653 assert!(windows_guest.wait_for_boot());
8654
8655 // Initially present network device
8656 let netdev_num = 1;
8657 assert_eq!(windows_guest.netdev_count(), netdev_num);
8658 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);
8659
8660 // Hotplug network device
8661 let (cmd_success, cmd_output) = remote_command_w_output(
8662 &api_socket,
8663 "add-net",
8664 Some(windows_guest.guest().default_net_string().as_str()),
8665 );
8666 assert!(cmd_success);
8667 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_net2\""));
8668 thread::sleep(std::time::Duration::new(5, 0));
8669 // Verify the device is on the system
8670 let netdev_num = 2;
8671 assert_eq!(windows_guest.netdev_count(), netdev_num);
8672 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);
8673
8674 // Remove network device
8675 let cmd_success = remote_command(&api_socket, "remove-device", Some("_net2"));
8676 assert!(cmd_success);
8677 thread::sleep(std::time::Duration::new(5, 0));
8678 // Verify the device has been removed
8679 let netdev_num = 1;
8680 assert_eq!(windows_guest.netdev_count(), netdev_num);
8681 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);
8682
8683 windows_guest.shutdown();
8684 });
8685
8686 let _ = child.wait_timeout(std::time::Duration::from_secs(60));
8687 let _ = child.kill();
8688 let output = child.wait_with_output().unwrap();
8689
8690 let _ = child_dnsmasq.kill();
8691 let _ = child_dnsmasq.wait();
8692
8693 handle_child_output(r, &output);
8694 }
8695
8696 #[test]
8697 #[ignore = "See #6037"]
8698 #[cfg(not(feature = "mshv"))]
8699 #[cfg(not(target_arch = "aarch64"))]
8700 fn test_windows_guest_disk_hotplug() {
8701 let windows_guest = WindowsGuest::new();
8702
8703 let mut ovmf_path = dirs::home_dir().unwrap();
8704 ovmf_path.push("workloads");
8705 ovmf_path.push(OVMF_NAME);
8706
8707 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
8708 let api_socket = temp_api_path(&tmp_dir);
8709
8710 let mut child = GuestCommand::new(windows_guest.guest())
8711 .args(["--api-socket", &api_socket])
8712 .args(["--cpus", "boot=2,kvm_hyperv=on"])
8713 .args(["--memory", "size=4G"])
8714 .args(["--kernel", ovmf_path.to_str().unwrap()])
8715 .args(["--serial", "tty"])
8716 .args(["--console", "off"])
8717 .default_disks()
8718 .default_net()
8719 .capture_output()
8720 .spawn()
8721 .unwrap();
8722
8723 let mut child_dnsmasq = windows_guest.run_dnsmasq();
8724
8725 let disk = windows_guest.disk_new(WindowsGuest::FS_FAT, 100);
8726
8727 let r = std::panic::catch_unwind(|| {
8728 // Wait to make sure Windows boots up
8729 assert!(windows_guest.wait_for_boot());
8730
8731 // Initially present disk device
8732 let disk_num = 1;
8733 assert_eq!(windows_guest.disk_count(), disk_num);
8734 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
8735
8736 // Hotplug disk device
8737 let (cmd_success, cmd_output) = remote_command_w_output(
8738 &api_socket,
8739 "add-disk",
8740 Some(format!("path={disk},readonly=off").as_str()),
8741 );
8742 assert!(cmd_success);
8743 assert!(String::from_utf8_lossy(&cmd_output).contains("\"id\":\"_disk2\""));
8744 thread::sleep(std::time::Duration::new(5, 0));
8745 // Online disk device
8746 windows_guest.disks_set_rw();
8747 windows_guest.disks_online();
8748 // Verify the device is on the system
8749 let disk_num = 2;
8750 assert_eq!(windows_guest.disk_count(), disk_num);
8751 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
8752
8753 let data = "hello";
8754 let fname = "d:\\world";
8755 windows_guest.disk_file_put(fname, data);
8756
8757 // Unmount disk device
8758 let cmd_success = remote_command(&api_socket, "remove-device", Some("_disk2"));
8759 assert!(cmd_success);
8760 thread::sleep(std::time::Duration::new(5, 0));
8761 // Verify the device has been removed
8762 let disk_num = 1;
8763 assert_eq!(windows_guest.disk_count(), disk_num);
8764 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
8765
8766 // Remount and check the file exists with the expected contents
8767 let (cmd_success, _cmd_output) = remote_command_w_output(
8768 &api_socket,
8769 "add-disk",
8770 Some(format!("path={disk},readonly=off").as_str()),
8771 );
8772 assert!(cmd_success);
8773 thread::sleep(std::time::Duration::new(5, 0));
8774 let out = windows_guest.disk_file_read(fname);
8775 assert_eq!(data, out.trim());
8776
8777 // Intentionally no unmount, it'll happen at shutdown.
8778
8779 windows_guest.shutdown();
8780 });
8781
8782 let _ = child.wait_timeout(std::time::Duration::from_secs(60));
8783 let _ = child.kill();
8784 let output = child.wait_with_output().unwrap();
8785
8786 let _ = child_dnsmasq.kill();
8787 let _ = child_dnsmasq.wait();
8788
8789 handle_child_output(r, &output);
8790 }
8791
8792 #[test]
8793 #[ignore = "See #6037"]
8794 #[cfg(not(feature = "mshv"))]
8795 #[cfg(not(target_arch = "aarch64"))]
8796 fn test_windows_guest_disk_hotplug_multi() {
8797 let windows_guest = WindowsGuest::new();
8798
8799 let mut ovmf_path = dirs::home_dir().unwrap();
8800 ovmf_path.push("workloads");
8801 ovmf_path.push(OVMF_NAME);
8802
8803 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
8804 let api_socket = temp_api_path(&tmp_dir);
8805
8806 let mut child = GuestCommand::new(windows_guest.guest())
8807 .args(["--api-socket", &api_socket])
8808 .args(["--cpus", "boot=2,kvm_hyperv=on"])
8809 .args(["--memory", "size=2G"])
8810 .args(["--kernel", ovmf_path.to_str().unwrap()])
8811 .args(["--serial", "tty"])
8812 .args(["--console", "off"])
8813 .default_disks()
8814 .default_net()
8815 .capture_output()
8816 .spawn()
8817 .unwrap();
8818
8819 let mut child_dnsmasq = windows_guest.run_dnsmasq();
8820
8821 // Predefined data to be used at various test stages
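// Each entry is: [expected device id, host image path, guest file path,
// file contents].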
8822 let disk_test_data: [[String; 4]; 2] = [
8823 [
8824 "_disk2".to_string(),
8825 windows_guest.disk_new(WindowsGuest::FS_FAT, 123),
8826 "d:\\world".to_string(),
8827 "hello".to_string(),
8828 ],
8829 [
8830 "_disk3".to_string(),
8831 windows_guest.disk_new(WindowsGuest::FS_NTFS, 333),
8832 "e:\\hello".to_string(),
8833 "world".to_string(),
8834 ],
8835 ];
8836
8837 let r = std::panic::catch_unwind(|| {
8838 // Wait to make sure Windows boots up
8839 assert!(windows_guest.wait_for_boot());
8840
8841 // Initially present disk device
8842 let disk_num = 1;
8843 assert_eq!(windows_guest.disk_count(), disk_num);
8844 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
8845
8846 for it in &disk_test_data {
8847 let disk_id = it[0].as_str();
8848 let disk = it[1].as_str();
8849 // Hotplug disk device
8850 let (cmd_success, cmd_output) = remote_command_w_output(
8851 &api_socket,
8852 "add-disk",
8853 Some(format!("path={disk},readonly=off").as_str()),
8854 );
8855 assert!(cmd_success);
8856 assert!(String::from_utf8_lossy(&cmd_output)
8857 .contains(format!("\"id\":\"{disk_id}\"").as_str()));
8858 thread::sleep(std::time::Duration::new(5, 0));
8859 // Online disk devices
8860 windows_guest.disks_set_rw();
8861 windows_guest.disks_online();
8862 }
8863 // Verify the devices are on the system
8864 let disk_num = (disk_test_data.len() + 1) as u8;
8865 assert_eq!(windows_guest.disk_count(), disk_num);
8866 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
8867
8868 // Put test data
8869 for it in &disk_test_data {
8870 let fname = it[2].as_str();
8871 let data = it[3].as_str();
8872 windows_guest.disk_file_put(fname, data);
8873 }
8874
8875 // Unmount disk devices
8876 for it in &disk_test_data {
8877 let disk_id = it[0].as_str();
8878 let cmd_success = remote_command(&api_socket, "remove-device", Some(disk_id));
8879 assert!(cmd_success);
8880 thread::sleep(std::time::Duration::new(5, 0));
8881 }
8882
8883 // Verify the devices have been removed
8884 let disk_num = 1;
8885 assert_eq!(windows_guest.disk_count(), disk_num);
8886 assert_eq!(disk_ctrl_threads_count(child.id()), disk_num);
8887
8888 // Remount
8889 for it in &disk_test_data {
8890 let disk = it[1].as_str();
8891 let (cmd_success, _cmd_output) = remote_command_w_output(
8892 &api_socket,
8893 "add-disk",
8894 Some(format!("path={disk},readonly=off").as_str()),
8895 );
8896 assert!(cmd_success);
8897 thread::sleep(std::time::Duration::new(5, 0));
8898 }
8899
8900 // Check the files exist with the expected contents
8901 for it in &disk_test_data {
8902 let fname = it[2].as_str();
8903 let data = it[3].as_str();
8904 let out = windows_guest.disk_file_read(fname);
8905 assert_eq!(data, out.trim());
8906 }
8907
8908 // Intentionally no unmount, it'll happen at shutdown.
8909
8910 windows_guest.shutdown();
8911 });
8912
8913 let _ = child.wait_timeout(std::time::Duration::from_secs(60));
8914 let _ = child.kill();
8915 let output = child.wait_with_output().unwrap();
8916
8917 let _ = child_dnsmasq.kill();
8918 let _ = child_dnsmasq.wait();
8919
8920 handle_child_output(r, &output);
8921 }
8922
8923 #[test]
8924 #[cfg(not(feature = "mshv"))]
8925 #[cfg(not(target_arch = "aarch64"))]
8926 fn test_windows_guest_netdev_multi() {
8927 let windows_guest = WindowsGuest::new();
8928
8929 let mut ovmf_path = dirs::home_dir().unwrap();
8930 ovmf_path.push("workloads");
8931 ovmf_path.push(OVMF_NAME);
8932
8933 let tmp_dir = TempDir::new_with_prefix("/tmp/ch").unwrap();
8934 let api_socket = temp_api_path(&tmp_dir);
8935
8936 let mut child = GuestCommand::new(windows_guest.guest())
8937 .args(["--api-socket", &api_socket])
8938 .args(["--cpus", "boot=2,kvm_hyperv=on"])
8939 .args(["--memory", "size=4G"])
8940 .args(["--kernel", ovmf_path.to_str().unwrap()])
8941 .args(["--serial", "tty"])
8942 .args(["--console", "off"])
8943 .default_disks()
8944 // The multi net dev config is borrowed from test_multiple_network_interfaces
8945 .args([
8946 "--net",
8947 windows_guest.guest().default_net_string().as_str(),
8948 "tap=,mac=8a:6b:6f:5a:de:ac,ip=192.168.3.1,mask=255.255.255.0",
8949 "tap=mytap42,mac=fe:1f:9e:e1:60:f2,ip=192.168.4.1,mask=255.255.255.0",
8950 ])
8951 .capture_output()
8952 .spawn()
8953 .unwrap();
8954
8955 let mut child_dnsmasq = windows_guest.run_dnsmasq();
8956
8957 let r = std::panic::catch_unwind(|| {
8958 // Wait to make sure Windows boots up
8959 assert!(windows_guest.wait_for_boot());
8960
8961 let netdev_num = 3;
8962 assert_eq!(windows_guest.netdev_count(), netdev_num);
8963 assert_eq!(netdev_ctrl_threads_count(child.id()), netdev_num);
8964
8965 let tap_count = exec_host_command_output("ip link | grep -c mytap42");
8966 assert_eq!(String::from_utf8_lossy(&tap_count.stdout).trim(), "1");
8967
8968 windows_guest.shutdown();
8969 });
8970
8971 let _ = child.wait_timeout(std::time::Duration::from_secs(60));
8972 let _ = child.kill();
8973 let output = child.wait_with_output().unwrap();
8974
8975 let _ = child_dnsmasq.kill();
8976 let _ = child_dnsmasq.wait();
8977
8978 handle_child_output(r, &output);
8979 }
8980 }
8981
8982 #[cfg(target_arch = "x86_64")]
8983 mod sgx {
8984 use crate::*;
8985
8986 #[test]
8987 fn test_sgx() {
8988 let jammy_image = JAMMY_IMAGE_NAME.to_string();
8989 let jammy = UbuntuDiskConfig::new(jammy_image);
8990 let guest = Guest::new(Box::new(jammy));
8991
8992 let mut child = GuestCommand::new(&guest)
8993 .args(["--cpus", "boot=1"])
8994 .args(["--memory", "size=512M"])
8995 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
8996 .default_disks()
8997 .default_net()
8998 .args(["--sgx-epc", "id=epc0,size=64M"])
8999 .capture_output()
9000 .spawn()
9001 .unwrap();
9002
9003 let r = std::panic::catch_unwind(|| {
9004 guest.wait_vm_boot(None).unwrap();
9005
9006 // Check if SGX is correctly detected in the guest.
9007 guest.check_sgx_support().unwrap();
9008
9009 // Validate the SGX EPC section is 64MiB.
9010 assert_eq!(
9011 guest
9012 .ssh_command("cpuid -l 0x12 -s 2 | grep 'section size' | cut -d '=' -f 2")
9013 .unwrap()
9014 .trim(),
9015 "0x0000000004000000"
9016 );
9017 });
9018
9019 let _ = child.kill();
9020 let output = child.wait_with_output().unwrap();
9021
9022 handle_child_output(r, &output);
9023 }
9024 }
9025
9026 #[cfg(target_arch = "x86_64")]
9027 mod vfio {
9028 use crate::*;
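// sysfs path of the NVIDIA card used for VFIO passthrough on the test host.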
9029 const NVIDIA_VFIO_DEVICE: &str = "/sys/bus/pci/devices/0002:00:01.0";
9030
9031 fn test_nvidia_card_memory_hotplug(hotplug_method: &str) {
9032 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string());
9033 let guest = Guest::new(Box::new(jammy));
9034 let api_socket = temp_api_path(&guest.tmp_dir);
9035
9036 let mut child = GuestCommand::new(&guest)
9037 .args(["--cpus", "boot=4"])
9038 .args([
9039 "--memory",
9040 format!("size=4G,hotplug_size=4G,hotplug_method={hotplug_method}").as_str(),
9041 ])
9042 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
9043 .args(["--device", format!("path={NVIDIA_VFIO_DEVICE}").as_str()])
9044 .args(["--api-socket", &api_socket])
9045 .default_disks()
9046 .default_net()
9047 .capture_output()
9048 .spawn()
9049 .unwrap();
9050
9051 let r = std::panic::catch_unwind(|| {
9052 guest.wait_vm_boot(None).unwrap();
9053
9054 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
9055
9056 guest.enable_memory_hotplug();
9057
9058 // Add RAM to the VM
9059 let desired_ram = 6 << 30;
9060 resize_command(&api_socket, None, Some(desired_ram), None, None);
9061 thread::sleep(std::time::Duration::new(30, 0));
9062 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
9063
9064 // Check the VFIO device works when RAM is increased to 6GiB
9065 guest.check_nvidia_gpu();
9066 });
9067
9068 let _ = child.kill();
9069 let output = child.wait_with_output().unwrap();
9070
9071 handle_child_output(r, &output);
9072 }
9073
9074 #[test]
9075 fn test_nvidia_card_memory_hotplug_acpi() {
9076 test_nvidia_card_memory_hotplug("acpi")
9077 }
9078
9079 #[test]
9080 fn test_nvidia_card_memory_hotplug_virtio_mem() {
9081 test_nvidia_card_memory_hotplug("virtio-mem")
9082 }
9083
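// Hotplug the NVIDIA VFIO device into a running guest through the 'add-device'
// API and verify the GPU is functional afterwards.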
9084 #[test]
9085 fn test_nvidia_card_pci_hotplug() {
9086 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string());
9087 let guest = Guest::new(Box::new(jammy));
9088 let api_socket = temp_api_path(&guest.tmp_dir);
9089
9090 let mut child = GuestCommand::new(&guest)
9091 .args(["--cpus", "boot=4"])
9092 .args(["--memory", "size=4G"])
9093 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
9094 .args(["--api-socket", &api_socket])
9095 .default_disks()
9096 .default_net()
9097 .capture_output()
9098 .spawn()
9099 .unwrap();
9100
9101 let r = std::panic::catch_unwind(|| {
9102 guest.wait_vm_boot(None).unwrap();
9103
9104 // Hotplug the card to the VM
9105 let (cmd_success, cmd_output) = remote_command_w_output(
9106 &api_socket,
9107 "add-device",
9108 Some(format!("id=vfio0,path={NVIDIA_VFIO_DEVICE}").as_str()),
9109 );
9110 assert!(cmd_success);
9111 assert!(String::from_utf8_lossy(&cmd_output)
9112 .contains("{\"id\":\"vfio0\",\"bdf\":\"0000:00:06.0\"}"));
9113
9114 thread::sleep(std::time::Duration::new(10, 0));
9115
9116 // Check the VFIO device works after hotplug
9117 guest.check_nvidia_gpu();
9118 });
9119
9120 let _ = child.kill();
9121 let output = child.wait_with_output().unwrap();
9122
9123 handle_child_output(r, &output);
9124 }
9125
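// Boot a guest with the NVIDIA VFIO device attached (with iommu=on) and verify
// the GPU keeps working across a guest reboot.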
9126 #[test]
9127 fn test_nvidia_card_reboot() {
9128 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string());
9129 let guest = Guest::new(Box::new(jammy));
9130 let api_socket = temp_api_path(&guest.tmp_dir);
9131
9132 let mut child = GuestCommand::new(&guest)
9133 .args(["--cpus", "boot=4"])
9134 .args(["--memory", "size=4G"])
9135 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
9136 .args([
9137 "--device",
9138 format!("path={NVIDIA_VFIO_DEVICE},iommu=on").as_str(),
9139 ])
9140 .args(["--api-socket", &api_socket])
9141 .default_disks()
9142 .default_net()
9143 .capture_output()
9144 .spawn()
9145 .unwrap();
9146
9147 let r = std::panic::catch_unwind(|| {
9148 guest.wait_vm_boot(None).unwrap();
9149
9150 // Check the VFIO device works after boot
9151 guest.check_nvidia_gpu();
9152
9153 guest.reboot_linux(0, None);
9154
9155 // Check the VFIO device works after reboot
9156 guest.check_nvidia_gpu();
9157 });
9158
9159 let _ = child.kill();
9160 let output = child.wait_with_output().unwrap();
9161
9162 handle_child_output(r, &output);
9163 }
9164
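// Boot with a dedicated IOMMU segment and a 42-bit IOMMU address width, then
// check the guest kernel reports the expected input address width in dmesg.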
9165 #[test]
9166 fn test_nvidia_card_iommu_address_width() {
9167 let jammy = UbuntuDiskConfig::new(JAMMY_VFIO_IMAGE_NAME.to_string());
9168 let guest = Guest::new(Box::new(jammy));
9169 let api_socket = temp_api_path(&guest.tmp_dir);
9170
9171 let mut child = GuestCommand::new(&guest)
9172 .args(["--cpus", "boot=4"])
9173 .args(["--memory", "size=4G"])
9174 .args(["--kernel", fw_path(FwType::RustHypervisorFirmware).as_str()])
9175 .args(["--device", format!("path={NVIDIA_VFIO_DEVICE}").as_str()])
9176 .args([
9177 "--platform",
9178 "num_pci_segments=2,iommu_segments=1,iommu_address_width=42",
9179 ])
9180 .args(["--api-socket", &api_socket])
9181 .default_disks()
9182 .default_net()
9183 .capture_output()
9184 .spawn()
9185 .unwrap();
9186
9187 let r = std::panic::catch_unwind(|| {
9188 guest.wait_vm_boot(None).unwrap();
9189
9190 assert!(guest
9191 .ssh_command("sudo dmesg")
9192 .unwrap()
9193 .contains("input address: 42 bits"));
9194 });
9195
9196 let _ = child.kill();
9197 let output = child.wait_with_output().unwrap();
9198
9199 handle_child_output(r, &output);
9200 }
9201 }
9202
9203 mod live_migration {
9204 use crate::*;
9205
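// Drive a live migration over a UNIX socket with 'ch-remote': start
// 'receive-migration' on the destination, then 'send-migration' (optionally
// with '--local') on the source. Returns true only if both commands complete
// successfully within their timeouts; otherwise their output is dumped.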
9206 fn start_live_migration(
9207 migration_socket: &str,
9208 src_api_socket: &str,
9209 dest_api_socket: &str,
9210 local: bool,
9211 ) -> bool {
9212 // Start to receive migration from the destination VM
9213 let mut receive_migration = Command::new(clh_command("ch-remote"))
9214 .args([
9215 &format!("--api-socket={dest_api_socket}"),
9216 "receive-migration",
9217 &format! {"unix:{migration_socket}"},
9218 ])
9219 .stderr(Stdio::piped())
9220 .stdout(Stdio::piped())
9221 .spawn()
9222 .unwrap();
9223 // Give it '1s' to make sure the 'migration_socket' file is properly created
9224 thread::sleep(std::time::Duration::new(1, 0));
9225 // Start to send migration from the source VM
9226
9227 let mut args = [
9228 format!("--api-socket={}", &src_api_socket),
9229 "send-migration".to_string(),
9230 format! {"unix:{migration_socket}"},
9231 ]
9232 .to_vec();
9233
9234 if local {
9235 args.insert(2, "--local".to_string());
9236 }
9237
9238 let mut send_migration = Command::new(clh_command("ch-remote"))
9239 .args(&args)
9240 .stderr(Stdio::piped())
9241 .stdout(Stdio::piped())
9242 .spawn()
9243 .unwrap();
9244
9245 // The 'send-migration' command should be executed successfully within the given timeout
9246 let send_success = if let Some(status) = send_migration
9247 .wait_timeout(std::time::Duration::from_secs(30))
9248 .unwrap()
9249 {
9250 status.success()
9251 } else {
9252 false
9253 };
9254
9255 if !send_success {
9256 let _ = send_migration.kill();
9257 let output = send_migration.wait_with_output().unwrap();
9258 eprintln!(
9259 "\n\n==== Start 'send_migration' output ==== \
9260 \n\n---stdout---\n{}\n\n---stderr---\n{} \
9261 \n\n==== End 'send_migration' output ====\n\n",
9262 String::from_utf8_lossy(&output.stdout),
9263 String::from_utf8_lossy(&output.stderr)
9264 );
9265 }
9266
9267 // The 'receive-migration' command should be executed successfully within the given timeout
9268 let receive_success = if let Some(status) = receive_migration
9269 .wait_timeout(std::time::Duration::from_secs(30))
9270 .unwrap()
9271 {
9272 status.success()
9273 } else {
9274 false
9275 };
9276
9277 if !receive_success {
9278 let _ = receive_migration.kill();
9279 let output = receive_migration.wait_with_output().unwrap();
9280 eprintln!(
9281 "\n\n==== Start 'receive_migration' output ==== \
9282 \n\n---stdout---\n{}\n\n---stderr---\n{} \
9283 \n\n==== End 'receive_migration' output ====\n\n",
9284 String::from_utf8_lossy(&output.stdout),
9285 String::from_utf8_lossy(&output.stderr)
9286 );
9287 }
9288
9289 send_success && receive_success
9290 }
9291
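// Dump the captured stdout/stderr of the source, destination and (optionally)
// OVS VM processes, clean up the ovs-dpdk setup if needed, then panic with the
// provided message.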
9292 fn print_and_panic(src_vm: Child, dest_vm: Child, ovs_vm: Option<Child>, message: &str) -> ! {
9293 let mut src_vm = src_vm;
9294 let mut dest_vm = dest_vm;
9295
9296 let _ = src_vm.kill();
9297 let src_output = src_vm.wait_with_output().unwrap();
9298 eprintln!(
9299 "\n\n==== Start 'source_vm' stdout ====\n\n{}\n\n==== End 'source_vm' stdout ====",
9300 String::from_utf8_lossy(&src_output.stdout)
9301 );
9302 eprintln!(
9303 "\n\n==== Start 'source_vm' stderr ====\n\n{}\n\n==== End 'source_vm' stderr ====",
9304 String::from_utf8_lossy(&src_output.stderr)
9305 );
9306 let _ = dest_vm.kill();
9307 let dest_output = dest_vm.wait_with_output().unwrap();
9308 eprintln!(
9309 "\n\n==== Start 'destination_vm' stdout ====\n\n{}\n\n==== End 'destination_vm' stdout ====",
9310 String::from_utf8_lossy(&dest_output.stdout)
9311 );
9312 eprintln!(
9313 "\n\n==== Start 'destination_vm' stderr ====\n\n{}\n\n==== End 'destination_vm' stderr ====",
9314 String::from_utf8_lossy(&dest_output.stderr)
9315 );
9316
9317 if let Some(ovs_vm) = ovs_vm {
9318 let mut ovs_vm = ovs_vm;
9319 let _ = ovs_vm.kill();
9320 let ovs_output = ovs_vm.wait_with_output().unwrap();
9321 eprintln!(
9322 "\n\n==== Start 'ovs_vm' stdout ====\n\n{}\n\n==== End 'ovs_vm' stdout ====",
9323 String::from_utf8_lossy(&ovs_output.stdout)
9324 );
9325 eprintln!(
9326 "\n\n==== Start 'ovs_vm' stderr ====\n\n{}\n\n==== End 'ovs_vm' stderr ====",
9327 String::from_utf8_lossy(&ovs_output.stderr)
9328 );
9329
9330 cleanup_ovs_dpdk();
9331 }
9332
9333 panic!("Test failed: {message}")
9334 }
9335
9336 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the
9337 // same host. It ensures the following behaviors:
9338 // 1. The source VM is up and functional (including that various virtio devices work properly);
9339 // 2. The 'send-migration' and 'receive-migration' commands finish successfully;
9340 // 3. The source VM terminates gracefully after live migration;
9341 // 4. The destination VM is functional (including that various virtio devices work properly) after
9342 // live migration;
9343 // Note: This test does not use vsock as we can't create two identical vsock on the same host.
9344 fn _test_live_migration(upgrade_test: bool, local: bool) {
9345 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
9346 let guest = Guest::new(Box::new(focal));
9347 let kernel_path = direct_kernel_boot_path();
9348 let console_text = String::from("On a branch floating down river a cricket, singing.");
9349 let net_id = "net123";
9350 let net_params = format!(
9351 "id={},tap=,mac={},ip={},mask=255.255.255.0",
9352 net_id, guest.network.guest_mac, guest.network.host_ip
9353 );
9354
9355 let memory_param: &[&str] = if local {
9356 &["--memory", "size=4G,shared=on"]
9357 } else {
9358 &["--memory", "size=4G"]
9359 };
9360
9361 let boot_vcpus = 2;
9362 let max_vcpus = 4;
9363
9364 let pmem_temp_file = TempFile::new().unwrap();
9365 pmem_temp_file.as_file().set_len(128 << 20).unwrap();
9366 std::process::Command::new("mkfs.ext4")
9367 .arg(pmem_temp_file.as_path())
9368 .output()
9369 .expect("Expect creating disk image to succeed");
9370 let pmem_path = String::from("/dev/pmem0");
9371
9372 // Start the source VM
9373 let src_vm_path = if !upgrade_test {
9374 clh_command("cloud-hypervisor")
9375 } else {
9376 cloud_hypervisor_release_path()
9377 };
9378 let src_api_socket = temp_api_path(&guest.tmp_dir);
9379 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
9380 src_vm_cmd
9381 .args([
9382 "--cpus",
9383 format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
9384 ])
9385 .args(memory_param)
9386 .args(["--kernel", kernel_path.to_str().unwrap()])
9387 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
9388 .default_disks()
9389 .args(["--net", net_params.as_str()])
9390 .args(["--api-socket", &src_api_socket])
9391 .args([
9392 "--pmem",
9393 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
9394 ]);
9395 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();
9396
9397 // Start the destination VM
9398 let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
9399 dest_api_socket.push_str(".dest");
9400 let mut dest_child = GuestCommand::new(&guest)
9401 .args(["--api-socket", &dest_api_socket])
9402 .capture_output()
9403 .spawn()
9404 .unwrap();
9405
9406 let r = std::panic::catch_unwind(|| {
9407 guest.wait_vm_boot(None).unwrap();
9408
9409 // Make sure the source VM is functional
9410 // Check the number of vCPUs
9411 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
9412
9413 // Check the guest RAM
9414 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
9415
9416 // Check the guest virtio-devices, e.g. block, rng, console, and net
9417 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
9418
9419 // x86_64: Following what's done in the `test_snapshot_restore`, we need
9420 // to make sure that removing and adding back the virtio-net device does
9421 // not break the live-migration support for virtio-pci.
9422 #[cfg(target_arch = "x86_64")]
9423 {
9424 assert!(remote_command(
9425 &src_api_socket,
9426 "remove-device",
9427 Some(net_id),
9428 ));
9429 thread::sleep(std::time::Duration::new(10, 0));
9430
9431 // Plug the virtio-net device again
9432 assert!(remote_command(
9433 &src_api_socket,
9434 "add-net",
9435 Some(net_params.as_str()),
9436 ));
9437 thread::sleep(std::time::Duration::new(10, 0));
9438 }
9439
9440 // Start the live-migration
9441 let migration_socket = String::from(
9442 guest
9443 .tmp_dir
9444 .as_path()
9445 .join("live-migration.sock")
9446 .to_str()
9447 .unwrap(),
9448 );
9449
9450 assert!(
9451 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
9452 "Unsuccessful command: 'send-migration' or 'receive-migration'."
9453 );
9454 });
9455
9456 // Check and report any errors that occurred during the live-migration
9457 if r.is_err() {
9458 print_and_panic(
9459 src_child,
9460 dest_child,
9461 None,
9462 "Error occurred during live-migration",
9463 );
9464 }
9465
9466 // Check the source VM has terminated successfully (give it '3s' to settle)
9467 thread::sleep(std::time::Duration::new(3, 0));
9468 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) {
9469 print_and_panic(
9470 src_child,
9471 dest_child,
9472 None,
9473 "source VM was not terminated successfully.",
9474 );
9475 };
9476
9477 // Post live-migration check to make sure the destination VM is functional
9478 let r = std::panic::catch_unwind(|| {
9479 // Perform same checks to validate VM has been properly migrated
9480 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
9481 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
9482
9483 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
9484 });
9485
9486 // Clean-up the destination VM and make sure it terminated correctly
9487 let _ = dest_child.kill();
9488 let dest_output = dest_child.wait_with_output().unwrap();
9489 handle_child_output(r, &dest_output);
9490
9491 // Check the destination VM has the expected 'console_text' from its output
9492 let r = std::panic::catch_unwind(|| {
9493 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
9494 });
9495 handle_child_output(r, &dest_output);
9496 }
9497
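// Variant of '_test_live_migration' that also configures virtio-mem and
// virtio-balloon: guest RAM is resized and ballooned on the source before the
// migration, and the balloon/virtio-mem state is verified and resized again on
// the destination.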
9498 fn _test_live_migration_balloon(upgrade_test: bool, local: bool) {
9499 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
9500 let guest = Guest::new(Box::new(focal));
9501 let kernel_path = direct_kernel_boot_path();
9502 let console_text = String::from("On a branch floating down river a cricket, singing.");
9503 let net_id = "net123";
9504 let net_params = format!(
9505 "id={},tap=,mac={},ip={},mask=255.255.255.0",
9506 net_id, guest.network.guest_mac, guest.network.host_ip
9507 );
9508
9509 let memory_param: &[&str] = if local {
9510 &[
9511 "--memory",
9512 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G,shared=on",
9513 "--balloon",
9514 "size=0",
9515 ]
9516 } else {
9517 &[
9518 "--memory",
9519 "size=4G,hotplug_method=virtio-mem,hotplug_size=8G",
9520 "--balloon",
9521 "size=0",
9522 ]
9523 };
9524
9525 let boot_vcpus = 2;
9526 let max_vcpus = 4;
9527
9528 let pmem_temp_file = TempFile::new().unwrap();
9529 pmem_temp_file.as_file().set_len(128 << 20).unwrap();
9530 std::process::Command::new("mkfs.ext4")
9531 .arg(pmem_temp_file.as_path())
9532 .output()
9533 .expect("Expect creating disk image to succeed");
9534 let pmem_path = String::from("/dev/pmem0");
9535
9536 // Start the source VM
9537 let src_vm_path = if !upgrade_test {
9538 clh_command("cloud-hypervisor")
9539 } else {
9540 cloud_hypervisor_release_path()
9541 };
9542 let src_api_socket = temp_api_path(&guest.tmp_dir);
9543 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
9544 src_vm_cmd
9545 .args([
9546 "--cpus",
9547 format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
9548 ])
9549 .args(memory_param)
9550 .args(["--kernel", kernel_path.to_str().unwrap()])
9551 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
9552 .default_disks()
9553 .args(["--net", net_params.as_str()])
9554 .args(["--api-socket", &src_api_socket])
9555 .args([
9556 "--pmem",
9557 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
9558 ]);
9559 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();
9560
9561 // Start the destination VM
9562 let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
9563 dest_api_socket.push_str(".dest");
9564 let mut dest_child = GuestCommand::new(&guest)
9565 .args(["--api-socket", &dest_api_socket])
9566 .capture_output()
9567 .spawn()
9568 .unwrap();
9569
9570 let r = std::panic::catch_unwind(|| {
9571 guest.wait_vm_boot(None).unwrap();
9572
9573 // Make sure the source VM is functional
9574 // Check the number of vCPUs
9575 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
9576
9577 // Check the guest RAM
9578 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
9579 // Increase the guest RAM
9580 resize_command(&src_api_socket, None, Some(6 << 30), None, None);
9581 thread::sleep(std::time::Duration::new(5, 0));
9582 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
9583 // Use balloon to remove RAM from the VM
9584 resize_command(&src_api_socket, None, None, Some(1 << 30), None);
9585 thread::sleep(std::time::Duration::new(5, 0));
9586 let total_memory = guest.get_total_memory().unwrap_or_default();
9587 assert!(total_memory > 4_800_000);
9588 assert!(total_memory < 5_760_000);
9589
9590 // Check the guest virtio-devices, e.g. block, rng, console, and net
9591 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
9592
9593 // x86_64: Following what's done in the `test_snapshot_restore`, we need
9594 // to make sure that removing and adding back the virtio-net device does
9595 // not break the live-migration support for virtio-pci.
9596 #[cfg(target_arch = "x86_64")]
9597 {
9598 assert!(remote_command(
9599 &src_api_socket,
9600 "remove-device",
9601 Some(net_id),
9602 ));
9603 thread::sleep(std::time::Duration::new(10, 0));
9604
9605 // Plug the virtio-net device again
9606 assert!(remote_command(
9607 &src_api_socket,
9608 "add-net",
9609 Some(net_params.as_str()),
9610 ));
9611 thread::sleep(std::time::Duration::new(10, 0));
9612 }
9613
9614 // Start the live-migration
9615 let migration_socket = String::from(
9616 guest
9617 .tmp_dir
9618 .as_path()
9619 .join("live-migration.sock")
9620 .to_str()
9621 .unwrap(),
9622 );
9623
9624 assert!(
9625 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
9626 "Unsuccessful command: 'send-migration' or 'receive-migration'."
9627 );
9628 });
9629
9630 // Check and report any errors that occurred during the live-migration
9631 if r.is_err() {
9632 print_and_panic(
9633 src_child,
9634 dest_child,
9635 None,
9636 "Error occurred during live-migration",
9637 );
9638 }
9639
9640 // Check the source VM has terminated successfully (give it '3s' to settle)
9641 thread::sleep(std::time::Duration::new(3, 0));
9642 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) {
9643 print_and_panic(
9644 src_child,
9645 dest_child,
9646 None,
9647 "source VM was not terminated successfully.",
9648 );
9649 };
9650
9651 // Post live-migration check to make sure the destination VM is functional
9652 let r = std::panic::catch_unwind(|| {
9653 // Perform same checks to validate VM has been properly migrated
9654 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
9655 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
9656
9657 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
9658
9659 // Perform checks on guest RAM using balloon
9660 let total_memory = guest.get_total_memory().unwrap_or_default();
9661 assert!(total_memory > 4_800_000);
9662 assert!(total_memory < 5_760_000);
9663 // Deflate balloon to restore entire RAM to the VM
9664 resize_command(&dest_api_socket, None, None, Some(0), None);
9665 thread::sleep(std::time::Duration::new(5, 0));
9666 assert!(guest.get_total_memory().unwrap_or_default() > 5_760_000);
9667 // Decrease guest RAM with virtio-mem
9668 resize_command(&dest_api_socket, None, Some(5 << 30), None, None);
9669 thread::sleep(std::time::Duration::new(5, 0));
9670 let total_memory = guest.get_total_memory().unwrap_or_default();
9671 assert!(total_memory > 4_800_000);
9672 assert!(total_memory < 5_760_000);
9673 });
9674
9675 // Clean-up the destination VM and make sure it terminated correctly
9676 let _ = dest_child.kill();
9677 let dest_output = dest_child.wait_with_output().unwrap();
9678 handle_child_output(r, &dest_output);
9679
9680 // Check the destination VM has the expected 'console_text' from its output
9681 let r = std::panic::catch_unwind(|| {
9682 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
9683 });
9684 handle_child_output(r, &dest_output);
9685 }
9686
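// Variant of '_test_live_migration' with multiple NUMA nodes backed by
// virtio-mem memory zones: the NUMA topology is checked on the source, the
// zones are resized, the VM is migrated, and the topology plus further
// resizing are validated on the destination.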
9687 fn _test_live_migration_numa(upgrade_test: bool, local: bool) {
9688 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
9689 let guest = Guest::new(Box::new(focal));
9690 let kernel_path = direct_kernel_boot_path();
9691 let console_text = String::from("On a branch floating down river a cricket, singing.");
9692 let net_id = "net123";
9693 let net_params = format!(
9694 "id={},tap=,mac={},ip={},mask=255.255.255.0",
9695 net_id, guest.network.guest_mac, guest.network.host_ip
9696 );
9697
9698 let memory_param: &[&str] = if local {
9699 &[
9700 "--memory",
9701 "size=0,hotplug_method=virtio-mem,shared=on",
9702 "--memory-zone",
9703 "id=mem0,size=1G,hotplug_size=4G,shared=on",
9704 "id=mem1,size=1G,hotplug_size=4G,shared=on",
9705 "id=mem2,size=2G,hotplug_size=4G,shared=on",
9706 "--numa",
9707 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
9708 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
9709 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
9710 ]
9711 } else {
9712 &[
9713 "--memory",
9714 "size=0,hotplug_method=virtio-mem",
9715 "--memory-zone",
9716 "id=mem0,size=1G,hotplug_size=4G",
9717 "id=mem1,size=1G,hotplug_size=4G",
9718 "id=mem2,size=2G,hotplug_size=4G",
9719 "--numa",
9720 "guest_numa_id=0,cpus=[0-2,9],distances=[1@15,2@20],memory_zones=mem0",
9721 "guest_numa_id=1,cpus=[3-4,6-8],distances=[0@20,2@25],memory_zones=mem1",
9722 "guest_numa_id=2,cpus=[5,10-11],distances=[0@25,1@30],memory_zones=mem2",
9723 ]
9724 };
9725
9726 let boot_vcpus = 6;
9727 let max_vcpus = 12;
9728
9729 let pmem_temp_file = TempFile::new().unwrap();
9730 pmem_temp_file.as_file().set_len(128 << 20).unwrap();
9731 std::process::Command::new("mkfs.ext4")
9732 .arg(pmem_temp_file.as_path())
9733 .output()
9734 .expect("Expect creating disk image to succeed");
9735 let pmem_path = String::from("/dev/pmem0");
9736
9737 // Start the source VM
9738 let src_vm_path = if !upgrade_test {
9739 clh_command("cloud-hypervisor")
9740 } else {
9741 cloud_hypervisor_release_path()
9742 };
9743 let src_api_socket = temp_api_path(&guest.tmp_dir);
9744 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
9745 src_vm_cmd
9746 .args([
9747 "--cpus",
9748 format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
9749 ])
9750 .args(memory_param)
9751 .args(["--kernel", kernel_path.to_str().unwrap()])
9752 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
9753 .default_disks()
9754 .args(["--net", net_params.as_str()])
9755 .args(["--api-socket", &src_api_socket])
9756 .args([
9757 "--pmem",
9758 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
9759 ]);
9760 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();
9761
9762 // Start the destination VM
9763 let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
9764 dest_api_socket.push_str(".dest");
9765 let mut dest_child = GuestCommand::new(&guest)
9766 .args(["--api-socket", &dest_api_socket])
9767 .capture_output()
9768 .spawn()
9769 .unwrap();
9770
9771 let r = std::panic::catch_unwind(|| {
9772 guest.wait_vm_boot(None).unwrap();
9773
9774 // Make sure the source VM is functional
9775 // Check the number of vCPUs
9776 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
9777
9778 // Check the guest RAM
9779 assert!(guest.get_total_memory().unwrap_or_default() > 2_880_000);
9780
9781 // Check the guest virtio-devices, e.g. block, rng, console, and net
9782 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
9783
9784 // Check the NUMA parameters are applied correctly and resize
9785 // each zone to test the case where we migrate a VM with the
9786 // virtio-mem regions being used.
9787 {
9788 guest.check_numa_common(
9789 Some(&[960_000, 960_000, 1_920_000]),
9790 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
9791 Some(&["10 15 20", "20 10 25", "25 30 10"]),
9792 );
9793
9794 // AArch64 currently does not support hotplug, and therefore we only
9795 // test hotplug-related function on x86_64 here.
9796 #[cfg(target_arch = "x86_64")]
9797 {
9798 guest.enable_memory_hotplug();
9799
9800 // Resize every memory zone and check each associated NUMA node
9801 // has been assigned the right amount of memory.
9802 resize_zone_command(&src_api_socket, "mem0", "2G");
9803 resize_zone_command(&src_api_socket, "mem1", "2G");
9804 resize_zone_command(&src_api_socket, "mem2", "3G");
9805 thread::sleep(std::time::Duration::new(5, 0));
9806
9807 guest.check_numa_common(Some(&[1_920_000, 1_920_000, 1_920_000]), None, None);
9808 }
9809 }
9810
9811 // x86_64: Following what's done in the `test_snapshot_restore`, we need
9812 // to make sure that removing and adding back the virtio-net device does
9813 // not break the live-migration support for virtio-pci.
9814 #[cfg(target_arch = "x86_64")]
9815 {
9816 assert!(remote_command(
9817 &src_api_socket,
9818 "remove-device",
9819 Some(net_id),
9820 ));
9821 thread::sleep(std::time::Duration::new(10, 0));
9822
9823 // Plug the virtio-net device again
9824 assert!(remote_command(
9825 &src_api_socket,
9826 "add-net",
9827 Some(net_params.as_str()),
9828 ));
9829 thread::sleep(std::time::Duration::new(10, 0));
9830 }
9831
9832 // Start the live-migration
9833 let migration_socket = String::from(
9834 guest
9835 .tmp_dir
9836 .as_path()
9837 .join("live-migration.sock")
9838 .to_str()
9839 .unwrap(),
9840 );
9841
9842 assert!(
9843 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
9844 "Unsuccessful command: 'send-migration' or 'receive-migration'."
9845 );
9846 });
9847
9848 // Check and report any errors that occurred during the live-migration
9849 if r.is_err() {
9850 print_and_panic(
9851 src_child,
9852 dest_child,
9853 None,
9854 "Error occurred during live-migration",
9855 );
9856 }
9857
9858 // Check the source VM has terminated successfully (give it '3s' to settle)
9859 thread::sleep(std::time::Duration::new(3, 0));
9860 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) {
9861 print_and_panic(
9862 src_child,
9863 dest_child,
9864 None,
9865 "source VM was not terminated successfully.",
9866 );
9867 };
9868
9869 // Post live-migration check to make sure the destination VM is functional
9870 let r = std::panic::catch_unwind(|| {
9871 // Perform same checks to validate VM has been properly migrated
9872 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
9873 #[cfg(target_arch = "x86_64")]
9874 assert!(guest.get_total_memory().unwrap_or_default() > 6_720_000);
9875 #[cfg(target_arch = "aarch64")]
9876 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
9877
9878 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
9879
9880 // Perform NUMA related checks
9881 {
9882 #[cfg(target_arch = "aarch64")]
9883 {
9884 guest.check_numa_common(
9885 Some(&[960_000, 960_000, 1_920_000]),
9886 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
9887 Some(&["10 15 20", "20 10 25", "25 30 10"]),
9888 );
9889 }
9890
9891 // AArch64 currently does not support hotplug, and therefore we only
9892 // test hotplug-related function on x86_64 here.
9893 #[cfg(target_arch = "x86_64")]
9894 {
9895 guest.check_numa_common(
9896 Some(&[1_920_000, 1_920_000, 2_880_000]),
9897 Some(&[vec![0, 1, 2], vec![3, 4], vec![5]]),
9898 Some(&["10 15 20", "20 10 25", "25 30 10"]),
9899 );
9900
9901 guest.enable_memory_hotplug();
9902
9903 // Resize every memory zone and check each associated NUMA node
9904 // has been assigned the right amount of memory.
9905 resize_zone_command(&dest_api_socket, "mem0", "4G");
9906 resize_zone_command(&dest_api_socket, "mem1", "4G");
9907 resize_zone_command(&dest_api_socket, "mem2", "4G");
9908 // Resize to the maximum amount of CPUs and check each NUMA
9909 // node has been assigned the right CPUs set.
9910 resize_command(&dest_api_socket, Some(max_vcpus), None, None, None);
9911 thread::sleep(std::time::Duration::new(5, 0));
9912
9913 guest.check_numa_common(
9914 Some(&[3_840_000, 3_840_000, 3_840_000]),
9915 Some(&[vec![0, 1, 2, 9], vec![3, 4, 6, 7, 8], vec![5, 10, 11]]),
9916 None,
9917 );
9918 }
9919 }
9920 });
9921
9922 // Clean-up the destination VM and make sure it terminated correctly
9923 let _ = dest_child.kill();
9924 let dest_output = dest_child.wait_with_output().unwrap();
9925 handle_child_output(r, &dest_output);
9926
9927 // Check the destination VM has the expected 'console_text' from its output
9928 let r = std::panic::catch_unwind(|| {
9929 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
9930 });
9931 handle_child_output(r, &dest_output);
9932 }
9933
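// Variant of '_test_live_migration' with the watchdog device enabled: verifies
// the watchdog works on the source, migrates, then checks the destination does
// not reboot spuriously and still reboots when a guest panic is forced.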
9934 fn _test_live_migration_watchdog(upgrade_test: bool, local: bool) {
9935 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
9936 let guest = Guest::new(Box::new(focal));
9937 let kernel_path = direct_kernel_boot_path();
9938 let console_text = String::from("On a branch floating down river a cricket, singing.");
9939 let net_id = "net123";
9940 let net_params = format!(
9941 "id={},tap=,mac={},ip={},mask=255.255.255.0",
9942 net_id, guest.network.guest_mac, guest.network.host_ip
9943 );
9944
9945 let memory_param: &[&str] = if local {
9946 &["--memory", "size=4G,shared=on"]
9947 } else {
9948 &["--memory", "size=4G"]
9949 };
9950
9951 let boot_vcpus = 2;
9952 let max_vcpus = 4;
9953
9954 let pmem_temp_file = TempFile::new().unwrap();
9955 pmem_temp_file.as_file().set_len(128 << 20).unwrap();
9956 std::process::Command::new("mkfs.ext4")
9957 .arg(pmem_temp_file.as_path())
9958 .output()
9959 .expect("Expect creating disk image to succeed");
9960 let pmem_path = String::from("/dev/pmem0");
9961
9962 // Start the source VM
9963 let src_vm_path = if !upgrade_test {
9964 clh_command("cloud-hypervisor")
9965 } else {
9966 cloud_hypervisor_release_path()
9967 };
9968 let src_api_socket = temp_api_path(&guest.tmp_dir);
9969 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
9970 src_vm_cmd
9971 .args([
9972 "--cpus",
9973 format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
9974 ])
9975 .args(memory_param)
9976 .args(["--kernel", kernel_path.to_str().unwrap()])
9977 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
9978 .default_disks()
9979 .args(["--net", net_params.as_str()])
9980 .args(["--api-socket", &src_api_socket])
9981 .args([
9982 "--pmem",
9983 format!("file={}", pmem_temp_file.as_path().to_str().unwrap(),).as_str(),
9984 ])
9985 .args(["--watchdog"]);
9986 let mut src_child = src_vm_cmd.capture_output().spawn().unwrap();
9987
9988 // Start the destination VM
9989 let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
9990 dest_api_socket.push_str(".dest");
9991 let mut dest_child = GuestCommand::new(&guest)
9992 .args(["--api-socket", &dest_api_socket])
9993 .capture_output()
9994 .spawn()
9995 .unwrap();
9996
9997 let r = std::panic::catch_unwind(|| {
9998 guest.wait_vm_boot(None).unwrap();
9999
10000 // Make sure the source VM is functional
10001 // Check the number of vCPUs
10002 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
10003 // Check the guest RAM
10004 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
10005 // Check the guest virtio-devices, e.g. block, rng, console, and net
10006 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
10007 // x86_64: Following what's done in the `test_snapshot_restore`, we need
10008 // to make sure that removing and adding back the virtio-net device does
10009 // not break the live-migration support for virtio-pci.
10010 #[cfg(target_arch = "x86_64")]
10011 {
10012 assert!(remote_command(
10013 &src_api_socket,
10014 "remove-device",
10015 Some(net_id),
10016 ));
10017 thread::sleep(std::time::Duration::new(10, 0));
10018
10019 // Plug the virtio-net device again
10020 assert!(remote_command(
10021 &src_api_socket,
10022 "add-net",
10023 Some(net_params.as_str()),
10024 ));
10025 thread::sleep(std::time::Duration::new(10, 0));
10026 }
10027
10028 // Enable the watchdog and ensure it's functional
10029 let expected_reboot_count = 1;
10030 // Enable the watchdog with a 15s timeout
10031 enable_guest_watchdog(&guest, 15);
10032
10033 assert_eq!(get_reboot_count(&guest), expected_reboot_count);
10034 assert_eq!(
10035 guest
10036 .ssh_command("sudo journalctl | grep -c -- \"Watchdog started\"")
10037 .unwrap()
10038 .trim()
10039 .parse::<u32>()
10040 .unwrap_or_default(),
10041 1
10042 );
10043 // Allow some normal time to elapse to check we don't get spurious reboots
10044 thread::sleep(std::time::Duration::new(40, 0));
10045 // Check no reboot
10046 assert_eq!(get_reboot_count(&guest), expected_reboot_count);
10047
10048 // Start the live-migration
10049 let migration_socket = String::from(
10050 guest
10051 .tmp_dir
10052 .as_path()
10053 .join("live-migration.sock")
10054 .to_str()
10055 .unwrap(),
10056 );
10057
10058 assert!(
10059 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
10060 "Unsuccessful command: 'send-migration' or 'receive-migration'."
10061 );
10062 });
10063
10064 // Check and report any errors that occurred during the live-migration
10065 if r.is_err() {
10066 print_and_panic(
10067 src_child,
10068 dest_child,
10069 None,
10070 "Error occurred during live-migration",
10071 );
10072 }
10073
10074 // Check the source VM has terminated successfully (give it '3s' to settle)
10075 thread::sleep(std::time::Duration::new(3, 0));
10076 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) {
10077 print_and_panic(
10078 src_child,
10079 dest_child,
10080 None,
10081 "source VM was not terminated successfully.",
10082 );
10083 };
10084
10085 // Post live-migration check to make sure the destination VM is functional
10086 let r = std::panic::catch_unwind(|| {
10087 // Perform same checks to validate VM has been properly migrated
10088 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
10089 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
10090
10091 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
10092
10093 // Perform checks on watchdog
10094 let mut expected_reboot_count = 1;
10095
10096 // Allow some normal time to elapse to check we don't get spurious reboots
10097 thread::sleep(std::time::Duration::new(40, 0));
10098 // Check no reboot
10099 assert_eq!(get_reboot_count(&guest), expected_reboot_count);
10100
10101 // Trigger a panic (sync first). We need to do this inside a screen with a delay so the SSH command returns.
10102 guest.ssh_command("screen -dmS reboot sh -c \"sleep 5; echo s | tee /proc/sysrq-trigger; echo c | sudo tee /proc/sysrq-trigger\"").unwrap();
10103 // Allow some time for the watchdog to trigger (max 30s) and reboot to happen
10104 guest.wait_vm_boot(Some(50)).unwrap();
10105 // Check a reboot is triggered by the watchdog
10106 expected_reboot_count += 1;
10107 assert_eq!(get_reboot_count(&guest), expected_reboot_count);
10108
10109 #[cfg(target_arch = "x86_64")]
10110 {
10111 // Now pause the VM and remain offline for 30s
10112 assert!(remote_command(&dest_api_socket, "pause", None));
10113 thread::sleep(std::time::Duration::new(30, 0));
10114 assert!(remote_command(&dest_api_socket, "resume", None));
10115
10116 // Check no reboot
10117 assert_eq!(get_reboot_count(&guest), expected_reboot_count);
10118 }
10119 });
10120
10121 // Clean-up the destination VM and make sure it terminated correctly
10122 let _ = dest_child.kill();
10123 let dest_output = dest_child.wait_with_output().unwrap();
10124 handle_child_output(r, &dest_output);
10125
10126 // Check the destination VM has the expected 'console_text' from its output
10127 let r = std::panic::catch_unwind(|| {
10128 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
10129 });
10130 handle_child_output(r, &dest_output);
10131 }
10132
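// Variant of the live-migration test where the source VM is connected to a
// second VM through an ovs-dpdk bridge; after the migration, the network
// connection is verified to still work from the destination VM.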
10133 fn _test_live_migration_ovs_dpdk(upgrade_test: bool, local: bool) {
10134 let ovs_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
10135 let ovs_guest = Guest::new(Box::new(ovs_focal));
10136
10137 let migration_focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
10138 let migration_guest = Guest::new(Box::new(migration_focal));
10139 let src_api_socket = temp_api_path(&migration_guest.tmp_dir);
10140
10141 // Start two VMs that are connected through ovs-dpdk and one of the VMs is the source VM for live-migration
10142 let (mut ovs_child, mut src_child) =
10143 setup_ovs_dpdk_guests(&ovs_guest, &migration_guest, &src_api_socket, upgrade_test);
10144
10145 // Start the destination VM
10146 let mut dest_api_socket = temp_api_path(&migration_guest.tmp_dir);
10147 dest_api_socket.push_str(".dest");
10148 let mut dest_child = GuestCommand::new(&migration_guest)
10149 .args(["--api-socket", &dest_api_socket])
10150 .capture_output()
10151 .spawn()
10152 .unwrap();
10153
10154 let r = std::panic::catch_unwind(|| {
10155 // Give it '1s' to make sure the 'dest_api_socket' file is properly created
10156 thread::sleep(std::time::Duration::new(1, 0));
10157
10158 // Start the live-migration
10159 let migration_socket = String::from(
10160 migration_guest
10161 .tmp_dir
10162 .as_path()
10163 .join("live-migration.sock")
10164 .to_str()
10165 .unwrap(),
10166 );
10167
10168 assert!(
10169 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, local),
10170 "Unsuccessful command: 'send-migration' or 'receive-migration'."
10171 );
10172 });
10173
10174 // Check and report any errors that occurred during the live-migration
10175 if r.is_err() {
10176 print_and_panic(
10177 src_child,
10178 dest_child,
10179 Some(ovs_child),
10180 "Error occurred during live-migration",
10181 );
10182 }
10183
10184 // Check the source VM has terminated successfully (give it '3s' to settle)
10185 thread::sleep(std::time::Duration::new(3, 0));
10186 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) {
10187 print_and_panic(
10188 src_child,
10189 dest_child,
10190 Some(ovs_child),
10191 "source VM was not terminated successfully.",
10192 );
10193 };
10194
10195 // Post live-migration check to make sure the destination VM is functional
10196 let r = std::panic::catch_unwind(|| {
10197 // Perform same checks to validate VM has been properly migrated
10198 // Spawn a new netcat listener in the OVS VM
10199 let guest_ip = ovs_guest.network.guest_ip.clone();
10200 thread::spawn(move || {
10201 ssh_command_ip(
10202 "nc -l 12345",
10203 &guest_ip,
10204 DEFAULT_SSH_RETRIES,
10205 DEFAULT_SSH_TIMEOUT,
10206 )
10207 .unwrap();
10208 });
10209
10210 // Wait for the server to be listening
10211 thread::sleep(std::time::Duration::new(5, 0));
10212
10213 // And check the connection is still functional after live-migration
10214 migration_guest
10215 .ssh_command("nc -vz 172.100.0.1 12345")
10216 .unwrap();
10217 });
10218
10219 // Clean-up the destination VM and OVS VM, and make sure they terminated correctly
10220 let _ = dest_child.kill();
10221 let _ = ovs_child.kill();
10222 let dest_output = dest_child.wait_with_output().unwrap();
10223 let ovs_output = ovs_child.wait_with_output().unwrap();
10224
10225 cleanup_ovs_dpdk();
10226
10227 handle_child_output(r, &dest_output);
10228 handle_child_output(Ok(()), &ovs_output);
10229 }
10230
10231 // This test exercises the local live-migration between two Cloud Hypervisor VMs on the
10232 // same host with Landlock enabled on both VMs. The test validates the following:
10233 // 1. The source VM is up and functional
10234 // 2. Ensure Landlock is enabled on the source VM by hotplugging a disk. As the path for this
10235 // disk is not known to the source VM, this step will fail;
10236 // 3. The 'send-migration' and 'receive-migration' commands finish successfully;
10237 // 4. The source VM terminates gracefully after live migration;
10238 // 5. The destination VM is functional after live migration;
10239 // 6. Ensure Landlock is enabled on the destination VM by hotplugging a disk. As the path for
10240 // this disk is not known to the destination VM, this step will fail.
10241 fn _test_live_migration_with_landlock() {
10242 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
10243 let guest = Guest::new(Box::new(focal));
10244 let kernel_path = direct_kernel_boot_path();
10245 let net_id = "net123";
10246 let net_params = format!(
10247 "id={},tap=,mac={},ip={},mask=255.255.255.0",
10248 net_id, guest.network.guest_mac, guest.network.host_ip
10249 );
10250
10251 let boot_vcpus = 2;
10252 let max_vcpus = 4;
10253
10254 let mut blk_file_path = dirs::home_dir().unwrap();
10255 blk_file_path.push("workloads");
10256 blk_file_path.push("blk.img");
10257
10258 let src_api_socket = temp_api_path(&guest.tmp_dir);
10259 let mut src_child = GuestCommand::new(&guest)
10260 .args([
10261 "--cpus",
10262 format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
10263 ])
10264 .args(["--memory", "size=4G,shared=on"])
10265 .args(["--kernel", kernel_path.to_str().unwrap()])
10266 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
10267 .default_disks()
10268 .args(["--api-socket", &src_api_socket])
10269 .args(["--landlock"])
10270 .args(["--net", net_params.as_str()])
10271 .args([
10272 "--landlock-rules",
10273 format!("path={:?},access=rw", guest.tmp_dir.as_path()).as_str(),
10274 ])
10275 .capture_output()
10276 .spawn()
10277 .unwrap();
10278
10279 // Start the destination VM
10280 let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
10281 dest_api_socket.push_str(".dest");
10282 let mut dest_child = GuestCommand::new(&guest)
10283 .args(["--api-socket", &dest_api_socket])
10284 .capture_output()
10285 .spawn()
10286 .unwrap();
10287
10288 let r = std::panic::catch_unwind(|| {
10289 guest.wait_vm_boot(None).unwrap();
10290
10291 // Make sure the source VM is functional
10292 // Check the number of vCPUs
10293 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
10294
10295 // Check the guest RAM
10296 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
10297
10298 // Check Landlock is enabled by hot-plugging a disk.
10299 assert!(!remote_command(
10300 &src_api_socket,
10301 "add-disk",
10302 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()),
10303 ));
10304
10305 // Start the live-migration
10306 let migration_socket = String::from(
10307 guest
10308 .tmp_dir
10309 .as_path()
10310 .join("live-migration.sock")
10311 .to_str()
10312 .unwrap(),
10313 );
10314
10315 assert!(
10316 start_live_migration(&migration_socket, &src_api_socket, &dest_api_socket, true),
10317 "Unsuccessful command: 'send-migration' or 'receive-migration'."
10318 );
10319 });
10320
10321 // Check and report any errors that occurred during the live-migration
10322 if r.is_err() {
10323 print_and_panic(
10324 src_child,
10325 dest_child,
10326 None,
10327 "Error occurred during live-migration",
10328 );
10329 }
10330
10331 // Check the source VM has terminated successfully (give it '3s' to settle)
10332 thread::sleep(std::time::Duration::new(3, 0));
10333 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) {
10334 print_and_panic(
10335 src_child,
10336 dest_child,
10337 None,
10338 "source VM was not terminated successfully.",
10339 );
10340 };
10341
10342 // Post live-migration check to make sure the destination VM is functional
10343 let r = std::panic::catch_unwind(|| {
10344 // Perform same checks to validate VM has been properly migrated
10345 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
10346 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
10347 });
10348
10349 // Check Landlock is enabled on destination VM by hot-plugging a disk.
10350 assert!(!remote_command(
10351 &dest_api_socket,
10352 "add-disk",
10353 Some(format!("path={},id=test0", blk_file_path.to_str().unwrap()).as_str()),
10354 ));
10355
10356 // Clean-up the destination VM and make sure it terminated correctly
10357 let _ = dest_child.kill();
10358 let dest_output = dest_child.wait_with_output().unwrap();
10359 handle_child_output(r, &dest_output);
10360 }
10361
10362 // Function to get an available port
10363 fn get_available_port() -> u16 {
10364 TcpListener::bind("127.0.0.1:0")
10365 .expect("Failed to bind to address")
10366 .local_addr()
10367 .unwrap()
10368 .port()
10369 }
10370
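// Same as 'start_live_migration' but over TCP: the destination listens on a
// free local port and the source connects to it via 127.0.0.1.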
10371 fn start_live_migration_tcp(src_api_socket: &str, dest_api_socket: &str) -> bool {
10372 // Get an available TCP port
10373 let migration_port = get_available_port();
10374 let host_ip = "127.0.0.1";
10375
10376 // Start the 'receive-migration' command on the destination
10377 let mut receive_migration = Command::new(clh_command("ch-remote"))
10378 .args([
10379 &format!("--api-socket={dest_api_socket}"),
10380 "receive-migration",
10381 &format!("tcp:0.0.0.0:{migration_port}"),
10382 ])
10383 .stdin(Stdio::null())
10384 .stderr(Stdio::piped())
10385 .stdout(Stdio::piped())
10386 .spawn()
10387 .unwrap();
10388
10389 // Give the destination some time to start listening
10390 thread::sleep(Duration::from_secs(1));
10391
10392 // Start the 'send-migration' command on the source
10393 let mut send_migration = Command::new(clh_command("ch-remote"))
10394 .args([
10395 &format!("--api-socket={src_api_socket}"),
10396 "send-migration",
10397 &format!("tcp:{host_ip}:{migration_port}"),
10398 ])
10399 .stdin(Stdio::null())
10400 .stderr(Stdio::piped())
10401 .stdout(Stdio::piped())
10402 .spawn()
10403 .unwrap();
10404
10405 // Check if the 'send-migration' command executed successfully
10406 let send_success = if let Some(status) = send_migration
10407 .wait_timeout(Duration::from_secs(60))
10408 .unwrap()
10409 {
10410 status.success()
10411 } else {
10412 false
10413 };
10414
10415 if !send_success {
10416 let _ = send_migration.kill();
10417 let output = send_migration.wait_with_output().unwrap();
10418 eprintln!(
10419 "\n\n==== Start 'send_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'send_migration' output ====\n\n",
10420 String::from_utf8_lossy(&output.stdout),
10421 String::from_utf8_lossy(&output.stderr)
10422 );
10423 }
10424
10425 // Check if the 'receive-migration' command executed successfully
10426 let receive_success = if let Some(status) = receive_migration
10427 .wait_timeout(Duration::from_secs(60))
10428 .unwrap()
10429 {
10430 status.success()
10431 } else {
10432 false
10433 };
10434
10435 if !receive_success {
10436 let _ = receive_migration.kill();
10437 let output = receive_migration.wait_with_output().unwrap();
10438 eprintln!(
10439 "\n\n==== Start 'receive_migration' output ====\n\n---stdout---\n{}\n\n---stderr---\n{}\n\n==== End 'receive_migration' output ====\n\n",
10440 String::from_utf8_lossy(&output.stdout),
10441 String::from_utf8_lossy(&output.stderr)
10442 );
10443 }
10444
10445 send_success && receive_success
10446 }
10447
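// Exercise live migration over TCP between two VMs on the same host, following
// the same boot/check/migrate/validate flow as '_test_live_migration'.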
10448 fn _test_live_migration_tcp() {
10449 let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
10450 let guest = Guest::new(Box::new(focal));
10451 let kernel_path = direct_kernel_boot_path();
10452 let console_text = String::from("On a branch floating down river a cricket, singing.");
10453 let net_id = "net123";
10454 let net_params = format!(
10455 "id={},tap=,mac={},ip={},mask=255.255.255.0",
10456 net_id, guest.network.guest_mac, guest.network.host_ip
10457 );
10458 let memory_param: &[&str] = &["--memory", "size=4G,shared=on"];
10459 let boot_vcpus = 2;
10460 let max_vcpus = 4;
10461 let pmem_temp_file = TempFile::new().unwrap();
10462 pmem_temp_file.as_file().set_len(128 << 20).unwrap();
10463 std::process::Command::new("mkfs.ext4")
10464 .arg(pmem_temp_file.as_path())
10465 .output()
10466 .expect("Expect creating disk image to succeed");
10467 let pmem_path = String::from("/dev/pmem0");
10468
10469 // Start the source VM
10470 let src_vm_path = clh_command("cloud-hypervisor");
10471 let src_api_socket = temp_api_path(&guest.tmp_dir);
10472 let mut src_vm_cmd = GuestCommand::new_with_binary_path(&guest, &src_vm_path);
10473 src_vm_cmd
10474 .args([
10475 "--cpus",
10476 format!("boot={boot_vcpus},max={max_vcpus}").as_str(),
10477 ])
10478 .args(memory_param)
10479 .args(["--kernel", kernel_path.to_str().unwrap()])
10480 .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
10481 .default_disks()
10482 .args(["--net", net_params.as_str()])
10483 .args(["--api-socket", &src_api_socket])
10484 .args([
10485 "--pmem",
10486 format!(
10487 "file={},discard_writes=on",
10488 pmem_temp_file.as_path().to_str().unwrap(),
10489 )
10490 .as_str(),
10491 ])
10492 .capture_output();
10493 let mut src_child = src_vm_cmd.spawn().unwrap();
10494
10495 // Start the destination VM
10496 let mut dest_api_socket = temp_api_path(&guest.tmp_dir);
10497 dest_api_socket.push_str(".dest");
10498 let mut dest_child = GuestCommand::new(&guest)
10499 .args(["--api-socket", &dest_api_socket])
10500 .capture_output()
10501 .spawn()
10502 .unwrap();
10503
10504 let r = std::panic::catch_unwind(|| {
10505 guest.wait_vm_boot(None).unwrap();
10506 // Ensure the source VM is running normally
10507 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
10508 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
10509 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
10510
10511 // On x86_64 architecture, remove and re-add the virtio-net device
10512 #[cfg(target_arch = "x86_64")]
10513 {
10514 assert!(remote_command(
10515 &src_api_socket,
10516 "remove-device",
10517 Some(net_id),
10518 ));
10519 thread::sleep(Duration::new(10, 0));
10520 // Re-add the virtio-net device
10521 assert!(remote_command(
10522 &src_api_socket,
10523 "add-net",
10524 Some(net_params.as_str()),
10525 ));
10526 thread::sleep(Duration::new(10, 0));
10527 }
10528 // Start TCP live migration
10529 assert!(
10530 start_live_migration_tcp(&src_api_socket, &dest_api_socket),
10531 "Unsuccessful command: 'send-migration' or 'receive-migration'."
10532 );
10533 });
10534
10535 // Check and report any errors that occurred during live migration
10536 if r.is_err() {
10537 print_and_panic(
10538 src_child,
10539 dest_child,
10540 None,
10541 "Error occurred during live-migration",
10542 );
10543 }
10544
10545 // Check the source VM has terminated successfully (give it '3s' to settle)
10546 thread::sleep(std::time::Duration::new(3, 0));
10547 if !src_child.try_wait().unwrap().is_some_and(|s| s.success()) {
10548 print_and_panic(
10549 src_child,
10550 dest_child,
10551 None,
10552 "Source VM was not terminated successfully.",
10553 );
10554 };
10555
10556 // After live migration, ensure the destination VM is running normally
10557 let r = std::panic::catch_unwind(|| {
10558 // Perform the same checks to ensure the VM has migrated correctly
10559 assert_eq!(guest.get_cpu_count().unwrap_or_default(), boot_vcpus);
10560 assert!(guest.get_total_memory().unwrap_or_default() > 3_840_000);
10561 guest.check_devices_common(None, Some(&console_text), Some(&pmem_path));
10562 });
10563
10564 // Clean up the destination VM and ensure it terminates properly
10565 let _ = dest_child.kill();
10566 let dest_output = dest_child.wait_with_output().unwrap();
10567 handle_child_output(r, &dest_output);
10568
10569 // Check if the expected `console_text` is present in the destination VM's output
10570 let r = std::panic::catch_unwind(|| {
10571 assert!(String::from_utf8_lossy(&dest_output.stdout).contains(&console_text));
10572 });
10573 handle_child_output(r, &dest_output);
10574 }
10575
10576 mod live_migration_parallel {
10577 use super::*;
10578 #[test]
10579 fn test_live_migration_basic() {
10580 _test_live_migration(false, false)
10581 }
10582
10583 #[test]
10584 fn test_live_migration_local() {
10585 _test_live_migration(false, true)
10586 }
10587
10588 #[test]
10589 fn test_live_migration_tcp() {
10590 _test_live_migration_tcp();
10591 }
10592
10593 #[test]
10594 fn test_live_migration_watchdog() {
10595 _test_live_migration_watchdog(false, false)
10596 }
10597
10598 #[test]
10599 fn test_live_migration_watchdog_local() {
10600 _test_live_migration_watchdog(false, true)
10601 }
10602
10603 #[test]
10604 fn test_live_upgrade_basic() {
10605 _test_live_migration(true, false)
10606 }
10607
10608 #[test]
10609 fn test_live_upgrade_local() {
10610 _test_live_migration(true, true)
10611 }
10612
10613 #[test]
10614 fn test_live_upgrade_watchdog() {
10615 _test_live_migration_watchdog(true, false)
10616 }
10617
10618 #[test]
10619 fn test_live_upgrade_watchdog_local() {
10620 _test_live_migration_watchdog(true, true)
10621 }
10622 #[test]
10623 #[cfg(target_arch = "x86_64")]
10624 fn test_live_migration_with_landlock() {
10625 _test_live_migration_with_landlock()
10626 }
10627 }
10628
10629 mod live_migration_sequential {
10630 use super::*;
10631
10632 // The NUMA and balloon live migration tests are large, so run them sequentially
10633
10634 #[test]
10635 fn test_live_migration_balloon() {
10636 _test_live_migration_balloon(false, false)
10637 }
10638
10639 #[test]
10640 fn test_live_migration_balloon_local() {
10641 _test_live_migration_balloon(false, true)
10642 }
10643
10644 #[test]
10645 fn test_live_upgrade_balloon() {
10646 _test_live_migration_balloon(true, false)
10647 }
10648
10649 #[test]
10650 fn test_live_upgrade_balloon_local() {
10651 _test_live_migration_balloon(true, true)
10652 }
10653
10654 #[test]
10655 #[cfg(not(feature = "mshv"))]
10656 fn test_live_migration_numa() {
10657 _test_live_migration_numa(false, false)
10658 }
10659
10660 #[test]
10661 #[cfg(not(feature = "mshv"))]
10662 fn test_live_migration_numa_local() {
10663 _test_live_migration_numa(false, true)
10664 }
10665
10666 #[test]
10667 #[cfg(not(feature = "mshv"))]
10668 fn test_live_upgrade_numa() {
10669 _test_live_migration_numa(true, false)
10670 }
10671
10672 #[test]
10673 #[cfg(not(feature = "mshv"))]
10674 fn test_live_upgrade_numa_local() {
10675 _test_live_migration_numa(true, true)
10676 }
10677
10678 // The ovs-dpdk tests must run sequentially because they rely on the same ovs-dpdk setup
        #[test]
        #[ignore = "See #5532"]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_ovs_dpdk() {
            _test_live_migration_ovs_dpdk(false, false);
        }

        #[test]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_migration_ovs_dpdk_local() {
            _test_live_migration_ovs_dpdk(false, true);
        }

        #[test]
        #[ignore = "See #5532"]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_ovs_dpdk() {
            _test_live_migration_ovs_dpdk(true, false);
        }

        #[test]
        #[ignore = "See #5532"]
        #[cfg(target_arch = "x86_64")]
        #[cfg(not(feature = "mshv"))]
        fn test_live_upgrade_ovs_dpdk_local() {
            _test_live_migration_ovs_dpdk(true, true);
        }
    }
}

#[cfg(target_arch = "aarch64")]
mod aarch64_acpi {
    use crate::*;

    #[test]
    fn test_simple_launch_acpi() {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());

        vec![Box::new(focal)].drain(..).for_each(|disk_config| {
            let guest = Guest::new(disk_config);

            let mut child = GuestCommand::new(&guest)
                .args(["--cpus", "boot=1"])
                .args(["--memory", "size=512M"])
                .args(["--kernel", edk2_path().to_str().unwrap()])
                .default_disks()
                .default_net()
                .args(["--serial", "tty", "--console", "off"])
                .capture_output()
                .spawn()
                .unwrap();

            let r = std::panic::catch_unwind(|| {
                guest.wait_vm_boot(Some(120)).unwrap();

                assert_eq!(guest.get_cpu_count().unwrap_or_default(), 1);
                assert!(guest.get_total_memory().unwrap_or_default() > 400_000);
                assert_eq!(guest.get_pci_bridge_class().unwrap_or_default(), "0x060000");
            });

            let _ = child.kill();
            let output = child.wait_with_output().unwrap();

            handle_child_output(r, &output);
        });
    }

    #[test]
    fn test_guest_numa_nodes_acpi() {
        _test_guest_numa_nodes(true);
    }

    #[test]
    fn test_cpu_topology_421_acpi() {
        test_cpu_topology(4, 2, 1, true);
    }

    #[test]
    fn test_cpu_topology_142_acpi() {
        test_cpu_topology(1, 4, 2, true);
    }

    #[test]
    fn test_cpu_topology_262_acpi() {
        test_cpu_topology(2, 6, 2, true);
    }

    #[test]
    fn test_power_button_acpi() {
        _test_power_button(true);
    }

    #[test]
    fn test_virtio_iommu() {
        _test_virtio_iommu(true)
    }
}

mod rate_limiter {
    use super::*;

    // Check whether the 'measured' rate is within 'difference' (a fraction, e.g. 0.1
    // for 10%) of the given 'limit' rate.
    fn check_rate_limit(measured: f64, limit: f64, difference: f64) -> bool {
        let upper_limit = limit * (1_f64 + difference);
        let lower_limit = limit * (1_f64 - difference);

        if measured > lower_limit && measured < upper_limit {
            return true;
        }

        eprintln!(
            "\n\n==== Start 'check_rate_limit' failed ==== \
            \n\nmeasured={measured}, lower_limit={lower_limit}, upper_limit={upper_limit} \
            \n\n==== End 'check_rate_limit' failed ====\n\n"
        );

        false
    }
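
    // A minimal, self-contained sanity check of `check_rate_limit` itself (illustrative;
    // not one of the guest-based tests): with a 10% tolerance, values strictly inside the
    // (lower_limit, upper_limit) band pass and values outside it fail. It exercises only
    // host-side arithmetic, so it needs no guest image or VMM binary.
    #[test]
    fn test_check_rate_limit_tolerance() {
        // Exactly on the limit and slightly above it both fall inside the band.
        assert!(check_rate_limit(100.0, 100.0, 0.1));
        assert!(check_rate_limit(105.0, 100.0, 0.1));
        // 20% below the limit is outside a 10% band.
        assert!(!check_rate_limit(80.0, 100.0, 0.1));
        // The bounds are exclusive, so hitting the upper bound exactly fails.
        assert!(!check_rate_limit(110.0, 100.0, 0.1));
    }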

    fn _test_rate_limiter_net(rx: bool) {
        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));

        let test_timeout = 10;
        let num_queues = 2;
        let queue_size = 256;
        let bw_size = 10485760_u64; // bytes
        let bw_refill_time = 100; // ms
        let limit_bps = (bw_size * 8 * 1000) as f64 / bw_refill_time as f64;
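        // With the values above the token bucket refills 10 MiB every 100 ms, i.e.
        // 10_485_760 * 8 * 1000 / 100 = 838_860_800 bits/s (~839 Mbit/s).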

        let net_params = format!(
            "tap=,mac={},ip={},mask=255.255.255.0,num_queues={},queue_size={},bw_size={},bw_refill_time={}",
            guest.network.guest_mac,
            guest.network.host_ip,
            num_queues,
            queue_size,
            bw_size,
            bw_refill_time,
        );

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={}", num_queues / 2)])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .default_disks()
            .args(["--net", net_params.as_str()])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();
            let measured_bps =
                measure_virtio_net_throughput(test_timeout, num_queues / 2, &guest, rx, true)
                    .unwrap();
            assert!(check_rate_limit(measured_bps, limit_bps, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_net_rx() {
        _test_rate_limiter_net(true);
    }

    #[test]
    fn test_rate_limiter_net_tx() {
        _test_rate_limiter_net(false);
    }

    fn _test_rate_limiter_block(bandwidth: bool, num_queues: u32) {
        let test_timeout = 10;
        let fio_ops = FioOps::RandRW;

        let bw_size = if bandwidth {
            10485760_u64 // bytes
        } else {
            100_u64 // I/O
        };
        let bw_refill_time = 100; // ms
        let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;
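        // With the values above: in bandwidth mode the bucket refills 10 MiB every
        // 100 ms, i.e. 104_857_600 bytes/s (100 MiB/s); in IOPS mode it refills
        // 100 operations every 100 ms, i.e. 1000 ops/s.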

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);
        let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();
        let blk_rate_limiter_test_img =
            String::from(test_img_dir.as_path().join("blk.img").to_str().unwrap());

        // Create the test block image
        assert!(exec_host_command_output(&format!(
            "dd if=/dev/zero of={blk_rate_limiter_test_img} bs=1M count=1024"
        ))
        .status
        .success());

        let test_blk_params = if bandwidth {
            format!(
                "path={blk_rate_limiter_test_img},num_queues={num_queues},bw_size={bw_size},bw_refill_time={bw_refill_time}"
            )
        } else {
            format!(
                "path={blk_rate_limiter_test_img},num_queues={num_queues},ops_size={bw_size},ops_refill_time={bw_refill_time}"
            )
        };

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={num_queues}")])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .args([
                "--disk",
                format!(
                    "path={}",
                    guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
                )
                .as_str(),
                format!(
                    "path={}",
                    guest.disk_config.disk(DiskType::CloudInit).unwrap()
                )
                .as_str(),
                test_blk_params.as_str(),
            ])
            .default_net()
            .args(["--api-socket", &api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            let fio_command = format!(
                "sudo fio --filename=/dev/vdc --name=test --output-format=json \
                --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \
                --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}"
            );
            let output = guest.ssh_command(&fio_command).unwrap();

            // Parse fio output
            let measured_rate = if bandwidth {
                parse_fio_output(&output, &fio_ops, num_queues).unwrap()
            } else {
                parse_fio_output_iops(&output, &fio_ops, num_queues).unwrap()
            };
            assert!(check_rate_limit(measured_rate, limit_rate, 0.1));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    fn _test_rate_limiter_group_block(bandwidth: bool, num_queues: u32, num_disks: u32) {
        let test_timeout = 10;
        let fio_ops = FioOps::RandRW;

        let bw_size = if bandwidth {
            10485760_u64 // bytes
        } else {
            100_u64 // I/O
        };
        let bw_refill_time = 100; // ms
        let limit_rate = (bw_size * 1000) as f64 / bw_refill_time as f64;
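        // Same token-bucket arithmetic as in `_test_rate_limiter_block`, but the budget
        // belongs to a single rate-limit group ("group0") shared by every data disk, so
        // the aggregate throughput measured across all disks and queues below is what
        // gets compared against `limit_rate`.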

        let focal = UbuntuDiskConfig::new(FOCAL_IMAGE_NAME.to_string());
        let guest = Guest::new(Box::new(focal));
        let api_socket = temp_api_path(&guest.tmp_dir);
        let test_img_dir = TempDir::new_with_prefix("/var/tmp/ch").unwrap();

        let rate_limit_group_arg = if bandwidth {
            format!("id=group0,bw_size={bw_size},bw_refill_time={bw_refill_time}")
        } else {
            format!("id=group0,ops_size={bw_size},ops_refill_time={bw_refill_time}")
        };

        let mut disk_args = vec![
            "--disk".to_string(),
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::OperatingSystem).unwrap()
            ),
            format!(
                "path={}",
                guest.disk_config.disk(DiskType::CloudInit).unwrap()
            ),
        ];

        for i in 0..num_disks {
            let test_img_path = String::from(
                test_img_dir
                    .as_path()
                    .join(format!("blk{i}.img"))
                    .to_str()
                    .unwrap(),
            );

            assert!(exec_host_command_output(&format!(
                "dd if=/dev/zero of={test_img_path} bs=1M count=1024"
            ))
            .status
            .success());

            disk_args.push(format!(
                "path={test_img_path},num_queues={num_queues},rate_limit_group=group0"
            ));
        }

        let mut child = GuestCommand::new(&guest)
            .args(["--cpus", &format!("boot={}", num_queues * num_disks)])
            .args(["--memory", "size=4G"])
            .args(["--kernel", direct_kernel_boot_path().to_str().unwrap()])
            .args(["--cmdline", DIRECT_KERNEL_BOOT_CMDLINE])
            .args(["--rate-limit-group", &rate_limit_group_arg])
            .args(disk_args)
            .default_net()
            .args(["--api-socket", &api_socket])
            .capture_output()
            .spawn()
            .unwrap();

        let r = std::panic::catch_unwind(|| {
            guest.wait_vm_boot(None).unwrap();

            let mut fio_command = format!(
                "sudo fio --name=global --output-format=json \
                --direct=1 --bs=4k --ioengine=io_uring --iodepth=64 \
                --rw={fio_ops} --runtime={test_timeout} --numjobs={num_queues}"
            );

            // Generate additional argument for each disk:
            // --name=job0 --filename=/dev/vdc \
            // --name=job1 --filename=/dev/vdd \
            // --name=job2 --filename=/dev/vde \
            // ...
            for i in 0..num_disks {
                let c: char = 'c';
                let arg = format!(
                    " --name=job{i} --filename=/dev/vd{}",
                    char::from_u32((c as u32) + i).unwrap()
                );
                fio_command += &arg;
            }
            let output = guest.ssh_command(&fio_command).unwrap();

            // Parse fio output
            let measured_rate = if bandwidth {
                parse_fio_output(&output, &fio_ops, num_queues * num_disks).unwrap()
            } else {
                parse_fio_output_iops(&output, &fio_ops, num_queues * num_disks).unwrap()
            };
            assert!(check_rate_limit(measured_rate, limit_rate, 0.2));
        });

        let _ = child.kill();
        let output = child.wait_with_output().unwrap();
        handle_child_output(r, &output);
    }

    #[test]
    fn test_rate_limiter_block_bandwidth() {
        _test_rate_limiter_block(true, 1);
        _test_rate_limiter_block(true, 2)
    }

    #[test]
    fn test_rate_limiter_group_block_bandwidth() {
        _test_rate_limiter_group_block(true, 1, 1);
        _test_rate_limiter_group_block(true, 2, 1);
        _test_rate_limiter_group_block(true, 1, 2);
        _test_rate_limiter_group_block(true, 2, 2);
    }

    #[test]
    fn test_rate_limiter_block_iops() {
        _test_rate_limiter_block(false, 1);
        _test_rate_limiter_block(false, 2);
    }

    #[test]
    fn test_rate_limiter_group_block_iops() {
        _test_rate_limiter_group_block(false, 1, 1);
        _test_rate_limiter_group_block(false, 2, 1);
        _test_rate_limiter_group_block(false, 1, 2);
        _test_rate_limiter_group_block(false, 2, 2);
    }
}