#!/usr/bin/env bash
# shellcheck disable=SC2048,SC2086
set -x

# shellcheck source=/dev/null
source "$HOME"/.cargo/env
source "$(dirname "$0")"/test-util.sh
source "$(dirname "$0")"/common-aarch64.sh

WORKLOADS_LOCK="$WORKLOADS_DIR/integration_test.lock"

# Build the SPDK NVMe-oF target (nvmf_tgt) used by the vfio-user tests and
# deploy it, together with its RPC scripts, under /usr/local/bin/spdk-nvme.
build_spdk_nvme() {
    SPDK_DIR="$WORKLOADS_DIR/spdk"
    SPDK_REPO="https://github.com/spdk/spdk.git"
    SPDK_DEPLOY_DIR="/usr/local/bin/spdk-nvme"
    checkout_repo "$SPDK_DIR" "$SPDK_REPO" master "ef8bcce58f3f02b79c0619a297e4f17e81e62b24"

    if [ ! -f "$SPDK_DIR/.built" ]; then
        pushd "$SPDK_DIR" || exit
        git submodule update --init
        apt-get update
        sed -i "/grpcio/d" scripts/pkgdep/debian.sh
        ./scripts/pkgdep.sh
        ./configure --with-vfio-user
        chmod +x /usr/local/lib/python3.10/dist-packages/ninja/data/bin/ninja
        make -j "$(nproc)" || exit 1
        touch .built
        popd || exit
    fi
    if [ ! -d "$SPDK_DEPLOY_DIR" ]; then
        mkdir -p "$SPDK_DEPLOY_DIR"
    fi
    cp "$WORKLOADS_DIR/spdk/build/bin/nvmf_tgt" "$SPDK_DEPLOY_DIR/nvmf_tgt"
    cp "$WORKLOADS_DIR/spdk/scripts/rpc.py" "$SPDK_DEPLOY_DIR/rpc.py"
    cp -r "$WORKLOADS_DIR/spdk/python/spdk/" "$SPDK_DEPLOY_DIR/"
    cp -r "$WORKLOADS_DIR/spdk/python" "$SPDK_DEPLOY_DIR/../"
}

# Build virtiofsd from the upstream Rust implementation and copy the binary
# into the workloads folder for the virtio-fs tests.
build_virtiofsd() {
    VIRTIOFSD_DIR="$WORKLOADS_DIR/virtiofsd_build"
    VIRTIOFSD_REPO="https://gitlab.com/virtio-fs/virtiofsd.git"

    checkout_repo "$VIRTIOFSD_DIR" "$VIRTIOFSD_REPO" v1.8.0 "97ea7908fe7f9bc59916671a771bdcfaf4044b45"

    if [ ! -f "$VIRTIOFSD_DIR/.built" ]; then
        pushd "$VIRTIOFSD_DIR" || exit
        rm -rf target/
        time RUSTFLAGS="" TARGET_CC="" cargo build --release
        cp target/release/virtiofsd "$WORKLOADS_DIR/" || exit 1
        touch .built
        popd || exit
    fi
}

update_workloads() {
    cp scripts/sha1sums-aarch64 "$WORKLOADS_DIR"

    FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-arm64-custom-20210929-0.raw"
    FOCAL_OS_RAW_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_RAW_IMAGE_NAME"
    FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
    if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $FOCAL_OS_RAW_IMAGE_DOWNLOAD_URL || exit 1
        popd || exit
    fi

    FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="focal-server-cloudimg-arm64-custom-20210929-0.qcow2"
    FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    if [ ! -f "$FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL || exit 1
        popd || exit
    fi

    FOCAL_OS_QCOW2_IMAGE_BACKING_FILE_NAME="focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2"
    FOCAL_OS_QCOW2_BACKING_FILE_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW2_IMAGE_BACKING_FILE_NAME"
    if [ ! -f "$FOCAL_OS_QCOW2_BACKING_FILE_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time qemu-img create -f qcow2 -b "$FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE" -F qcow2 $FOCAL_OS_QCOW2_IMAGE_BACKING_FILE_NAME
        popd || exit
    fi
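
    # Not part of the original flow: if the qcow2 backing chain ever looks
    # broken in CI, a quick read-only way to inspect it is `qemu-img info`,
    # e.g.:
    #   qemu-img info "$FOCAL_OS_QCOW2_BACKING_FILE_IMAGE"
    # which prints the backing file path and format recorded in the image header.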

    JAMMY_OS_RAW_IMAGE_NAME="jammy-server-cloudimg-arm64-custom-20220329-0.raw"
    JAMMY_OS_RAW_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$JAMMY_OS_RAW_IMAGE_NAME"
    JAMMY_OS_RAW_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_RAW_IMAGE_NAME"
    if [ ! -f "$JAMMY_OS_RAW_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $JAMMY_OS_RAW_IMAGE_DOWNLOAD_URL || exit 1
        popd || exit
    fi

    JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="jammy-server-cloudimg-arm64-custom-20220329-0.qcow2"
    JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    JAMMY_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    if [ ! -f "$JAMMY_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL || exit 1
        popd || exit
    fi

    ALPINE_MINIROOTFS_URL="http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/aarch64/alpine-minirootfs-3.11.3-aarch64.tar.gz"
    ALPINE_MINIROOTFS_TARBALL="$WORKLOADS_DIR/alpine-minirootfs-aarch64.tar.gz"
    if [ ! -f "$ALPINE_MINIROOTFS_TARBALL" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $ALPINE_MINIROOTFS_URL -O "$ALPINE_MINIROOTFS_TARBALL" || exit 1
        popd || exit
    fi

    ALPINE_INITRAMFS_IMAGE="$WORKLOADS_DIR/alpine_initramfs.img"
    if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        mkdir alpine-minirootfs
        tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
        cat >alpine-minirootfs/init <<-EOF
#! /bin/sh
mount -t devtmpfs dev /dev
echo \$TEST_STRING > /dev/console
poweroff -f
EOF
        chmod +x alpine-minirootfs/init
        cd alpine-minirootfs || exit
        find . -print0 |
            cpio --null --create --verbose --owner root:root --format=newc >"$ALPINE_INITRAMFS_IMAGE"
        popd || exit
    fi
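
    # Not part of the original flow: the generated newc archive can be checked
    # without booting it, e.g.:
    #   cpio -t <"$ALPINE_INITRAMFS_IMAGE" | grep init
    # should list the `./init` entry created above if the archive is intact.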

    pushd "$WORKLOADS_DIR" || exit

    if ! sha1sum --check sha1sums-aarch64; then
        echo "sha1sum validation of images failed, remove invalid images to fix the issue."
        exit 1
    fi
    popd || exit

    # Download the Cloud Hypervisor binary from the latest stable release
    LAST_RELEASE_VERSION="v36.0"
    CH_RELEASE_URL="https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/$LAST_RELEASE_VERSION/cloud-hypervisor-static-aarch64"
    CH_RELEASE_NAME="cloud-hypervisor-static-aarch64"
    pushd "$WORKLOADS_DIR" || exit
    # Retry a few times to work around transient wget failures
    WGET_RETRY_MAX=10
    wget_retry=0

    until [ "$wget_retry" -ge "$WGET_RETRY_MAX" ]; do
        time wget $CH_RELEASE_URL -O "$CH_RELEASE_NAME" && break
        wget_retry=$((wget_retry + 1))
    done

    if [ $wget_retry -ge "$WGET_RETRY_MAX" ]; then
        exit 1
    else
        chmod +x $CH_RELEASE_NAME
    fi
    popd || exit

    # Build the custom kernel for guest VMs
    build_custom_linux

    # Update the kernel in the cloud image for tests that require a recent kernel version
    FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME="focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw"
    cp "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME" "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME"
    FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR="$WORKLOADS_DIR/focal-server-cloudimg-root"
    mkdir -p "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"
    # Mount the 'raw' image, replace the compressed kernel file and unmount the working folder
    guestmount -a "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME" -m /dev/sda1 "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR" || exit 1
    cp "$WORKLOADS_DIR"/Image.gz "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"/boot/vmlinuz
    guestunmount "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"

    # Build virtiofsd
    build_virtiofsd

    BLK_IMAGE="$WORKLOADS_DIR/blk.img"
    MNT_DIR="mount_image"
    if [ ! -f "$BLK_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        fallocate -l 16M "$BLK_IMAGE"
        mkfs.ext4 -j "$BLK_IMAGE"
        mkdir $MNT_DIR
        sudo mount -t ext4 "$BLK_IMAGE" $MNT_DIR
        sudo bash -c "echo bar > $MNT_DIR/foo" || exit 1
        sudo umount "$BLK_IMAGE"
        rm -r $MNT_DIR
        popd || exit
    fi

    SHARED_DIR="$WORKLOADS_DIR/shared_dir"
    if [ ! -d "$SHARED_DIR" ]; then
        mkdir -p "$SHARED_DIR"
        echo "foo" >"$SHARED_DIR/file1"
        echo "bar" >"$SHARED_DIR/file3" || exit 1
    fi

    # Check out and build SPDK NVMe
    build_spdk_nvme

    # Check out and build EDK2
    build_edk2
}

process_common_args "$@"

# AArch64 is not supported on MSHV
if [[ "$hypervisor" = "mshv" ]]; then
    echo "AArch64 is not supported on the Microsoft Hypervisor"
    exit 1
fi

# Lock the workloads folder to avoid it being updated in parallel by different containers
(
    echo "Trying to lock $WORKLOADS_DIR and update the workloads"
    flock -x 12 && update_workloads
) 12>"$WORKLOADS_LOCK"

# Check whether `update_workloads` failed and, if so, exit immediately.
# Without this check the script would keep running even though the workloads
# were not updated correctly.
RES=$?
if [ $RES -ne 0 ]; then
    exit 1
fi

export RUST_BACKTRACE=1

cargo build --all --release --target "$BUILD_TARGET"

# Enable KSM with some reasonable parameters so that it won't take too long
# for the memory to be merged between two processes.
sudo bash -c "echo 1000000 > /sys/kernel/mm/ksm/pages_to_scan"
sudo bash -c "echo 10 > /sys/kernel/mm/ksm/sleep_millisecs"
sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"
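
# Not part of the original flow: KSM exposes its state and merge statistics in
# the same sysfs directory, which helps when debugging the KSM-related tests,
# e.g.:
#   cat /sys/kernel/mm/ksm/run            # 1 while KSM is running
#   cat /sys/kernel/mm/ksm/pages_sharing  # grows once pages get merged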

# Both test_vfio and ovs-dpdk rely on hugepages.
# Reserve 12 GiB of hugepages (Hugepagesize in /proc/meminfo is reported in kB).
HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
PAGE_NUM=$((12288 * 1024 / HUGEPAGESIZE))
echo "$PAGE_NUM" | sudo tee /proc/sys/vm/nr_hugepages
sudo chmod a+rwX /dev/hugepages

# Run all direct kernel boot (Device Tree) test cases in mod `common_parallel`
time cargo test "common_parallel::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
RES=$?

# Run some tests in sequence since their results could be affected by other
# tests running in parallel.
if [ $RES -eq 0 ]; then
    time cargo test "common_sequential::$test_filter" --target "$BUILD_TARGET" -- --test-threads=1 ${test_binary_args[*]}
    RES=$?
else
    exit $RES
fi

# Run all ACPI test cases
if [ $RES -eq 0 ]; then
    time cargo test "aarch64_acpi::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
    RES=$?
else
    exit $RES
fi

# Run all test cases related to live migration
if [ $RES -eq 0 ]; then
    time cargo test "live_migration_parallel::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
    RES=$?
else
    exit $RES
fi

if [ $RES -eq 0 ]; then
    time cargo test "live_migration_sequential::$test_filter" --target "$BUILD_TARGET" -- --test-threads=1 ${test_binary_args[*]}
    RES=$?
else
    exit $RES
fi

# Run the dbus_api tests
if [ $RES -eq 0 ]; then
    cargo build --features "dbus_api" --all --release --target "$BUILD_TARGET"
    export RUST_BACKTRACE=1
    time cargo test "dbus_api::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
    RES=$?
fi

exit $RES