xref: /cloud-hypervisor/scripts/run_integration_tests_aarch64.sh (revision adb318f4cd0079246b3cb07e01c4e978330445d2)
1#!/usr/bin/env bash
2# shellcheck disable=SC2048,SC2086
3set -x
4
5# shellcheck source=/dev/null
6source "$HOME"/.cargo/env
7source "$(dirname "$0")"/test-util.sh
8source "$(dirname "$0")"/common-aarch64.sh
9
10WORKLOADS_LOCK="$WORKLOADS_DIR/integration_test.lock"
11
build_spdk_nvme() {
    # Build the SPDK NVMe-oF target at a pinned commit and install the
    # binaries/scripts required by the vfio-user integration tests into
    # $SPDK_DEPLOY_DIR. A ".built" marker file makes re-runs skip the build.
    SPDK_DIR="$WORKLOADS_DIR/spdk"
    SPDK_REPO="https://github.com/spdk/spdk.git"
    SPDK_DEPLOY_DIR="/usr/local/bin/spdk-nvme"
    checkout_repo "$SPDK_DIR" "$SPDK_REPO" master "ef8bcce58f3f02b79c0619a297e4f17e81e62b24"

    if [ ! -f "$SPDK_DIR/.built" ]; then
        pushd "$SPDK_DIR" || exit
        git submodule update --init
        apt-get update
        # Drop grpcio from SPDK's dependency script; it is not needed by
        # these tests (and pip packages can fail to build on aarch64).
        sed -i "/grpcio/d" scripts/pkgdep/debian.sh
        ./scripts/pkgdep.sh
        ./configure --with-vfio-user
        # NOTE(review): hardcoded Python 3.10 path — breaks if the container
        # ships a different Python version; confirm against the CI image.
        chmod +x /usr/local/lib/python3.10/dist-packages/ninja/data/bin/ninja
        make -j "$(nproc)" || exit 1
        touch .built
        popd || exit
    fi
    # 'mkdir -p' is a no-op when the directory exists, so the previous
    # [ ! -d ... ] guard (which also duplicated the literal path instead of
    # using $SPDK_DEPLOY_DIR) is unnecessary.
    mkdir -p "$SPDK_DEPLOY_DIR"
    cp "$WORKLOADS_DIR/spdk/build/bin/nvmf_tgt" "$SPDK_DEPLOY_DIR"/nvmf_tgt
    cp "$WORKLOADS_DIR/spdk/scripts/rpc.py" "$SPDK_DEPLOY_DIR"/rpc.py
    # rpc.py needs the 'spdk' python package both next to it and one level up.
    cp -r "$WORKLOADS_DIR/spdk/python/spdk/" "$SPDK_DEPLOY_DIR"/
    cp -r "$WORKLOADS_DIR/spdk/python" "$SPDK_DEPLOY_DIR"/../
}
38
build_virtiofsd() {
    # Build the virtiofsd daemon (Rust implementation) at a pinned release
    # tag and copy the binary into $WORKLOADS_DIR. A ".built" marker file
    # makes re-runs skip the build.
    VIRTIOFSD_DIR="$WORKLOADS_DIR/virtiofsd_build"
    VIRTIOFSD_REPO="https://gitlab.com/virtio-fs/virtiofsd.git"

    checkout_repo "$VIRTIOFSD_DIR" "$VIRTIOFSD_REPO" v1.8.0 "97ea7908fe7f9bc59916671a771bdcfaf4044b45"

    if [ ! -f "$VIRTIOFSD_DIR/.built" ]; then
        pushd "$VIRTIOFSD_DIR" || exit
        rm -rf target/
        # Clear RUSTFLAGS/TARGET_CC so cloud-hypervisor's own build settings
        # do not leak into this independent cargo build. Fail fast on a
        # build error instead of relying on the following cp to catch it.
        time RUSTFLAGS="" TARGET_CC="" cargo build --release || exit 1
        cp target/release/virtiofsd "$WORKLOADS_DIR/" || exit 1
        touch .built
        popd || exit
    fi
}
54
update_workloads() {
    # Populate $WORKLOADS_DIR with every guest image and helper binary the
    # integration tests need: cloud images (download + format conversions),
    # an Alpine initramfs, the previous stable cloud-hypervisor release,
    # a custom guest kernel, virtiofsd, SPDK and EDK2.
    # Runs under an exclusive flock (see bottom of script), so only one
    # container updates the shared folder at a time.
    cp scripts/sha1sums-aarch64 "$WORKLOADS_DIR"

    # Ubuntu 18.04 (Bionic) cloud image: download the compressed qcow2.
    BIONIC_OS_IMAGE_DOWNLOAD_NAME="bionic-server-cloudimg-arm64.img"
    BIONIC_OS_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
    BIONIC_OS_DOWNLOAD_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
    if [ ! -f "$BIONIC_OS_DOWNLOAD_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $BIONIC_OS_IMAGE_DOWNLOAD_URL || exit 1
        popd || exit
    fi

    # Convert the downloaded qcow2 to a raw disk image.
    BIONIC_OS_RAW_IMAGE_NAME="bionic-server-cloudimg-arm64.raw"
    BIONIC_OS_RAW_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_RAW_IMAGE_NAME"
    if [ ! -f "$BIONIC_OS_RAW_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time qemu-img convert -p -f qcow2 -O raw $BIONIC_OS_IMAGE_DOWNLOAD_NAME $BIONIC_OS_RAW_IMAGE_NAME || exit 1
        popd || exit
    fi

    # Convert the raw image to qcow2 image to remove compressed blocks from the disk. Therefore letting the
    # qcow2 format image can be directly used in the integration test.
    BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="bionic-server-cloudimg-arm64.qcow2"
    BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    if [ ! -f "$BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time qemu-img convert -p -f raw -O qcow2 $BIONIC_OS_RAW_IMAGE_NAME "$BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE" || exit 1
        popd || exit
    fi

    # Ubuntu 20.04 (Focal) custom images, pre-built and hosted on the CDN
    # (raw and uncompressed qcow2 variants are downloaded separately).
    FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-arm64-custom-20210929-0.raw"
    FOCAL_OS_RAW_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_RAW_IMAGE_NAME"
    FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
    if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $FOCAL_OS_RAW_IMAGE_DOWNLOAD_URL || exit 1
        popd || exit
    fi

    FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="focal-server-cloudimg-arm64-custom-20210929-0.qcow2"
    FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    if [ ! -f "$FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL || exit 1
        popd || exit
    fi

    # Create a qcow2 overlay backed by the Focal image (tests the
    # backing-file disk path).
    # NOTE(review): unlike the steps above, a qemu-img failure here is not
    # checked — the sha1sum validation below would not cover this file either.
    FOCAL_OS_QCOW2_IMAGE_BACKING_FILE_NAME="focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2"
    FOCAL_OS_QCOW2_BACKING_FILE_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW2_IMAGE_BACKING_FILE_NAME"
    if [ ! -f "$FOCAL_OS_QCOW2_BACKING_FILE_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time qemu-img create -f qcow2 -b "$FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE" -F qcow2 $FOCAL_OS_QCOW2_IMAGE_BACKING_FILE_NAME
        popd || exit
    fi

    # Ubuntu 22.04 (Jammy) custom images (raw and qcow2 variants).
    JAMMY_OS_RAW_IMAGE_NAME="jammy-server-cloudimg-arm64-custom-20220329-0.raw"
    JAMMY_OS_RAW_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$JAMMY_OS_RAW_IMAGE_NAME"
    JAMMY_OS_RAW_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_RAW_IMAGE_NAME"
    if [ ! -f "$JAMMY_OS_RAW_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $JAMMY_OS_RAW_IMAGE_DOWNLOAD_URL || exit 1
        popd || exit
    fi

    JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="jammy-server-cloudimg-arm64-custom-20220329-0.qcow2"
    JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    JAMMY_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    if [ ! -f "$JAMMY_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL || exit 1
        popd || exit
    fi

    # Alpine minirootfs tarball, used to assemble a tiny initramfs below.
    ALPINE_MINIROOTFS_URL="http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/aarch64/alpine-minirootfs-3.11.3-aarch64.tar.gz"
    ALPINE_MINIROOTFS_TARBALL="$WORKLOADS_DIR/alpine-minirootfs-aarch64.tar.gz"
    if [ ! -f "$ALPINE_MINIROOTFS_TARBALL" ]; then
        pushd "$WORKLOADS_DIR" || exit
        time wget --quiet $ALPINE_MINIROOTFS_URL -O "$ALPINE_MINIROOTFS_TARBALL" || exit 1
        popd || exit
    fi

    # Build a minimal initramfs whose /init just echoes $TEST_STRING to the
    # console and powers off (used by the initramfs boot test).
    ALPINE_INITRAMFS_IMAGE="$WORKLOADS_DIR/alpine_initramfs.img"
    if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        mkdir alpine-minirootfs
        tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
        # '<<-EOF' strips leading tabs only; the body must stay tab-indented.
        # '\$TEST_STRING' is escaped so it expands at guest boot, not here.
        cat >alpine-minirootfs/init <<-EOF
			#! /bin/sh
			mount -t devtmpfs dev /dev
			echo \$TEST_STRING > /dev/console
			poweroff -f
		EOF
        chmod +x alpine-minirootfs/init
        cd alpine-minirootfs || exit
        # Pack the rootfs into a newc-format cpio archive (kernel initramfs).
        find . -print0 |
            cpio --null --create --verbose --owner root:root --format=newc >"$ALPINE_INITRAMFS_IMAGE"
        popd || exit
    fi

    # Validate every downloaded image against the checked-in sha1 list.
    pushd "$WORKLOADS_DIR" || exit

    if ! sha1sum sha1sums-aarch64 --check; then
        echo "sha1sum validation of images failed, remove invalid images to fix the issue."
        exit 1
    fi
    popd || exit

    # Download Cloud Hypervisor binary from its last stable release
    LAST_RELEASE_VERSION="v36.0"
    CH_RELEASE_URL="https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/$LAST_RELEASE_VERSION/cloud-hypervisor-static-aarch64"
    CH_RELEASE_NAME="cloud-hypervisor-static-aarch64"
    pushd "$WORKLOADS_DIR" || exit
    # Repeat a few times to workaround a random wget failure
    WGET_RETRY_MAX=10
    wget_retry=0

    # Loop exits either on a successful wget ('break', counter < max) or
    # after WGET_RETRY_MAX failed attempts (counter == max).
    until [ "$wget_retry" -ge "$WGET_RETRY_MAX" ]; do
        time wget $CH_RELEASE_URL -O "$CH_RELEASE_NAME" && break
        wget_retry=$((wget_retry + 1))
    done

    if [ $wget_retry -ge "$WGET_RETRY_MAX" ]; then
        exit 1
    else
        chmod +x $CH_RELEASE_NAME
    fi
    popd || exit

    # Build custom kernel for guest VMs
    build_custom_linux

    # Update the kernel in the cloud image for some tests that requires recent kernel version
    FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME="focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw"
    cp "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME" "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME"
    FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR="$WORKLOADS_DIR/focal-server-cloudimg-root"
    mkdir -p "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"
    # Mount the 'raw' image, replace the compressed kernel file and umount the working folder
    # (guestmount/guestunmount come from libguestfs; /dev/sda1 is presumably
    # the image's root partition — confirm against the custom image layout).
    guestmount -a "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME" -m /dev/sda1 "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR" || exit 1
    cp "$WORKLOADS_DIR"/Image.gz "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"/boot/vmlinuz
    guestunmount "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"

    # Build virtiofsd
    build_virtiofsd

    # Create a small ext4 disk image containing a single file 'foo' with
    # content "bar" (used by block-device tests).
    BLK_IMAGE="$WORKLOADS_DIR/blk.img"
    MNT_DIR="mount_image"
    if [ ! -f "$BLK_IMAGE" ]; then
        pushd "$WORKLOADS_DIR" || exit
        fallocate -l 16M "$BLK_IMAGE"
        mkfs.ext4 -j "$BLK_IMAGE"
        mkdir $MNT_DIR
        sudo mount -t ext4 "$BLK_IMAGE" $MNT_DIR
        sudo bash -c "echo bar > $MNT_DIR/foo" || exit 1
        sudo umount "$BLK_IMAGE"
        rm -r $MNT_DIR
        popd || exit
    fi

    # Seed the directory exported to guests via virtiofs.
    # NOTE(review): only file1 and file3 are created — presumably file2 is
    # produced by a test at runtime; verify against the test code.
    SHARED_DIR="$WORKLOADS_DIR/shared_dir"
    if [ ! -d "$SHARED_DIR" ]; then
        mkdir -p "$SHARED_DIR"
        echo "foo" >"$SHARED_DIR/file1"
        echo "bar" >"$SHARED_DIR/file3" || exit 1
    fi

    # Checkout and build SPDK NVMe
    build_spdk_nvme

    # Checkout and build EDK2
    build_edk2
}
227
process_common_args "$@"

# Bail out early: the Microsoft Hypervisor has no aarch64 support.
if [ "$hypervisor" = "mshv" ]; then
    echo "AArch64 is not supported in Microsoft Hypervisor"
    exit 1
fi
235
# Serialize workload updates: take an exclusive flock on fd 12 (opened on
# $WORKLOADS_LOCK) so parallel containers never update the folder at once.
# The subshell's status reflects flock/update_workloads; abort the whole
# script on any failure instead of carrying on with broken workloads.
if ! (
    echo "try to lock $WORKLOADS_DIR folder and update"
    flock -x 12 && update_workloads
) 12>"$WORKLOADS_LOCK"; then
    exit 1
fi
249
export RUST_BACKTRACE=1

# Build the hypervisor and tools up front; abort early on a build failure
# instead of letting every 'cargo test' below rediscover it.
cargo build --all --release --target "$BUILD_TARGET" || exit 1

# Enable KSM with some reasonable parameters so that it won't take too long
# for the memory to be merged between two processes.
sudo bash -c "echo 1000000 > /sys/kernel/mm/ksm/pages_to_scan"
sudo bash -c "echo 10 > /sys/kernel/mm/ksm/sleep_millisecs"
sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"

# Both test_vfio and ovs-dpdk rely on hugepages
# Reserve 12 GiB of hugepages ('Hugepagesize' in /proc/meminfo is in KiB).
HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
PAGE_NUM=$((12288 * 1024 / HUGEPAGESIZE))
echo "$PAGE_NUM" | sudo tee /proc/sys/vm/nr_hugepages
sudo chmod a+rwX /dev/hugepages
265
# Run the test groups in order, stopping at the first failing group.
# Each guard propagates the failing group's exit status unchanged.

# Run all direct kernel boot (Device Tree) test cases in mod `parallel`
time cargo test "common_parallel::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
RES=$?
if [ $RES -ne 0 ]; then
    exit $RES
fi

# Run some tests in sequence since the result could be affected by other tests
# running in parallel.
time cargo test "common_sequential::$test_filter" --target "$BUILD_TARGET" -- --test-threads=1 ${test_binary_args[*]}
RES=$?
if [ $RES -ne 0 ]; then
    exit $RES
fi

# Run all ACPI test cases
time cargo test "aarch64_acpi::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
RES=$?
if [ $RES -ne 0 ]; then
    exit $RES
fi

# Run all test cases related to live migration
time cargo test "live_migration_parallel::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
RES=$?
if [ $RES -ne 0 ]; then
    exit $RES
fi

time cargo test "live_migration_sequential::$test_filter" --target "$BUILD_TARGET" -- --test-threads=1 ${test_binary_args[*]}
RES=$?
if [ $RES -ne 0 ]; then
    exit $RES
fi

# Run tests on dbus_api (needs a rebuild with the feature enabled)
cargo build --features "dbus_api" --all --release --target "$BUILD_TARGET"
export RUST_BACKTRACE=1
time cargo test "dbus_api::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
RES=$?

exit $RES
311