xref: /cloud-hypervisor/scripts/run_integration_tests_aarch64.sh (revision b440cb7d2330770cd415b63544a371d4caa2db3a)
#!/bin/bash
set -x

# shellcheck source=/dev/null
source "$HOME/.cargo/env"
source "$(dirname "$0")/test-util.sh"

# Target triple for all cargo builds/tests; may be overridden by the caller.
export BUILD_TARGET=${BUILD_TARGET-aarch64-unknown-linux-gnu}

WORKLOADS_DIR="$HOME/workloads"
# Lock file used to serialize workload preparation across containers.
WORKLOADS_LOCK="$WORKLOADS_DIR/integration_test.lock"

mkdir -p "$WORKLOADS_DIR"
13
# Build the EDK2 UEFI firmware (CLOUDHV_EFI.fd) for AArch64 and install it
# into $WORKLOADS_DIR. Sources are pinned to fixed revisions and only
# rebuilt when one of the '.built' stamp files is missing.
# Globals: reads WORKLOADS_DIR; exports WORKSPACE, PACKAGES_PATH, IASL_PREFIX
# (consumed by the EDK2 build system).
build_edk2() {
    EDK2_BUILD_DIR="$WORKLOADS_DIR/edk2_build"
    EDK2_REPO="https://github.com/tianocore/edk2.git"
    EDK2_DIR="$EDK2_BUILD_DIR/edk2"
    EDK2_PLAT_REPO="https://github.com/tianocore/edk2-platforms.git"
    EDK2_PLAT_DIR="$EDK2_BUILD_DIR/edk2-platforms"
    ACPICA_REPO="https://github.com/acpica/acpica.git"
    ACPICA_DIR="$EDK2_BUILD_DIR/acpica"
    export WORKSPACE="$EDK2_BUILD_DIR"
    export PACKAGES_PATH="$EDK2_DIR:$EDK2_PLAT_DIR"
    export IASL_PREFIX="$ACPICA_DIR/generate/unix/bin/"

    # 'mkdir -p' is already a no-op when the directory exists; no explicit
    # existence check needed.
    mkdir -p "$EDK2_BUILD_DIR"

    # Prepare source code at pinned revisions (checkout_repo comes from
    # test-util.sh).
    checkout_repo "$EDK2_DIR" "$EDK2_REPO" master "46b4606ba23498d3d0e66b53e498eb3d5d592586"
    pushd "$EDK2_DIR"
    git submodule update --init
    popd
    checkout_repo "$EDK2_PLAT_DIR" "$EDK2_PLAT_REPO" master "8227e9e9f6a8aefbd772b40138f835121ccb2307"
    checkout_repo "$ACPICA_DIR" "$ACPICA_REPO" master "b9c69f81a05c45611c91ea9cbce8756078d76233"

    if [[ ! -f "$EDK2_DIR/.built" || \
          ! -f "$EDK2_PLAT_DIR/.built" || \
          ! -f "$ACPICA_DIR/.built" ]]; then
        pushd "$EDK2_BUILD_DIR"
        # Build. Abort on any failure so the '.built' stamps are only
        # created after a fully successful build — the previous version
        # stamped them unconditionally, which silently skipped rebuilds
        # after a failed build.
        make -C acpica -j "$(nproc)" || exit 1
        # shellcheck source=/dev/null
        source edk2/edksetup.sh
        make -C edk2/BaseTools -j "$(nproc)" || exit 1
        build -a AARCH64 -t GCC5 -p ArmVirtPkg/ArmVirtCloudHv.dsc -b RELEASE -n 0 || exit 1
        cp Build/ArmVirtCloudHv-AARCH64/RELEASE_GCC5/FV/CLOUDHV_EFI.fd "$WORKLOADS_DIR" || exit 1
        touch "$EDK2_DIR"/.built
        touch "$EDK2_PLAT_DIR"/.built
        touch "$ACPICA_DIR"/.built
        popd
    fi
}
54
# Build the SPDK NVMe-oF target (with vfio-user support) at a pinned
# revision and deploy nvmf_tgt plus the rpc tooling to $SPDK_DEPLOY_DIR.
# The build is skipped when the '.built' stamp exists.
# Globals: reads WORKLOADS_DIR; requires root (apt-get, /usr/local writes).
build_spdk_nvme() {
    SPDK_DIR="$WORKLOADS_DIR/spdk"
    SPDK_REPO="https://github.com/spdk/spdk.git"
    SPDK_DEPLOY_DIR="/usr/local/bin/spdk-nvme"
    checkout_repo "$SPDK_DIR" "$SPDK_REPO" master "6301f8915de32baed10dba1eebed556a6749211a"

    if [ ! -f "$SPDK_DIR/.built" ]; then
        pushd "$SPDK_DIR"
        git submodule update --init
        apt-get update
        ./scripts/pkgdep.sh
        ./configure --with-vfio-user
        # NOTE(review): hardcoded python3.8 site path — this breaks on hosts
        # with a different Python version; confirm against the CI container.
        chmod +x /usr/local/lib/python3.8/dist-packages/ninja/data/bin/ninja
        make -j "$(nproc)" || exit 1
        touch .built
        popd
    fi
    # 'mkdir -p' replaces the previous hardcoded '[ ! -d /usr/local/bin/spdk-nvme ]'
    # check, which duplicated (and could drift from) $SPDK_DEPLOY_DIR.
    mkdir -p "$SPDK_DEPLOY_DIR"
    cp "$SPDK_DIR/build/bin/nvmf_tgt" "$SPDK_DEPLOY_DIR/nvmf_tgt"
    cp "$SPDK_DIR/scripts/rpc.py" "$SPDK_DEPLOY_DIR/rpc.py"
    cp -r "$SPDK_DIR/scripts/rpc" "$SPDK_DEPLOY_DIR/rpc"
}
79
# Build virtiofsd (Rust implementation) at a pinned revision and copy the
# resulting binary into $WORKLOADS_DIR. Skipped when the '.built' stamp exists.
build_virtiofsd() {
    VIRTIOFSD_DIR="$WORKLOADS_DIR/virtiofsd_build"
    VIRTIOFSD_REPO="https://gitlab.com/virtio-fs/virtiofsd.git"

    checkout_repo "$VIRTIOFSD_DIR" "$VIRTIOFSD_REPO" v1.1.0 "220405d7a2606c92636d31992b5cb3036a41047b"

    if [ ! -f "$VIRTIOFSD_DIR/.built" ]; then
        pushd "$VIRTIOFSD_DIR"
        # Fail fast on a broken build instead of relying on the subsequent
        # cp to notice the missing binary.
        time cargo build --release || exit 1
        cp target/release/virtiofsd "$WORKLOADS_DIR/" || exit 1
        touch .built
        popd
    fi
}
94
# Download, convert and build every guest artifact the integration tests
# need, into $WORKLOADS_DIR. Each artifact is only (re)created when missing,
# so reruns are cheap. Called under an exclusive flock (see bottom of the
# script) so concurrent containers do not race on the same folder.
# Failures abort with 'exit 1'; the caller runs this in a subshell and
# checks the exit status.
update_workloads() {
    # Checksum manifest used to validate the downloaded images below.
    # NOTE(review): assumes the current working directory is the repository
    # root so 'scripts/sha1sums-aarch64' resolves — confirm with callers.
    cp scripts/sha1sums-aarch64 $WORKLOADS_DIR

    # Ubuntu Bionic cloud image (qcow2 as published upstream).
    BIONIC_OS_IMAGE_DOWNLOAD_NAME="bionic-server-cloudimg-arm64.img"
    BIONIC_OS_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
    BIONIC_OS_DOWNLOAD_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
    if [ ! -f "$BIONIC_OS_DOWNLOAD_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time wget --quiet $BIONIC_OS_IMAGE_DOWNLOAD_URL || exit 1
        popd
    fi

    # Convert the Bionic image to raw for the tests that boot from raw disks.
    BIONIC_OS_RAW_IMAGE_NAME="bionic-server-cloudimg-arm64.raw"
    BIONIC_OS_RAW_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_RAW_IMAGE_NAME"
    if [ ! -f "$BIONIC_OS_RAW_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time qemu-img convert -p -f qcow2 -O raw $BIONIC_OS_IMAGE_DOWNLOAD_NAME $BIONIC_OS_RAW_IMAGE_NAME || exit 1
        popd
    fi

    # Convert the raw image back to qcow2 to obtain an image without
    # compressed blocks, so the qcow2 image can be used directly in the
    # integration tests.
    BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="bionic-server-cloudimg-arm64.qcow2"
    BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    if [ ! -f "$BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time qemu-img convert -p -f raw -O qcow2 $BIONIC_OS_RAW_IMAGE_NAME $BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE || exit 1
        popd
    fi

    # Custom Ubuntu Focal images: raw and (pre-built) uncompressed qcow2 are
    # downloaded separately rather than converted locally.
    FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-arm64-custom-20210929-0.raw"
    FOCAL_OS_RAW_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_RAW_IMAGE_NAME"
    FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
    if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time wget --quiet $FOCAL_OS_RAW_IMAGE_DOWNLOAD_URL || exit 1
        popd
    fi

    FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="focal-server-cloudimg-arm64-custom-20210929-0.qcow2"
    FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    if [ ! -f "$FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time wget --quiet $FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL || exit 1
        popd
    fi

    # Custom Ubuntu Jammy images, same layout as the Focal ones above.
    JAMMY_OS_RAW_IMAGE_NAME="jammy-server-cloudimg-arm64-custom-20220329-0.raw"
    JAMMY_OS_RAW_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$JAMMY_OS_RAW_IMAGE_NAME"
    JAMMY_OS_RAW_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_RAW_IMAGE_NAME"
    if [ ! -f "$JAMMY_OS_RAW_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time wget --quiet $JAMMY_OS_RAW_IMAGE_DOWNLOAD_URL || exit 1
        popd
    fi

    JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="jammy-server-cloudimg-arm64-custom-20220329-0.qcow2"
    JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    JAMMY_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
    if [ ! -f "$JAMMY_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        time wget --quiet $JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL || exit 1
        popd
    fi

    # Minimal Alpine root filesystem, used to build a tiny initramfs below.
    ALPINE_MINIROOTFS_URL="http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/aarch64/alpine-minirootfs-3.11.3-aarch64.tar.gz"
    ALPINE_MINIROOTFS_TARBALL="$WORKLOADS_DIR/alpine-minirootfs-aarch64.tar.gz"
    if [ ! -f "$ALPINE_MINIROOTFS_TARBALL" ]; then
        pushd $WORKLOADS_DIR
        time wget --quiet $ALPINE_MINIROOTFS_URL -O $ALPINE_MINIROOTFS_TARBALL || exit 1
        popd
    fi

    # Build an initramfs whose /init just echoes $TEST_STRING to the console
    # and powers off — used by the direct-kernel-boot initramfs tests.
    ALPINE_INITRAMFS_IMAGE="$WORKLOADS_DIR/alpine_initramfs.img"
    if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        mkdir alpine-minirootfs
        tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
        # '<<-EOF' strips leading tabs; '\$TEST_STRING' stays literal so it
        # is expanded by the guest at boot, not here.
        cat > alpine-minirootfs/init <<-EOF
			#! /bin/sh
			mount -t devtmpfs dev /dev
			echo \$TEST_STRING > /dev/console
			poweroff -f
		EOF
        chmod +x alpine-minirootfs/init
        # 'popd' below still returns to the directory saved by 'pushd',
        # regardless of this 'cd'.
        cd alpine-minirootfs
        find . -print0 |
            cpio --null --create --verbose --owner root:root --format=newc > "$ALPINE_INITRAMFS_IMAGE"
        popd
    fi

    # Validate every downloaded image against the manifest copied above.
    # (GNU sha1sum accepts options after the operand, so this ordering works.)
    pushd $WORKLOADS_DIR
    sha1sum sha1sums-aarch64 --check
    if [ $? -ne 0 ]; then
        echo "sha1sum validation of images failed, remove invalid images to fix the issue."
        exit 1
    fi
    popd

    # Download Cloud Hypervisor binary from its last stable release
    # (used by the live-migration / compatibility tests).
    LAST_RELEASE_VERSION="v23.0"
    CH_RELEASE_URL="https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/$LAST_RELEASE_VERSION/cloud-hypervisor-static-aarch64"
    CH_RELEASE_NAME="cloud-hypervisor-static-aarch64"
    pushd $WORKLOADS_DIR
    time wget --quiet $CH_RELEASE_URL -O "$CH_RELEASE_NAME" || exit 1
    chmod +x $CH_RELEASE_NAME
    popd

    # Build custom kernel for guest VMs (provided by test-util.sh; expected
    # to produce Image.gz in $WORKLOADS_DIR, used below).
    build_custom_linux

    # Update the kernel in the cloud image for some tests that requires recent kernel version
    FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME="focal-server-cloudimg-arm64-custom-20210929-0-update-kernel.raw"
    cp "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME" "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME"
    FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR="$WORKLOADS_DIR/focal-server-cloudimg-root"
    mkdir -p "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"
    # Mount the 'raw' image, replace the compressed kernel file and umount
    # the working folder. NOTE(review): guestmount/guestunmount require
    # libguestfs-tools; assumes the rootfs is on /dev/sda1 — confirm for
    # the custom image.
    guestmount -a "$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_NAME" -m /dev/sda1 "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR" || exit 1
    cp "$WORKLOADS_DIR"/Image.gz "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"/boot/vmlinuz
    guestunmount "$FOCAL_OS_RAW_IMAGE_UPDATE_KERNEL_ROOT_DIR"

    # Build virtiofsd
    build_virtiofsd

    # Small ext4 disk image containing a single file 'foo' (content "bar"),
    # used by the hotplug/block tests.
    BLK_IMAGE="$WORKLOADS_DIR/blk.img"
    MNT_DIR="mount_image"
    if [ ! -f "$BLK_IMAGE" ]; then
        pushd $WORKLOADS_DIR
        fallocate -l 16M $BLK_IMAGE
        mkfs.ext4 -j $BLK_IMAGE
        mkdir $MNT_DIR
        sudo mount -t ext4 $BLK_IMAGE $MNT_DIR
        sudo bash -c "echo bar > $MNT_DIR/foo" || exit 1
        # NOTE(review): umount is given the image path rather than the mount
        # point — verify this detaches the loop mount; 'umount $MNT_DIR'
        # would be the conventional form.
        sudo umount $BLK_IMAGE
        rm -r $MNT_DIR
        popd
    fi

    # Shared directory consumed by the virtio-fs tests.
    SHARED_DIR="$WORKLOADS_DIR/shared_dir"
    if [ ! -d "$SHARED_DIR" ]; then
        mkdir -p $SHARED_DIR
        echo "foo" > "$SHARED_DIR/file1"
        echo "bar" > "$SHARED_DIR/file3" || exit 1
    fi

    # Checkout and build SPDK NVMe
    build_spdk_nvme

    # Checkout and build EDK2
    build_edk2
}
247
process_common_args "$@"

# aarch64 is not supported for MSHV.
if [[ "$hypervisor" = "mshv" ]]; then
    echo "AArch64 is not supported in Microsoft Hypervisor"
    exit 1
fi

# For now these values are default for kvm.
features=""

# Lock the workloads folder to avoid parallel updating by different
# containers. The exclusive lock is taken on fd 12, which is opened on the
# lock file for the duration of the subshell.
(
    echo "try to lock $WORKLOADS_DIR folder and update"
    flock -x 12 && update_workloads
) 12>"$WORKLOADS_LOCK"

# Propagate any error from `update_workloads`: it runs in a subshell, so its
# `exit 1` only terminates the subshell, not this script.
RES=$?
if [ $RES -ne 0 ]; then
    exit 1
fi

# NOTE(review): this overrides the BUILD_TARGET default set at the top of
# the script; if CH_LIBC is unset the triple ends in a dash — confirm
# CH_LIBC is always provided by the environment.
BUILD_TARGET="aarch64-unknown-linux-${CH_LIBC}"
if [[ "${BUILD_TARGET}" == "aarch64-unknown-linux-musl" ]]; then
    export TARGET_CC="musl-gcc"
    # NOTE(review): "link_arg" (underscore) looks like a typo for "link-arg";
    # confirm whether rustc accepts the underscore spelling before changing
    # this string, as it is passed verbatim to rustc.
    export RUSTFLAGS="-C link-arg=-lgcc -C link_arg=-specs -C link_arg=/usr/lib/aarch64-linux-musl/musl-gcc.specs"
fi

export RUST_BACKTRACE=1

# Build all binaries (no ACPI: direct kernel boot / Device Tree).
# shellcheck disable=SC2086 — $features must word-split into separate flags.
cargo build --all --release $features --target "$BUILD_TARGET"
strip "target/$BUILD_TARGET/release/cloud-hypervisor"
strip "target/$BUILD_TARGET/release/vhost_user_net"
strip "target/$BUILD_TARGET/release/ch-remote"

# Enable KSM with some reasonable parameters so that it won't take too long
# for the memory to be merged between two processes.
sudo bash -c "echo 1000000 > /sys/kernel/mm/ksm/pages_to_scan"
sudo bash -c "echo 10 > /sys/kernel/mm/ksm/sleep_millisecs"
sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"

# Both test_vfio and ovs-dpdk rely on hugepages.
echo 6144 | sudo tee /proc/sys/vm/nr_hugepages
sudo chmod a+rwX /dev/hugepages

# Run the test groups in order, exiting at the first failing group. Groups
# that are sensitive to other tests running concurrently use
# --test-threads=1.

# All direct kernel boot (Device Tree) test cases in mod `parallel`.
# shellcheck disable=SC2086
time cargo test $features "parallel::$test_filter" --target "$BUILD_TARGET" -- "${test_binary_args[@]}"
RES=$?
[ $RES -eq 0 ] || exit $RES

# Tests that must run in sequence since their result could be affected by
# other tests running in parallel.
# shellcheck disable=SC2086
time cargo test $features "sequential::$test_filter" --target "$BUILD_TARGET" -- --test-threads=1 "${test_binary_args[@]}"
RES=$?
[ $RES -eq 0 ] || exit $RES

# All ACPI test cases.
# shellcheck disable=SC2086
time cargo test $features "aarch64_acpi::$test_filter" --target "$BUILD_TARGET" -- "${test_binary_args[@]}"
RES=$?
[ $RES -eq 0 ] || exit $RES

# All test cases related to live migration.
# shellcheck disable=SC2086
time cargo test $features "live_migration::$test_filter" --target "$BUILD_TARGET" -- --test-threads=1 "${test_binary_args[@]}"
RES=$?

exit $RES
327