xref: /src/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh (revision 8a62a2a5659d1839d8799b4274c04469d7f17c78)
#!/usr/bin/env bash

######################################################################
# 5) start test machines and load openzfs module
######################################################################

# -e: abort on any unhandled command failure; -u: error on unset variables.
set -eu

# read our defined variables
# Must define at least: OS, OSv, CPU, VMs, NIC and RESPATH — all used below.
# Presumably written by an earlier workflow step (qemu-*-setup.sh); verify.
source /var/tmp/env.txt
11
# wait for poweroff to succeed
#
# pidof can report more than one PID; the old `tail --pid=$PID` word-split
# that list so only the first PID was waited on and the rest were treated
# as filenames. Wait for every qemu process individually instead.
# NB: the assignment is kept so that `set -e` still aborts the script when
# no qemu process exists (pidof returns non-zero).
PID=$(pidof /usr/bin/qemu-system-x86_64)
for pid in $PID; do
  tail --pid="$pid" -f /dev/null
done
# drop the build VM definition (and its UEFI nvram) now that it is off
sudo virsh undefine --nvram openzfs
16
# cpu pinning
CPUSET=("0,1" "2,3")

# additional options for virt-install
OPTS=("" "")

if [[ "$OS" == freebsd* ]]; then
  # FreeBSD needs only 6GiB
  RAM=6
elif [[ "$OS" == "debian13" ]]; then
  RAM=8
  # Boot Debian 13 with uefi=on and secureboot=off (ZFS Kernel Module not signed)
  OPTS=("--boot" "firmware=efi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no")
else
  # Linux needs more memory, but can be optimized to share it via KSM
  RAM=8
fi
40
# create snapshot we can clone later
sudo zfs snapshot zpool/openzfs@now

# setup the testing vm's
# runner's public key; injected into every VM via cloud-init ssh_authorized_keys
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
46
# start testing VMs
for ((i=1; i<=VMs; i++)); do
  echo "Creating disk for vm$i..."
  DISK="/dev/zvol/zpool/vm$i"
  FORMAT="raw"
  # system disk: writable clone of the @now snapshot taken above;
  # tests disk: fresh sparse (-s) 64 GiB zvol, 64k blocks, -p creates parents
  sudo zfs clone zpool/openzfs@now zpool/vm$i-system
  sudo zfs create -ps -b 64k -V 64g zpool/vm$i-tests

  # cloud-init user-data for this VM: root + zfs users with passwordless
  # sudo and our SSH key; growpart expands '/' into the cloned system disk.
  # Delimiter is intentionally unquoted so $i and $PUBKEY expand here.
  cat <<EOF > /tmp/user-data
#cloud-config

fqdn: vm$i

users:
  - name: root
    shell: /bin/bash
    sudo: ['ALL=(ALL) NOPASSWD:ALL']
  - name: zfs
    shell: /bin/bash
    sudo: ['ALL=(ALL) NOPASSWD:ALL']
    ssh_authorized_keys:
      - $PUBKEY
    # Workaround for Alpine Linux.
    lock_passwd: false
    passwd: '*'

packages:
  - sudo
  - bash

growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF

  # pin a fixed MAC -> IP mapping so vm$i is always reachable at
  # 192.168.122.1$i (matches the MAC given to virt-install below)
  sudo virsh net-update default add ip-dhcp-host \
    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config

  # boot the VM headless from the two zvols; --import skips installation,
  # ${OPTS[@]} carries the per-OS firmware flags chosen earlier
  sudo virt-install \
    --os-variant $OSv \
    --name "vm$i" \
    --cpu host-passthrough \
    --virt-type=kvm --hvm \
    --vcpus=$CPU,sockets=1 \
    --cpuset=${CPUSET[$((i-1))]} \
    --memory $((1024*RAM)) \
    --memballoon model=virtio \
    --graphics none \
    --cloud-init user-data=/tmp/user-data \
    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
    --disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --import --noautoconsole ${OPTS[0]} ${OPTS[1]}
done
102
# generate some memory stats
# (quoted delimiter: the script body contains no expansions and is
# written out literally)
cat > cronjob.sh <<'EOF'
exec 1>>/var/tmp/stats.txt
exec 2>&1
echo "********************************************************************************"
uptime
free -m
zfs list
EOF

# install it as a root cron job firing every five minutes
sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
printf '%s\n' '*/5 * * * *  /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
rm crontab.txt
118
# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
# - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
for ((i=1; i<=VMs; i++)); do
  mkdir -p $RESPATH/vm$i
  # pty gets the first whitespace-delimited token of the virsh output
  read "pty" <<< $(sudo virsh ttyconsole vm$i)

  # Create the file so we can tail it, even if there's no output.
  touch $RESPATH/vm$i/console.txt

  # Background copier: drains the pty into the per-VM console log.
  # NOTE(review): these background jobs are never wait-ed on — presumably
  # fine for a throwaway CI runner; they die with the runner.
  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &

  # Write all VM boot lines to the console to aid in debugging failed boots.
  # The boot lines from all the VMs will be munged together, so prepend each
  # line with the vm hostname (like 'vm1:').
  (while IFS=$'\n' read -r line; do echo "vm$i: $line" ; done < <(sudo tail -f $RESPATH/vm$i/console.txt)) &

done
printf 'Console logging for %sx %s started.\n' "$VMs" "$OS"


# check if the machines are okay
printf "Waiting for vm's to come up...  (%sx CPU=%s RAM=%s)\n" "$VMs" "$CPU" "$RAM"
for ((vm = 1; vm <= VMs; vm++)); do
  .github/workflows/scripts/qemu-wait-for-vm.sh "vm${vm}"
done
printf 'All %s VMs are up now.\n' "$VMs"
146