# Functional test that boots known good tuxboot images the same way
# that tuxrun (www.tuxrun.org) does. This tool is used by things like
# the LKFT project to run regression tests on kernels.
#
# Copyright (c) 2023 Linaro Ltd.
#
# Author:
#  Alex Bennée <alex.bennee@linaro.org>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os
import stat
import time

from qemu_test import QemuSystemTest
from qemu_test import exec_command, exec_command_and_wait_for_pattern
from qemu_test import wait_for_console_pattern
from qemu_test import has_cmd, run_cmd, get_qemu_img

class TuxRunBaselineTest(QemuSystemTest):

    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0'
    # Tests are ~10-40s, allow for --debug/--enable-gcov overhead
    timeout = 100

    def get_tag(self, tagname, default=None):
        """
        Get the metadata tag or return the default.
        """
        utag = self._get_unique_tag_val(tagname)
        print(f"{tagname}/{default} -> {utag}")
        if utag:
            return utag

        return default

    def setUp(self):
        super().setUp()

        # We need zstd for all the tuxrun tests
        (has_zstd, msg) = has_cmd('zstd')
        if not has_zstd:
            self.skipTest(msg)
        self.zstd = 'zstd'

        # Pre-init TuxRun specific settings: most machines work with
        # reasonable defaults but we sometimes need to tweak the
        # config. To avoid open-coding everything we store all these
        # details in the metadata for each test.

        # The tuxboot tag matches the root directory
        self.tuxboot = self.arch

        # Most Linux guests use ttyS0 for their serial port
        self.console = "ttyS0"

        # Does the machine shut down QEMU cleanly on "halt"?
        self.wait_for_shutdown = True

        # The root block device as seen by the guest
        self.root = "vda"

        # Occasionally we need extra devices to hook things up
        self.extradev = None

        self.qemu_img = get_qemu_img(self)

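    # Subclasses that need something other than these defaults can
    # override them in their own setUp(), e.g. (hypothetical values):
    #
    #   def setUp(self):
    #       super().setUp()
    #       self.console = "ttyAMA0"          # Arm PL011 UART
    #       self.root = "sda"
    #       self.wait_for_shutdown = False
    #       self.extradev = "virtio-rng-pci"
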
    def wait_for_console_pattern(self, success_message, vm=None):
        wait_for_console_pattern(self, success_message,
                                 failure_message='Kernel panic - not syncing',
                                 vm=vm)

    def fetch_tuxrun_assets(self, kernel_asset, rootfs_asset, dtb_asset=None):
        """
        Fetch the TuxBoot assets.
        """
        kernel_image = kernel_asset.fetch()
        disk_image_zst = rootfs_asset.fetch()

        disk_image = os.path.join(self.workdir, "rootfs.ext4")

        run_cmd([self.zstd, "-f", "-d", disk_image_zst,
                 "-o", disk_image])
        # zstd copies the source archive's permissions to the output
        # file, so we must make it writable for QEMU
        os.chmod(disk_image, stat.S_IRUSR | stat.S_IWUSR)
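        # For reference, the decompression above is equivalent to the
        # shell steps (illustrative):
        #   zstd -f -d rootfs.ext4.zst -o rootfs.ext4
        #   chmod 600 rootfs.ext4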

        dtb = dtb_asset.fetch() if dtb_asset is not None else None

        return (kernel_image, disk_image, dtb)

    def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0):
        """
        Set up the run and add the common parameters to the VM.
        """
        self.vm.set_console(console_index=console_index)

        # All block devices are raw ext4 images
        blockdev = "driver=raw,file.driver=file," \
            + f"file.filename={disk},node-name=hd0"

        kcmd_line = self.KERNEL_COMMON_COMMAND_LINE
        kcmd_line += f" root=/dev/{self.root}"
        kcmd_line += f" console={self.console}"

        self.vm.add_args('-kernel', kernel,
                         '-append', kcmd_line,
                         '-blockdev', blockdev)

        # Sometimes we need extra devices attached
        if self.extradev:
            self.vm.add_args('-device', self.extradev)

        self.vm.add_args('-device',
                         f"{drive},drive=hd0")

        # Some machines need an explicit DTB
        if dtb:
            self.vm.add_args('-dtb', dtb)

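    # With the defaults from setUp() the arguments built by
    # prepare_run() amount to an invocation roughly like (illustrative
    # only, paths elided):
    #
    #   qemu-system-<arch> \
    #       -kernel <kernel> \
    #       -append "printk.time=0 root=/dev/vda console=ttyS0" \
    #       -blockdev driver=raw,file.driver=file,file.filename=<disk>,node-name=hd0 \
    #       -device virtio-blk-device,drive=hd0
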
    def run_tuxtest_tests(self, haltmsg):
        """
        Wait for the system to boot up, wait for the login prompt and
        then do a few things on the console. Trigger a shutdown and
        wait to exit cleanly.
        """
        self.wait_for_console_pattern("Welcome to TuxTest")
        time.sleep(0.2)
        exec_command(self, 'root')
        time.sleep(0.2)
        exec_command(self, 'cat /proc/interrupts')
        time.sleep(0.1)
        exec_command(self, 'cat /proc/self/maps')
        time.sleep(0.1)
        exec_command(self, 'uname -a')
        time.sleep(0.1)
        exec_command_and_wait_for_pattern(self, 'halt', haltmsg)

        # Wait for the VM to shut down gracefully if the machine
        # exits on "halt", otherwise tear it down ourselves
        if self.wait_for_shutdown:
            self.vm.wait()
        else:
            self.vm.shutdown()

    def common_tuxrun(self,
                      kernel_asset,
                      rootfs_asset,
                      dtb_asset=None,
                      drive="virtio-blk-device",
                      haltmsg="reboot: System halted",
                      console_index=0):
        """
        Common path for LKFT tests. Unless we need to do something
        special with the command line, we can process most things using
        the tag metadata.
        """
        (kernel, disk, dtb) = self.fetch_tuxrun_assets(kernel_asset,
                                                       rootfs_asset,
                                                       dtb_asset)

        self.prepare_run(kernel, disk, drive, dtb, console_index)
        self.vm.launch()
        self.run_tuxtest_tests(haltmsg)
        os.remove(disk)
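
# A minimal sketch of how a per-architecture test is expected to use
# this class: subclass it, point it at tuxboot assets and call
# common_tuxrun(). The URLs, hashes and machine/cpu values below are
# placeholders rather than real assets.
#
#   from qemu_test import Asset
#   from qemu_test.tuxruntest import TuxRunBaselineTest
#
#   class TuxRunExampleTest(TuxRunBaselineTest):
#       ASSET_KERNEL = Asset('https://example.com/.../Image', '<sha256>')
#       ASSET_ROOTFS = Asset('https://example.com/.../rootfs.ext4.zst',
#                            '<sha256>')
#
#       def test_example(self):
#           self.set_machine('virt')
#           self.cpu = 'cortex-a57'
#           self.common_tuxrun(kernel_asset=self.ASSET_KERNEL,
#                              rootfs_asset=self.ASSET_ROOTFS)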
164