[PATCH v2 00/15] Add KVM Selftests runner

Sean Christopherson seanjc at google.com
Wed Jul 9 15:25:02 PDT 2025


On Fri, Jun 06, 2025, Vipin Sharma wrote:
>  tools/testing/selftests/kvm/.gitignore        |   4 +-
>  tools/testing/selftests/kvm/Makefile.kvm      |   8 +
>  .../testing/selftests/kvm/runner/__main__.py  | 271 ++++++++++++++++++
>  tools/testing/selftests/kvm/runner/command.py |  53 ++++
>  .../testing/selftests/kvm/runner/selftest.py  |  66 +++++
>  .../selftests/kvm/runner/test_runner.py       |  88 ++++++

Overall, looks great!  I think the only significant feedback is on the command
line options.

One thing we probably need is a README of some form, to explain how this works
and to give some examples.  Outside of you and me, I doubt anyone will know how
to use this :-)

Here's the full diff of the modifications I made to massage things to my liking.
It's not complete, e.g. I punted on the help messages and didn't change to
-p/--path, but otherwise it seems to work?

---
 .../testing/selftests/kvm/runner/__main__.py  | 86 +++----------------
 tools/testing/selftests/kvm/runner/command.py | 53 ------------
 .../testing/selftests/kvm/runner/selftest.py  | 39 +++++++--
 .../selftests/kvm/runner/test_runner.py       | 20 ++---
 4 files changed, 53 insertions(+), 145 deletions(-)
 delete mode 100644 tools/testing/selftests/kvm/runner/command.py

diff --git a/tools/testing/selftests/kvm/runner/__main__.py b/tools/testing/selftests/kvm/runner/__main__.py
index c02035a62873..0105835c557c 100644
--- a/tools/testing/selftests/kvm/runner/__main__.py
+++ b/tools/testing/selftests/kvm/runner/__main__.py
@@ -59,71 +59,26 @@ def cli():
                         type=int,
                         help="Maximum number of tests that can be run concurrently. (Default: 1)")
 
-    parser.add_argument("--print-status",
-                        action="store_true",
-                        default=False,
-                        help="Print only test's status and avoid printing stdout and stderr of the tests")
-
-    parser.add_argument("--print-passed",
-                        action="store_true",
-                        default=False,
-                        help="Print passed test's stdout, stderr and status."
-                        )
-
-    parser.add_argument("--print-passed-status",
-                        action="store_true",
-                        default=False,
-                        help="Print only passed test's status."
-                        )
-
-    parser.add_argument("--print-failed",
-                        action="store_true",
-                        default=False,
-                        help="Print failed test's stdout, stderr and status."
+    parser.add_argument("--print-passed", default="full", const="full", nargs='?', choices=["off", "full", "stderr", "stdout", "status"],
+                        help="blah"
                         )
 
-    parser.add_argument("--print-failed-status",
-                        action="store_true",
-                        default=False,
-                        help="Print only failed test's status."
+    parser.add_argument("--print-failed", default="full", const="full", nargs='?', choices=["off", "full", "stderr", "stdout", "status"],
+                        help="Full = print each test's stdout, stderr and status; status = only status."
                         )
 
-    parser.add_argument("--print-skipped",
-                        action="store_true",
-                        default=False,
+    parser.add_argument("--print-skipped", default="full", const="full", nargs='?', choices=["off", "full", "stderr", "stdout", "status"],
                         help="Print skipped test's stdout, stderr and status."
                         )
 
-    parser.add_argument("--print-skipped-status",
-                        action="store_true",
-                        default=False,
-                        help="Print only skipped test's status."
-                        )
-
-    parser.add_argument("--print-timed-out",
-                        action="store_true",
-                        default=False,
+    parser.add_argument("--print-timed-out", default="full", const="full", nargs='?', choices=["off", "full", "stderr", "stdout", "status"],
                         help="Print timed out test's stdout, stderr and status."
                         )
 
-    parser.add_argument("--print-timed-out-status",
-                        action="store_true",
-                        default=False,
-                        help="Print only timed out test's status."
-                        )
-
-    parser.add_argument("--print-no-runs",
-                        action="store_true",
-                        default=False,
+    parser.add_argument("--print-no-run", default="full", const="full", nargs='?', choices=["off", "full", "stderr", "stdout", "status"],
                         help="Print stdout, stderr and status for tests which didn't run."
                         )
 
-    parser.add_argument("--print-no-runs-status",
-                        action="store_true",
-                        default=False,
-                        help="Print only tests which didn't run."
-                        )
-
     parser.add_argument("--sticky-summary-only",
                         action="store_true",
                         default=False,
@@ -145,36 +100,19 @@ def level_filters(args):
     if args.sticky_summary_only or args.quiet:
         return levels
 
-    if args.print_passed or args.print_passed_status or args.print_status:
+    if args.print_passed != "off":
         levels.add(SelftestStatus.PASSED)
 
-    if args.print_failed or args.print_failed_status or args.print_status:
+    if args.print_failed != "off":
         levels.add(SelftestStatus.FAILED)
 
-    if args.print_skipped or args.print_skipped_status or args.print_status:
+    if args.print_skipped != "off":
         levels.add(SelftestStatus.SKIPPED)
 
-    if args.print_timed_out or args.print_timed_out_status or args.print_status:
+    if args.print_timed_out != "off":
         levels.add(SelftestStatus.TIMED_OUT)
 
-    if args.print_no_runs or args.print_no_runs_status or args.print_status:
-        levels.add(SelftestStatus.NO_RUN)
-
-    # Nothing set explicitly, enable all.
-    if not levels:
-        args.print_passed = True
-        levels.add(SelftestStatus.PASSED)
-
-        args.print_failed = True
-        levels.add(SelftestStatus.FAILED)
-
-        args.print_skipped = True
-        levels.add(SelftestStatus.SKIPPED)
-
-        args.print_timed_out = True
-        levels.add(SelftestStatus.TIMED_OUT)
-
-        args.print_no_runs = True
+    if args.print_no_run != "off":
         levels.add(SelftestStatus.NO_RUN)
 
     levels.add(logging.NOTSET)
diff --git a/tools/testing/selftests/kvm/runner/command.py b/tools/testing/selftests/kvm/runner/command.py
deleted file mode 100644
index 6f6b1811b490..000000000000
--- a/tools/testing/selftests/kvm/runner/command.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Copyright 2025 Google LLC
-#
-# Author: vipinsh at google.com (Vipin Sharma)
-
-import subprocess
-import pathlib
-import contextlib
-import os
-
-
-class Command:
-    """Executes a command in shell.
-
-    Returns the exit code, std output and std error of the command.
-    """
-
-    def __init__(self, command, timeout, output_dir):
-        self.command = command
-        self.timeout = timeout
-        self.output_dir = output_dir
-
-    def _run(self, output=None, error=None):
-        run_args = {
-            "universal_newlines": True,
-            "shell": True,
-            "timeout": self.timeout,
-        }
-
-        if output is None and error is None:
-            run_args.update({"capture_output": True})
-        else:
-            run_args.update({"stdout": output, "stderr": error})
-
-        proc = subprocess.run(self.command, **run_args)
-        return proc.returncode, proc.stdout, proc.stderr
-
-    def run(self):
-        if self.output_dir is not None:
-            pathlib.Path(self.output_dir).mkdir(parents=True, exist_ok=True)
-
-        output = None
-        error = None
-        with contextlib.ExitStack() as stack:
-            if self.output_dir is not None:
-                output_path = os.path.join(self.output_dir, "stdout")
-                output = stack.enter_context(
-                    open(output_path, encoding="utf-8", mode="w"))
-
-                error_path = os.path.join(self.output_dir, "stderr")
-                error = stack.enter_context(
-                    open(error_path, encoding="utf-8", mode="w"))
-            return self._run(output, error)
diff --git a/tools/testing/selftests/kvm/runner/selftest.py b/tools/testing/selftests/kvm/runner/selftest.py
index 664958c693e5..1ec1ddfbf034 100644
--- a/tools/testing/selftests/kvm/runner/selftest.py
+++ b/tools/testing/selftests/kvm/runner/selftest.py
@@ -3,7 +3,6 @@
 #
 # Author: vipinsh at google.com (Vipin Sharma)
 
-import command
 import pathlib
 import enum
 import os
@@ -37,17 +36,18 @@ class Selftest:
         if not test_command:
             raise ValueError("Empty test command in " + test_path)
 
-        test_command = os.path.join(executable_dir, test_command)
-        self.exists = os.path.isfile(test_command.split(maxsplit=1)[0])
+        self.command = os.path.join(executable_dir, test_command)
+        self.exists = os.path.isfile(self.command.split(maxsplit=1)[0])
         self.test_path = test_path
 
         if output_dir is not None:
             output_dir = os.path.join(output_dir, test_path.lstrip("/"))
-        self.command = command.Command(test_command, timeout, output_dir)
 
         self.status = SelftestStatus.NO_RUN
         self.stdout = ""
         self.stderr = ""
+        self.timeout = timeout
+        self.output_dir = output_dir
 
     def run(self):
         if not self.exists:
@@ -55,12 +55,37 @@ class Selftest:
             return
 
         try:
-            ret, self.stdout, self.stderr = self.command.run()
-            if ret == 0:
+            run_args = {
+                "universal_newlines": True,
+                "shell": True,
+                "stdout": subprocess.PIPE,
+                "stderr": subprocess.PIPE,
+                "timeout": self.timeout,
+            }
+            proc = subprocess.run(self.command, **run_args)
+
+            self.stdout = proc.stdout
+            self.stderr = proc.stderr
+
+            if proc.returncode == 0:
                 self.status = SelftestStatus.PASSED
-            elif ret == 4:
+            elif proc.returncode == 4:
                 self.status = SelftestStatus.SKIPPED
             else:
                 self.status = SelftestStatus.FAILED
         except subprocess.TimeoutExpired as e:
+            self.stdout = e.stdout
+            self.stderr = e.stderr
+
             self.status = SelftestStatus.TIMED_OUT
+
+        if self.output_dir is not None:
+            pathlib.Path(self.output_dir).mkdir(parents=True, exist_ok=True)
+
+            output_path = os.path.join(self.output_dir, "stdout")
+            with open(output_path, encoding="utf-8", mode="w") as f:
+                f.write(self.stdout)
+
+            error_path = os.path.join(self.output_dir, "stderr")
+            with open(error_path, encoding="utf-8", mode="w") as f:
+                f.write(self.stderr)
diff --git a/tools/testing/selftests/kvm/runner/test_runner.py b/tools/testing/selftests/kvm/runner/test_runner.py
index e7730880907d..a285a711a686 100644
--- a/tools/testing/selftests/kvm/runner/test_runner.py
+++ b/tools/testing/selftests/kvm/runner/test_runner.py
@@ -19,13 +19,12 @@ class TestRunner:
         self.output_dir = args.output
         self.jobs = args.jobs
         self.quiet = args.quiet
-        self.print_status = args.print_status
         self.print_stds = {
             SelftestStatus.PASSED: args.print_passed,
             SelftestStatus.FAILED: args.print_failed,
             SelftestStatus.SKIPPED: args.print_skipped,
             SelftestStatus.TIMED_OUT: args.print_timed_out,
-            SelftestStatus.NO_RUN: args.print_no_runs
+            SelftestStatus.NO_RUN: args.print_no_run
         }
 
         for test_file in test_files:
@@ -52,15 +51,14 @@ class TestRunner:
         # Clear the status line
         self._print("\033[2K", end="\r")
         logger.log(test_result.status,
-                   f"[{test_result.status}] {test_result.test_path}")
-        if (self.output_dir is None and self.print_status is False
-                and self.print_stds.get(test_result.status, True)):
-            logger.info("************** STDOUT BEGIN **************")
-            logger.info(test_result.stdout)
-            logger.info("************** STDOUT END **************")
-            logger.info("************** STDERR BEGIN **************")
-            logger.info(test_result.stderr)
-            logger.info("************** STDERR END **************")
+                   f"[{test_result.status.name}] {test_result.test_path}")
+
+    print_level = self.print_stds.get(test_result.status)
+        if (print_level == "full" or print_level == "stdout"):
+            logger.info("*** stdout ***\n" + test_result.stdout)
+
+        if (print_level == "full" or print_level == "stderr"):
+            logger.info("*** stderr ***\n" + test_result.stderr)
 
         self.status[test_result.status] += 1
         # Sticky bottom line

base-commit: 611829e42fb47b99ff2b6c75637aec2410739611
--



More information about the kvm-riscv mailing list