Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Lib/test/libregrtest/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@

83 changes: 58 additions & 25 deletions Lib/test/libregrtest/cmdline.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,11 +44,19 @@
doing memory analysis on the Python interpreter, which process tends to
consume too many resources to run the full regression test non-stop.

-S is used to continue running tests after an aborted run. It will
maintain the order a standard run (ie, this assumes -r is not used).
-S is used to resume running tests after an interrupted run. It will
maintain the order of a standard run (i.e. it assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
reason and you want to resume the run from where you left off rather
than starting from the beginning. Note: this is different from --prioritize.

--prioritize is used to influence the order of selected tests, such that
the tests listed as an argument are executed first. This is especially
useful when combined with -j and -r to pin the longest-running tests
to start at the beginning of a test run. Pass --prioritize=test_a,test_b
to make test_a run first, followed by test_b, and then the other tests.
If test_a wasn't selected for execution by regular means, --prioritize will
not make it execute.

-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
Expand Down Expand Up @@ -87,38 +95,40 @@
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:

all - Enable all special resources.
all - Enable all special resources.

none - Disable all special resources (this is the default).

none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)

audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.

curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2 GiB of disk space temporarily.

largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2 GiB of disk space temporarily.
extralargefile - Like 'largefile', but even larger (and slower).

network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.

decimal - Test the decimal module against a large suite that
verifies compliance with standards.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.

cpu - Used for certain CPU-heavy tests.
cpu - Used for certain CPU-heavy tests.

walltime - Long running but not CPU-bound tests.
walltime - Long running but not CPU-bound tests.

subprocess Run all tests for the subprocess module.
subprocess Run all tests for the subprocess module.

urlfetch - It is okay to download files required on testing.
urlfetch - It is okay to download files required on testing.

gui - Run tests that require a running GUI.
gui - Run tests that require a running GUI.

tzdata - Run tests that require timezone data.
tzdata - Run tests that require timezone data.

To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
Expand Down Expand Up @@ -158,13 +168,15 @@ def __init__(self, **kwargs) -> None:
self.print_slow = False
self.random_seed = None
self.use_mp = None
self.parallel_threads = None
self.forever = False
self.header = False
self.failfast = False
self.match_tests: TestFilter = []
self.pgo = False
self.pgo_extended = False
self.tsan = False
self.tsan_parallel = False
self.worker_json = None
self.start = None
self.timeout = None
Expand Down Expand Up @@ -232,7 +244,7 @@ def _create_parser():
help='wait for user input, e.g., allow a debugger '
'to be attached')
group.add_argument('-S', '--start', metavar='START',
help='the name of the test at which to start.' +
help='resume an interrupted run at the following test.' +
more_details)
group.add_argument('-p', '--python', metavar='PYTHON',
help='Command to run Python test subprocesses with.')
Expand Down Expand Up @@ -262,6 +274,10 @@ def _create_parser():
group.add_argument('--no-randomize', dest='no_randomize', action='store_true',
help='do not randomize test execution order, even if '
'it would be implied by another option')
group.add_argument('--prioritize', metavar='TEST1,TEST2,...',
action='append', type=priority_list,
help='select these tests first, even if the order is'
' randomized.' + more_details)
group.add_argument('-f', '--fromfile', metavar='FILE',
help='read names of tests to run from a file.' +
more_details)
Expand Down Expand Up @@ -317,6 +333,10 @@ def _create_parser():
'a single process, ignore -jN option, '
'and failed tests are also rerun sequentially '
'in the same process')
group.add_argument('--parallel-threads', metavar='PARALLEL_THREADS',
type=int,
help='run copies of each test in PARALLEL_THREADS at '
'once')
group.add_argument('-T', '--coverage', action='store_true',
dest='trace',
help='turn on code coverage tracing using the trace '
Expand Down Expand Up @@ -347,6 +367,9 @@ def _create_parser():
help='enable extended PGO training (slower training)')
group.add_argument('--tsan', dest='tsan', action='store_true',
help='run a subset of test cases that are proper for the TSAN test')
group.add_argument('--tsan-parallel', action='store_true',
help='run a subset of test cases that are appropriate '
'for TSAN with `--parallel-threads=N`')
group.add_argument('--fail-env-changed', action='store_true',
help='if a test file alters the environment, mark '
'the test as failed')
Expand Down Expand Up @@ -398,6 +421,10 @@ def resources_list(string):
return u


def priority_list(string):
    """Split a comma-separated string of test names into a list.

    Used as the argparse ``type=`` converter for ``--prioritize``.
    """
    separator = ","
    return string.split(separator)


def _parse_args(args, **kwargs):
# Defaults
ns = Namespace()
Expand Down Expand Up @@ -549,4 +576,10 @@ def _parse_args(args, **kwargs):
print(msg, file=sys.stderr, flush=True)
sys.exit(2)

ns.prioritize = [
test
for test_list in (ns.prioritize or ())
for test in test_list
]

return ns
51 changes: 31 additions & 20 deletions Lib/test/libregrtest/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,22 +6,21 @@
import sysconfig
import time
import trace
from _colorize import get_colors # type: ignore[import-not-found]
from typing import NoReturn

from test.support import (os_helper, MS_WINDOWS, flush_std_streams,
can_use_suppress_immortalization,
suppress_immortalization)
from test.support import os_helper, MS_WINDOWS, flush_std_streams

from .cmdline import _parse_args, Namespace
from .findtests import findtests, split_test_packages, list_cases
from .logger import Logger
from .pgo import setup_pgo_tests
from .result import State, TestResult
from .result import TestResult
from .results import TestResults, EXITCODE_INTERRUPTED
from .runtests import RunTests, HuntRefleak
from .setup import setup_process, setup_test_dir
from .single import run_single_test, PROGRESS_MIN_TIME
from .tsan import setup_tsan_tests
from .tsan import setup_tsan_tests, setup_tsan_parallel_tests
from .utils import (
StrPath, StrJSON, TestName, TestList, TestTuple, TestFilter,
strip_py_suffix, count, format_duration,
Expand Down Expand Up @@ -61,6 +60,7 @@ def __init__(self, ns: Namespace, _add_python_opts: bool = False):
self.pgo: bool = ns.pgo
self.pgo_extended: bool = ns.pgo_extended
self.tsan: bool = ns.tsan
self.tsan_parallel: bool = ns.tsan_parallel

# Test results
self.results: TestResults = TestResults()
Expand Down Expand Up @@ -142,6 +142,9 @@ def __init__(self, ns: Namespace, _add_python_opts: bool = False):
self.random_seed = random.getrandbits(32)
else:
self.random_seed = ns.random_seed
self.prioritize_tests: tuple[str, ...] = tuple(ns.prioritize)

self.parallel_threads = ns.parallel_threads

# tests
self.first_runtests: RunTests | None = None
Expand Down Expand Up @@ -200,6 +203,9 @@ def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList
if self.tsan:
setup_tsan_tests(self.cmdline_args)

if self.tsan_parallel:
setup_tsan_parallel_tests(self.cmdline_args)

alltests = findtests(testdir=self.test_dir,
exclude=exclude_tests)

Expand Down Expand Up @@ -236,6 +242,16 @@ def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList
if self.randomize:
random.shuffle(selected)

for priority_test in reversed(self.prioritize_tests):
try:
selected.remove(priority_test)
except ValueError:
print(f"warning: --prioritize={priority_test} used"
f" but test not actually selected")
continue
else:
selected.insert(0, priority_test)

return (tuple(selected), tests)

@staticmethod
Expand Down Expand Up @@ -276,6 +292,9 @@ def _rerun_failed_tests(self, runtests: RunTests) -> RunTests:
return runtests

def rerun_failed_tests(self, runtests: RunTests) -> None:
ansi = get_colors()
red, reset = ansi.BOLD_RED, ansi.RESET

if self.python_cmd:
# Temp patch for https://github.com/python/cpython/issues/94052
self.log(
Expand All @@ -290,7 +309,10 @@ def rerun_failed_tests(self, runtests: RunTests) -> None:
rerun_runtests = self._rerun_failed_tests(runtests)

if self.results.bad:
print(count(len(self.results.bad), 'test'), "failed again:")
print(
f"{red}{count(len(self.results.bad), 'test')} "
f"failed again:{reset}"
)
printlist(self.results.bad)

self.display_result(rerun_runtests)
Expand Down Expand Up @@ -496,6 +518,7 @@ def create_run_tests(self, tests: TestTuple) -> RunTests:
python_cmd=self.python_cmd,
randomize=self.randomize,
random_seed=self.random_seed,
parallel_threads=self.parallel_threads,
)

def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
Expand Down Expand Up @@ -529,27 +552,15 @@ def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
use_load_tracker = False
else:
# WindowsLoadTracker is only needed on Windows
# use_load_tracker = MS_WINDOWS # TODO: RUSTPYTHON, investigate why this was disabled in the first place
use_load_tracker = False
use_load_tracker = MS_WINDOWS

if use_load_tracker:
self.logger.start_load_tracker()
try:
if self.num_workers:
self._run_tests_mp(runtests, self.num_workers)
else:
# gh-135734: suppress_immortalization() raises SkipTest
# if _testinternalcapi is missing and the -R option is set.
if not can_use_suppress_immortalization(runtests.hunt_refleak):
print("Module '_testinternalcapi' is missing. "
"Did you disable it with --disable-test-modules?",
file=sys.stderr)
raise SystemExit(1)

# gh-117783: don't immortalize deferred objects when tracking
# refleaks. Only relevant for the free-threaded build.
with suppress_immortalization(runtests.hunt_refleak):
self.run_tests_sequentially(runtests)
self.run_tests_sequentially(runtests)

coverage = self.results.get_coverage_results()
self.display_result(runtests)
Expand Down
78 changes: 78 additions & 0 deletions Lib/test/libregrtest/parallel_case.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
"""Run a test case multiple times in parallel threads."""

import copy
import threading
import unittest

from unittest import TestCase


class ParallelTestCase(TestCase):
def __init__(self, test_case: TestCase, num_threads: int):
self.test_case = test_case
self.num_threads = num_threads
self._testMethodName = test_case._testMethodName
self._testMethodDoc = test_case._testMethodDoc

def __str__(self):
return f"{str(self.test_case)} [threads={self.num_threads}]"

def run_worker(self, test_case: TestCase, result: unittest.TestResult,
barrier: threading.Barrier):
barrier.wait()
test_case.run(result)

def run(self, result=None):
if result is None:
result = test_case.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
stopTestRun = getattr(result, 'stopTestRun', None)
if startTestRun is not None:
startTestRun()
else:
stopTestRun = None

# Called at the beginning of each test. See TestCase.run.
result.startTest(self)

cases = [copy.copy(self.test_case) for _ in range(self.num_threads)]
results = [unittest.TestResult() for _ in range(self.num_threads)]

barrier = threading.Barrier(self.num_threads)
threads = []
for i, (case, r) in enumerate(zip(cases, results)):
thread = threading.Thread(target=self.run_worker,
args=(case, r, barrier),
name=f"{str(self.test_case)}-{i}",
daemon=True)
threads.append(thread)

for thread in threads:
thread.start()

for threads in threads:
threads.join()

# Aggregate test results
if all(r.wasSuccessful() for r in results):
result.addSuccess(self)

# Note: We can't call result.addError, result.addFailure, etc. because
# we no longer have the original exception, just the string format.
for r in results:
if len(r.errors) > 0 or len(r.failures) > 0:
result._mirrorOutput = True
result.errors.extend(r.errors)
result.failures.extend(r.failures)
result.skipped.extend(r.skipped)
result.expectedFailures.extend(r.expectedFailures)
result.unexpectedSuccesses.extend(r.unexpectedSuccesses)
result.collectedDurations.extend(r.collectedDurations)

if any(r.shouldStop for r in results):
result.stop()

# Test has finished running
result.stopTest(self)
if stopTestRun is not None:
stopTestRun()
3 changes: 1 addition & 2 deletions Lib/test/libregrtest/refleak.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,9 +129,9 @@ def get_pooled_int(value):
xml_filename = 'refleak-xml.tmp'
result = None
dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data)
support.gc_collect()

for i in rep_range:
support.gc_collect()
current = refleak_helper._hunting_for_refleaks
refleak_helper._hunting_for_refleaks = True
try:
Expand Down Expand Up @@ -253,7 +253,6 @@ def dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data):
zipimport._zip_directory_cache.update(zdc)

# Clear ABC registries, restoring previously saved ABC registries.
# ignore deprecation warning for collections.abc.ByteString
abs_classes = [getattr(collections.abc, a) for a in collections.abc.__all__]
abs_classes = filter(isabstract, abs_classes)
for abc in abs_classes:
Expand Down
Loading
Loading