
Commit 41daaca

Update libregrtest from v3.14.2
1 parent bd1b54e commit 41daaca

File tree

21 files changed (+496, -271 lines)

21 files changed

+496
-271
lines changed

Lib/test/libregrtest/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+

Lib/test/libregrtest/cmdline.py

Lines changed: 58 additions & 25 deletions
@@ -44,11 +44,19 @@
 doing memory analysis on the Python interpreter, which process tends to
 consume too many resources to run the full regression test non-stop.

--S is used to continue running tests after an aborted run. It will
-maintain the order a standard run (ie, this assumes -r is not used).
+-S is used to resume running tests after an interrupted run. It will
+maintain the order a standard run (i.e. it assumes -r is not used).
 This is useful after the tests have prematurely stopped for some external
-reason and you want to start running from where you left off rather
-than starting from the beginning.
+reason and you want to resume the run from where you left off rather
+than starting from the beginning. Note: this is different from --prioritize.
+
+--prioritize is used to influence the order of selected tests, such that
+the tests listed as an argument are executed first. This is especially
+useful when combined with -j and -r to pin the longest-running tests
+to start at the beginning of a test run. Pass --prioritize=test_a,test_b
+to make test_a run first, followed by test_b, and then the other tests.
+If test_a wasn't selected for execution by regular means, --prioritize will
+not make it execute.

 -f reads the names of tests from the file given as f's argument, one
 or more test names per line. Whitespace is ignored. Blank lines and
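A quick usage sketch of the two options above (the invocation form, worker count, and test names are illustrative, not part of this commit):

    ./python -m test -S
    ./python -m test -r -j4 --prioritize=test_io,test_socket

The first resumes an interrupted run at the test where it stopped; the second shuffles the selected tests but pins test_io, then test_socket, to the front of the run.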
@@ -87,38 +95,40 @@
 The argument is a comma-separated list of words indicating the
 resources to test. Currently only the following are defined:

-    all -       Enable all special resources.
+    all -            Enable all special resources.
+
+    none -           Disable all special resources (this is the default).

-    none -      Disable all special resources (this is the default).
+    audio -          Tests that use the audio device. (There are known
+                     cases of broken audio drivers that can crash Python or
+                     even the Linux kernel.)

-    audio -     Tests that use the audio device. (There are known
-                cases of broken audio drivers that can crash Python or
-                even the Linux kernel.)
+    curses -         Tests that use curses and will modify the terminal's
+                     state and output modes.

-    curses -    Tests that use curses and will modify the terminal's
-                state and output modes.
+    largefile -      It is okay to run some test that may create huge
+                     files. These tests can take a long time and may
+                     consume >2 GiB of disk space temporarily.

-    largefile - It is okay to run some test that may create huge
-                files. These tests can take a long time and may
-                consume >2 GiB of disk space temporarily.
+    extralargefile - Like 'largefile', but even larger (and slower).

-    network -   It is okay to run tests that use external network
-                resource, e.g. testing SSL support for sockets.
+    network -        It is okay to run tests that use external network
+                     resource, e.g. testing SSL support for sockets.

-    decimal -   Test the decimal module against a large suite that
-                verifies compliance with standards.
+    decimal -        Test the decimal module against a large suite that
+                     verifies compliance with standards.

-    cpu -       Used for certain CPU-heavy tests.
+    cpu -            Used for certain CPU-heavy tests.

-    walltime -  Long running but not CPU-bound tests.
+    walltime -       Long running but not CPU-bound tests.

-    subprocess  Run all tests for the subprocess module.
+    subprocess       Run all tests for the subprocess module.

-    urlfetch -  It is okay to download files required on testing.
+    urlfetch -       It is okay to download files required on testing.

-    gui -       Run tests that require a running GUI.
+    gui -            Run tests that require a running GUI.

-    tzdata -    Run tests that require timezone data.
+    tzdata -         Run tests that require timezone data.

 To enable all resources except one, use '-uall,-<resource>'. For
 example, to run all the tests except for the gui tests, give the
@@ -158,13 +168,15 @@ def __init__(self, **kwargs) -> None:
         self.print_slow = False
         self.random_seed = None
         self.use_mp = None
+        self.parallel_threads = None
         self.forever = False
         self.header = False
         self.failfast = False
         self.match_tests: TestFilter = []
         self.pgo = False
         self.pgo_extended = False
         self.tsan = False
+        self.tsan_parallel = False
         self.worker_json = None
         self.start = None
         self.timeout = None
@@ -232,7 +244,7 @@ def _create_parser():
                        help='wait for user input, e.g., allow a debugger '
                             'to be attached')
     group.add_argument('-S', '--start', metavar='START',
-                       help='the name of the test at which to start.' +
+                       help='resume an interrupted run at the following test.' +
                             more_details)
     group.add_argument('-p', '--python', metavar='PYTHON',
                        help='Command to run Python test subprocesses with.')
@@ -262,6 +274,10 @@ def _create_parser():
     group.add_argument('--no-randomize', dest='no_randomize', action='store_true',
                        help='do not randomize test execution order, even if '
                             'it would be implied by another option')
+    group.add_argument('--prioritize', metavar='TEST1,TEST2,...',
+                       action='append', type=priority_list,
+                       help='select these tests first, even if the order is'
+                            ' randomized.' + more_details)
     group.add_argument('-f', '--fromfile', metavar='FILE',
                        help='read names of tests to run from a file.' +
                             more_details)
@@ -317,6 +333,10 @@ def _create_parser():
                             'a single process, ignore -jN option, '
                             'and failed tests are also rerun sequentially '
                             'in the same process')
+    group.add_argument('--parallel-threads', metavar='PARALLEL_THREADS',
+                       type=int,
+                       help='run copies of each test in PARALLEL_THREADS at '
+                            'once')
     group.add_argument('-T', '--coverage', action='store_true',
                        dest='trace',
                        help='turn on code coverage tracing using the trace '
@@ -347,6 +367,9 @@ def _create_parser():
                        help='enable extended PGO training (slower training)')
     group.add_argument('--tsan', dest='tsan', action='store_true',
                        help='run a subset of test cases that are proper for the TSAN test')
+    group.add_argument('--tsan-parallel', action='store_true',
+                       help='run a subset of test cases that are appropriate '
+                            'for TSAN with `--parallel-threads=N`')
     group.add_argument('--fail-env-changed', action='store_true',
                        help='if a test file alters the environment, mark '
                             'the test as failed')
@@ -398,6 +421,10 @@ def resources_list(string):
     return u


+def priority_list(string):
+    return string.split(",")
+
+
 def _parse_args(args, **kwargs):
     # Defaults
     ns = Namespace()
@@ -549,4 +576,10 @@ def _parse_args(args, **kwargs):
         print(msg, file=sys.stderr, flush=True)
         sys.exit(2)

+    ns.prioritize = [
+        test
+        for test_list in (ns.prioritize or ())
+        for test in test_list
+    ]
+
     return ns
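A standalone sketch (not part of the diff) of how these pieces compose: with action='append' and type=priority_list, argparse yields one list per --prioritize occurrence, and the comprehension above flattens them. Test names are illustrative:

import argparse

def priority_list(string):
    return string.split(",")

parser = argparse.ArgumentParser()
parser.add_argument('--prioritize', action='append', type=priority_list)

ns = parser.parse_args(['--prioritize=test_a,test_b', '--prioritize=test_c'])
print(ns.prioritize)  # [['test_a', 'test_b'], ['test_c']]

# Same flattening as at the end of _parse_args:
flat = [test for test_list in (ns.prioritize or ()) for test in test_list]
print(flat)  # ['test_a', 'test_b', 'test_c']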

Lib/test/libregrtest/main.py

Lines changed: 31 additions & 20 deletions
@@ -6,22 +6,21 @@
 import sysconfig
 import time
 import trace
+from _colorize import get_colors  # type: ignore[import-not-found]
 from typing import NoReturn

-from test.support import (os_helper, MS_WINDOWS, flush_std_streams,
-                          can_use_suppress_immortalization,
-                          suppress_immortalization)
+from test.support import os_helper, MS_WINDOWS, flush_std_streams

 from .cmdline import _parse_args, Namespace
 from .findtests import findtests, split_test_packages, list_cases
 from .logger import Logger
 from .pgo import setup_pgo_tests
-from .result import State, TestResult
+from .result import TestResult
 from .results import TestResults, EXITCODE_INTERRUPTED
 from .runtests import RunTests, HuntRefleak
 from .setup import setup_process, setup_test_dir
 from .single import run_single_test, PROGRESS_MIN_TIME
-from .tsan import setup_tsan_tests
+from .tsan import setup_tsan_tests, setup_tsan_parallel_tests
 from .utils import (
     StrPath, StrJSON, TestName, TestList, TestTuple, TestFilter,
     strip_py_suffix, count, format_duration,
@@ -61,6 +60,7 @@ def __init__(self, ns: Namespace, _add_python_opts: bool = False):
         self.pgo: bool = ns.pgo
         self.pgo_extended: bool = ns.pgo_extended
         self.tsan: bool = ns.tsan
+        self.tsan_parallel: bool = ns.tsan_parallel

         # Test results
         self.results: TestResults = TestResults()
@@ -142,6 +142,9 @@ def __init__(self, ns: Namespace, _add_python_opts: bool = False):
             self.random_seed = random.getrandbits(32)
         else:
             self.random_seed = ns.random_seed
+        self.prioritize_tests: tuple[str, ...] = tuple(ns.prioritize)
+
+        self.parallel_threads = ns.parallel_threads

         # tests
         self.first_runtests: RunTests | None = None
@@ -200,6 +203,9 @@ def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList
         if self.tsan:
             setup_tsan_tests(self.cmdline_args)

+        if self.tsan_parallel:
+            setup_tsan_parallel_tests(self.cmdline_args)
+
         alltests = findtests(testdir=self.test_dir,
                              exclude=exclude_tests)

@@ -236,6 +242,16 @@ def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList
         if self.randomize:
             random.shuffle(selected)

+        for priority_test in reversed(self.prioritize_tests):
+            try:
+                selected.remove(priority_test)
+            except ValueError:
+                print(f"warning: --prioritize={priority_test} used"
+                      f" but test not actually selected")
+                continue
+            else:
+                selected.insert(0, priority_test)
+
         return (tuple(selected), tests)

     @staticmethod
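Why reversed(): each prioritized test is moved to the front of selected, so inserting the last-listed name first leaves the names in their listed order. A minimal standalone sketch (test names illustrative):

selected = ['test_x', 'test_a', 'test_y', 'test_b']
prioritize_tests = ('test_a', 'test_b')

for priority_test in reversed(prioritize_tests):
    selected.remove(priority_test)
    selected.insert(0, priority_test)

print(selected)  # ['test_a', 'test_b', 'test_x', 'test_y']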
@@ -276,6 +292,9 @@ def _rerun_failed_tests(self, runtests: RunTests) -> RunTests:
         return runtests

     def rerun_failed_tests(self, runtests: RunTests) -> None:
+        ansi = get_colors()
+        red, reset = ansi.BOLD_RED, ansi.RESET
+
         if self.python_cmd:
             # Temp patch for https://github.com/python/cpython/issues/94052
             self.log(
290309
rerun_runtests = self._rerun_failed_tests(runtests)
291310

292311
if self.results.bad:
293-
print(count(len(self.results.bad), 'test'), "failed again:")
312+
print(
313+
f"{red}{count(len(self.results.bad), 'test')} "
314+
f"failed again:{reset}"
315+
)
294316
printlist(self.results.bad)
295317

296318
self.display_result(rerun_runtests)
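For context, a minimal sketch of the _colorize usage introduced above (behavior per CPython's private _colorize module: the attributes are ANSI escape strings, or empty strings when color is disabled, so the message degrades to plain text):

from _colorize import get_colors

ansi = get_colors()
red, reset = ansi.BOLD_RED, ansi.RESET
print(f"{red}3 tests failed again:{reset}")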
@@ -496,6 +518,7 @@ def create_run_tests(self, tests: TestTuple) -> RunTests:
             python_cmd=self.python_cmd,
             randomize=self.randomize,
             random_seed=self.random_seed,
+            parallel_threads=self.parallel_threads,
         )

     def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
@@ -529,27 +552,15 @@ def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
             use_load_tracker = False
         else:
             # WindowsLoadTracker is only needed on Windows
-            # use_load_tracker = MS_WINDOWS # TODO: RUSTPYTHON, investigate why this was disabled in the first place
-            use_load_tracker = False
+            use_load_tracker = MS_WINDOWS

         if use_load_tracker:
             self.logger.start_load_tracker()
         try:
             if self.num_workers:
                 self._run_tests_mp(runtests, self.num_workers)
             else:
-                # gh-135734: suppress_immortalization() raises SkipTest
-                # if _testinternalcapi is missing and the -R option is set.
-                if not can_use_suppress_immortalization(runtests.hunt_refleak):
-                    print("Module '_testinternalcapi' is missing. "
-                          "Did you disable it with --disable-test-modules?",
-                          file=sys.stderr)
-                    raise SystemExit(1)
-
-                # gh-117783: don't immortalize deferred objects when tracking
-                # refleaks. Only relevant for the free-threaded build.
-                with suppress_immortalization(runtests.hunt_refleak):
-                    self.run_tests_sequentially(runtests)
+                self.run_tests_sequentially(runtests)

         coverage = self.results.get_coverage_results()
         self.display_result(runtests)
Lib/test/libregrtest/parallel_case.py

Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
+"""Run a test case multiple times in parallel threads."""
+
+import copy
+import threading
+import unittest
+
+from unittest import TestCase
+
+
+class ParallelTestCase(TestCase):
+    def __init__(self, test_case: TestCase, num_threads: int):
+        self.test_case = test_case
+        self.num_threads = num_threads
+        self._testMethodName = test_case._testMethodName
+        self._testMethodDoc = test_case._testMethodDoc
+
+    def __str__(self):
+        return f"{str(self.test_case)} [threads={self.num_threads}]"
+
+    def run_worker(self, test_case: TestCase, result: unittest.TestResult,
+                   barrier: threading.Barrier):
+        barrier.wait()
+        test_case.run(result)
+
+    def run(self, result=None):
+        if result is None:
+            result = self.test_case.defaultTestResult()
+            startTestRun = getattr(result, 'startTestRun', None)
+            stopTestRun = getattr(result, 'stopTestRun', None)
+            if startTestRun is not None:
+                startTestRun()
+        else:
+            stopTestRun = None
+
+        # Called at the beginning of each test. See TestCase.run.
+        result.startTest(self)
+
+        cases = [copy.copy(self.test_case) for _ in range(self.num_threads)]
+        results = [unittest.TestResult() for _ in range(self.num_threads)]
+
+        barrier = threading.Barrier(self.num_threads)
+        threads = []
+        for i, (case, r) in enumerate(zip(cases, results)):
+            thread = threading.Thread(target=self.run_worker,
+                                      args=(case, r, barrier),
+                                      name=f"{str(self.test_case)}-{i}",
+                                      daemon=True)
+            threads.append(thread)
+
+        for thread in threads:
+            thread.start()
+
+        for thread in threads:
+            thread.join()
+
+        # Aggregate test results
+        if all(r.wasSuccessful() for r in results):
+            result.addSuccess(self)
+
+        # Note: We can't call result.addError, result.addFailure, etc. because
+        # we no longer have the original exception, just the string format.
+        for r in results:
+            if len(r.errors) > 0 or len(r.failures) > 0:
+                result._mirrorOutput = True
+            result.errors.extend(r.errors)
+            result.failures.extend(r.failures)
+            result.skipped.extend(r.skipped)
+            result.expectedFailures.extend(r.expectedFailures)
+            result.unexpectedSuccesses.extend(r.unexpectedSuccesses)
+            result.collectedDurations.extend(r.collectedDurations)
+
+        if any(r.shouldStop for r in results):
+            result.stop()
+
+        # Test has finished running
+        result.stopTest(self)
+        if stopTestRun is not None:
+            stopTestRun()
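A standalone sketch of driving the new wrapper directly, assuming the module path given above; the test class here is hypothetical:

import unittest
from test.libregrtest.parallel_case import ParallelTestCase

class MyTests(unittest.TestCase):
    def test_append(self):
        items = []
        items.append(1)
        self.assertEqual(items, [1])

# Four copies of test_append run concurrently; the Barrier in run()
# lines all threads up before the test bodies start.
case = ParallelTestCase(MyTests('test_append'), num_threads=4)
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite([case]))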

Lib/test/libregrtest/refleak.py

Lines changed: 1 addition & 2 deletions
@@ -129,9 +129,9 @@ def get_pooled_int(value):
     xml_filename = 'refleak-xml.tmp'
     result = None
     dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data)
-    support.gc_collect()

     for i in rep_range:
+        support.gc_collect()
         current = refleak_helper._hunting_for_refleaks
         refleak_helper._hunting_for_refleaks = True
         try:
@@ -253,7 +253,6 @@ def dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data):
     zipimport._zip_directory_cache.update(zdc)

     # Clear ABC registries, restoring previously saved ABC registries.
-    # ignore deprecation warning for collections.abc.ByteString
     abs_classes = [getattr(collections.abc, a) for a in collections.abc.__all__]
     abs_classes = filter(isabstract, abs_classes)
     for abc in abs_classes:
