Implement a new --failing-and-slow-first command line argument to test runner. #24624
base: main
Changes from 6 commits
@@ -3,6 +3,7 @@
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.

import json
import multiprocessing
import os
import sys
@@ -19,7 +20,12 @@
seen_class = set()


def run_test(test):
def run_test(test, failfast_event):
  # If failfast mode is in effect and any of the tests have failed,
  # then we should abort executing further tests immediately.
  if failfast_event is not None and failfast_event.is_set():
    return None

  olddir = os.getcwd()
  result = BufferedParallelTestResult()
  temp_dir = tempfile.mkdtemp(prefix='emtest_')
@@ -29,10 +35,16 @@ def run_test(test):
      seen_class.add(test.__class__)
      test.__class__.setUpClass()
    test(result)

    # Alert all other multiprocess pool runners that they need to stop executing further tests.
    if failfast_event is not None and result.test_result not in ['success', 'skipped']:
      failfast_event.set()
  except unittest.SkipTest as e:
    result.addSkip(test, e)
  except Exception as e:
    result.addError(test, e)
    if failfast_event is not None:
      failfast_event.set()
  # Before attempting to delete the tmp dir make sure the current
  # working directory is not within it.
  os.chdir(olddir)
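The failfast handling above hinges on an event object created by a multiprocessing.Manager, which can be safely passed as an argument to pool workers; each worker polls it before starting a test and sets it when a test fails. Below is a minimal, self-contained sketch of that pattern; the worker and task names are made up and this is not the test-runner code itself:

```python
import multiprocessing
import time


def run_one(task_id, stop_event):
  # Mirrors the check in run_test(): skip work if another worker already failed.
  if stop_event.is_set():
    return None
  time.sleep(0.1)  # stand-in for running a single test
  failed = task_id == 3  # pretend task 3 fails
  if failed:
    stop_event.set()  # tell the remaining workers to stop picking up work
  return (task_id, 'failed' if failed else 'success')


if __name__ == '__main__':
  with multiprocessing.Manager() as manager:
    stop_event = manager.Event()
    with multiprocessing.Pool(4) as pool:
      async_results = [pool.apply_async(run_one, (i, stop_event)) for i in range(20)]
      results = [r.get() for r in async_results]
  # Workers that bailed out early returned None, just like run_test() above.
  print([r for r in results if r is not None])
```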
@@ -46,9 +58,11 @@ class ParallelTestSuite(unittest.BaseTestSuite):
  Creates worker threads, manages the task queue, and combines the results.
  """

  def __init__(self, max_cores):
  def __init__(self, max_cores, options):
    super().__init__()
    self.max_cores = max_cores
    self.failfast = options.failfast
    self.failing_and_slow_first = options.failing_and_slow_first

  def addTest(self, test):
    super().addTest(test)
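The `options` object consulted here is whatever the runner's argument parser produced. A hypothetical sketch of the two flags this suite reads follows; the real flag definitions live in the test runner, not in this file, so everything below other than the two attribute names is an assumption:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--failfast', action='store_true',
                    help='Stop scheduling new tests once one test fails.')
parser.add_argument('--failing-and-slow-first', action='store_true',
                    help='Order tests using results recorded from previous runs.')

options = parser.parse_args(['--failfast', '--failing-and-slow-first'])
# argparse maps dashes to underscores, which is why the suite reads
# options.failfast and options.failing_and_slow_first.
print(options.failfast, options.failing_and_slow_first)  # True True
```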
@@ -61,12 +75,48 @@ def run(self, result):
    # inherited by the child process, but can lead to hard-to-debug windows-only
    # issues.
    # multiprocessing.set_start_method('spawn')
    tests = list(self.reversed_tests())

    # If we are running with --failing-and-slow-first, then the test list has been
    # pre-sorted based on previous test run results. Otherwise run the tests in
    # reverse alphabetical order.
    tests = list(self if self.failing_and_slow_first else self.reversed_tests())
    use_cores = cap_max_workers_in_pool(min(self.max_cores, len(tests), num_cores()))
    print('Using %s parallel test processes' % use_cores)
    pool = multiprocessing.Pool(use_cores)
    results = [pool.apply_async(run_test, (t,)) for t in tests]
    results = [r.get() for r in results]
    with multiprocessing.Manager() as manager:
      pool = multiprocessing.Pool(use_cores)
      failfast_event = manager.Event() if self.failfast else None
      results = [pool.apply_async(run_test, (t, failfast_event)) for t in tests]
      results = [r.get() for r in results]
      results = [r for r in results if r is not None]

      try:
        previous_test_run_results = json.load(open('__previous_test_run_results.json'))
      except FileNotFoundError:
        previous_test_run_results = {}
Review comment: Do we need to duplicate this code for handling __previous_test_run_results between here and runner.py? Perhaps a shared sorting function that they can both call?

Reply: This code is not creating a sorter, but I see the shared block amounts to a

```python
def load_previous_test_run_results():
  try:
    return json.load(open('out/__previous_test_run_results.json'))
  except FileNotFoundError:
    return {}
```

I could refactor that to e.g. common.py, though that is not a large win necessarily.

Reply: I guess I don't understand quite what is going on here then. I would have thought the new flag …

Reply: Merged the code.

Reply: The new flag only applies to the parallel test runner. If we wanted to extend this to the non-parallel runner, that's fine, though that could be worked on as a later PR as well.
      if self.failing_and_slow_first:
        for r in results:
          # Save a test result record with the specific suite name (e.g. "core0.test_foo")
          test_failed = r.test_result not in ['success', 'skipped']
          fail_frequency = previous_test_run_results[r.test_name]['fail_frequency'] if r.test_name in previous_test_run_results else int(test_failed)
          fail_frequency = (fail_frequency + int(test_failed)) / 2
          previous_test_run_results[r.test_name] = {
            'result': r.test_result,
            'duration': r.test_duration,
            'fail_frequency': fail_frequency,
          }
          # Also save a test result record without the suite name (e.g. just "test_foo"). This enables different
          # suite runs to order tests for quick --failfast termination, in case a test fails in multiple suites.
          test_in_any_suite = r.test_name.split(' ')[0]
          fail_frequency = previous_test_run_results[test_in_any_suite]['fail_frequency'] if test_in_any_suite in previous_test_run_results else int(test_failed)
          fail_frequency = (fail_frequency + int(test_failed)) / 2
          previous_test_run_results[test_in_any_suite] = {
            'result': r.test_result,
            'duration': r.test_duration,
            'fail_frequency': fail_frequency,
          }

        json.dump(previous_test_run_results, open('__previous_test_run_results.json', 'w'), indent=2)
    pool.close()
    pool.join()
    return self.combine_results(result, results)
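The fail_frequency update above is effectively an exponential moving average with weight 1/2: each run averages the stored frequency with the newest outcome (1 for a failure, 0 otherwise), so recent failures outweigh old history. A small sketch of that update together with a hypothetical ordering function; the actual sorting for --failing-and-slow-first is done by the test runner, which is not part of this diff:

```python
def update_record(records, name, failed, duration, result):
  # Same averaging as above: (previous frequency + newest outcome) / 2.
  old = records[name]['fail_frequency'] if name in records else int(failed)
  records[name] = {
    'result': result,
    'duration': duration,
    'fail_frequency': (old + int(failed)) / 2,
  }


def failing_and_slow_first(test_names, records):
  # Assumed ordering: most frequently failing first, then slowest first.
  def key(name):
    rec = records.get(name, {})
    return (rec.get('fail_frequency', 0), rec.get('duration', 0))
  return sorted(test_names, key=key, reverse=True)


records = {}
update_record(records, 'core0.test_foo', failed=True, duration=12.5, result='failed')
update_record(records, 'core0.test_foo', failed=False, duration=11.0, result='success')
print(records['core0.test_foo']['fail_frequency'])  # (1.0 + 0) / 2 == 0.5
```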
@@ -104,6 +154,8 @@ class BufferedParallelTestResult:
  def __init__(self):
    self.buffered_result = None
    self.test_duration = 0
    self.test_result = 'errored'
    self.test_name = ''

  @property
  def test(self):
@@ -122,6 +174,7 @@ def updateResult(self, result):
    result.core_time += self.test_duration

  def startTest(self, test):
    self.test_name = str(test)
    self.start_time = time.perf_counter()

  def stopTest(self, test):
@@ -134,28 +187,34 @@ def addSuccess(self, test):
    if hasattr(time, 'perf_counter'):
      print(test, '... ok (%.2fs)' % (self.calculateElapsed()), file=sys.stderr)
    self.buffered_result = BufferedTestSuccess(test)
    self.test_result = 'success'

  def addExpectedFailure(self, test, err):
    if hasattr(time, 'perf_counter'):
      print(test, '... expected failure (%.2fs)' % (self.calculateElapsed()), file=sys.stderr)
    self.buffered_result = BufferedTestExpectedFailure(test, err)
    self.test_result = 'expected failure'

  def addUnexpectedSuccess(self, test):
    if hasattr(time, 'perf_counter'):
      print(test, '... unexpected success (%.2fs)' % (self.calculateElapsed()), file=sys.stderr)
    self.buffered_result = BufferedTestUnexpectedSuccess(test)
    self.test_result = 'unexpected success'

  def addSkip(self, test, reason):
    print(test, "... skipped '%s'" % reason, file=sys.stderr)
    self.buffered_result = BufferedTestSkip(test, reason)
    self.test_result = 'skipped'

  def addFailure(self, test, err):
    print(test, '... FAIL', file=sys.stderr)
    self.buffered_result = BufferedTestFailure(test, err)
    self.test_result = 'failed'

  def addError(self, test, err):
    print(test, '... ERROR', file=sys.stderr)
    self.buffered_result = BufferedTestError(test, err)
    self.test_result = 'errored'
Review comment: Is this needed? Isn't the existing …

Reply: Python doesn't have an API to ask the result of a test object, and it would have required some kind of an awkward isinstance() jungle to convert the test to a string, so I opted to write simple-looking code as the preferable way.
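For illustration, the rejected isinstance() approach might have looked something like the sketch below. The Buffered* class names come from this file; classify_result itself is hypothetical and not part of the PR:

```python
def classify_result(buffered_result):
  # Check the more specific classes first in case any of them subclass the others.
  for cls, name in [(BufferedTestUnexpectedSuccess, 'unexpected success'),
                    (BufferedTestExpectedFailure, 'expected failure'),
                    (BufferedTestSuccess, 'success'),
                    (BufferedTestFailure, 'failed'),
                    (BufferedTestSkip, 'skipped'),
                    (BufferedTestError, 'errored')]:
    if isinstance(buffered_result, cls):
      return name
  return 'errored'
```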


class BufferedTestBase: