Commit 11bcd0a5 authored by SVN-Git Migration's avatar SVN-Git Migration

Imported Upstream version 0.0.18

parent 686acae0
......@@ -5,6 +5,23 @@ testrepository release notes
NEXT (In development)
+++++++++++++++++++++
0.0.18
++++++
CHANGES
-------
* ``run`` now accepts ``--isolated`` as a parameter, which will cause each
  selected test to be run independently. This can be useful both to work
  around isolation bugs and to detect tests that cannot be run independently.
  (Robert Collins)
INTERNALS
---------
* ``capture_ids`` in test_run now returns a list of captures, permitting tests
  that need to exercise multiple runs to do so. (Robert Collins)
0.0.17
++++++
......
Metadata-Version: 1.1
Name: testrepository
Version: 0.0.17
Version: 0.0.18
Summary: A repository of test results.
Home-page: https://launchpad.net/testrepository
Author: Robert Collins
......
......@@ -366,6 +366,21 @@ will perform that analysis for you. (This requires that your test runner is
then either the isolation issue is racy, or it is a 3-or-more test
isolation issue. Neither of those cases is automated today.
Forcing isolation
~~~~~~~~~~~~~~~~~
Sometimes it is useful to force a separate test runner instance for each test
executed. The ``--isolated`` flag will cause testr to execute a separate runner
per test::
$ testr run --isolated
In this mode testr first determines the tests to run (either automatically
listed, using the failing set, or a user-supplied load-list), and then spawns
one test runner per test it runs. To avoid cross-test-runner interactions,
concurrency is disabled in this mode. ``--analyze-isolation`` supersedes
``--isolated`` if they are both supplied.
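Conceptually the behaviour is the following (a minimal Python sketch, not
testr's actual code; ``list_tests`` and ``run_single_test`` are hypothetical
stand-ins for the listing step and a one-test runner invocation)::

    def run_isolated(list_tests, run_single_test):
        # List once up front; concurrency is disabled in this mode.
        test_ids = list_tests()
        result = 0
        for test_id in test_ids:
            # Spawn a fresh runner for exactly this test id, keeping the
            # worst (highest) return code as the overall result.
            result = max(result, run_single_test(test_id))
        return result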
Repositories
~~~~~~~~~~~~
......
Metadata-Version: 1.1
Name: testrepository
Version: 0.0.17
Version: 0.0.18
Summary: A repository of test results.
Home-page: https://launchpad.net/testrepository
Author: Robert Collins
......
......@@ -33,4 +33,4 @@ The tests package contains tests and test specific support code.
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
__version__ = (0, 0, 17, 'final', 0)
__version__ = (0, 0, 18, 'final', 0)
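As an illustration of the release-string rule the comment above describes
(``revno`` is hypothetical here; the real logic lives in setup.py)::

    def version_string(version_info, revno=0):
        # version_info is (major, minor, micro, releaselevel, serial).
        major, minor, micro, releaselevel, _serial = version_info
        if releaselevel == 'final':
            # Final releases: plain major.minor.micro.
            return '%d.%d.%d' % (major, minor, micro)
        # Otherwise a development snapshot: major.minor.micro~$(revno).
        return '%d.%d.%d~%d' % (major, minor, micro, revno)

    print(version_string((0, 0, 18, 'final', 0)))  # 0.0.18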
......@@ -143,6 +143,9 @@ class run(Command):
optparse.Option("--analyze-isolation", action="store_true",
default=False,
help="Search the last test run for 2-test test isolation interactions."),
optparse.Option("--isolated", action="store_true",
default=False,
help="Run each test id in a separate test runner."),
]
args = [StringArgument('testfilters', 0, None), DoubledashArgument(),
StringArgument('testargs', 0, None)]
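For reference, the same ``store_true`` registration in a self-contained
optparse sketch (the parser wiring here is illustrative; testrepository
assembles its option list differently)::

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('--isolated', action='store_true', default=False,
                      help='Run each test id in a separate test runner.')
    options, args = parser.parse_args(['--isolated'])
    print(options.isolated)  # True when the flag was supplied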
......@@ -192,7 +195,22 @@ class run(Command):
if not self.ui.options.analyze_isolation:
cmd = testcommand.get_run_command(ids, self.ui.arguments['testargs'],
test_filters = filters)
return self._run_tests(cmd)
if self.ui.options.isolated:
result = 0
cmd.setUp()
try:
ids = cmd.list_tests()
finally:
cmd.cleanUp()
for test_id in ids:
cmd = testcommand.get_run_command([test_id],
self.ui.arguments['testargs'], test_filters=filters)
run_result = self._run_tests(cmd)
if run_result > result:
result = run_result
return result
else:
return self._run_tests(cmd)
else:
# Where do we source data about the cause of conflicts.
# XXX: Should instead capture the run id in with the failing test
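The added branch above lists the tests once, then builds one run command per
test id and keeps the highest return code. The same aggregation step in a
self-contained sketch (``subprocess`` stands in for the per-test runner
processes; the command lines are hypothetical)::

    import subprocess

    def worst_return_code(commands):
        # Run each command; a single failing test (non-zero exit) makes
        # the whole isolated run report failure.
        result = 0
        for argv in commands:
            run_result = subprocess.call(argv)
            if run_result > result:
                result = run_result
        return result

    # e.g. worst_return_code([['testr-runner', tid] for tid in ids])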
......@@ -329,7 +347,8 @@ class run(Command):
def run_tests():
run_procs = [('subunit', ReturnCodeToSubunit(proc)) for proc in cmd.run_tests()]
options = {}
if self.ui.options.failing or self.ui.options.analyze_isolation:
if (self.ui.options.failing or self.ui.options.analyze_isolation
or self.ui.options.isolated):
options['partial'] = True
load_ui = decorator.UI(input_streams=run_procs, options=options,
decorated=self.ui)
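Marking these loads as partial matters because a subset run (``--failing``,
``--analyze-isolation``, or now ``--isolated``) reports only some of the known
tests, so previously recorded results for tests outside the subset should be
preserved rather than discarded. A minimal sketch of the flag plumbing,
assuming a plain options dict::

    def load_options(failing=False, analyze_isolation=False, isolated=False):
        # Any subset-running mode loads its results as a partial run.
        options = {}
        if failing or analyze_isolation or isolated:
            options['partial'] = True
        return options

    load_options(isolated=True)  # {'partial': True}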
......
......@@ -31,6 +31,7 @@ from testscenarios.scenarios import multiply_scenarios
from testtools.compat import _b
from testtools.matchers import (
Equals,
HasLength,
MatchesException,
MatchesListwise,
)
......@@ -186,15 +187,14 @@ class TestCommand(ResourcedTestCase):
('summary', True, 0, -3, None, None, [('id', 1, None)]),
], ui.outputs)
def capture_ids(self):
def capture_ids(self, list_result=None):
params = []
def capture_ids(self, ids, args, test_filters=None):
params.append(self)
params.append(ids)
params.append(args)
params.append(test_filters)
params.append([self, ids, args, test_filters])
result = Fixture()
result.run_tests = lambda:[]
if list_result is not None:
result.list_tests = lambda:list(list_result)
return result
return params, capture_ids
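With this shape, each call to the patched ``get_run_command`` appends one
four-element capture, so a test driving several runs can assert on each
invocation separately. The pattern in miniature (illustrative names, outside
the test harness)::

    captures = []

    def record(*args, **kwargs):
        # One list entry per call, preserving arguments for later asserts.
        captures.append([args, kwargs])

    record('ab', test_filters=None)
    record('cd', test_filters=None)
    assert len(captures) == 2  # one capture per invocation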
......@@ -222,7 +222,7 @@ class TestCommand(ResourcedTestCase):
('summary', True, 0, -3, None, None, [('id', 1, None)])
], ui.outputs)
self.assertEqual(0, cmd_result)
self.assertEqual([Wildcard, expected_ids, [], None], params)
self.assertEqual([[Wildcard, expected_ids, [], None]], params)
def test_load_list_passes_ids(self):
list_file = tempfile.NamedTemporaryFile()
......@@ -246,7 +246,7 @@ class TestCommand(ResourcedTestCase):
('summary', True, 0, -3, None, None, [('id', 1, None)])
], ui.outputs)
self.assertEqual(0, cmd_result)
self.assertEqual([Wildcard, expected_ids, [], None], params)
self.assertEqual([[Wildcard, expected_ids, [], None]], params)
def test_extra_options_passed_in(self):
ui, cmd = self.get_test_ui_and_cmd(args=('--', 'bar', 'quux'))
......@@ -341,7 +341,6 @@ class TestCommand(ResourcedTestCase):
def test_regex_test_filter(self):
ui, cmd = self.get_test_ui_and_cmd(args=('ab.*cd', '--', 'bar', 'quux'))
ui.proc_outputs = [_b('ab-cd\nefgh\n')]
cmd.repository_factory = memory.RepositoryFactory()
self.setup_repo(cmd, ui)
self.set_config(
......@@ -358,10 +357,11 @@ class TestCommand(ResourcedTestCase):
('summary', True, 0, -3, None, None, [('id', 1, None)])
], ui.outputs)
self.assertEqual(0, cmd_result)
self.assertThat(params[1], Equals(None))
self.assertThat(params[0][1], Equals(None))
self.assertThat(
params[2], MatchesListwise([Equals('bar'), Equals('quux')]))
self.assertThat(params[3], MatchesListwise([Equals('ab.*cd')]))
params[0][2], MatchesListwise([Equals('bar'), Equals('quux')]))
self.assertThat(params[0][3], MatchesListwise([Equals('ab.*cd')]))
self.assertThat(params, HasLength(1))
def test_regex_test_filter_with_explicit_ids(self):
ui, cmd = self.get_test_ui_and_cmd(
......@@ -382,10 +382,11 @@ class TestCommand(ResourcedTestCase):
('summary', True, 0, -3, None, None, [('id', 1, None)])
], ui.outputs)
self.assertEqual(0, cmd_result)
self.assertThat(params[1], Equals(['failing1', 'failing2']))
self.assertThat(params[0][1], Equals(['failing1', 'failing2']))
self.assertThat(
params[2], MatchesListwise([Equals('bar'), Equals('quux')]))
self.assertThat(params[3], MatchesListwise([Equals('g1')]))
params[0][2], MatchesListwise([Equals('bar'), Equals('quux')]))
self.assertThat(params[0][3], MatchesListwise([Equals('g1')]))
self.assertThat(params, HasLength(1))
def test_until_failure(self):
ui, cmd = self.get_test_ui_and_cmd(options=[('until_failure', True)])
......@@ -445,6 +446,35 @@ class TestCommand(ResourcedTestCase):
], ui.outputs)
self.assertEqual(0, result)
def test_isolated_runs_multiple_processes(self):
ui, cmd = self.get_test_ui_and_cmd(options=[('isolated', True)])
cmd.repository_factory = memory.RepositoryFactory()
self.setup_repo(cmd, ui)
self.set_config(
'[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n'
'test_id_option=--load-list $IDFILE\n'
'test_list_option=--list\n')
params, capture_ids = self.capture_ids(list_result=['ab', 'cd', 'ef'])
self.useFixture(MonkeyPatch(
'testrepository.testcommand.TestCommand.get_run_command',
capture_ids))
cmd_result = cmd.execute()
self.assertEqual([
('results', Wildcard),
('summary', True, 0, -3, None, None, [('id', 1, None)]),
('results', Wildcard),
('summary', True, 0, 0, None, None, [('id', 2, None)]),
('results', Wildcard),
('summary', True, 0, 0, None, None, [('id', 3, None)]),
], ui.outputs)
self.assertEqual(0, cmd_result)
# Once to list, then three more calls, each executing one test.
self.assertThat(params, HasLength(4))
self.assertThat(params[0][1], Equals(None))
self.assertThat(params[1][1], Equals(['ab']))
self.assertThat(params[2][1], Equals(['cd']))
self.assertThat(params[3][1], Equals(['ef']))
def read_all(stream):
return stream.read()
......