From 700c62e1d4c5f488a1d12d78745f94ab8c8f9ff4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=A9rome=20Perrin?= <jerome@nexedi.com>
Date: Wed, 29 Aug 2018 08:25:17 +0200
Subject: [PATCH] TaskDistributionTool: run first the tests that failed in the previous run

---
 .../test.erp5.testTaskDistribution.py     | 28 +++++++++++++++++++
 product/ERP5/Tool/TaskDistributionTool.py | 19 +++++++++----
 2 files changed, 41 insertions(+), 6 deletions(-)

diff --git a/bt5/erp5_test_result/TestTemplateItem/portal_components/test.erp5.testTaskDistribution.py b/bt5/erp5_test_result/TestTemplateItem/portal_components/test.erp5.testTaskDistribution.py
index f1521bba27..b7686dd6dc 100644
--- a/bt5/erp5_test_result/TestTemplateItem/portal_components/test.erp5.testTaskDistribution.py
+++ b/bt5/erp5_test_result/TestTemplateItem/portal_components/test.erp5.testTaskDistribution.py
@@ -465,6 +465,34 @@ class TestTaskDistribution(ERP5TypeTestCase):
     next_line_url, next_test = self.tool.startUnitTest(next_test_result_path)
     self.assertEqual(['testFoo', 'testBar'], [test, next_test])
 
+  def test_startUnitTestRunsFailedTestFirst(self):
+    # simulate a previous run
+    test_result = self.portal.test_result_module.newContent(
+        portal_type='Test Result',
+        title=self.default_test_title,
+        start_date=DateTime())
+    test_result.newContent(
+        portal_type='Test Result Line',
+        title='testFailing',
+    ).stop(test_count=1, duration=100, failure_count=1)
+    test_result.newContent(
+        portal_type='Test Result Line',
+        title='testFast',
+    ).stop(test_count=1, duration=50)
+    test_result.newContent(
+        portal_type='Test Result Line',
+        title='testSlow',
+    ).stop(test_count=1, duration=1000)
+    test_result.stop()
+    self.tic()
+
+    test_result_path, _ = self._createTestResult(
+        test_list=['testSlow', 'testFast', 'testFailing'])
+    # tests that failed in the previous run are started first
+    self.assertEqual(
+        ['testFailing', 'testSlow', 'testFast'],
+        [self.tool.startUnitTest(test_result_path)[1] for _ in range(3)])
+
   def test_06b_restartStuckTest(self):
     """
     Check if a test result line is not stuck in 'started', if so, redraft
diff --git a/product/ERP5/Tool/TaskDistributionTool.py b/product/ERP5/Tool/TaskDistributionTool.py
index b97f02a9af..25995996a5 100644
--- a/product/ERP5/Tool/TaskDistributionTool.py
+++ b/product/ERP5/Tool/TaskDistributionTool.py
@@ -107,7 +107,7 @@ class TaskDistributionTool(BaseTool):
                      node_title))
         node.start()
     def createTestResultLineList(test_result, test_name_list):
-      duration_list = []
+      test_priority_list = []
       previous_test_result_list = portal.test_result_module.searchFolder(
         title=SimpleQuery(comparison_operator='=', title=test_result.getTitle()),
         sort_on=[('creation_date','descending')],
@@ -117,11 +117,18 @@ class TaskDistributionTool(BaseTool):
         previous_test_result = previous_test_result_list[0].getObject()
         for line in previous_test_result.objectValues():
           if line.getSimulationState() in ('stopped', 'public_stopped'):
-            duration_list.append((line.getTitle(),line.getProperty('duration')))
-      duration_list.sort(key=lambda x: -x[1])
-      sorted_test_list = [x[0] for x in duration_list]
-      # Sort tests by name to have consistent numbering of test result line on
-      # a test suite.
+            # Execute first the tests that failed in the previous run (so that
+            # we can quickly see whether a fix was effective) and then the
+            # slowest tests (to make sure slow tests are executed in parallel
+            # and to prevent situations where, at the end, all test nodes are
+            # waiting for the last one to finish).
+            test_priority_list.append(
+                (line.getStringIndex() == 'PASSED',
+                 -line.getProperty('duration'),
+                 line.getTitle()))
+      sorted_test_list = [x[2] for x in sorted(test_priority_list)]
+      # Sort tests by name to have consistent ids for test result lines on a
+      # test suite.
       for test_name in sorted(test_name_list):
         index = 0
         if sorted_test_list:
-- 
2.30.9
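
A minimal standalone sketch (not part of the patch) of the prioritization the new comment describes, assuming each previous Test Result Line is represented here by a plain dict as a hypothetical stand-in for the catalog line objects and their getStringIndex()/duration values:

    # Same key shape as the patch: (passed, -duration, title). False sorts
    # before True, so previously failed tests come first; among tests with the
    # same outcome, larger durations negate to smaller keys and so run earlier.
    previous_run = [
        {'title': 'testFast', 'string_index': 'PASSED', 'duration': 50},
        {'title': 'testSlow', 'string_index': 'PASSED', 'duration': 1000},
        {'title': 'testFailing', 'string_index': 'FAILED', 'duration': 100},
    ]
    test_priority_list = [
        (line['string_index'] == 'PASSED', -line['duration'], line['title'])
        for line in previous_run
    ]
    sorted_test_list = [title for _, _, title in sorted(test_priority_list)]
    print(sorted_test_list)  # ['testFailing', 'testSlow', 'testFast']

This is the same ordering the new test_startUnitTestRunsFailedTestFirst asserts: the failed test first, then the remaining tests from slowest to fastest.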