from Products.DCWorkflow.DCWorkflow import ValidationFailed
from Products.ERP5Type.tests.ERP5TypeTestCase import ERP5TypeTestCase
import json 
from time import sleep
from DateTime import DateTime
class TestTaskDistribution(ERP5TypeTestCase):
  def afterSetUp(self):
    """Create the distributors, dedicated test node/suite modules, and a
    validated default test suite shared by every test of this class.

    Distributor classes are monkey-patched so that they look up the
    dedicated test modules; the patches are reverted in beforeTearDown.
    """
    self.portal = portal = self.getPortalObject()
    self.test_node_module = self.portal.getDefaultModule(
      portal_type='Test Node Module')
    self.test_suite_module = self.portal.getDefaultModule(
      portal_type='Test Suite Module')
    self.test_result_module = self.portal.getDefaultModule(
      portal_type='Test Result Module')
    self.test_suite = self.portal.getDefaultModule(portal_type='Test Suite')
    self.tool = tool = self.portal.portal_task_distribution
    # Create the three kinds of distributors once; they are reused
    # between test runs.
    if getattr(tool, "TestTaskDistribution", None) is None:
      tool.newContent(id="TestTaskDistribution",
           portal_type="ERP5 Project Unit Test Distributor")
    tool.TestTaskDistribution.setMaxTestSuite(None)
    if getattr(tool, "TestPerformanceTaskDistribution", None) is None:
      tool.newContent(id="TestPerformanceTaskDistribution",
           portal_type="Cloud Performance Unit Test Distributor")
    if getattr(tool, "TestScalabilityTaskDistribution", None) is None:
      tool.newContent(id="TestScalabilityTaskDistribution",
           portal_type="ERP5 Scalability Distributor")
    self.distributor = tool.TestTaskDistribution
    self.performance_distributor = tool.TestPerformanceTaskDistribution
    self.scalability_distributor = tool.TestScalabilityTaskDistribution
    # Use dedicated modules so the tests never touch real site content.
    if getattr(portal, "test_test_node_module", None) is None:
      portal.newContent(portal_type="Test Node Module",
                        id="test_test_node_module")
    if getattr(portal, "test_test_suite_module", None) is None:
      portal.newContent(portal_type="Test Suite Module",
                        id="test_test_suite_module")
    self.test_suite_module = portal.test_test_suite_module
    self.test_node_module = portal.test_test_node_module
    # Start every test from empty modules.
    self.test_suite_module.manage_delObjects(ids=[
      x for x in self.test_suite_module.objectIds()])
    self.test_node_module.manage_delObjects(ids=[
      x for x in self.test_node_module.objectIds()])

    original_class = self.distributor.__class__
    original_scalability_class = self.scalability_distributor.__class__
    original_performance_class = self.performance_distributor.__class__

    # Patch the node-module lookup on all distributor classes; the
    # original implementation is saved for beforeTearDown.
    self._original_getTestNodeModule = original_class._getTestNodeModule
    def _getTestNodeModule(self):
      return self.getPortalObject().test_test_node_module
    original_class._getTestNodeModule = _getTestNodeModule
    original_scalability_class._getTestNodeModule = _getTestNodeModule
    original_performance_class._getTestNodeModule = _getTestNodeModule

    # Same for the suite-module lookup.
    self._original_getTestSuiteModule = original_class._getTestSuiteModule
    def _getTestSuiteModule(self):
      return self.getPortalObject().test_test_suite_module
    original_class._getTestSuiteModule = _getTestSuiteModule
    original_scalability_class._getTestSuiteModule = _getTestSuiteModule
    original_performance_class._getTestSuiteModule = _getTestSuiteModule

    self._cleanupTestResult()

    # One default node and one validated default suite are always present.
    self.test_node_module.newContent(
      portal_type='Test Node', title='Node0',
      test_suite_max=4)
    default_test_suite = self.test_suite_module.newContent(
        portal_type='Test Suite', title='Default Test Suite',
        test_suite_title='Default Test Suite', int_index=1)
    default_test_suite.validate()
    self.default_test_title = default_test_suite.getTitle()
    self.tic()

  def beforeTearDown(self):
    """Restore the distributor class methods monkey-patched in afterSetUp."""
    original_class = self.distributor.__class__
    original_scalability_class = self.scalability_distributor.__class__
    original_performance_class = self.performance_distributor.__class__

    original_class._getTestNodeModule = self._original_getTestNodeModule
    original_class._getTestSuiteModule = self._original_getTestSuiteModule
    original_scalability_class._getTestNodeModule = self._original_getTestNodeModule
    original_scalability_class._getTestSuiteModule = self._original_getTestSuiteModule
    original_performance_class._getTestNodeModule = self._original_getTestNodeModule
    original_performance_class._getTestSuiteModule = self._original_getTestSuiteModule
  def _createTestNode(self, quantity=1, reference_correction=0,
                      specialise_value=None):
    """Create `quantity` test nodes specialised into the given distributor.

    Titles are 'UnitTestNode <n>' where numbering starts at
    1 + reference_correction. Returns the list of created nodes.
    """
    if specialise_value is None:
      specialise_value = self.distributor
    return [
      self.test_node_module.newContent(
        title='UnitTestNode %i' % (index + 1 + reference_correction),
        test_suite_max=4,
        specialise_value=specialise_value,
      )
      for index in range(quantity)]

  def _createTestSuite(self, quantity=1, priority=1, reference_correction=0,
                       specialise_value=None, title=None, portal_type="Test Suite",
                       graph_coordinate="1", cluster_configuration='{ "test": {{ count }} }'):
    """Create and validate `quantity` test suites.

    Each suite is titled 'test suite <n>' (optionally suffixed with
    `title`), gets one Test Suite Repository pointing at the erp5 git
    repository, and is validated. Returns the list of created suites.
    """
    if title is None:
      title = ""
    if specialise_value is None:
      specialise_value = self.distributor
    test_suite_list = []
    for i in range(quantity):
      test_suite_title = "test suite %i" % (i + 1 + reference_correction)
      if title:
        test_suite_title += " %s" % title
      test_suite = self.test_suite_module.newContent(
        portal_type=portal_type,
        title=test_suite_title,
        test_suite='B%i' % i,
        int_index=priority,
        specialise_value=specialise_value,
      )
      test_suite.setClusterConfiguration(cluster_configuration)
      # Graph coordinates only make sense for scalability suites.
      if portal_type == "Scalability Test Suite":
        test_suite.setGraphCoordinate(graph_coordinate)
      test_suite.newContent(
        portal_type='Test Suite Repository',
        branch='master',
        git_url='https://lab.nexedi.com/nexedi/erp5.git',
        buildout_section_id='erp5',
        profile_path='software-release/software.cfg',
      )
      test_suite.validate()
      test_suite_list.append(test_suite)
    return test_suite_list

  def test_01_createTestNode(self):
    """A node created by the helper is a Test Node document."""
    test_node, = self._createTestNode()
    self.assertEqual("Test Node", test_node.getPortalType())
  def test_02_createTestSuite(self):
    """Check the helper for both regular and scalability test suites."""
    # Regular Test Suite
    test_suite = self._createTestSuite()[0]
    self.assertEqual(test_suite.getPortalType(), "Test Suite")
    self.assertEqual(test_suite.getSpecialise(),
                     self.distributor.getRelativeUrl())
    # Scalability Test Suite
    scalability_test_suite = self._createTestSuite(
                                 portal_type="Scalability Test Suite",
                                 specialise_value=self.scalability_distributor,
                               )[0]
    self.assertEqual(scalability_test_suite.getPortalType(),
                     "Scalability Test Suite")
    self.assertEqual(scalability_test_suite.getSpecialise(),
                     self.scalability_distributor.getRelativeUrl())
  def test_02b_checkConsistencyOnTestSuite(self):
    """Property constraints on a suite and its repository are enforced,
    and a repository without git url blocks validation."""
    test_suite, = self._createTestSuite()
    self.tic()
    test_suite_repository, = test_suite.objectValues(
      portal_type="Test Suite Repository")
    for document, property_id, value in (
        (test_suite, 'title', 'ERP5-MASTER'),
        (test_suite, 'test_suite', 'ERP5'),
        (test_suite_repository, 'git_url',
         'https://lab.nexedi.com/nexedi/erp5.git'),
        (test_suite_repository, 'buildout_section_id', 'erp5'),
        (test_suite_repository, 'branch', 'master')):
      self.checkPropertyConstraint(document, property_id, value)
    # Without a git url the suite cannot be validated again.
    test_suite_repository.setGitUrl(None)
    test_suite.invalidate()
    self.assertRaises(ValidationFailed,
      self.portal.portal_workflow.doActionFor, test_suite, 'validate_action')
  def _callOptimizeAlarm(self):
    """Run the distributor optimisation alarm and process its activities."""
    alarm = self.portal.portal_alarms.task_distributor_alarm_optimize
    alarm.activeSense()
    self.tic()
  def _callRestartStuckTestResultAlarm(self):
    """Run the alarm restarting stuck test results and process activities."""
    alarm = self.portal.portal_alarms.test_result_alarm_restarted_stuck_test_result
    alarm.activeSense()
    self.tic()
  def test_subscribeNode_ReturnValue(self):
    """subscribeNode returns the node configuration as a JSON string,
    reflecting the distributor's process_timeout."""
    def getSubscriptionConfig():
      return json.loads(self.distributor.subscribeNode('COMP-X', 'QPK'))
    self.assertEqual({'process_timeout': None}, getSubscriptionConfig())
    self.distributor.edit(process_timeout=50)
    self.assertEqual({'process_timeout': 50}, getSubscriptionConfig())
  def test_03_startTestSuiteWithOneTestNode(self):
    """With a single test node, startTestSuite returns every suite."""
    def startTestSuite():
      return json.loads(
        self.distributor.startTestSuite(title="COMP32-Node1"))
    # No suite exists yet, so nothing is distributed.
    self.assertEqual([], startTestSuite())
    self._createTestSuite(quantity=3)
    self.tic()
    self._callOptimizeAlarm()
    config_list = startTestSuite()
    self.assertEqual(3, len(config_list))
    self.assertEqual(set(['B0', 'B1', 'B2']),
                     set([entry['test_suite'] for entry in config_list]))

  def test_04_startTestSuiteWithTwoTestNode(self):
    """
    When we have two test suites and we have two test nodes, we should have
    one test suite distributed per test node
    """
    config_list = json.loads(self.distributor.startTestSuite(
                             title="COMP32-Node1"))
    config_list = json.loads(self.distributor.startTestSuite(
                             title="COMP32-Node2"))
    self.assertEqual([], config_list)
    self._createTestSuite(quantity=2)
    self.tic()
    self._callOptimizeAlarm()
    def getNodeConfig(test_node_title):
      # Each node must receive exactly one suite.
      config_list = json.loads(
        self.distributor.startTestSuite(title=test_node_title))
      self.assertEqual(1, len(config_list))
      return (test_node_title,
              set([entry['test_suite'] for entry in config_list]))
    config1 = getNodeConfig("COMP32-Node1")
    config2 = getNodeConfig("COMP32-Node2")
    # Either distribution order is acceptable, as long as the two
    # suites end up on different nodes.
    self.assertIn([config1, config2], [
      [('COMP32-Node1', set([u'B0'])), ('COMP32-Node2', set([u'B1']))],
      [('COMP32-Node1', set([u'B1'])), ('COMP32-Node2', set([u'B0']))]],
      "%r" % ([config1, config2],))
  def test_04b_startTestSuiteOrder(self):
    """
    When we have many test suites associated to one test node, the method
    startTestSuite should give first test suites with oldest test results.
    Like this we stop using the random order that was unfair for unlucky
    people.
    """
    config_list = json.loads(self.distributor.startTestSuite(
                             title="COMP42-Node1"))
    self.assertEqual([], config_list)
    self._createTestSuite(quantity=3)
    self.tic()
    self._callOptimizeAlarm()
    def getTestSuiteList():
      config_list = json.loads(self.distributor.startTestSuite(
                             title="COMP42-Node1"))
      return ["%s" % x["test_suite_title"] for x in config_list]
    # By default we have random order between test suites
    self.assertEqual(set(["test suite 1", "test suite 2", "test suite 3"]),
                     set(getTestSuiteList()))
    # Check that if test suite 1 and test suite 2 are recently processed,
    # then next work must be test suite 3
    def processTest(test_title, revision, start_count=2, stop_count=2):
      """start_count: number of test lines to start
         stop_count: number of test lines to stop
      """
      status_dict = {}
      test_result_path, revision = self._createTestResult(revision=revision,
        test_list=['testFoo', 'testBar'], test_title=test_title)
      if start_count:
        line_url, test = self.tool.startUnitTest(test_result_path)
      if start_count == 2:
        next_line_url, next_test = self.tool.startUnitTest(test_result_path)
        self.assertEqual(set(['testFoo', 'testBar']), set([test, next_test]))
      if stop_count:
        self.tool.stopUnitTest(line_url, status_dict)
      if stop_count == 2:
        self.tool.stopUnitTest(next_line_url, status_dict)
      test_result = self.portal.restrictedTraverse(test_result_path)
      # State only moves once activities are processed by tic().
      self.assertEqual(test_result.getSimulationState(), "started")
      self.tic()
      if stop_count == 2:
        self.assertEqual(test_result.getSimulationState(), "stopped")
      else:
        self.assertEqual(test_result.getSimulationState(), "started")

    processTest("test suite 1", "r0=a")
    self.tic()
    sleep(1) # needed because creation date sql value does not record milliseconds
    processTest("test suite 2", "r0=b")
    self.tic()
    sleep(1)
    self.assertEqual(getTestSuiteList()[0], "test suite 3")
    processTest("test suite 3", "r0=b")
    # after test suite 3, we now have to process test suite 1
    # since it is the oldest one
    self.tic()
    sleep(1)
    self.assertEqual(getTestSuiteList()[0], "test suite 1")
    processTest("test suite 1", "r0=c")
    # after test suite 1, we now have to process test suite 2
    # since it is the oldest one
    self.tic()
    sleep(1)
    self.assertEqual(getTestSuiteList()[0], "test suite 2")
    processTest("test suite 2", "r0=d")
    self.tic()
    sleep(1)
    # now let's say for any reason test suite 1 has been done
    processTest("test suite 1", "r0=e")
    self.tic()
    sleep(1)
    # we should then have by order 3, 2, 1
    self.assertEqual(["test suite 3", "test suite 2", "test suite 1"],
                     getTestSuiteList())
    # now launch all tests of test 3, even if they are not finished yet
    processTest("test suite 3", "r0=f", stop_count=1)
    self.tic()
    sleep(1)
    self.assertEqual(["test suite 2", "test suite 1", "test suite 3"],
                     getTestSuiteList())
    # now launch partially tests of suite 2, it must have priority over
    # test 3, even if test 3 is older because all tests of test 3 are ongoing
    processTest("test suite 2", "r0=g", start_count=1, stop_count=0)
    self.tic()
    sleep(1)
    self.assertEqual(["test suite 1", "test suite 2", "test suite 3"],
                     getTestSuiteList())
  def _cleanupTestResult(self):
    """Cancel any started/stopped test result left over by previous runs."""
    self.tic()
    cleanup_state_list = ['started', 'stopped']
    search_result = self.test_result_module.searchFolder(
      title='"TEST FOO" OR "test suite %" OR "Default Test Suite"',
      simulation_state=cleanup_state_list)
    for test_result in search_result:
      # Re-check the state on the real object before cancelling.
      if test_result.getSimulationState() in cleanup_state_list:
        test_result.cancel()
    self.tic()
  def _createTestResult(self, revision="r0=a,r1=a", node_title='Node0',
                              test_list=None, tic=1, allow_restart=False,
                              test_title=None):
    """Call distributor.createTestResult with convenient defaults.

    Returns whatever createTestResult returns (typically a
    (test_result_path, revision) tuple, or None).
    """
    if test_title is None:
      test_title = self.default_test_title
    result = self.distributor.createTestResult(
      "", revision, test_list or [], allow_restart,
      test_title=test_title, node_title=node_title)
    # we commit, since usually we have a remote call only doing this
    (self.tic if tic else self.commit)()
    return result
  def test_05_createTestResult(self):
    """
    We will check the method createTestResult of distributor
    """
    self._createTestNode()
    self.tic()
    test_result_path, revision = self._createTestResult()
    self.assertEqual("r0=a,r1=a", revision)
    self.assertTrue(test_result_path.startswith("test_result_module/"))
    # If we ask again with another revision, we should get with previous
    # revision
    next_test_result_path, next_revision = self._createTestResult(
      revision="r0=a,r1=b", node_title="UnitTestNode 1")
    self.assertEqual(revision, next_revision)
    self.assertEqual(next_test_result_path, test_result_path)
    # Check if test result object is well created
    test_result = self.getPortalObject().unrestrictedTraverse(test_result_path)
    self.assertEqual("Test Result", test_result.getPortalType())
    self.assertEqual(0, len(test_result.objectValues(
                             portal_type="Test Result Line")))
    # now check that if we pass list of test, new lines will be created
    next_test_result_path, next_revision = self._createTestResult(
      test_list=['testFoo', 'testBar'])
    self.assertEqual(next_test_result_path, test_result_path)
    line_list = test_result.objectValues(portal_type="Test Result Line")
    self.assertEqual(2, len(line_list))
    self.assertEqual(set(['testFoo', 'testBar']),
                     set([x.getTitle() for x in line_list]))
    line_url, _ = self.tool.startUnitTest(test_result_path)
    result = self._createTestResult(test_list=['testFoo', 'testBar'])
    self.assertEqual((test_result_path, revision), result)
    self.tool.startUnitTest(test_result_path)
    # all tests of this test suite are now started, stop affecting test node to it
    result = self._createTestResult(test_list=['testFoo', 'testBar'])
    self.assertEqual(None, result)
    # though, if we restart one line, we will have affectation again
    self.portal.restrictedTraverse(line_url).redraft()
    self.commit()
    result = self._createTestResult(test_list=['testFoo', 'testBar'])
    self.assertEqual((test_result_path, revision), result)
    self.tool.startUnitTest(test_result_path)
  def test_05b_createTestResultDoesNotReexecuteRevision(self):
    """
    Make sure to no retest former revision. This scenario must work
    - testnode call createTestResult with revision r0=b. Test is executed
    - By hand is created test with revision r0=a (to make testnode checking old
      revision). Test is executed
    - if testnode ask again for r0=b, no test must be created
    - if testnode ask for r0=c, then usual test is created/executed
    """
    status_dict = {}
    def runSingleTest(revision):
      # Create a result for `revision`, run its only line to completion
      # and check the result ends up stopped.
      test_result_path, _ = self._createTestResult(
        revision=revision, test_list=["testFoo"])
      line_url, _ = self.tool.startUnitTest(test_result_path)
      self.tool.stopUnitTest(line_url, status_dict)
      test_result = self.getPortalObject().unrestrictedTraverse(
        test_result_path)
      self.tic()
      self.assertEqual("stopped", test_result.getSimulationState())
    # launch test r0=b
    runSingleTest("r0=b")
    # launch test r0=a
    runSingleTest("r0=a")
    # Make sure we do not relaunch test with revision r0=b
    self.assertEqual(None,
      self._createTestResult(revision="r0=b", test_list=["testFoo"]))
    # launch test r0=c
    runSingleTest("r0=c")
  def test_05c_createTestResult_with_registered_test_node(self):
    """The Test Result Node inside a Test Result is specialised into the
    test node that registered itself (here the default 'Node0')."""
    test_result_path, _ = self._createTestResult()
    test_result = self.getPortalObject().unrestrictedTraverse(test_result_path)
    test_result_node = test_result.contentValues(
      portal_type='Test Result Node')[0]
    self.assertEqual('Node0', test_result_node.getSpecialiseTitle())
  def test_06_startStopUnitTest(self):
    """
    We will check methods startUnitTest/stopUnitTest of task distribution tool
    """
    test_result_path, _ = self._createTestResult(
      test_list=['testFoo', 'testBar'])
    test_result = self.getPortalObject().unrestrictedTraverse(test_result_path)
    line_url, test = self.tool.startUnitTest(test_result_path)
    next_line_url, next_test = self.tool.startUnitTest(test_result_path)
    # once all tests are affected, stop affecting resources on this test result
    next_result = self.tool.startUnitTest(test_result_path)
    self.assertEqual(None, next_result)
    # first launch, we have no time optimisations, so tests are
    # launched in alphabetical order
    self.assertEqual(['testBar', 'testFoo'], [test, next_test])
    status_dict = {}
    self.tool.stopUnitTest(line_url, status_dict)
    self.tool.stopUnitTest(next_line_url, status_dict)
    line = self.portal.unrestrictedTraverse(line_url)
    def checkDuration(line):
      # stopUnitTest must have recorded a strictly positive float duration.
      duration = getattr(line, "duration", None)
      self.assertTrue(isinstance(duration, float))
      self.assertTrue(duration > 0)
    checkDuration(line)
    next_line = self.portal.unrestrictedTraverse(next_line_url)
    checkDuration(next_line)
    # Make sure second test takes more time
    next_line.duration = line.duration + 1
    # So if we launch another unit test, it will process first the
    # one which is the slowest
    self.tic()
    self.assertEqual("stopped", test_result.getSimulationState())
    self.tic()
    next_test_result_path, _ = self._createTestResult(
      test_list=['testFoo', 'testBar'], revision="r0=a,r1=b")
    self.assertNotEqual(next_test_result_path, test_result_path)
    line_url, test = self.tool.startUnitTest(next_test_result_path)
    next_line_url, next_test = self.tool.startUnitTest(next_test_result_path)
    self.assertEqual(['testFoo', 'testBar'], [test, next_test])
  def test_06b_restartStuckTest(self):
    """
    Check that a test result line stuck in 'started' is redrafted by the
    alarm, to give another test node the opportunity to work on it.
    """
    test_result_path, _ = self._createTestResult(
      test_list=['testFoo', 'testBar'])
    test_result = self.portal.unrestrictedTraverse(test_result_path)
    line_url, _ = self.tool.startUnitTest(test_result_path)
    now = DateTime()
    def checkTestResultLine(expected):
      # Compare (title, state) pairs sorted by title.
      line_list = test_result.objectValues(portal_type="Test Result Line")
      found_list = [(x.getTitle(), x.getSimulationState()) for x in line_list]
      found_list.sort(key=lambda x: x[0])
      self.assertEqual(expected, found_list)
    checkTestResultLine([('testBar', 'started'), ('testFoo', 'draft')])
    # A freshly started line is not considered stuck.
    self._callRestartStuckTestResultAlarm()
    checkTestResultLine([('testBar', 'started'), ('testFoo', 'draft')])
    line_url, _ = self.tool.startUnitTest(test_result_path)
    checkTestResultLine([('testBar', 'started'), ('testFoo', 'started')])
    self._callRestartStuckTestResultAlarm()
    checkTestResultLine([('testBar', 'started'), ('testFoo', 'started')])
    # now let change history to do like if a test result line was started
    # a long time ago
    line = self.portal.restrictedTraverse(line_url)
    for history_line in line.workflow_history["test_result_workflow"]:
      if history_line['action'] == 'start':
        history_line['time'] = now - 1
    self._callRestartStuckTestResultAlarm()
    checkTestResultLine([('testBar', 'started'), ('testFoo', 'draft')])
    self.tool.stopUnitTest(line_url, {})
    checkTestResultLine([('testBar', 'started'), ('testFoo', 'stopped')])
  def test_07_reportTaskFailure(self):
    """
    When all test nodes report failures, we should mark the test result as
    failed. If we do not do so, test node would always pickup same repository
    revision and might fail with same failure forever (for example, a slapos
    build issue).
    """
    self._createTestNode()
    test_result_path, _ = self._createTestResult()
    next_test_result_path, _ = self._createTestResult(node_title="UnitTestNode 1")
    self.assertEqual(test_result_path, next_test_result_path)
    test_result = self.getPortalObject().unrestrictedTraverse(test_result_path)
    self.assertEqual("started", test_result.getSimulationState())
    node_list = test_result.objectValues(portal_type="Test Result Node",
                                         sort_on=[("title", "ascending")])
    def assertNodeStates(first_state, second_state):
      self.assertEqual(
        [("Node0", first_state), ("UnitTestNode 1", second_state)],
        [(node.getTitle(), node.getSimulationState()) for node in node_list])
    assertNodeStates("started", "started")
    # First node fails: the test result itself must survive.
    self.tool.reportTaskFailure(test_result_path, {}, "Node0")
    self.assertEqual("started", test_result.getSimulationState())
    assertNodeStates("failed", "started")
    # Second (last) node fails: the whole test result fails.
    self.tool.reportTaskFailure(test_result_path, {}, "UnitTestNode 1")
    self.assertEqual("failed", test_result.getSimulationState())
    assertNodeStates("failed", "failed")
  def test_07b_reportTaskFailureWithRunningTest(self):
    """
    Similar to above test. Though, sometimes there is failure reported only because
    runTestSuite reached timeout. This happens when not enough testnode are working
    on a very long test suite. So code investigate if tests looked working fine, and
    it might try to not cancel test result if there is chance that tests could be
    continued.

    For example :
    - testnode0 start test suite Foo with revision r0 which would take 6 hours (other
      testnodes are busy)
    - after 4 hours, runTestSuite reach timeout of 4 hours (value set in test nodes).
      thus it report a failure. We do not cancel the test result since everything went
      fine up to now
    - after some time testnode0 come back to run test suite Foo, revision r0, and
      just do the 2 remaining hours. Test Suite can go up to the end even if we have
      timeout smaller than total time for test suite.
    """
    now = DateTime()
    try:
      # Pretend the test result was created two hours ago.
      self.pinDateTime(now - 1.0/24*2)
      test_result_path, _ = self._createTestResult(
                                               test_list=['testFoo', 'testBar'])
      test_result = self.getPortalObject().unrestrictedTraverse(test_result_path)
      self.assertEqual("started", test_result.getSimulationState())
      node, = test_result.objectValues(portal_type="Test Result Node",
                                           sort_on=[("title", "ascending")])
      self.assertEqual("started", node.getSimulationState())
      self.tool.startUnitTest(test_result_path)
      # We have a failure but with recent activities on tests
      self.pinDateTime(now - 1.0/24*1.5)
      self.tool.reportTaskFailure(test_result_path, {}, "Node0")
      self.assertEqual("failed", node.getSimulationState())
      self.assertEqual("started", test_result.getSimulationState())
      # We have a failure but with no recent activities on tests
      self.pinDateTime(now)
      self.tool.reportTaskFailure(test_result_path, {}, "Node0")
      self.assertEqual("failed", node.getSimulationState())
      self.assertEqual("failed", test_result.getSimulationState())
    finally:
      self.unpinDateTime()
  def test_08_checkWeCanNotCreateTwoTestResultInParallel(self):
    """
    To avoid duplicates of test result when several testnodes works on the
    same suite, we create test and we immediately reindex it. So we must
    be able to find new test immediately after.
    """
    self._createTestNode()
    self.tic()
    # Neither call runs tic, emulating two concurrent remote calls.
    first_path, _ = self._createTestResult(tic=0)
    second_path, _ = self._createTestResult(node_title="UnitTestNode 1", tic=0)
    self.assertEqual(first_path, second_path)
  def _checkCreateTestResultAndAllowRestart(self, tic=False):
    """Run one test to completion, then check that the same revision is
    only re-executed when allow_restart is passed.

    tic -- whether to process activities right after stopping the line.
    """
    test_result_path, _ = self._createTestResult(test_list=["testFoo"])
    line_url, _ = self.tool.startUnitTest(test_result_path)
    status_dict = {}
    self.tool.stopUnitTest(line_url, status_dict)
    if tic:
      self.tic()
    test_result = self.getPortalObject().unrestrictedTraverse(test_result_path)
    # Same revision without allow_restart: nothing to do.
    self.assertEqual(None, self._createTestResult(test_list=["testFoo"]))
    # With allow_restart a brand new test result is created.
    next_test_result_path, _ = self._createTestResult(
      test_list=["testFoo"], allow_restart=True)
    self.assertNotEqual(next_test_result_path, test_result_path)
    self.tic()
    self.assertEqual("stopped", test_result.getSimulationState())
  def test_09_checkCreateTestResultAndAllowRestartWithoutTic(self):
    """
    The allow_restart option of createTestResult makes it possible to
    launch tests again even if the given revision is already tested.

    Is this really useful and used ?
    """
    self._checkCreateTestResultAndAllowRestart()

  def test_09b_checkCreateTestResultAndAllowRestartWithTic(self):
    """
    Same scenario as test_09, but activities are processed (tic) right
    after stopUnitTest before checking the restart behaviour.
    """
    self._checkCreateTestResultAndAllowRestart(tic=True)

  def test_10_cancelTestResult(self):
    # TODO(review): placeholder — the cancel scenario is not covered yet.
    pass

  def test_10b_generateConfiguration(self):
    """
    It shall be possible on a test suite to define configuration we would like
    to use to create slapos instance.
    """
    test_suite, = self._createTestSuite(cluster_configuration=None)
    self.tic()
    generate = self.distributor.generateConfiguration
    # No configuration defined: an empty configuration is returned.
    self.assertEqual('{"configuration_list": [{}]}',
                     generate(test_suite.getTitle()))
    # A non-JSON configuration is ignored.
    test_suite.setClusterConfiguration("{'foo': 3}")
    self.assertEqual('{"configuration_list": [{}]}',
                     generate(test_suite.getTitle()))
    # A valid JSON configuration is passed through.
    test_suite.setClusterConfiguration('{"foo": 3}')
    self.assertEqual('{"configuration_list": [{"foo": 3}]}',
                     generate(test_suite.getTitle()))
    # make sure generateConfiguration does not fail if test suite is invalidated
    test_suite.invalidate()
    self.tic()
    self.assertEqual('{"configuration_list": [{}]}',
                     generate(test_suite.getTitle()))
631

632 633 634 635
  def _checkTestSuiteAggregateList(self, *args):
    """Assert which test suites are aggregated to each test node.

    Each positional argument is a pair [test_node, expected_title_list];
    expected titles are matched against the last word of each aggregate
    title, as a set (order-independent).
    """
    self.tic()
    self._callOptimizeAlarm()
    for node, expected_title_list in args:
      got_title_list = [title.split(" ")[-1]
                        for title in node.getAggregateTitleList()]
      self.assertEqual(set(got_title_list),
        set(expected_title_list),
        "incorrect aggregate for %r, got %r instead of %r" % \
        (node.getTitle(), got_title_list, expected_title_list))

  def test_11_checkERP5ProjectOptimizationIsStable(self):
    """
    When we have two test suites and we have two test nodes, we should have
    one test suite distributed per test node.  The optimization alarm must
    then keep the distribution stable while nodes and suites come and go.
    """
    test_node_one, test_node_two = self._createTestNode(quantity=2)
    test_suite_one = self._createTestSuite(reference_correction=+0,
                              title='one')[0]
    self._createTestSuite(reference_correction=+1,
                              title='two')[0].getRelativeUrl()
    self.tic()
    self._callOptimizeAlarm()
    check = self._checkTestSuiteAggregateList
    check([test_node_one, ["one"]],
          [test_node_two, ["two"]])
    # first test suite is invalidated, so it should be removed from nodes,
    # but this should not change assignment of second test suite
    test_suite_one.invalidate()
    check([test_node_one, []],
          [test_node_two, ["two"]])
    # an additional test node is added, with lower title, this should
    # still not change anything
    test_node_zero = self._createTestNode(quantity=1, reference_correction=-1)[0]
    check([test_node_zero, []],
          [test_node_one, []],
          [test_node_two, ["two"]])
    # test suite one is validated again, it is installed on first
    # available test node
    test_suite_one.validate()
    check([test_node_zero, ["one"]],
          [test_node_one, []],
          [test_node_two, ["two"]])
    # for some reasons, test_node two is dead, so the work is distributed
    # to remaining test nodes
    test_node_two.invalidate()
    check([test_node_zero, ["one"]],
          [test_node_one, ["two"]],
          [test_node_two, []])
    # we add another test suite, since all test node already have one
    # test suite, the new test suite is given to first available one
    self._createTestSuite(reference_correction=+2,
                                title='three')[0].getRelativeUrl()
    check([test_node_zero, ["one", "three"]],
          [test_node_one, ["two"]],
          [test_node_two, []])
    # test node two is coming back. To have better repartition of work,
    # move some work from overloaded test node to less busy test node, while
    # still trying to move as few test suites as possible (here only one)
    test_node_two.validate()
    check([test_node_zero, ["three"]],
          [test_node_one, ["two"]],
          [test_node_two, ["one"]])
    # Now let's create a test suite needing between 1 to 2 test nodes
    # Make sure additional work is added without moving other test suites
    self._createTestSuite(reference_correction=+3,
                             priority=4, title='four')[0].getRelativeUrl()
    check([test_node_zero, ["three", "four"]],
          [test_node_one, ["two", "four"]],
          [test_node_two, ["one"]])
    # Now let's create a test suite needing 1 node
    # to make sure test nodes with less work get the work first
    test_suite_five = self._createTestSuite(reference_correction=+4,
                             title='five')[0]
    check([test_node_zero, ["three", "four"]],
          [test_node_one, ["two", "four"]],
          [test_node_two, ["one", "five"]])
    # Now let's create another test suite needing between 2 to 3 test nodes
    # and increase priority of one suite to make all test nodes almost saturated
    test_suite_five.setIntIndex(7)
    self._createTestSuite(reference_correction=+5,
                             priority=7, title='six')
    check([test_node_zero, ["three", "four","five", "six"]],
          [test_node_one, ["two", "four", "five", "six"]],
          [test_node_two, ["one", "five", "six"]])
    # Then, check what happens if all nodes are more than saturated
    # with a test suite needing between 3 to 5 test nodes
    self._createTestSuite(reference_correction=+6,
                             priority=9, title='seven')
    check([test_node_zero, ["three", "four", "five", "six"]],
          [test_node_one, ["two", "four", "five", "six"]],
          [test_node_two, ["one", "seven", "five", "six"]])
    # No place any more, adding more test suite has no consequence
    # we need 5*2 + 3*2  + 2*1 + 1*3 => 21 slots
    # (note: 'height' is the historical title used here, kept as data)
    self._createTestSuite(reference_correction=+7,
                             priority=9, title='height')
    check([test_node_zero, ["three", "four", "five", "six"]],
          [test_node_one, ["two", "four", "five", "six"]],
          [test_node_two, ["one", "seven", "five", "six"]])
    # free some place by removing a test suite
    # make sure free slots are fairly distributed to test suite having
    # less test nodes
    # We remove 3 slots, so we would need 18 slots
    test_suite_five.invalidate()
    check([test_node_zero, ["three", "four", "height", "six"]],
          [test_node_one, ["two", "four", "seven" , "six"]],
          [test_node_two, ["one", "seven", "height" , "six"]])
    # Check that additional test node would get work for missing assignments
    # No move of a test suite is done since in average we miss slots
    test_node_three, = self._createTestNode(reference_correction=2)
    check([test_node_zero, ["three", "four", "height", "six"]],
          [test_node_one, ["two", "four", "seven" , "six"]],
          [test_node_two, ["one", "seven", "height" , "six"]],
          [test_node_three, ["seven", "height"]])
    # With even more test nodes, check that we move some work to less
    # busy test nodes
    test_node_four, = self._createTestNode(reference_correction=3)
    test_node_five, = self._createTestNode(reference_correction=4)
    check([test_node_zero, ["three", "six", "height"]],
          [test_node_one, ["two", "six", "seven"]],
          [test_node_two, ["one", "seven", "height"]],
          [test_node_three, ["four", "seven", "height"]],
          [test_node_four, ["four", "seven", "height"]],
          [test_node_five, ["six", "seven", "height"]])
    test_node_six, = self._createTestNode(reference_correction=5)
    test_node_seven, = self._createTestNode(reference_correction=6)
    check([test_node_zero, ["three", "height"]],
          [test_node_one, ["two", "seven"]],
          [test_node_two, ["one", "height"]],
          [test_node_three, ["seven", "height"]],
          [test_node_four, ["four", "seven", "height"]],
          [test_node_five, ["six", "seven", "height"]],
          [test_node_six, ["six", "seven"]],
          [test_node_seven, ["four", "six"]])

  def test_11b_checkERP5ProjectDistributionWithCustomMaxQuantity(self):
    """
    Check that the property max_test_suite on the distributor could
    be used to customize the quantity of test suite affected per test node
    """
    test_node, = self._createTestNode(quantity=1)
    test_suite_list = self._createTestSuite(quantity=5)
    distributor = self.tool.TestTaskDistribution
    # With no explicit maximum, the optimizer assigns 4 suites to the node.
    distributor.setMaxTestSuite(None)
    self.tic()
    self._callOptimizeAlarm()
    self.assertEqual(4, len(set(test_node.getAggregateList())))
    # An explicit limit of 1 is honored; a limit above the number of
    # suites lets the node take all 5 of them.
    for maximum, expected_count in ((1, 1), (10, 5)):
      distributor.setMaxTestSuite(maximum)
      self._callOptimizeAlarm()
      self.assertEqual(expected_count, len(set(test_node.getAggregateList())))
    self.assertEqual(set(test_node.getAggregateList()),
                     set(suite.getRelativeUrl() for suite in test_suite_list))

  def test_12_checkCloudPerformanceOptimizationIsStable(self):
    """
    When we have two test suites and we have two test nodes, every
    performance test node should run every validated test suite.
    """
    test_node_one, test_node_two = self._createTestNode(quantity=2,
                               specialise_value=self.performance_distributor)
    test_suite_one = self._createTestSuite(
                          title='one', specialise_value=self.performance_distributor)[0]
    self._createTestSuite(title='two', reference_correction=+1,
                          specialise_value=self.performance_distributor)
    self.tic()
    self._callOptimizeAlarm()
    check = self._checkTestSuiteAggregateList
    check([test_node_one, ["one", "two"]],
          [test_node_two, ["one", "two"]])
    # first test suite is invalidated, so it should be removed from nodes,
    # but this should not change assignment of second test suite
    test_suite_one.invalidate()
    check([test_node_one, ["two"]],
          [test_node_two, ["two"]])
    # an additional test node is added, with lower title, it should
    # get in any case all test suites
    test_node_zero = self._createTestNode(quantity=1, reference_correction=-1,
                            specialise_value=self.performance_distributor)[0]
    check([test_node_zero, ["two"]],
          [test_node_one, ["two"]],
          [test_node_two, ["two"]])
    # test suite one is validated again, it is installed on every
    # test node
    test_suite_one.validate()
    check([test_node_zero, ["one", "two"]],
          [test_node_one, ["one", "two"]],
          [test_node_two, ["one", "two"]])
    # for some reasons, test_node two is dead, this has no consequence
    # for others
    test_node_two.invalidate()
    check([test_node_zero, ["one", "two"]],
          [test_node_one, ["one", "two"]],
          [test_node_two, ["one", "two"]])
    # we add another test suite, all test nodes should run it, except
    # test_node_two which is dead
    self._createTestSuite(title="three", reference_correction=+2,
                             specialise_value=self.performance_distributor)
    check([test_node_zero, ["one", "two", "three"]],
          [test_node_one, ["one", "two", "three"]],
          [test_node_two, ["one", "two"]])
    # test node two is coming back. It should run all test suites
    test_node_two.validate()
    check([test_node_zero, ["one", "two", "three"]],
          [test_node_one, ["one", "two", "three"]],
          [test_node_two, ["one", "two", "three"]])

  def test_13_startTestSuiteWithOneTestNodeAndPerformanceDistributor(self):
    """A performance test node receives one configuration per test suite
    when it calls startTestSuite, and nothing before suites exist."""
    def startForNode():
      # startTestSuite returns a JSON-encoded list of configurations.
      return json.loads(self.performance_distributor.startTestSuite(
        title="COMP32-Node1"))

    # No test suite yet: nothing to run.
    self.assertEqual([], startForNode())
    self._createTestSuite(quantity=2,
                          specialise_value=self.performance_distributor)
    self.tic()
    self._callOptimizeAlarm()
    config_list = startForNode()
    self.assertEqual(2, len(config_list))
    self.assertEqual(set(['test suite 1-COMP32-Node1',
                           'test suite 2-COMP32-Node1']),
                      set([config['test_suite_title']
                           for config in config_list]))

  def test_14_subscribeNodeCheckERP5ScalabilityDistributor(self):
    """
    Check test node subscription: every node subscribed to the scalability
    distributor must appear as a validated test node in the module.
    """
    test_node_module = self.test_node_module

    # Generate information for nodes to subscribe: title -> computer guid
    node_dict = {"COMP%d-Scalability-Node_test14" % i: "COMP-%d" % i
                 for i in range(5)}
    # Subscribe nodes
    for node_title, computer_guid in node_dict.items():
      self.scalability_distributor.subscribeNode(node_title,
                                                 computer_guid=computer_guid)
    self.tic()
    # Get title list of validated test nodes
    test_node_title_list = [
      x.getTitle()
      for x in test_node_module.searchFolder(validation_state='validated')]
    # Check subscription
    for node_title in node_dict:
      self.assertIn(node_title, test_node_title_list)
    # Check ping date
    # TODO..

  def test_15_optimizeConfigurationCheckElectionERP5ScalabilityDistributor(self):
    """
    Check optimizeConfiguration method of scalability distributor.
     - Check the master election
    """
    test_node_module = self.test_node_module

    ## 1 (check election, classic)
    # Subscribe nodes
    self.scalability_distributor.subscribeNode("COMP1-Scalability-Node1", computer_guid="COMP-1")
    self.scalability_distributor.subscribeNode("COMP2-Scalability-Node2", computer_guid="COMP-2")
    self.scalability_distributor.subscribeNode("COMP3-Scalability-Node3", computer_guid="COMP-3")
    self.scalability_distributor.subscribeNode("COMP4-Scalability-Node4", computer_guid="COMP-4")
    self.tic()
    # Check test node election
    def getMasterAndSlaveNodeList():
      """
      Optimize the configuration and return which nodes are master/slave
      """
      self._callOptimizeAlarm()
      # NOTE: keep explicit == True / == False comparisons: getMaster() may
      # be unset (None) on some nodes, and such nodes belong to neither list.
      master_test_node_list = [x for x in test_node_module.searchFolder()\
                               if (x.getMaster() == True  and x.getValidationState() == 'validated')]
      slave_test_node_list =  [x for x in test_node_module.searchFolder()\
                               if (x.getMaster() == False and x.getValidationState() == 'validated')]
      return master_test_node_list, slave_test_node_list
    master_test_node_list, slave_test_node_list = getMasterAndSlaveNodeList()

    # -Only one master must be elected
    self.assertEqual(1, len(master_test_node_list))
    # -Other test nodes must not be the master
    self.assertEqual(3, len(slave_test_node_list))

    # Get the current master test node
    current_master_test_node_1 = master_test_node_list[0]

    ## 2 (check election, with adding new nodes)
    # Add new nodes
    self.scalability_distributor.subscribeNode("COMP5-Scalability-Node5", computer_guid="COMP-5")
    self.scalability_distributor.subscribeNode("COMP6-Scalability-Node6", computer_guid="COMP-6")
    self.tic()
    # Check test node election
    master_test_node_list, slave_test_node_list = getMasterAndSlaveNodeList()
    # -Only one master must be elected
    self.assertEqual(1, len(master_test_node_list))
    # -Other test nodes must not be the master
    self.assertEqual(5, len(slave_test_node_list))

    # Get the current master test node
    current_master_test_node_2 =  master_test_node_list[0]
    # Master test node is kept while it is alive
    self.assertEqual(current_master_test_node_1.getTitle(),
                      current_master_test_node_2.getTitle())

    ## 3 (check election, with master deletion)
    # Invalidate master
    current_master_test_node_2.invalidate()
    # Check test node election
    master_test_node_list, slave_test_node_list = getMasterAndSlaveNodeList()
    # -Only one master must be elected
    self.assertEqual(1, len(master_test_node_list))
    # -Other test nodes must not be the master
    self.assertEqual(4, len(slave_test_node_list))

    # Get the current master test node
    current_master_test_node_3 = master_test_node_list[0]
    # Master test node must be an other test node than previously
    self.assertNotEqual(current_master_test_node_2.getTitle(),
                         current_master_test_node_3.getTitle())

  def test_16_startTestSuiteERP5ScalabilityDistributor(self):
    """
    Check test suite getting; for the scalability case only the master
    test node receives a test suite.
    """
    test_node_module = self.test_node_module

    # Subscribe nodes
    node_title_list = ["COMP%d-Scalability-Node%d" % (i, i)
                       for i in range(1, 5)]
    for i, node_title in enumerate(node_title_list, 1):
      self.scalability_distributor.subscribeNode(node_title,
                                                 computer_guid="COMP-%d" % i)
    # Create test suite
    self._createTestSuite(quantity=1,priority=1, reference_correction=0,
                       specialise_value=self.scalability_distributor, portal_type="Scalability Test Suite")
    self.tic()
    self._callOptimizeAlarm()
    # Get current master test node
    master_test_nodes = [x for x in test_node_module.searchFolder()\
                         if (x.getMaster() == True and x.getValidationState() == "validated")]
    current_master_test_node = master_test_nodes[0]
    self.tic()
    # Each node runs startTestSuite (result is a JSON-encoded list)
    config_nodes = dict(
      (title,
       json.loads(self.scalability_distributor.startTestSuite(title=title)))
      for title in node_title_list)
    # Check if master has got a non empty configuration
    self.assertNotEqual(config_nodes[current_master_test_node.getTitle()], [])
    # -Delete master test node suite from dict
    del config_nodes[current_master_test_node.getTitle()]
    # Check if slave test nodes have got an empty list
    for suite in config_nodes.values():
      self.assertEqual(suite, [])

  def test_17_isMasterTestnodeERP5ScalabilityDistributor(self):
    """
    Check the method isMasterTestnode(): it must answer True only for the
    elected master node.
    """
    test_node_module = self.test_node_module

    # Subscribe nodes
    for i in range(1, 5):
      self.scalability_distributor.subscribeNode(
        "COMP%d-Scalability-Node%d" % (i, i), computer_guid="COMP-%d" % i)
    self.tic()
    self._callOptimizeAlarm()
    # Optimize configuration
    self.scalability_distributor.optimizeConfiguration()
    self.tic()
    # Get test nodes (explicit == True / == False: getMaster() may be unset)
    master_test_nodes = [x for x in test_node_module.searchFolder()
                         if (x.getMaster() == True and x.getValidationState() == 'validated')]
    slave_test_nodes = [x for x in test_node_module.searchFolder()
                         if (x.getMaster() == False and x.getValidationState() == 'validated')]
    # Check isMasterTestnode method
    for master in master_test_nodes:
      self.assertTrue(self.scalability_distributor.isMasterTestnode(master.getTitle()))
    for slave in slave_test_nodes:
      self.assertFalse(self.scalability_distributor.isMasterTestnode(slave.getTitle()))

  def test_18_checkConfigurationGenerationERP5ScalabilityDistributor(self):
    """
    Check configuration generation: build a jinja2 cluster configuration
    whose zope-partition-dict depends on a `count` variable, and attach it
    to a scalability test suite.
    """
    # Subscribe nodes; keep (logical name, subscription result) pairs
    node_list = [
      ('NODE-%s' % (i,), self.scalability_distributor.subscribeNode("COMP%s-Scalability-Node%s" % (i, i), computer_guid="COMP-%s" % (i,))) for i in range(1,5)
    ]
    self.tic()
    self._callOptimizeAlarm()

    def generateZopePartitionDict(i):
      """
      Generate a partition-dict body (jinja2 template text) for i partitions,
      alternating 'user' and 'activity' families.
      """
      partition_dict = ""
      for j in range(0,i):
        family_name = ['user', 'activity'][j%2]
        partition_dict += '"%s-%s":{\n' %(family_name, node_list[j][0])
        # {{ count }} is left for jinja2 substitution at instantiation time
        partition_dict += ' "instance-count": {{ count }},\n'
        partition_dict += ' "family": "%s",\n' %family_name
        partition_dict += ' "computer_guid": "%s"\n' %node_list[j][0]
        partition_dict += '}'
        # comma between entries, newline after the last one
        if j != i-1:
          partition_dict += ',\n'
        else:
          partition_dict += '\n'
      return partition_dict


    # Generate a test suite
    # -Generate a configuration adapted to the test node list length:
    #  one {% if count == N %} block per possible node count
    cluster_configuration = '{"zope-partition-dict":{\n'
    zope_partition_dict = ""
    for i in range(1, len(node_list)+1):
      # %% escapes the literal '%' needed by the jinja2 {% if %} tag
      zope_partition_dict += "{%% if count == %d %%}\n" %i
      zope_partition_dict += generateZopePartitionDict(i)
      zope_partition_dict += "{% endif %}\n"
    cluster_configuration += zope_partition_dict + '\n}}'
    # -Generate graph coordinate
    graph_coordinate = range(1, len(node_list)+1)
    # -Create the test suite
    self._createTestSuite(quantity=1,priority=1, reference_correction=0,
                       specialise_value=self.scalability_distributor, portal_type="Scalability Test Suite",
                       graph_coordinate=graph_coordinate, cluster_configuration=cluster_configuration)
    self.tic()

    # NOTE(review): exploratory code kept from the original author; the
    # generated configuration itself is not asserted on yet.
#    # Master test node launch startTestSuite
#    for node in node_list:
#      if node.getMaster():
#        test_suite_title = self.scalability_distributor.startTestSuite(title=node.getTitle())
##        log("test_suite_title: %s" %test_suite_title)
#        break
#    # Get configuration list generated from test suite
##    configuration_list = self.scalability_distributor.generateConfiguration(test_suite_title)
#
#    # logs
##    log(configuration_list)

  def test_19_testMultiDistributor(self):
    # TODO(review): placeholder — behaviour with several distributors is
    # not covered yet; the body is intentionally empty.
    pass

  def test_20_TestSuite_Periodicity(self):
    """A test suite with periodicity enabled gets an alarm date after a run,
    and a new test result can only be created once that date is reached."""
    revision = 'a=a,b=b,c=c'
    suite_title = 'Periodicity Enabled Test Suite'
    self.addCleanup(self.unpinDateTime)
    test_suite = self.test_suite_module.newContent(
      portal_type='Test Suite',
      title=suite_title,
      int_index=1,
      # periodicity enabled
      enabled=True,
      # periodicity configuration: every day at 13:00
      periodicity_day_frequency=1,
      periodicity_hour=(13,),
      periodicity_minute=(0,),
    )
    test_suite.validate()
    self.tic()

    def createResult():
      return self._createTestResult(revision=revision,
                                    test_title=suite_title)

    # Before any run there is no alarm date; the first run sets it.
    self.pinDateTime(DateTime('2017/05/01 00:00:00 UTC'))
    self.assertEqual(None, test_suite.getAlarmDate())
    first_test_result, got_revision = createResult()
    self.assertTrue(first_test_result.startswith('test_result_module/'))
    self.assertEqual(revision, got_revision)
    self.assertEqual(DateTime('2017/05/01 13:00:00 UTC'), test_suite.getAlarmDate())
    # Finish the current test run
    self.portal.restrictedTraverse(first_test_result).stop()
    self.tic()

    # Once 13:00 has passed, a second run starts; alarm moves to next day.
    self.pinDateTime(DateTime('2017/05/01 14:00:00 UTC'))
    second_test_result, got_revision = createResult()
    self.assertTrue(second_test_result.startswith('test_result_module/'))
    self.assertNotEqual(first_test_result, second_test_result)
    self.assertEqual(revision, got_revision)
    self.assertEqual(DateTime('2017/05/02 13:00:00 UTC'), test_suite.getAlarmDate())
    # Finish the current test run
    self.portal.restrictedTraverse(second_test_result).stop()
    self.tic()

    # Next day after 13:00, a third run starts for the same revision.
    self.pinDateTime(DateTime('2017/05/02 14:00:00 UTC'))
    third_test_result, got_revision = createResult()
    self.assertTrue(third_test_result.startswith('test_result_module/'))
    self.assertNotEqual(third_test_result, second_test_result)
    self.assertEqual(DateTime('2017/05/03 13:00:00 UTC'), test_suite.getAlarmDate())
    self.tic()

  def test_21_TestSuite_Periodicity_disabled(self):
    self.addCleanup(self.unpinDateTime)
    test_suite = self.test_suite_module.newContent(
      portal_type='Test Suite',
      title='Periodicity Disabled Test Suite',
      int_index=1,
      # periodicity disabled
      enabled=False,
      # periodicity configuration
      periodicity_day_frequency=1,
      periodicity_hour=(13,),
      periodicity_minute=(0,),
    )
    test_suite.validate()
    today = DateTime('2017/05/01')
    self.tic()

    self.pinDateTime(today)
    self.assertEqual(None, test_suite.getAlarmDate())
    self._createTestResult(test_title='Periodicity Disabled Test Suite')
    self.assertEqual(None, test_suite.getAlarmDate())
    self.tic()

    self._createTestResult(test_title='Periodicity Disabled Test Suite')
    self.assertEqual(None, test_suite.getAlarmDate())