Commit acf40150 authored by Roque, committed by Klaus Wölfel

scalability: update run scalability test script

- the commands that run the tests are now launched using threads (see the sketch below)
- a dedicated thread periodically asks the instance for metrics
- refactoring for more generic code
- refactored log handling
- removed old code related to counting created documents and creating users
- new script parameters
- general refactoring and cleanup
parent f83a280b
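For orientation, the launch pattern this commit introduces looks roughly like the sketch below. It is a minimal, self-contained illustration, not the erp5.util API: CommandThread and MetricPoller are hypothetical stand-ins for the TestThread and TestMetricThread classes imported from erp5.util.benchmark.thread in the diff, and the metric fetch is reduced to a pluggable callable rather than an HTTP request to the instance.

import subprocess
import threading
import time

def log(message):
  print(message)

class CommandThread(threading.Thread):
  # Hypothetical stand-in for TestThread: run one tester command in its
  # own thread so the launcher is not blocked.
  def __init__(self, command):
    threading.Thread.__init__(self)
    self.command = command

  def run(self):
    log("launching: %s" % " ".join(self.command))
    subprocess.call(self.command)

class MetricPoller(threading.Thread):
  # Hypothetical stand-in for TestMetricThread: collect one metric sample
  # every `interval` seconds until the stop event is set.
  def __init__(self, fetch_metric, stop_event, interval):
    threading.Thread.__init__(self)
    self.fetch_metric = fetch_metric  # callable returning one sample
    self.stop_event = stop_event
    self.interval = interval
    self.metric_list = []

  def run(self):
    while not self.stop_event.is_set():
      try:
        self.metric_list.append(self.fetch_metric())
      except Exception as e:
        log("metric fetch failed: %s" % e)
      self.stop_event.wait(self.interval)

  def getMetricList(self):
    return self.metric_list

if __name__ == '__main__':
  stop_event = threading.Event()
  poller = MetricPoller(time.time, stop_event, interval=1)
  poller.start()
  CommandThread(["echo", "tester command"]).start()
  time.sleep(3)  # stands in for test_duration in the real script
  stop_event.set()
  poller.join()
  log("collected %d metric samples" % len(poller.getMetricList()))

The real script follows the same shape: start the metric thread, start one thread per tester command, sleep for the test duration, then set the stop event and collect the metric list.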
@@ -6,7 +6,6 @@ import shutil
 import time
 import sys
 import multiprocessing
-import subprocess
 import signal
 import errno
 import json
@@ -16,14 +15,23 @@ import glob
 import urlparse
 import httplib
 import base64
+import threading
 from erp5.util.benchmark.argument import ArgumentType
 from erp5.util.benchmark.performance_tester import PerformanceTester
+from erp5.util.benchmark.thread import TestThread, TestMetricThread
 from erp5.util import taskdistribution
 from erp5.util.testnode import Utils
+from erp5.util.testnode.ProcessManager import SubprocessError, ProcessManager, CancellationError
+import datetime
 
 MAX_INSTALLATION_TIME = 60*50
 MAX_TESTING_TIME = 60
 MAX_GETTING_CONNECTION_TIME = 60*5
+TEST_METRIC_TIME_INTERVAL = 60*3
+SCALABILITY_LOG_FILENAME = "runScalabilityTestSuite"
+LOG_FILE_PREFIX = "scalability-test"
+MAX_ERRORS = 2
 class DummyLogger(object):
   def __init__(self, func):
@@ -31,17 +39,18 @@ class DummyLogger(object):
                  'critical', 'fatal'):
       setattr(self, name, func)
-def getConnection(erp5_url, log):
+def getConnection(instance_url, log):
   """
-  Return a connection with the erp5 instance.
+  Return a connection with the instance.
   """
   start_time = time.time()
   count = 0
   while MAX_GETTING_CONNECTION_TIME > time.time()-start_time:
     try:
       count = count + 1
-      parsed = urlparse.urlparse(erp5_url)
+      parsed = urlparse.urlparse(instance_url)
       host = "%s:%s" % (parsed.hostname, str(parsed.port))
+      if parsed.port is None: host = parsed.hostname
       if parsed.scheme == 'https':
         return httplib.HTTPSConnection(host)
       elif parsed.scheme == 'http':
@@ -49,16 +58,18 @@ def getConnection(erp5_url, log):
       else:
         raise ValueError("Protocol not implemented")
     except:
-      log("Can't get connection to %s, we will retry." %erp5_url)
+      log("Can't get connection to %s, we will retry." %instance_url)
       time.sleep(10)
   raise ValueError("Cannot get new connection after %d try (for %s s)" %(count, str(time.time()-start_time)))
 
-def waitFor0PendingActivities(erp5_url, log):
+# TODO: this will be refactored soon
+def waitFor0PendingActivities(instance_url, log):
   """
-  Waiting while there are no pending activities on the erp5 instance.
+  Waiting while there are no pending activities on the instance.
   """
+  log("waiting activities for: " + str(instance_url))
   start_time = time.time()
-  parsed = urlparse.urlparse(erp5_url)
+  parsed = urlparse.urlparse(instance_url)
   user = parsed.username;
   password = parsed.password;
   header_dict = {'Authorization': 'Basic %s' % \
@@ -67,7 +78,7 @@ def waitFor0PendingActivities(erp5_url, log):
   count = 0
   ok = False
   while MAX_INSTALLATION_TIME > time.time()-start_time and not ok:
-    zope_connection = getConnection(erp5_url, log)
+    zope_connection = getConnection(instance_url, log)
     try:
       count = count + 1
       zope_connection.request(
@@ -87,54 +98,14 @@ def waitFor0PendingActivities(erp5_url, log):
         log("There is %d pending activities" %len(message_list))
         time.sleep(5)
-    except:
+    except Exception as e:
       time.sleep(5)
+      log("exception: " + str(e))
       log("Getting activities failed, retry.")
   if not ok:
     raise ValueError("Cannot waitFor0PendingActivities after %d try (for %s s)" %(count, str(time.time()-start_time)))
-def getCreatedDocumentNumberFromERP5(erp5_url, log):
-  """
-  Get the number of created documents from erp5 instance.
-  """
-  log("count docs number from ERP5 instance")
-  count_retry = 0
-  parsed = urlparse.urlparse(erp5_url)
-  user = 'zope'
-  password = 'insecure'
-  header_dict = {'Authorization': 'Basic %s' % \
-                 base64.encodestring('%s:%s' % (user, password)).strip()}
-  zope_connection = getConnection(erp5_url, log)
-  while count_retry < 100 :
-    try:
-      zope_connection.request(
-        'GET', '/erp5/count_docs_scalability',
-        headers=header_dict
-      )
-      result = zope_connection.getresponse()
-      return int(result.read())
-    except:
-      log("retry..")
-      count_retry += 1
-      time.sleep(15)
-  raise ValueError("Impossible to get number of docs from ERP5")
-
-# XXX: This import is required, just to populate sys.modules['test_suite'].
-# Even if it's not used in this file. Yuck.
-import product.ERP5Type.tests.ERP5TypeTestSuite
-
-from subprocess import call
-
-LOG_FILE_PREFIX = "performance_tester_erp5"
-# Duration of a test case
-TEST_CASE_DURATION = 60
-# Maximum limit of documents to create during a test case
-MAX_DOCUMENTS = 100000
 class ScalabilityTest(object):
   def __init__(self, data, test_result):
     self.__dict__ = {}
@@ -144,52 +115,45 @@ class ScalabilityTest(object):
 def doNothing(**kwargs):
   pass
-def makeSuite(test_suite=None, log=doNothing, **kwargs):
-  # BBB tests (plural form) is only checked for backward compatibility
-  for k in sys.modules.keys():
-    if k in ('tests', 'test',) or k.startswith('tests.') or k.startswith('test.'):
-      del sys.modules[k]
-  singular_succeed = True
-  while True:
-    module_name, class_name = ('%s.%s' % (singular_succeed and 'test' or 'tests',
-                                          test_suite)).rsplit('.', 1)
-    try:
-      suite_class = getattr(__import__(module_name, None, None, [class_name]),
-                            class_name)
-    except (AttributeError, ImportError):
-      if not singular_succeed:
-        raise
-      singular_succeed = False
-    else:
-      break
-  suite = suite_class(max_instance_count=1, **kwargs)
+def makeSuite(test_suite=None, location=None, log=doNothing, **kwargs):
+  import imp
+  try:
+    module = imp.load_source('scalability_test', location + '__init__.py')
+    suite_class = getattr(module, test_suite)
+    suite = suite_class(**kwargs)
+  except Exception as e:
+    log("[ERROR] While making suite: " + str(e))
+    raise
   return suite
+def createLogger(log_path):
+  log_path = os.path.join(log_path, SCALABILITY_LOG_FILENAME + ".log")
+  logger_format = '%(asctime)s %(name)-13s: %(levelname)-8s %(message)s'
+  formatter = logging.Formatter(logger_format)
+  logging.basicConfig(level=logging.INFO,
+                      format=logger_format)
+  logger = logging.getLogger(SCALABILITY_LOG_FILENAME)
+  file_handler = logging.handlers.RotatingFileHandler(
+    filename=log_path,
+    maxBytes=20000000, backupCount=4)
+  file_handler.setFormatter(formatter)
+  logger.addHandler(file_handler)
+  return logger
 class ScalabilityLauncher(object):
   def __init__(self):
-    # Parse arguments
     self.__argumentNamespace = self._parseArguments(argparse.ArgumentParser(
-        description='Run ERP5 benchmarking scalability suites.'))
-    # Create Logger
-    log_path = os.path.join(self.__argumentNamespace.log_path,
-                            "runScalabilityTestSuite.log")
-    logger_format = '%(asctime)s %(name)-13s: %(levelname)-8s %(message)s'
-    formatter = logging.Formatter(logger_format)
-    logging.basicConfig(level=logging.INFO,
-                        format=logger_format)
-    logger = logging.getLogger('runScalabilityTestSuite')
-    logger.addHandler(logging.NullHandler())
-    file_handler = logging.handlers.RotatingFileHandler(
-      filename=log_path,
-      maxBytes=20000000, backupCount=4)
-    file_handler.setFormatter(formatter)
-    logger.addHandler(file_handler)
+        description='Run benchmarking scalability suites.'))
+    logger = createLogger(self.__argumentNamespace.log_path)
     self.log = logger.info
-    # Proxy to with erp5 master test_result
+    self.logger = logger
+    self.users_file_original_content = []
+    # Proxy to with master test_result
+    portal_url = self.__argumentNamespace.test_suite_master_url
+    distributor = taskdistribution.TaskDistributor(portal_url, logger=DummyLogger(self.log))
+    self.log(self.__argumentNamespace.test_suite_master_url)
     self.test_result = taskdistribution.TestResultProxy(
-        self.__argumentNamespace.test_suite_master_url,
+        distributor,
         1.0, DummyLogger(self.log),
         self.__argumentNamespace.test_result_path,
         self.__argumentNamespace.node_title,
@@ -198,18 +162,26 @@ class ScalabilityLauncher(object):
   @staticmethod
   def _addParserArguments(parser):
     # Mandatory arguments
-    parser.add_argument('--erp5-url',
-                        metavar='ERP5_URL',
-                        help='Main url of ERP5 instance to test')
+    parser.add_argument('--instance-url',
+                        metavar='INSTANCE_URL',
+                        help='Main url of instance to test')
+    parser.add_argument('--bootstrap-password',
+                        metavar='BOOTSRAP_PASSWORD',
+                        help='Bootstrap password of instance objects')
     parser.add_argument('--test-result-path',
-                        metavar='ERP5_TEST_RESULT_PATH',
-                        help='ERP5 relative path of the test result')
+                        metavar='TEST_RESULT_PATH',
+                        help='Relative path of the test result')
     parser.add_argument('--revision',
                         metavar='REVISION',
                         help='Revision of the test_suite')
+    parser.add_argument('--current-test-data',
+                        metavar='CURRENT_TEST_DATA',
+                        help='Data of the current test')
     parser.add_argument('--test-suite',
                         metavar='TEST_SUITE',
                         help='Name of the test suite')
@@ -221,20 +193,24 @@ class ScalabilityLauncher(object):
     parser.add_argument('--test-suite-master-url',
                         metavar='TEST_SUITE_MASTER_URL',
-                        help='Url to connect to the ERP5 Master testsuite taskditributor')
+                        help='Url to connect to the Master testsuite taskditributor')
     parser.add_argument('--log-path',
                         metavar='LOG_PATH',
                         help='Log Path')
-    parser.add_argument('--erp5-location',
-                        metavar='ERP5_LOCATION',
-                        help='Path to erp5 depository')
+    parser.add_argument('--repo-location',
+                        metavar='REPO_LOCATION',
+                        help='Path to repository')
     parser.add_argument('--runner-path',
-                        metavar='Runner_PATH',
+                        metavar='RUNNER_PATH',
                         help='runner Path')
+    parser.add_argument('--metric-url',
+                        metavar='METRIC_URL',
+                        help='Url to connect to instance metric generator')
 
   @staticmethod
   def _checkParsedArguments(namespace):
     return namespace
@@ -246,31 +222,54 @@ class ScalabilityLauncher(object):
     ScalabilityLauncher._checkParsedArguments(namespace)
     return namespace
 
-  def moveLogs(self, folder_name):
-    # Get file paths
+  def moveLogs(self, folder_name, current_test):
     file_to_move_list = glob.glob(os.path.join(self.__argumentNamespace.log_path,
                                   "%s*.csv" %LOG_FILE_PREFIX))
     file_to_move_list += glob.glob(os.path.join(self.__argumentNamespace.log_path,
                                   "%s*.log" %LOG_FILE_PREFIX))
-    # Create folder
-    new_directory_path = os.path.join(self.__argumentNamespace.log_path,
-                                      folder_name)
-    if not os.path.exists(new_directory_path): os.makedirs(new_directory_path)
-    # Move files
+    root_test_dir = os.path.join(self.__argumentNamespace.log_path,
+                                 "scalability-test-%s/" % current_test.relative_path.split("/")[1])
+    if not os.path.exists(root_test_dir):
+      os.makedirs(root_test_dir)
+    new_directory_path = os.path.join(root_test_dir,
+                                      folder_name)
+    if not os.path.exists(new_directory_path):
+      os.makedirs(new_directory_path)
     for file_to_move in file_to_move_list:
       shutil.move(file_to_move, new_directory_path)
 
   def getRunningTest(self):
     """
-    Return a ScalabilityTest with current running test case informations,
-    or None if no test_case ready
+    Return a ScalabilityTest with current running test case informations
     """
-    data = self.test_result.getRunningTestCase()
-    if not data:
-      return None
+    data_array = self.__argumentNamespace.current_test_data.split(',')
+    data = json.dumps({"count": data_array[0], "title": data_array[1], "relative_path": data_array[2]})
     decoded_data = Utils.deunicodeData(json.loads(data))
     return ScalabilityTest(decoded_data, self.test_result)
+  def clearUsersFile(self, user_file_path):
+    self.log("Clearing users file: %s" % user_file_path)
+    os.remove(user_file_path)
+    users_file = open(user_file_path, "w")
+    for line in self.users_file_original_content:
+      users_file.write(line)
+    users_file.close()
+
+  def updateUsersFile(self, user_quantity, password, user_file_path):
+    self.log("Updating users file: %s" % user_file_path)
+    users_file = open(user_file_path, "r")
+    file_content = users_file.readlines()
+    self.users_file_original_content = file_content
+    new_file_content = []
+    for line in file_content:
+      new_file_content.append(line.replace('<password>', password).replace('<user_quantity>', str(user_quantity)))
+    users_file.close()
+    os.remove(user_file_path)
+    users_file = open(user_file_path, "w")
+    for line in new_file_content:
+      users_file.write(line)
+    users_file.close()
   def run(self):
     self.log("Scalability Launcher started, with:")
     self.log("Test suite master url: %s" %self.__argumentNamespace.test_suite_master_url)
@@ -278,140 +277,136 @@
     self.log("Test result path: %s" %self.__argumentNamespace.test_result_path)
     self.log("Revision: %s" %self.__argumentNamespace.revision)
     self.log("Node title: %s" %self.__argumentNamespace.node_title)
-    self.log("ERP5 url: %s" %self.__argumentNamespace.erp5_url)
+    self.log("Instance url: %s" %self.__argumentNamespace.instance_url)
 
     error_message_set, exit_status = set(), 0
+    process_manager = ProcessManager(self.log)
 
     # Get suite informations
-    suite = makeSuite(self.__argumentNamespace.test_suite, self.log)
+    suite = makeSuite(self.__argumentNamespace.test_suite, self.__argumentNamespace.repo_location, self.log)
     test_suite_list = suite.getTestList()
 
-    # Main loop
-    while True:
-      # Loop for getting new test case
-      current_test = self.getRunningTest()
-      while not current_test:
-        time.sleep(15)
-        current_test = self.getRunningTest()
-      self.log("Test Case %s going to be run." %(current_test.title))
-      # Prepare configuration
-      current_test_number = int(current_test.title)
-      test_duration = suite.getTestDuration(current_test_number)
-      benchmarks_path = os.path.join(self.__argumentNamespace.erp5_location, suite.getTestPath())
-      user_file_full_path = os.path.join(self.__argumentNamespace.erp5_location, suite.getUsersFilePath())
-      user_file_path = os.path.split(user_file_full_path)[0]
-      user_file = os.path.split(user_file_full_path)[1]
-      tester_path = self.__argumentNamespace.runner_path
-      user_number = suite.getUserNumber(current_test_number)
-      repetition = suite.getTestRepetition(current_test_number)
-      self.log("user_number: %s" %str(user_number))
-      self.log("test_duration: %s seconds" %str(test_duration))
-      # Store the number of documents generated for each iteration
-      document_number = []
-      # Repeat the same test several times to accurate test result
-      for i in range(1, repetition+1):
-        self.log("Repetition: %d/%d" %(i, repetition))
-        # Get the number of documents present before running the test.
-        waitFor0PendingActivities(self.__argumentNamespace.erp5_url, self.log)
-        previous_document_number = getCreatedDocumentNumberFromERP5(self.__argumentNamespace.erp5_url, self.log)
-        self.log("previous_document_number: %d" %previous_document_number)
-        # Generate commands to run
-        command_list = []
-        user_index = 0
-        for test_suite in test_suite_list:
-          command_list.append([tester_path,
-                               self.__argumentNamespace.erp5_url,
-                               str(user_number/len(test_suite_list)),
-                               test_suite,
-                               '--benchmark-path-list', benchmarks_path,
-                               '--users-file-path', user_file_path,
-                               '--users-file', user_file,
-                               '--filename-prefix', "%s_%s_repetition%d" %(LOG_FILE_PREFIX, current_test.title, i),
-                               '--report-directory', self.__argumentNamespace.log_path,
-                               '--repeat', "%s" %str(MAX_DOCUMENTS),
-                               '--max-errors', str(1000000),
-                               '--user-index', str(user_index),
-                             ])
-          user_index += user_number/len(test_suite_list)
-        # Launch commands
-        tester_process_list = []
-        for command in command_list:
-          self.log("command: %s" %str(command))
-          tester_process_list.append(subprocess.Popen(command))
-        # Sleep
-        time.sleep(test_duration)
-        # Stop
-        for tester_process in tester_process_list:
-          tester_process.send_signal(signal.SIGINT)
-        self.log("End signal sent to the tester.")
-        # Count created documents
-        # Wait for 0 pending activities before counting
-        waitFor0PendingActivities(self.__argumentNamespace.erp5_url, self.log)
-        current_document_number = getCreatedDocumentNumberFromERP5(self.__argumentNamespace.erp5_url, self.log)
-        created_document_number = current_document_number - previous_document_number
-        self.log("previous_document_number: %d" %previous_document_number)
-        self.log("current_document_number: %d" %current_document_number)
-        self.log("created_document_number: %d" %created_document_number)
-        document_number.append(created_document_number)
-        # Move csv/logs
-        self.moveLogs(current_test.title)
-      self.log("Test Case %s is finish" %(current_test.title))
-      # Get the maximum as choice
-      maximum = 0
-      for i in range(0,len(document_number)):
-        if document_number[i] > maximum:
-          maximum = document_number[i]
-      # Send results to ERP5 master
-      retry_time = 2.0
-      proxy = taskdistribution.ServerProxy(
-          self.__argumentNamespace.test_suite_master_url,
-          allow_none=True
-      ).portal_task_distribution
-      test_result_line_test = taskdistribution.TestResultLineProxy(
-          proxy, retry_time, self.log,
-          current_test.relative_path,
-          current_test.title
-      )
-      results = "created docs=%d\n"\
-                "duration=%d\n"\
-                "number of tests=%d\n"\
-                "number of users=%d\n"\
-                "tests=%s\n"\
-                %(
-                  maximum,
-                  test_duration,
-                  len(test_suite_list),
-                  (user_number/len(test_suite_list))*len(test_suite_list),
-                  '_'.join(test_suite_list)
-                )
-      self.log("Results: %s" %results)
-      test_result_line_test.stop(stdout=results,
-                                 test_count=len(test_suite_list),
-                                 duration=test_duration)
-      self.log("Test Case Stopped")
+    try:
+      current_test = self.getRunningTest()
+    except Exception as e:
+      error_message = "ERROR while getting current running test: " + str(e)
+      self.log(error_message)
+      return error_message, 1
+    self.log("Test Case %s going to be run." %(current_test.title))
+    # Prepare configuration
+    current_test_number = int(current_test.title)
+    test_duration = suite.getTestDuration(current_test_number)
+    benchmarks_path = os.path.join(self.__argumentNamespace.repo_location, suite.getTestPath())
+    user_file_full_path = os.path.join(self.__argumentNamespace.repo_location, suite.getUsersFilePath())
+    user_file_path = os.path.split(user_file_full_path)[0]
+    user_file = os.path.split(user_file_full_path)[1]
+    tester_path = self.__argumentNamespace.runner_path
+    user_quantity = suite.getUserQuantity(current_test_number)
+    repetition = suite.getTestRepetition(current_test_number)
+    instance_url = self.__argumentNamespace.instance_url
+    metric_url = self.__argumentNamespace.metric_url
+    # To take metrics
+    metric_thread_stop_event = threading.Event()
+    metric_thread = TestMetricThread(metric_url, self.log, metric_thread_stop_event, interval=TEST_METRIC_TIME_INTERVAL)
+    metric_thread.start()
+    bootstrap_password = self.__argumentNamespace.bootstrap_password
+    try:
+      self.updateUsersFile(user_quantity, bootstrap_password, user_file_full_path + ".py")
+    except Exception as e:
+      self.log("ERROR while updating file: " + str(e))
+    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")
+    log_dir = "test-%s_%s" % (current_test.title, now)
+    # the repetition of tests will be refactored soon
+    for i in range(1, repetition+1):
+      self.log("Repetition: %d/%d" %(i, repetition))
+      waitFor0PendingActivities(instance_url, self.log)
+      # Generate commands to run
+      command_list = []
+      user_index = 0
+      for test_suite in test_suite_list:
+        command_list.append([tester_path,
+                             instance_url,
+                             str(user_quantity/len(test_suite_list)),
+                             test_suite,
+                             '--benchmark-path-list', benchmarks_path,
+                             '--users-file-path', user_file_path,
+                             '--users-file', user_file,
+                             '--filename-prefix', "%s_%s_repetition%d_suite_%s" %(LOG_FILE_PREFIX, current_test.title, i, test_suite),
+                             '--report-directory', self.__argumentNamespace.log_path,
+                             '--repeat', "%d"%1,
+                             '--max-errors', str(MAX_ERRORS),
+                             '--user-index', str(user_index),
+                           ])
+        user_index += user_quantity/len(test_suite_list)
+      # Launch commands
+      for command in command_list:
+        test_thread = TestThread(process_manager, command, self.log)
+        test_thread.start()
+      # Sleep
+      self.log("Going to sleep for %s seconds (Test duration)." % str(test_duration))
+      time.sleep(test_duration)
+      waitFor0PendingActivities(instance_url, self.log)
+      self.moveLogs(log_dir, current_test)
+    self.log("Test Case %s has finished" %(current_test.title))
+    metric_thread_stop_event.set()
+    time.sleep(15) # wait thread to stop
+    metric_list = metric_thread.getMetricList()
+    test_output = suite.getScalabilityTestOutput(metric_list)
+    if not test_output:
+      self.log("metric list and test output empty. getting metric thread error message.")
+      test_output = metric_thread.getErrorMessage()
+    self.log("test_output: " + str(test_output))
+    # Send results to master
+    retry_time = 2.0
+    proxy = taskdistribution.ServerProxy(
+        self.__argumentNamespace.test_suite_master_url,
+        allow_none=True
+    ).portal_task_distribution
+    test_result_line_test = taskdistribution.TestResultLineProxy(
+        proxy, retry_time, self.logger,
+        current_test.relative_path,
+        current_test.title
+    )
+    test_details = "number of users=%d\n"\
+                   "number of repetitions=%d\n"\
+                   "number of tests=%d\n"\
+                   "tests=%s\n"\
+                   "duration=%d\n"\
+                   %(
+                     (user_quantity/len(test_suite_list))*len(test_suite_list),
+                     repetition,
+                     len(test_suite_list),
+                     '_'.join(test_suite_list),
+                     test_duration
+                   )
+    self.log("Test details: %s" % test_details)
+    self.log("Test output: %s" % test_output)
+    self.log("Stopping the test case...")
+    try:
+      test_result_line_test.stop(stdout=test_output,
+                                 command=test_details,
+                                 test_count=len(test_suite_list),
+                                 duration=test_duration)
+    except Exception as e:
+      self.log("ERROR stopping test line")
+      self.log(e)
+      raise e
+    self.log("Test Case Stopped")
+    self.clearUsersFile(user_file_full_path + ".py")
+    #
     error_message_set = None
     exit_status = 0
+    self.log("Scalability Launcher finished.")
     return error_message_set, exit_status
 def main():
   error_message_set, exit_status = ScalabilityLauncher().run()
-  for error_message in error_message_set:
-    print >>sys.stderr, "ERROR: %s" % error_message
   sys.exit(exit_status)
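A note on the users-file handling added above: updateUsersFile saves the file's original content, rewrites the file with the <password> and <user_quantity> placeholders substituted, and clearUsersFile restores the saved content once the test case is done. Below is a minimal sketch of that substitution; the template text is hypothetical, only the placeholder markers come from the actual script.

# Sketch of the substitution performed by updateUsersFile.
def fill_users_template(content, password, user_quantity):
  # Same replacements as updateUsersFile in the launcher.
  return (content.replace('<password>', password)
                 .replace('<user_quantity>', str(user_quantity)))

# Hypothetical two-line users-file template.
template = "user_password = '<password>'\nuser_quantity = <user_quantity>\n"
print(fill_users_template(template, 'secret', 10))
# -> user_password = 'secret'
#    user_quantity = 10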