Commit 86814cc2 authored by Xiaowu Zhang

stack/erp5: generate different runTestSuite according to parameter

if saucelabs_dict is present, a runTestSuite that delegates test execution to SauceLabs is generated,
otherwise the default one is created
parent 90b10c1b
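For illustration only, a saucelabs-dict request parameter could look like the sketch below. The key names are inferred from what the generated runTestSuite script reads out of its configuration; the values are placeholders, not documented defaults.

saucelabs_dict_example = {
  'test-suite': 'MyZeleniumTestSuite',         # hypothetical test suite name
  'appium-server-auth': 'user:token',          # SauceLabs credentials
  'frontend-url': 'https://erp5.example.com',  # public URL of the ERP5 frontend
  'target': 'Windows 10',                      # or 'iOS' / 'Android' for mobile targets
  'target-version': '58.0',                    # browser or platform version, depending on target
  'target-browser': 'firefox',
  'target-device': '',                         # only used for iOS / Android targets
  'run_only': '',                              # optional portal_tests sub-folder to restrict the run
}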
......@@ -158,6 +158,13 @@ rendered = ${buildout:directory}/instance-caucase.cfg
<= download-base-neo
url = ${:_profile_base_location_}/${:filename}
[template-run-zelenium]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/template/run-zelenium-test.py.in
md5sum = 8c42f98228f0ae9dc16ee2dab739b818
output = ${buildout:directory}/run-zelenium-test.py.in
mode = 755
[check-recipe]
recipe = plone.recipe.command
stop-on-error = true
......@@ -285,6 +292,7 @@ context =
  key root_common root-common:target
  key site_zcml site-zcml:target
  key sixtunnel_location 6tunnel:location
  key template_run_zelenium template-run-zelenium:output
  key stunnel_location stunnel:location
  key template_apache_conf template-apache-backend-conf:target
  key template_balancer template-balancer:target
......@@ -549,6 +557,7 @@ eggs = ${neoppod:eggs}
  # Needed for parsing .po files from our Localizer subset
  polib
  selenium
  # Needed for Google OAuth
  google-api-python-client
......@@ -797,3 +806,6 @@ unidiff = 0.5.5
# deepdiff = 3.3.0
jsonpickle = 0.9.6
decorator = 4.3.0
selenium = 3.8.0
......@@ -66,7 +66,7 @@ md5sum = 0969fbb25b05c02ef3c2d437b2f4e1a0
[template]
filename = instance.cfg.in
md5sum = 330b72acf0752cce5da70b1fcdfac487
md5sum = 50db2da70232c91642c93dc06c982b3c
[monitor-template-dummy]
filename = dummy.cfg
......@@ -74,7 +74,7 @@ md5sum = d41d8cd98f00b204e9800998ecf8427e
[template-erp5]
filename = instance-erp5.cfg.in
md5sum = 9f8851af5faed9d1e346d7dbe0e6db5d
md5sum = 83e0f627633ec3e394dc27d06c627b3b
[template-zeo]
filename = instance-zeo.cfg.in
......@@ -82,7 +82,7 @@ md5sum = 3e650915959ff31c9c13c84069bbcd35
[template-zope]
filename = instance-zope.cfg.in
md5sum = a4dda17c1671ead45ac92b7decc32228
md5sum = 40574986b5a75e5c888737797ee6973e
[template-balancer]
filename = instance-balancer.cfg.in
......
......@@ -152,6 +152,7 @@ config-caucase-url = ${request-caucase:connection-http-url}
config-cloudooo-url = {{ dumps(slapparameter_dict.get('cloudooo-url', default_cloudooo_url)) }}
config-deadlock-debugger-password = ${publish-early:deadlock-debugger-password}
config-developer-list = {{ dumps(slapparameter_dict.get('developer-list', [inituser_login])) }}
config-saucelabs-dict = {{ dumps(slapparameter_dict.get('saucelabs-dict', {})) }}
config-hosts-dict = {{ dumps(slapparameter_dict.get('hosts-dict', {})) }}
config-hostalias-dict = {{ dumps(slapparameter_dict.get('hostalias-dict', {})) }}
config-id-store-interval = {{ dumps(slapparameter_dict.get('id-store-interval')) }}
......
......@@ -4,6 +4,7 @@
{% set zodb_dict = slapparameter_dict['zodb-dict'] -%}
{% set instance_index_list = range(slapparameter_dict['instance-count']) -%}
{% set node_id_base = slapparameter_dict['name'] -%}
{% set saucelabs_dict = slapparameter_dict.get('saucelabs-dict', None) -%}
{% set node_id_index_format = '-%%0%ii' % (len(str(instance_index_list[-1])), ) -%}
{% set part_list = [] -%}
{% set publish_list = [] -%}
......@@ -67,6 +68,23 @@ ca-certs = ${directory:test-ca-certs}
ca-newcerts = ${directory:test-ca-newcerts}
ca-crl = ${directory:test-ca-crl}
{% if saucelabs_dict -%}
[test-zelenium-runner-parameter]
configuration = {{ dumps(saucelabs_dict) }}
user = {{ dumps(slapparameter_dict['inituser-login']) }}
password = {{ dumps(slapparameter_dict['inituser-password']) }}
[{{ section('test-zelenium-runner') }}]
recipe = slapos.recipe.template:jinja2
template = {{ parameter_dict['run-zelenium-template'] }}
rendered = ${directory:bin}/runTestSuite
extensions = jinja2.ext.do
context =
  import json_module json
  key configuration test-zelenium-runner-parameter:configuration
  key user test-zelenium-runner-parameter:user
  key password test-zelenium-runner-parameter:password
{% else -%}
{% if slapparameter_dict['mysql-test-url-list'] -%}
[{{ section('run-unit-test-userhosts-wrapper') }}]
<= userhosts-wrapper-base
......@@ -125,6 +143,7 @@ command-name = runTestSuite
command-line-extra =
  --db_list '{{ ','.join(connection_string_list) }}'
{%- endif %}
{%- endif %}
[directory]
recipe = slapos.cookbook:mkdirectory
......
......@@ -84,6 +84,7 @@ extra-context =
[dynamic-template-zope-parameters]
bin-directory = {{ bin_directory }}
zope-conf-template = {{ template_zope_conf }}
run-zelenium-template = {{ template_run_zelenium }}
buildout-bin-directory = {{ buildout_bin_directory }}
6tunnel = {{ sixtunnel_location }}
coreutils = {{ coreutils_location }}
......
#!${buildout:directory}/bin/${eggs:interpreter}
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
import argparse, os, sys, traceback
from erp5.util import taskdistribution
from lxml import etree
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import json
import subprocess
from urllib import urlopen


def main():
  parser = argparse.ArgumentParser(description='Run a test suite.')
  parser.add_argument('--test_suite', help='The test suite name')
  parser.add_argument('--test_suite_title', help='The test suite title')
  parser.add_argument('--test_node_title', help='The test node title')
  parser.add_argument('--project_title', help='The project title')
  parser.add_argument('--revision', help='The revision to test',
                      default='dummy_revision')
  parser.add_argument('--node_quantity', help='ignored', type=int)
  parser.add_argument('--master_url',
                      help='The URL of the master controlling many suites')
  parser.add_argument('--frontend_url',
                      help='The URL of the frontend of the test suite')
  parser.add_argument('--target',
                      help='Target OS to run tests on',
                      type=str)
  parser.add_argument('--target_version',
                      help='Target OS version to use',
                      type=str)
  parser.add_argument('--target_browser',
                      help='The desired browser of the target OS to be used. '
                           'Example: Firefox if target is Android.',
                      type=str)
  parser.add_argument('--target_device',
                      help='The desired device running the target OS. '
                           'Example: iPad Simulator, if target is iOS.',
                      type=str)
  parser.add_argument('--appium_server_auth',
                      help='Combination of user and token to access the '
                           'SauceLabs service (i.e. user:token)',
                      type=str)
  args = parser.parse_args()

  # Fill in options from the saucelabs-dict instance parameter rendered into
  # this template
  parsed_parameters = json.loads('{{ json_module.dumps(configuration) }}')
  if not getattr(args, 'target', None):
    args.target = parsed_parameters.get('target', 'firefox')
  if not getattr(args, 'test_suite', None):
    args.test_suite = parsed_parameters.get('test-suite')
  if not getattr(args, 'target_version', None):
    args.target_version = parsed_parameters.get('target-version')
  if not getattr(args, 'appium_server_auth', None):
    args.appium_server_auth = parsed_parameters.get('appium-server-auth')
  if not getattr(args, 'target_browser', None):
    args.target_browser = parsed_parameters.get('target-browser')
  if not getattr(args, 'target_device', None):
    args.target_device = parsed_parameters.get('target-device')
  args.frontend_url = parsed_parameters.get('frontend-url')
  args.run_only = parsed_parameters.get('run_only', None)

  is_browser_running = False
  # Defined before the try block below so the error handler can reference it
  # even if the browser never starts
  agent = ''
  test_line_dict = {}
  test_suite_title = args.test_suite_title or args.test_suite
  test_suite = args.test_suite
  revision = args.revision

  # curl https://saucelabs.com/rest/v1/info/platforms/all
  # https://wiki.saucelabs.com/display/DOCS/Platform+Configurator#/
  if args.target in ['iOS', 'Android']:
    # Parameters for mobile emulators have different names than parameters
    # for desktop OSes
    capabilities = {
      'platformName': args.target,
      'platformVersion': args.target_version,
      'deviceName': args.target_device,
      'browserName': args.target_browser
    }
  elif 'Windows' in args.target or 'OS X' in args.target:
    capabilities = {
      'browserName': args.target_browser,
      'platform': args.target,
      'version': args.target_version
    }

  if not args.appium_server_auth:
    raise RuntimeError('--appium_server_auth is required.')
  if not args.frontend_url:
    raise RuntimeError('--frontend_url is required.')

  appium_url = "http://%s@ondemand.saucelabs.com/wd/hub" % (args.appium_server_auth)

  # Build the URL of the Zelenium test runner on the frontend
  # Do not store any test result in the ZMI
  if args.run_only:
    url = "%s/erp5/portal_tests/%s/core/TestRunner.html" \
          "?test=../test_suite_html" \
          "&auto=on" \
          "&resultsUrl=../getId" \
          "&__ac_name=%s" \
          "&__ac_password=%s" % (args.frontend_url, args.run_only, "{{ user }}", "{{ password }}")
  else:
    url = "%s/erp5/portal_tests/core/TestRunner.html" \
          "?test=../test_suite_html" \
          "&auto=on" \
          "&resultsUrl=../getId" \
          "&__ac_name=%s" \
          "&__ac_password=%s" % (args.frontend_url, "{{ user }}", "{{ password }}")

  # Wait until all activities are finished...
  wait_url = args.frontend_url + '/erp5/Zuite_waitForActivities'
  while 1:
    try:
      response = urlopen(wait_url)
      try:
        if response.code == 500:
          sys.exit(-1)
        if response.code == 200 and response.read() == 'Done.':
          break
      finally:
        response.close()
    except Exception:
      traceback.print_exc()
    time.sleep(10)

  tool = taskdistribution.TaskDistributor(portal_url=args.master_url)

  try:
    browser = webdriver.Remote(appium_url, capabilities)
    is_browser_running = True
    agent = browser.execute_script("return navigator.userAgent")
    print url
    print agent

    start_time = time.time()
    browser.get(url)
    # Wait for Zelenium to be loaded
    WebDriverWait(browser, 10).until(EC.presence_of_element_located((
      By.XPATH, '//iframe[@id="testSuiteFrame"]'
    )))
    # XXX No idea how to wait for the iframe content to be loaded
    time.sleep(5)

    # Count the number of tests to be executed
    test_count = browser.execute_script(
      "return document.getElementById('testSuiteFrame').contentDocument.querySelector('tbody').children.length"
    ) - 1  # First child is the file name

    # Wait for the tests to be executed
    erp5_zelenium_test_timeout = 90
    WebDriverWait(browser, erp5_zelenium_test_timeout * (test_count + 1)).until(EC.presence_of_element_located((
      By.XPATH, '//td[@id="testRuns" and contains(text(), "%i")]' % test_count
    )))
    execution_duration = round(time.time() - start_time, 2)
    if test_count:
      test_execution_duration = execution_duration / test_count
    else:
      test_execution_duration = 0

    html_parser = etree.HTMLParser(recover=True)
    # body = etree.fromstring(browser.page_source.encode('UTF-8'), html_parser)
    # test_count = int(body.xpath('//td[@id="testRuns"]')[0].text)
    # failed_test_count = int(body.xpath('//td[@id="testFailures"]')[0].text)
    # print 'Run %i, failed %i' % (test_count, failed_test_count)

    # https://github.com/appium/appium/issues/5199
    # browser.switch_to.frame(browser.find_element_by_id("testSuiteFrame"))
    # iframe = etree.fromstring(browser.page_source.encode('UTF-8'), html_parser)
    iframe = etree.fromstring(
      browser.execute_script(
        "return document.getElementById('testSuiteFrame').contentDocument.querySelector('html').innerHTML"
      ).encode('UTF-8'),
      html_parser
    )

    browser.quit()
    is_browser_running = False

    tbody = iframe.xpath('.//body/table/tbody')[0]
    for tr in tbody[1:]:
      # First td is the main title
      test_name = tr[0][0].text
      skip_count = success_count = error_count = 0
      if len(tr) == 1:
        # Test was not executed
        tr_count = 1
        test_table = 'Test not executed!'
        test_tbody = 'Test not executed!'
      else:
        test_table = tr[1].xpath('.//table')[0]
        test_tbody = tr[1].xpath('.//tbody')[0]
        tr_count = len(test_tbody)
        for tr in test_tbody:
          # print etree.tostring(tr).split('\n')[0]
          status = tr.attrib.get('class')
          if status is None or 'status_done' in status:
            skip_count += 1
          elif 'status_passed' in status:
            success_count += 1
          elif 'status_failed' in status:
            error_count += 1
      test_line_dict[test_name] = {
        'test_count': tr_count,
        'error_count': error_count,
        'failure_count': tr_count - (skip_count + success_count + error_count),
        'skip_count': skip_count,
        'duration': test_execution_duration,
        'command': url,
        'stdout': agent,
        'stderr': '',
        'html_test_result': etree.tostring(test_table)
      }
  except:
    test_line_dict['UnexpectedException'] = {
      'test_count': 1,
      'error_count': 0,
      'failure_count': 1,
      'skip_count': 0,
      'duration': 1,
      'command': url,
      'stdout': agent,
      'stderr': traceback.format_exc()
    }

  try:
    test_result = tool.createTestResult(revision=revision,
                                        test_name_list=test_line_dict.keys(),
                                        node_title=args.test_node_title,
                                        test_title=test_suite_title,
                                        project_title=args.project_title)
    if test_result is None or not hasattr(args, 'master_url'):
      return
    # Report test results
    while 1:
      test_result_line = test_result.start()
      if not test_result_line:
        print 'No test result anymore.'
        break
      print 'Submitting: "%s"' % test_result_line.name
      # Report status back to the Nexedi ERP5 test result master
      test_result_line.stop(**test_line_dict[test_result_line.name])
  except:
    # Catch any exception here to warn the user instead of failing silently,
    # by generating a fake error result
    print traceback.format_exc()
    result = dict(status_code=-1,
                  command=url,
                  stderr=traceback.format_exc(),
                  stdout='')
    # XXX: inform test node master of error
    raise EnvironmentError(result)
  finally:
    if is_browser_running:
      # If the browser is somehow still running because an exception was
      # raised, make sure we clean it up
      browser.quit()


if __name__ == "__main__":
  main()
\ No newline at end of file
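Assuming the template above is rendered as ${directory:bin}/runTestSuite, a test node would invoke the wrapper roughly as follows. The option names simply mirror the argparse definitions in the script; every value here is a placeholder.

./bin/runTestSuite \
  --master_url https://testnode-master.example.com \
  --test_node_title my-test-node \
  --project_title 'My Project' \
  --test_suite MyZeleniumTestSuite \
  --appium_server_auth user:token \
  --target 'Windows 10' \
  --target_version 58.0 \
  --target_browser firefox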