KVM resiliency test: use test framework.

This avoids having to backport every change in the shared resiliency test framework into the KVM test suite.
parent f5454071
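The point of the refactor is visible in the diff below: the KVM suite now only supplies its data hooks (generateData, pushDataOnMainInstance, checkDataOnCloneInstance) and inherits the rest from the shared ResiliencyTestSuite, so framework-level fixes no longer need to be duplicated here. The base class itself is not part of this diff; the following is only a minimal sketch of the hook contract it is assumed to provide:

# Sketch only: the real ResiliencyTestSuite lives in the sibling
# resiliencytestsuite module and is not shown in this diff; the driver logic
# below is an assumption, kept to illustrate why suites no longer carry their
# own takeover/verification plumbing.
class ResiliencyTestSuiteSketch(object):
  def generateData(self):
    raise NotImplementedError

  def pushDataOnMainInstance(self):
    raise NotImplementedError

  def checkDataOnCloneInstance(self):
    raise NotImplementedError

  def runTestSuite(self):
    # Simplified flow: create data, push it to the main instance, then
    # (after the framework performs the takeover) verify it on the clone.
    self.generateData()
    self.pushDataOnMainInstance()
    return bool(self.checkDataOnCloneInstance())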
@@ -26,58 +26,16 @@
 #
 ##############################################################################
-# XXX: This module should use the resiliencytestsuite helper module to factor all code,
-# like slaprunner test suite does.
-# XXX: takeover module should be in slapos.toolbox, not in slapos.cookbook
-from slapos.recipe.addresiliency.takeover import takeover
-import slapos.slap
+from .resiliencytestsuite import ResiliencyTestSuite

 import logging
-import os
 import random
 import string
-import subprocess
-import sys
 import time
-import traceback
 import urllib

 logger = logging.getLogger('KVMResiliencyTest')

-# Wait for 2 hours before renaming, so that replication of data is done
-# (~1GB of data to backup)
-SLEEP_TIME = 2 * 60 * 60
-
-# In case of unittest testnode (not scalability testnode), everything is local
-UNIT_TEST_SLEEP_TYPE = 900
-
-def fetchMainInstanceIP(current_partition, software_release, instance_name):
-  return current_partition.request(
-      software_release=software_release,
-      software_type='kvm-resilient',
-      partition_reference=instance_name).getConnectionParameter('ipv6')
-
-def setRandomKey(ip):
-  """
-  Set a random key that will be stored inside of the virtual hard drive.
-  """
-  random_key = ''.join(random.SystemRandom().sample(string.ascii_lowercase, 20))
-  for i in range(0, 60):
-    connection = urllib.urlopen('http://%s:10080/set?key=%s' % (ip, random_key))
-    if connection.getcode() is 200:
-      break
-    else:
-      logger.info('Impossible to connect to virtual machine to set key. sleeping...')
-      time.sleep(60)
-    if i is 59:
-      raise Exception('Bad return code when setting key in main instance, after trying for 60 minutes.')
-  return random_key
-
 def fetchKey(ip):
   """
   Fetch the key that had been set on original virtual hard drive.
@@ -98,13 +56,7 @@ def fetchKey(ip):
   return new_key

-def runTestSuite(server_url, key_file, cert_file,
-                 computer_id, partition_id, software,
-                 namebase, kvm_rootinstance_name,
-                 # Number of instances: main instance (exporter) + clones (importer).
-                 total_instance_count="2",
-                 type=None):
+class KVMTestSuite(ResiliencyTestSuite):
   """
   Run KVM Resiliency Test.
   Requires a specific KVM environment (virtual hard drive), see KVM SR for more
@@ -121,27 +73,27 @@ def runTestSuite(server_url, key_file, cert_file,
   With the following python code running at boot in /root/number.py:

   import os
   from flask import Flask, abort, request
   app = Flask(__name__)

   storage = 'storage.txt'

   @app.route("/")
   def greeting_list(): # 'cause there are several greetings, and plural is forbidden.
     return "Hello World"

   @app.route("/get")
   def get():
     return open(storage, 'r').read()

   @app.route("/set")
   def set():
     #if os.path.exists(storage):
     #  abort(503)
     open(storage, 'w').write(request.args['key'])
     return "OK"

   if __name__ == "__main__":
     app.run(host='0.0.0.0', port=80)
@@ -171,112 +123,59 @@ def runTestSuite(server_url, key_file, cert_file,
   update-rc.d io defaults
   """
-  if type == 'UnitTest':
-    global SLEEP_TIME
-    SLEEP_TIME = UNIT_TEST_SLEEP_TYPE
-
-  slap = slapos.slap.slap()
-  slap.initializeConnection(server_url, key_file, cert_file)
-  partition = slap.registerComputerPartition(
-      computer_guid=computer_id,
-      partition_id=partition_id
-  )
-
-  ip = fetchMainInstanceIP(partition, software, kvm_rootinstance_name)
-  logger.info('KVM IP is %s.' % ip)
-
-  # In resilient stack, main instance (example with KVM) is named "kvm0",
-  # clones are named "kvm1", "kvm2", ...
-  clone_count = int(total_instance_count) - 1
-
-  # In case we have only one clone: test the takeover twice
-  # so that we test the reconstruction of a new clone.
-  if clone_count == 1:
-    for i in range(2):
-      ip = testClone(1, ip,
-          server_url, key_file, cert_file, computer_id, partition_id, software, namebase, partition, kvm_rootinstance_name, type)
-
-  # In case we have more than one clone: test each clone one time.
-  else:
-    # So first clone starts from 1.
-    current_clone = 1
-    while current_clone <= clone_count:
-      ip = testClone(current_clone, ip,
-          server_url, key_file, cert_file, computer_id, partition_id, software, namebase, partition, kvm_rootinstance_name, type)
-      current_clone = current_clone + 1
-
-  # All clones have been successfully tested: success.
-  return True
-
-
-def testClone(current_clone, ip,
-    server_url, key_file, cert_file, computer_id, partition_id, software, namebase, partition, kvm_rootinstance_name, test_type):
-  logger.info('Testing kvm%s.' % current_clone)
-
-  key = setRandomKey(ip)
-  logger.info('Key set for test in current KVM: %s.' % key)
-
-  logger.info('Sleeping for %s seconds.' % SLEEP_TIME)
-  time.sleep(SLEEP_TIME)
-
-  # Make the clone instance takeover the main instance
-  logger.info('Replacing main instance by clone instance...')
-  for i in range(0, 10):
-    try:
-      takeover(
-          server_url=server_url,
-          key_file=key_file,
-          cert_file=cert_file,
-          computer_guid=computer_id,
-          partition_id=partition_id,
-          software_release=software,
-          namebase=namebase,
-          winner_instance_suffix=str(current_clone),
-      )
-      break
-    except: # SSLError
-      traceback.print_exc()
-      if i is 9:
-        raise
-      logger.warning('takeover failed. Retrying...')
-      time.sleep(10)
-  logger.info('Done.')
-
-  if test_type == 'UnitTest': # Run by classical erp5testnode using slapproxy
-    # Run slapos node instance
-    # XXX hardcoded
-    slapos_configuration_file_path = os.path.join(
-        os.path.dirname(sys.argv[0]),
-        '..', '..', '..', 'slapos.cfg'
-    )
-    print slapos_configuration_file_path
-    command = ['/opt/slapos/bin/slapos', 'node', 'instance',
-               '--cfg=%s' % slapos_configuration_file_path,
-               '--pidfile=slapos.pid']
-    subprocess.Popen(command).wait()
-    subprocess.Popen(command).wait()
-    subprocess.Popen(command).wait()
-    new_ip = ip
-
-  else: # ScalabilityTest
-    # Wait for the new IP (of old-clone new-main instance) to appear.
-    logger.info('Waiting for new main instance to be ready...')
-    new_ip = None
-    while not new_ip or new_ip == 'None' or new_ip == ip:
-      logger.info('Not ready yet. SlapOS says main IP is %s' % new_ip)
-      time.sleep(60)
-      new_ip = fetchMainInstanceIP(partition, software, kvm_rootinstance_name)
-    logger.info('New IP of instance is %s' % new_ip)
-
-  new_key = fetchKey(new_ip)
-  logger.info('Key on this new instance is %s' % new_key)
-
-  # Compare with original key. If same: success.
-  if new_key == key:
-    logger.info('Success for clone %s.' % current_clone)
-  else:
-    logger.info('Failure for clone %s. Aborting.' % current_clone)
-    return False
-
-  # Setup "new old ip" for next clone, so that it will test it is different
-  # from current clone
-  return new_ip
+  def _getPartitionParameterDict(self):
+    """
+    Overload default method.
+    """
+    return self.partition.request(
+        software_release=self.software,
+        software_type='kvm-resilient',
+        partition_reference=self.root_instance_name).getConnectionParameterDict()
+
+  def generateData(self):
+    """
+    Set a random key that will be stored inside of the virtual hard drive.
+    """
+    self.key = ''.join(random.SystemRandom().sample(string.ascii_lowercase, 20))
+    self.logger.info('Generated key is: %s' % self.key)
+
+  def pushDataOnMainInstance(self):
+    self.logger.info('Getting the KVM IP...')
+    self.ip = self._getPartitionParameterDict()['ipv6']
+    logger.info('KVM IP is %s.' % self.ip)
+
+    for i in range(0, 60):
+      connection = urllib.urlopen('http://%s:10080/set?key=%s' % (self.ip, self.key))
+      if connection.getcode() is 200:
+        break
+      else:
+        logger.info('Impossible to connect to virtual machine to set key. sleeping...')
+        time.sleep(60)
+      if i is 59:
+        raise Exception('Bad return code when setting key in main instance, after trying for 60 minutes.')
+
+    logger.info('Key uploaded to KVM main instance.')
+
+  def checkDataOnCloneInstance(self):
+    self.ip = self._returnNewInstanceParameter(
+        parameter_key='ipv6',
+        old_parameter_value=self.ip
+    )
+    new_key = fetchKey(self.ip)
+    logger.info('Key on this new instance is %s' % new_key)
+
+    # Compare with original key. If same: success.
+    if new_key == self.key:
+      self.logger.info('Data are the same: success.')
+      return True
+    else:
+      self.logger.info('Data are different: failure.')
+
+
+def runTestSuite(*args, **kwargs):
+  """
+  Run KVM Resiliency Test.
+  """
+  return KVMTestSuite(*args, **kwargs).runTestSuite()
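As a usage note, the key round trip exercised by pushDataOnMainInstance and fetchKey can be reproduced by hand against the guest's Flask service described in the docstring. A minimal sketch (Python 2, like the module above), where the host name is a placeholder and the 10080 port mapping is an assumption taken from the URLs in the code:

# Manual check of the /set and /get endpoints the test relies on.
# 'vm_host' is a placeholder for the KVM instance address; the test itself
# uses the 'ipv6' connection parameter, with guest port 80 exposed as 10080.
import urllib

vm_host = 'kvm.example'  # placeholder
key = 'testkey'

assert urllib.urlopen('http://%s:10080/set?key=%s' % (vm_host, key)).getcode() == 200
assert urllib.urlopen('http://%s:10080/get' % vm_host).read() == key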