Commit 666dc179 authored by Lutra Conseil's avatar Lutra Conseil Committed by Rafael Monnerat

Merge branch 'collect'

parents dec95dfc d38f6a23
......@@ -48,6 +48,7 @@ setup(name=name,
'netifaces', # to fetch information about network devices
'setuptools', # namespaces
'supervisor', # slapgrid uses supervisor to manage processes
'psutil', # slapos.collect uses psutil to gather process and system metrics
'xml_marshaller>=0.9.3', # to unmarshall/marshall python objects to/from
# XML
'zope.interface', # slap library implements interfaces
......@@ -72,17 +73,7 @@ setup(name=name,
entry_points={
'console_scripts': [
'slapos-watchdog = slapos.grid.watchdog:main',
'slapproxy = slapos.cli_legacy.proxy_start:main',
'slapos = slapos.cli.entry:main',
# Deprecated entry points
'slapconsole = slapos.cli_legacy.console:console',
'slapformat = slapos.cli_legacy.format:main',
'slapgrid-sr = slapos.cli_legacy.slapgrid:runSoftwareRelease',
'slapgrid-cp = slapos.cli_legacy.slapgrid:runComputerPartition',
'slapgrid-ur = slapos.cli_legacy.slapgrid:runUsageReport',
'slapgrid-supervisorctl = slapos.cli_legacy.svcbackend:supervisorctl',
'slapgrid-supervisord = slapos.cli_legacy.svcbackend:supervisord',
'bang = slapos.cli_legacy.bang:main',
],
'slapos.cli': [
# Utilities
......@@ -102,6 +93,7 @@ setup(name=name,
'node software = slapos.cli.slapgrid:SoftwareCommand',
'node instance = slapos.cli.slapgrid:InstanceCommand',
'node boot = slapos.cli.boot:BootCommand',
'node collect = slapos.cli.collect:CollectCommand',
# SlapOS client commands
'console = slapos.cli.console:ConsoleCommand',
'configure local = slapos.cli.configure_local:ConfigureLocalCommand',
......
# -*- coding: utf-8 -*-
import subprocess
from time import sleep
import socket
import glob
import os
from slapos.collect import do_collect
from slapos.cli.command import must_be_root
from slapos.cli.entry import SlapOSApp
from slapos.cli.config import ConfigCommand
class CollectCommand(ConfigCommand):
"""
Collect system consumption data and store it.
"""
command_group = 'node'
def get_parser(self, prog_name):
ap = super(CollectCommand, self).get_parser(prog_name)
return ap
@must_be_root
def take_action(self, args):
configp = self.fetch_config(args)
do_collect(configp)
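# Usage sketch (assumption, not part of this commit): with the
# 'node collect = slapos.cli.collect:CollectCommand' entry point added to
# setup.py above, the collector is invoked from the command line as
#   slapos node collect
# (as root), reading the SlapOS configuration resolved by ConfigCommand and
# passing the resulting ConfigParser to do_collect().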
......@@ -174,10 +174,10 @@ def save_former_config(conf):
def fetch_configuration_template():
req = requests.get('http://git.erp5.org/gitweb/slapos.core.git/blob_plain/HEAD:/slapos.cfg.example')
req.raise_for_status()
return req.text
template_arg_list = (__name__.split('.')[0], 'slapos.cfg.example')
with pkg_resources.resource_stream(*template_arg_list) as fout:
slapos_node_configuration_template = fout.read()
return slapos_node_configuration_template
def slapconfig(conf):
"""Base Function to configure slapos in /etc/opt/slapos"""
......@@ -200,13 +200,6 @@ def slapconfig(conf):
if not dry_run:
os.mkdir(slap_conf_dir, 0o711)
# Hack that should be removed when we switch to re6st
# Force start of vpn
openvpn_needed_file = os.path.join(slap_conf_dir, 'openvpn-needed')
if not os.path.exists(openvpn_needed_file):
if not dry_run:
open(openvpn_needed_file, 'w').write('')
user_certificate_repository_path = os.path.join(slap_conf_dir, 'ssl')
if not os.path.exists(user_certificate_repository_path):
conf.logger.info('Creating directory: %s', user_certificate_repository_path)
......@@ -344,5 +337,5 @@ def do_register(conf):
slapconfig(conf)
conf.logger.info('Node has successfully been configured as %s.', COMP)
conf.logger.info('Now please invoke /usr/sbin/slapos-start on your site.')
conf.logger.info('Now please invoke slapos node boot on your site.')
return 0
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011, 2012 Vifib SARL and Contributors.
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import argparse
import ConfigParser
from slapos.bang import do_bang
def main(*args):
ap = argparse.ArgumentParser()
ap.add_argument('-m', '--message', default='', help='Message for bang.')
ap.add_argument('configuration_file', type=argparse.FileType(),
help='SlapOS configuration file.')
if args:
args = ap.parse_args(list(args))
else:
args = ap.parse_args()
configp = ConfigParser.SafeConfigParser()
configp.readfp(args.configuration_file)
do_bang(configp, args.message)
# -*- coding: utf-8 -*-
import argparse
import os
import textwrap
from slapos.client import ClientConfig, init, do_console
from slapos.cli_legacy.util import get_config_parser
def console():
description = textwrap.dedent("""\
slapconsole allows you to interact with the slap API. You can play with the global
"slap" object and with the global "request" method.
Examples:
>>> # Request instance
>>> request(kvm, "myuniquekvm")
>>> # Request software installation on owned computer
>>> supply(kvm, "mycomputer")
>>> # Fetch instance information on an already launched instance
>>> request(kvm, "myuniquekvm").getConnectionParameter("url")""")
ap = argparse.ArgumentParser(description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
ap.add_argument('-u', '--master_url',
default=None,
help='URL of the SlapOS Master to use.')
ap.add_argument('-k', '--key_file',
help="SSL Authorisation key file.")
ap.add_argument('-c', '--cert_file',
help="SSL Authorisation certificate file.")
ap.add_argument('configuration_file',
help='path to slapos.cfg')
args = ap.parse_args()
if not os.path.isfile(args.configuration_file):
ap.error("%s: Not found or not a regular file." % args.configuration_file)
configp = get_config_parser(args.configuration_file)
conf = ClientConfig(args, configp)
local = init(conf)
do_console(local)
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010, 2011, 2012 Vifib SARL and Contributors.
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import argparse
import ConfigParser
import logging
import sys
import os
from slapos.format import FormatConfig, UsageError, tracing_monkeypatch, do_format
def main(*args):
"Run default configuration."
ap = argparse.ArgumentParser()
ap.add_argument('-x', '--computer_xml',
help="Path to file with computer's XML. If does not exists, will be created",
default=None)
ap.add_argument('--computer_json',
help="Path to a JSON version of the computer's XML (for development only).",
default=None)
ap.add_argument('-l', '--log_file',
help="The path to the log file used by the script.")
ap.add_argument('-i', '--input_definition_file',
help="Path to file to read definition of computer instead of "
"declaration. Using definition file allows to disable "
"'discovery' of machine services and allows to define computer "
"configuration in fully controlled manner.")
ap.add_argument('-o', '--output_definition_file',
help="Path to file to write definition of computer from "
"declaration.")
ap.add_argument('-n', '--dry_run',
help="Don't actually do anything.",
default=False,
action="store_true")
ap.add_argument('-v', '--verbose',
default=False,
action="store_true",
help="Verbose output.")
# the console option is actually ignored and not used anymore.
ap.add_argument('-c', '--console',
default=False,
action="store_true",
help="Console output.")
ap.add_argument('--alter_user',
choices=['True', 'False'],
help="Shall slapformat alter user database [default: True]")
ap.add_argument('--alter_network',
choices=['True', 'False'],
help="Shall slapformat alter network configuration [default: True]")
ap.add_argument('--now',
help="Launch slapformat without delay",
default=False,
action="store_true")
ap.add_argument('configuration_file',
help='path to slapos.cfg')
if args:
args = ap.parse_args(list(args))
else:
args = ap.parse_args()
logger = logging.getLogger("slapformat")
logger.addHandler(logging.StreamHandler())
if args.verbose:
logger.setLevel(logging.DEBUG)
logger.debug("Verbose mode enabled.")
else:
logger.setLevel(logging.INFO)
conf = FormatConfig(logger=logger)
configp = ConfigParser.SafeConfigParser()
if configp.read(args.configuration_file) != [args.configuration_file]:
raise UsageError('Cannot find or parse configuration file: %s' % args.configuration_file)
conf.mergeConfig(args, configp)
if conf.log_file:
if not os.path.isdir(os.path.dirname(conf.log_file)):
# bail out if the directory for the log file does not exist: we cannot
# create the log file, so refuse to continue
raise ValueError('Please create directory %r to store %r log file' % (
os.path.dirname(conf.log_file), conf.log_file))
else:
file_handler = logging.FileHandler(conf.log_file)
file_handler.setFormatter(logging.Formatter("%(asctime)s - "
"%(name)s - %(levelname)s - %(message)s"))
conf.logger.addHandler(file_handler)
conf.logger.info('Configured logging to file %r' % conf.log_file)
try:
conf.setConfig()
except UsageError as exc:
sys.stderr.write(exc.message + '\n')
sys.stderr.write("For help use --help\n")
sys.exit(1)
tracing_monkeypatch(conf)
try:
do_format(conf=conf)
except:
conf.logger.exception('Uncaught exception:')
raise
# -*- coding: utf-8 -*-
# vim: set et sts=2:
import argparse
import ConfigParser
import logging
import os
import sys
from slapos.proxy import ProxyConfig, do_proxy
class UsageError(Exception):
pass
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-l', '--log_file',
help='The path to the log file used by the script.')
ap.add_argument('-v', '--verbose',
action='store_true',
help='Verbose output.')
# XXX not used anymore, deprecated
ap.add_argument('-c', '--console',
action='store_true',
help='Console output.')
ap.add_argument('-u', '--database-uri',
help='URI for sqlite database')
ap.add_argument('configuration_file',
help='path to slapos.cfg')
args = ap.parse_args()
logger = logging.getLogger('slapproxy')
logger.addHandler(logging.StreamHandler())
if args.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
conf = ProxyConfig(logger=logger)
configp = ConfigParser.SafeConfigParser()
if configp.read(args.configuration_file) != [args.configuration_file]:
raise UsageError('Cannot find or parse configuration file: %s' % args.configuration_file)
conf.mergeConfig(args, configp)
if conf.log_file:
if not os.path.isdir(os.path.dirname(conf.log_file)):
raise ValueError('Please create directory %r to store %r log file' % (
os.path.dirname(conf.log_file), conf.log_file))
file_handler = logging.FileHandler(conf.log_file)
file_handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(file_handler)
logger.info('Configured logging to file %r' % conf.log_file)
conf.setConfig()
try:
do_proxy(conf=conf)
return_code = 0
except SystemExit as err:
return_code = err.code
sys.exit(return_code)
# -*- coding: utf-8 -*-
# vim: set et sts=2:
import argparse
import ConfigParser
import logging
import sys
from slapos.grid.utils import setRunning, setFinished
from slapos.grid.slapgrid import (merged_options, check_missing_parameters,
check_missing_files, random_delay, create_slapgrid_object)
def parse_arguments(*argument_tuple):
"""Parse arguments and return options dictionary merged with the config file."""
ap = argparse.ArgumentParser()
ap.add_argument('--instance-root',
help='The instance root directory location.')
ap.add_argument('--software-root',
help='The software_root directory location.')
ap.add_argument('--master-url',
help='The master server URL. Mandatory.')
ap.add_argument('--computer-id',
help='The computer id defined in the server.')
ap.add_argument('--supervisord-socket',
help='The socket supervisor will use.')
ap.add_argument('--supervisord-configuration-path',
help='The location where supervisord configuration will be stored.')
ap.add_argument('--buildout', default=None,
help='Location of buildout binary.')
ap.add_argument('--pidfile',
help='The location where pidfile will be created.')
ap.add_argument('--logfile',
help='The location where slapgrid logfile will be created.')
ap.add_argument('--key_file',
help='SSL Authorisation key file.')
ap.add_argument('--cert_file',
help='SSL Authorisation certificate file.')
ap.add_argument('--signature_private_key_file',
help='Signature private key file.')
ap.add_argument('--master_ca_file',
help='Root certificate of SlapOS master key.')
ap.add_argument('--certificate_repository_path',
help='Path to directory where downloaded certificates would be stored.')
ap.add_argument('-v', '--verbose', action='store_true',
help='Be verbose.')
ap.add_argument('--maximum-periodicity', type=int, default=None,
help='Periodicity at which buildout should be run in instance.')
ap.add_argument('--promise-timeout', type=int, default=3,
help='Promise timeout in seconds.')
ap.add_argument('--now', action='store_true',
help='Launch slapgrid without delay. Default behavior.')
ap.add_argument('--all', action='store_true',
help='Launch slapgrid to process all Software Releases '
'and/or Computer Partitions.')
ap.add_argument('--only-sr',
help='Force the update of a single software release (use url hash), '
'even if it is already installed. This option will make all other '
'software releases be ignored.')
ap.add_argument('--only-cp',
help='Update a single or a list of computer partitions '
'(i.e. slappartX, slappartY), '
'this option will make all other computer partitions be ignored.')
ap.add_argument('configuration_file', type=argparse.FileType(),
help='SlapOS configuration file.')
# Deprecated options
ap.add_argument('-c', '--console', action='store_true',
help="Deprecated, doesn't do anything.")
ap.add_argument('--develop', action='store_true',
help='Deprecated, same as --all.')
ap.add_argument('--only_sr',
help='Deprecated, same as --only-sr.')
ap.add_argument('--only_cp',
help='Deprecated, same as --only-cp.')
ap.add_argument('--maximal_delay',
help='Deprecated. Will only work from configuration file in the future.')
if not argument_tuple:
args = ap.parse_args()
else:
args = ap.parse_args(list(argument_tuple))
return args
def setup_logger(options):
logger = logging.getLogger(__name__)
if options.get('logfile'):
handler = logging.FileHandler(options['logfile'])
else:
handler = logging.StreamHandler()
if options['verbose']:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s %(name)-18s: '
'%(levelname)-8s %(message)s',
datefmt='%Y-%m-%dT%H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def parseArgumentTupleAndReturnSlapgridObject(*argument_tuple):
"""Returns a new instance of slapgrid.Slapgrid created with argument+config parameters.
Also returns the pidfile path, and configures logger.
"""
args = parse_arguments(*argument_tuple)
configp = ConfigParser.SafeConfigParser()
configp.readfp(args.configuration_file)
options = merged_options(args, configp)
logger = setup_logger(options)
check_missing_parameters(options)
check_missing_files(options)
random_delay(options, logger=logger)
slapgrid_object = create_slapgrid_object(options, logger=logger)
return slapgrid_object, options.get('pidfile')
def realRun(argument_tuple, method):
slapgrid_object, pidfile = parseArgumentTupleAndReturnSlapgridObject(*argument_tuple)
if pidfile:
setRunning(logger=slapgrid_object.logger, pidfile=pidfile)
try:
return getattr(slapgrid_object, method)()
finally:
if pidfile:
setFinished(pidfile)
def runSoftwareRelease(*argument_tuple):
"""Hook for entry point to process Software Releases"""
sys.exit(realRun(argument_tuple, 'processSoftwareReleaseList'))
def runComputerPartition(*argument_tuple):
"""Hook for entry point to process Computer Partitions"""
sys.exit(realRun(argument_tuple, 'processComputerPartitionList'))
def runUsageReport(*argument_tuple):
"""Hook for entry point to process Usage Reports"""
sys.exit(realRun(argument_tuple, 'agregateAndSendUsage'))
# -*- coding: utf-8 -*-
# vim: set et sts=2:
##############################################################################
#
# Copyright (c) 2010, 2011, 2012 Vifib SARL and Contributors.
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import logging
import os
from optparse import OptionParser
import ConfigParser
from slapos.grid.svcbackend import launchSupervisord
def getOptionDict(*argument_tuple):
usage = """
Typical usage:
* %prog CONFIGURATION_FILE [arguments passed to supervisor]
""".strip()
parser = OptionParser(usage=usage)
# Parses arguments
if argument_tuple:
(argument_option_instance, argument_list) = parser.parse_args(list(argument_tuple))
else:
# No arguments given to entry point: we parse sys.argv.
(argument_option_instance, argument_list) = parser.parse_args()
if not argument_list:
parser.error("Configuration file is obligatory. Consult documentation by calling with -h.")
configuration_file = argument_list[0]
if not os.path.exists(configuration_file):
parser.error("Could not read configuration file : %s" % configuration_file)
slapgrid_configuration = ConfigParser.SafeConfigParser()
slapgrid_configuration.read(configuration_file)
# Build the option dictionary from the [slapos] section
option_dict = dict(slapgrid_configuration.items("slapos"))
# Supervisord configuration location
option_dict.setdefault('supervisord_configuration_path',
os.path.join(option_dict['instance_root'], 'etc', 'supervisord.conf'))
# Supervisord socket
option_dict.setdefault('supervisord_socket',
os.path.join(option_dict['instance_root'], 'supervisord.socket'))
return option_dict, argument_list[1:]
def supervisorctl(*argument_tuple):
logger = logging.getLogger('SVCBackend')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
logger.addHandler(handler)
option_dict, args = getOptionDict(*argument_tuple)
import supervisor.supervisorctl
launchSupervisord(socket=option_dict['supervisord_socket'],
configuration_file=option_dict['supervisord_configuration_path'],
logger=logger)
supervisor.supervisorctl.main(args=['-c', option_dict['supervisord_configuration_path']] + args)
def supervisord(*argument_tuple):
logger = logging.getLogger('SVCBackend')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
logger.addHandler(handler)
option_dict, _ = getOptionDict(*argument_tuple)
launchSupervisord(socket=option_dict['supervisord_socket'],
configuration_file=option_dict['supervisord_configuration_path'],
logger=logger)
# -*- coding: utf-8 -*-
import ConfigParser
import os
def get_config_parser(path):
configp = ConfigParser.SafeConfigParser()
path = os.path.expanduser(path)
if not os.path.isfile(path):
raise OSError('Specified configuration file %s does not exist. Exiting.' % path)
configp.read(path)
return configp
from psutil import process_iter, NoSuchProcess, AccessDenied
from time import time, sleep, strftime
from slapos.collect.db import Database
from slapos.util import mkdir_p
# Local import
from snapshot import ProcessSnapshot, SystemSnapshot, ComputerSnapshot
from slapos.collect.reporter import SystemJSONReporterDumper, \
RawCSVDumper, \
SystemCSVReporterDumper
from entity import get_user_list, Computer
def _get_time():
return strftime("%Y-%m-%d -- %H:%M:%S").split(" -- ")
def build_snapshot(proc):
try:
return ProcessSnapshot(proc)
except NoSuchProcess:
return None
def current_state(user_dict):
"""
Iterator used to apply build_snapshot(...) on every single relevant process.
A process is considered relevant if its user matches our user list, i.e.
its user is a slapos user
"""
process_list = [p for p in process_iter() if p.username() in user_dict]
for i, process in enumerate(process_list):
yield build_snapshot(process)
def do_collect(conf):
"""
Main function.
The idea here is to poll the system periodically. For each poll, we get a
list of Snapshots holding information about processes. We iterate over that
list to store data on a per-user basis: each User object accumulates every
snapshot matching that user, so that we end up with information for each
user.
"""
try:
collected_date, collected_time = _get_time()
user_dict = get_user_list(conf)
try:
for snapshot in current_state(user_dict):
if snapshot:
user_dict[snapshot.username].append(snapshot)
except (KeyboardInterrupt, SystemExit, NoSuchProcess):
raise
log_directory = "%s/var/data-log" % conf.get("slapos", "instance_root")
mkdir_p(log_directory)
database = Database(log_directory)
computer = Computer(ComputerSnapshot())
computer.save(database, collected_date, collected_time)
for user in user_dict.values():
user.save(database, collected_date, collected_time)
SystemCSVReporterDumper(database).dump(log_directory)
RawCSVDumper(database).dump(log_directory)
except AccessDenied:
print "You HAVE TO execute this script with root permission."
import sqlite3
import os
from time import time, strftime
class Database:
database_name = "collector.db"
table_list = ["user", "computer", "system", "disk"]
CREATE_USER_TABLE = "create table if not exists user " \
"(partition text, pid real, process text, " \
" cpu_percent real, cpu_time real, " \
" cpu_num_threads real, memory_percent real, " \
" memory_rss real, io_rw_counter real, " \
" io_cycles_counter real, date text, time text, " \
" reported integer NULL DEFAULT 0)"
CREATE_COMPUTER_TABLE = "create table if not exists computer "\
"(cpu_num_core real, cpu_frequency real, cpu_type text," \
" memory_size real, memory_type text, partition_list text," \
" date text, time text, reported integer NULL DEFAULT 0)"
CREATE_SYSTEM_TABLE = "create table if not exists system " \
"(loadavg real, cpu_percent real, memory_used real, "\
" memory_free real, net_in_bytes real, net_in_errors real, "\
" net_in_dropped real, net_out_bytes real, net_out_errors real, "\
" net_out_dropped real, date text, time text, " \
" reported integer NULL DEFAULT 0)"
CREATE_DISK_PARTITION = "create table if not exists disk "\
"(partition text, used text, free text, mountpoint text, " \
" date text, time text, reported integer NULL DEFAULT 0)"
INSERT_USER_TEMPLATE = "insert into user(" \
"partition, pid, process, cpu_percent, cpu_time, " \
"cpu_num_threads, memory_percent," \
"memory_rss, io_rw_counter, io_cycles_counter, " \
"date, time) values " \
"('%s', %s, '%s', %s, %s, %s, %s, %s, %s, %s, '%s', '%s' )"
INSERT_COMPUTER_TEMPLATE = "insert into computer("\
" cpu_num_core, cpu_frequency, cpu_type," \
"memory_size, memory_type, partition_list," \
"date, time) values "\
"(%s, %s, '%s', %s, '%s', '%s', '%s', '%s' )"
INSERT_DISK_TEMPLATE = "insert into disk("\
" partition, used, free, mountpoint," \
" date, time) "\
"values ('%s', %s, %s, '%s', '%s', '%s' )"
INSERT_SYSTEM_TEMPLATE = "insert into system("\
" loadavg, cpu_percent, memory_used, memory_free," \
" net_in_bytes, net_in_errors, net_in_dropped," \
" net_out_bytes, net_out_errors, net_out_dropped, " \
" date, time) values "\
"( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, '%s', '%s' )"
def __init__(self, directory = None):
assert self.database_name is not None
self.uri = os.path.join(directory, self.database_name)
self.connection = None
self.cursor = None
self._bootstrap()
def connect(self):
self.connection = sqlite3.connect(self.uri)
self.cursor = self.connection.cursor()
def commit(self):
assert self.connection is not None
self.connection.commit()
def close(self):
assert self.connection is not None
self.cursor.close()
self.connection.close()
def _execute(self, sql):
assert self.connection is not None
return self.cursor.execute(sql)
def _bootstrap(self):
assert self.CREATE_USER_TABLE is not None
self.connect()
self._execute(self.CREATE_USER_TABLE)
self._execute(self.CREATE_COMPUTER_TABLE)
self._execute(self.CREATE_SYSTEM_TABLE)
self._execute(self.CREATE_DISK_PARTITION)
self.commit()
self.close()
def _getInsertionDateTuple(self):
return strftime("%Y-%m-d -- %H:%M:%S").split(" -- ")
###################
# Insertion methods
###################
def insertUserSnapshot(self, partition, pid, process, cpu_percent, cpu_time,
cpu_num_threads, memory_percent, memory_rss, io_rw_counter,
io_cycles_counter, insertion_date, insertion_time):
""" Insert user processes snapshots information on a database """
insertion_sql = self.INSERT_USER_TEMPLATE % \
( partition, pid, process, cpu_percent, cpu_time,
cpu_num_threads, memory_percent,
memory_rss, io_rw_counter, io_cycles_counter,
insertion_date, insertion_time)
self._execute(insertion_sql)
return insertion_sql
def insertComputerSnapshot(self, cpu_num_core, cpu_frequency, cpu_type,
memory_size, memory_type, partition_list, insertion_date, insertion_time):
"""Insert Computer general informations snapshots informations on
the database
"""
insertion_sql = self.INSERT_COMPUTER_TEMPLATE % \
( cpu_num_core, cpu_frequency, cpu_type,
memory_size, memory_type,
partition_list, insertion_date,
insertion_time)
self._execute(insertion_sql)
return insertion_sql
def insertDiskPartitionSnapshot(self, partition, used, free, mountpoint,
insertion_date, insertion_time):
""" Insert Disk Partitions informations on the database """
insertion_sql = self.INSERT_DISK_TEMPLATE % \
( partition, used, free, mountpoint,
insertion_date, insertion_time )
self._execute(insertion_sql)
return insertion_sql
def insertSystemSnapshot(self, loadavg, cpu_percent, memory_used, memory_free,
net_in_bytes, net_in_errors, net_in_dropped, net_out_bytes,
net_out_errors, net_out_dropped, insertion_date, insertion_time):
""" Include System general Snapshot on the database
"""
insertion_sql = self.INSERT_SYSTEM_TEMPLATE % \
( loadavg, cpu_percent, memory_used, memory_free,
net_in_bytes, net_in_errors, net_in_dropped,
net_out_bytes, net_out_errors, net_out_dropped,
insertion_date, insertion_time )
self._execute(insertion_sql)
return insertion_sql
def getTableList(self):
""" Get the list of tables from the database
"""
return [i[0] for i in self._execute(
"SELECT name FROM sqlite_master WHERE type='table'")]
def getDateScopeList(self, ignore_date=None, reported=0):
""" Get from the present unique dates from the system
Use a smaller table to sabe time.
"""
if ignore_date is not None:
where_clause = " AND date != '%s'" % ignore_date
else:
where_clause = ""
select_sql = "SELECT date, count(time) FROM system "\
" WHERE reported = %s %s GROUP BY date" % \
(reported, where_clause)
return self._execute(select_sql)
def markDayAsReported(self, date_scope, table_list):
""" Mark all registers from a certain date as reported """
update_sql = "UPDATE %s SET reported = 1 " \
"WHERE date = '%s' AND reported = 0"
for table in table_list:
self._execute(update_sql % (table, date_scope))
def select(self, table, date=None, columns="*"):
""" Query database for a full table information """
if date is not None:
where_clause = " WHERE date = '%s' " % date
else:
where_clause = ""
select_sql = "SELECT %s FROM %s %s " % (columns, table, where_clause)
return self._execute(select_sql)
#####################################################
# Export tables as dicts to handle realtime plotting
#####################################################
def exportSystemAsDict(self, date):
""" Export system table as dictionally, formatting the output
for present it in a nicer presentation.
"""
collected_entry_dict = {}
collected_entry_dict["loadavg"] = []
collected_entry_dict["cpu_percent"] = []
collected_entry_dict["memory_used"] = []
collected_entry_dict["memory_free"] = []
collected_entry_dict["net_in_bytes"] = []
collected_entry_dict["net_in_errors"] = []
collected_entry_dict["net_in_dropped"] = []
collected_entry_dict["net_out_bytes"] = []
collected_entry_dict["net_out_errors"] = []
collected_entry_dict["net_out_dropped"] = []
first_entry = 1
last_entry_in = 0
last_entry_out = 0
entry_list = self._execute(
"SELECT loadavg, cpu_percent, memory_used, memory_free," \
" net_in_bytes, net_in_errors, net_in_dropped," \
" net_out_bytes, net_out_errors, net_out_dropped, " \
" date, time FROM system WHERE date = '%s'" % date)
for entry in entry_list:
entry_time = "%s %s" % (entry[10], str(entry[11]))
if not first_entry:
_entry_in = entry[4] - last_entry_in
last_entry_in = entry[4]
entry_in = _entry_in
_entry_out = entry[7] - last_entry_out
last_entry_out = entry[7]
entry_out = _entry_out
else:
first_entry = 0
last_entry_in = entry[4]
last_entry_out = entry[7]
continue
collected_entry_dict["loadavg"].append(
{'entry': entry[0], 'time': entry_time })
collected_entry_dict["cpu_percent"].append(
{'entry': entry[1], 'time': entry_time })
collected_entry_dict["memory_used"].append(
{'entry': entry[2]/1024, 'time': entry_time })
collected_entry_dict["memory_free"].append(
{'entry': entry[3]/1024, 'time': entry_time })
collected_entry_dict["net_in_bytes"].append(
{'entry': entry_in/1024, 'time': entry_time })
collected_entry_dict["net_in_errors"].append(
{'entry': entry[5], 'time': entry_time })
collected_entry_dict["net_in_dropped"].append(
{'entry': entry[6], 'time': entry_time })
collected_entry_dict["net_out_bytes"].append(
{'entry': entry_out/1024, 'time': entry_time })
collected_entry_dict["net_out_errors"].append(
{'entry': entry[8], 'time': entry_time })
collected_entry_dict["net_out_dropped"].append(
{'entry': entry[9], 'time': entry_time })
return collected_entry_dict
def exportDiskAsDict(self, date):
""" Export a column from a table for a given date.
"""
collected_entry_dict = {}
entry_list = self._execute(
"SELECT partition, used, free, date, time "\
"from disk WHERE date = '%s'" % (date))
for partition, used, free, __date, __time in entry_list:
partition_used = "%s-used" % partition
partition_free = "%s-free" % partition
if partition_used not in collected_entry_dict:
collected_entry_dict[partition_used] = []
if partition_free not in collected_entry_dict:
collected_entry_dict[partition_free] = []
collected_entry_dict[partition_used].append(
{'entry': int(used)/1024,
'time': "%s %s" % (__date, str(__time))})
collected_entry_dict[partition_free].append(
{'entry': int(free)/1024,
'time': "%s %s" % (__date, str(__time))})
return collected_entry_dict
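# Usage sketch (assumption): the Database creates its tables on instantiation
# and is driven with explicit connect()/commit()/close() calls, e.g.:
#
#   database = Database('/srv/slapgrid/var/data-log')
#   database.connect()
#   database.insertDiskPartitionSnapshot('/dev/sdx1', '10', '20', '/mnt',
#                                        'DATE', 'TIME')
#   database.commit()
#   database.close()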
def get_user_list(config):
nb_user = int(config.get("slapformat", "partition_amount"))
name_prefix = config.get("slapformat", "user_base_name")
path_prefix = config.get("slapformat", "partition_base_name")
instance_root = config.get("slapos", "instance_root")
user_dict = {name: User(name, path)
for name, path in [
(
"%s%s" % (name_prefix, nb),
"%s/%s%s" % (instance_root, path_prefix, nb)
) for nb in range(nb_user)
]
}
#user_dict['root'] = User("root", "/opt/slapgrid")
return user_dict
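# Illustrative sketch (assumption): get_user_list() expects a configuration
# shaped like the one slapformat writes, e.g.:
#
#   [slapos]
#   instance_root = /srv/slapgrid
#
#   [slapformat]
#   partition_amount = 3
#   user_base_name = slapuser
#   partition_base_name = slappart
#
# which yields users slapuser0..slapuser2 mapped to
# /srv/slapgrid/slappart0 .. /srv/slapgrid/slappart2.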
class User(object):
def __init__(self, name, path):
self.name = str(name)
self.path = str(path)
self.snapshot_list = []
def append(self, value):
self.snapshot_list.append(value)
def save(self, database, collected_date, collected_time):
""" Insert collected data on user collector """
database.connect()
for snapshot_item in self.snapshot_list:
database.insertUserSnapshot(self.name,
pid=snapshot_item.get("pid"),
process=snapshot_item.get("process"),
cpu_percent=snapshot_item.get("cpu_percent"),
cpu_time=snapshot_item.get("cpu_time"),
cpu_num_threads=snapshot_item.get("cpu_num_threads"),
memory_percent=snapshot_item.get("memory_percent"),
memory_rss=snapshot_item.get("memory_rss"),
io_rw_counter=snapshot_item.get("io_rw_counter"),
io_cycles_counter=snapshot_item.get("io_cycles_counter"),
insertion_date=collected_date,
insertion_time=collected_time)
database.commit()
database.close()
class Computer(dict):
def __init__(self, computer_snapshot):
self.computer_snapshot = computer_snapshot
def save(self, database, collected_date, collected_time):
database.connect()
self._save_computer_snapshot(database, collected_date, collected_time)
self._save_system_snapshot(database, collected_date, collected_time)
self._save_disk_partition_snapshot(database, collected_date, collected_time)
database.commit()
database.close()
def _save_computer_snapshot(self, database, collected_date, collected_time):
partition_list = ";".join(["%s=%s" % (x,y) for x,y in \
self.computer_snapshot.get("partition_list")])
database.insertComputerSnapshot(
cpu_num_core=self.computer_snapshot.get("cpu_num_core"),
cpu_frequency=self.computer_snapshot.get("cpu_frequency"),
cpu_type=self.computer_snapshot.get("cpu_type"),
memory_size=self.computer_snapshot.get("memory_size"),
memory_type=self.computer_snapshot.get("memory_type"),
partition_list=partition_list,
insertion_date=collected_date,
insertion_time=collected_time)
def _save_system_snapshot(self, database, collected_date, collected_time):
snapshot = self.computer_snapshot.get("system_snapshot")
database.insertSystemSnapshot(
loadavg=snapshot.get("load"),
cpu_percent=snapshot.get("cpu_percent"),
memory_used=snapshot.get("memory_used"),
memory_free=snapshot.get("memory_free"),
net_in_bytes=snapshot.get("net_in_bytes"),
net_in_errors=snapshot.get("net_in_errors"),
net_in_dropped=snapshot.get("net_in_dropped"),
net_out_bytes=snapshot.get("net_out_bytes"),
net_out_errors= snapshot.get("net_out_errors"),
net_out_dropped=snapshot.get("net_out_dropped"),
insertion_date=collected_date,
insertion_time=collected_time)
def _save_disk_partition_snapshot(self, database, collected_date, collected_time):
for disk_partition in self.computer_snapshot.get("disk_snapshot_list"):
database.insertDiskPartitionSnapshot(
partition=disk_partition.partition,
used=disk_partition.disk_size_used,
free=disk_partition.disk_size_free,
mountpoint=';'.join(disk_partition.mountpoint_list),
insertion_date=collected_date,
insertion_time=collected_time)
#!/usr/bin/env python
from slapos.collect.db import Database
from slapos.util import mkdir_p
import os.path
import json
import csv
from time import strftime
class Dumper(object):
def __init__(self, database):
self.db = database
class SystemReporter(Dumper):
def dump(self, folder):
""" Dump data """
_date = strftime("%Y-%m-%d")
self.db.connect()
for item, collected_item_list in self.db.exportSystemAsDict(_date).iteritems():
self.writeFile(item, folder, collected_item_list)
for partition, collected_item_list in self.db.exportDiskAsDict(_date).iteritems():
partition_id = partition.split("-")[0].split("/")[-1]
item = "memory_%s" % partition.split("-")[1]
self.writeFile("disk_%s_%s" % (item, partition_id), folder, collected_item_list)
self.db.close()
class SystemJSONReporterDumper(SystemReporter):
def writeFile(self, name, folder, collected_entry_list=[]):
""" Dump data as json """
file_io = open(os.path.join(folder, "system_%s.json" % name), "w")
json.dump(collected_entry_list, file_io, sort_keys=True, indent=2)
file_io.close()
class SystemCSVReporterDumper(SystemReporter):
def writeFile(self, name, folder, collected_entry_list=[]):
""" Dump data as json """
file_io = open(os.path.join(folder, "system_%s.csv" % name), "w")
csv_output = csv.writer(file_io)
csv_output.writerow(["time", "entry"])
for collected_entry in collected_entry_list:
csv_output.writerow([collected_entry["time"], collected_entry["entry"]])
file_io.close()
class RawDumper(Dumper):
""" Dump raw data in a certain format
"""
def dump(self, folder):
date = strftime("%Y-%m-%d")
self.db.connect()
table_list = self.db.getTableList()
for date_scope, amount in self.db.getDateScopeList(ignore_date=date):
for table in table_list:
self.writeFile(table, folder, date_scope,
self.db.select(table, date_scope))
self.db.markDayAsReported(date_scope,
table_list=table_list)
self.db.commit()
self.db.close()
class RawCSVDumper(RawDumper):
def writeFile(self, name, folder, date_scope, rows):
mkdir_p(os.path.join(folder, date_scope))
file_io = open(os.path.join(folder, "%s/dump_%s.csv" % (date_scope, name)), "w")
csv_output = csv.writer(file_io)
csv_output.writerows(rows)
file_io.close()
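# Usage sketch (assumption): the dumpers are combined with a Database the same
# way do_collect() does it:
#
#   from slapos.collect.db import Database
#   database = Database(log_directory)
#   SystemCSVReporterDumper(database).dump(log_directory)
#   RawCSVDumper(database).dump(log_directory)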
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010, 2011, 2012 Vifib SARL and Contributors.
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import psutil
import os
class _Snapshot(object):
def get(self, property, default=None):
return getattr(self, property, default)
class ProcessSnapshot(_Snapshot):
""" Take a snapshot from the running process
"""
def __init__(self, process=None):
assert type(process) is psutil.Process
ui_counter_list = process.get_io_counters()
self.username = process.username()
self.pid = process.pid
# Build a stable process identifier from pid and creation time.
self.name = "%s-%s" % (process.pid, process.create_time())
# CPU percentage, we will have to get actual absolute value
self.cpu_percent = process.get_cpu_percent(None)
# CPU Time
self.cpu_time = sum(process.get_cpu_times())
# Thread number, might not be really relevant
self.cpu_num_threads = process.get_num_threads()
# Memory percentage
self.memory_percent = process.get_memory_percent()
# Resident Set Size, virtual memory size is not accounted for
self.memory_rss = process.get_memory_info()[0]
# Byte count, Read and write. OSX NOT SUPPORTED
self.io_rw_counter = ui_counter_list[2] + ui_counter_list[3]
# Read + write IO cycles
self.io_cycles_counter = ui_counter_list[0] + ui_counter_list[1]
class SystemSnapshot(_Snapshot):
""" Take a snapshot from current system usage
"""
def __init__(self):
memory = psutil.phymem_usage()
net_io = psutil.net_io_counters()
self.memory_used = memory.used
self.memory_free = memory.free
self.memory_percent = memory.percent
self.cpu_percent = psutil.cpu_percent()
self.load = os.getloadavg()[0]
self.net_in_bytes = net_io.bytes_recv
self.net_in_errors = net_io.errin
self.net_in_dropped = net_io.dropin
self.net_out_bytes = net_io.bytes_sent
self.net_out_errors = net_io.errout
self.net_out_dropped = net_io.dropout
class DiskPartitionSnapshot(_Snapshot):
""" Take Snapshot from general disk partitions
usage
"""
def __init__(self, partition, mountpoint):
self.partition = partition
self.mountpoint_list = [ mountpoint ]
disk = psutil.disk_usage(mountpoint)
disk_io = psutil.disk_io_counters()
self.disk_size_used = disk.used
self.disk_size_free = disk.free
self.disk_size_percent = disk.percent
class ComputerSnapshot(_Snapshot):
""" Take a snapshot from computer informations
"""
def __init__(self):
self.cpu_num_core = psutil.NUM_CPUS
self.cpu_frequency = 0
self.cpu_type = 0
self.memory_size = psutil.TOTAL_PHYMEM
self.memory_type = 0
#
# Include a SystemSnapshot and a list of DiskPartitionSnapshot
# on a Computer Snapshot
#
self.system_snapshot = SystemSnapshot()
self.disk_snapshot_list = []
self.partition_list = self._get_physical_disk_info()
def _get_physical_disk_info(self):
partition_dict = {}
for partition in psutil.disk_partitions():
if partition.device not in partition_dict:
usage = psutil.disk_usage(partition.mountpoint)
partition_dict[partition.device] = usage.total
self.disk_snapshot_list.append(
DiskPartitionSnapshot(partition.device,
partition.mountpoint))
return [(k, v) for k, v in partition_dict.iteritems()]
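# Usage sketch (assumption): snapshots are plain attribute holders read back
# through _Snapshot.get(), e.g.:
#
#   import os, psutil
#   snap = ProcessSnapshot(psutil.Process(os.getpid()))
#   print snap.get("cpu_percent"), snap.get("memory_rss")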
......@@ -39,6 +39,7 @@ import tempfile
import time
import traceback
import warnings
import logging
if sys.version_info < (2, 6):
warnings.warn('Used python version (%s) is old and has problems with'
......@@ -48,6 +49,7 @@ from lxml import etree
from slapos.slap.slap import NotFoundError
from slapos.slap.slap import ServerError
from slapos.util import mkdir_p, chownDirectory
from slapos.grid.exception import BuildoutFailedError
from slapos.grid.SlapObject import Software, Partition
from slapos.grid.svcbackend import launchSupervisord
......@@ -606,6 +608,23 @@ class Slapgrid(object):
os.remove(timestamp_path)
self.logger.exception('')
# Include Partition Logging
log_folder_path = "%s/.slapgrid/log" % instance_path
mkdir_p(log_folder_path)
partition_file_handler = logging.FileHandler(
filename="%s/instance.log" % (log_folder_path)
)
stat_info = os.stat(instance_path)
chownDirectory("%s/.slapgrid" % instance_path,
uid=stat_info.st_uid,
gid=stat_info.st_gid)
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)-8s %(name)s %(message)s')
partition_file_handler.setFormatter(formatter)
self.logger.addHandler(partition_file_handler)
try:
self.logger.info('Processing Computer Partition %s.' % computer_partition_id)
self.logger.info(' Software URL: %s' % software_url)
self.logger.info(' Software path: %s' % software_path)
......@@ -626,7 +645,6 @@ class Slapgrid(object):
certificate_repository_path=self.certificate_repository_path,
buildout=self.buildout,
logger=self.logger)
computer_partition_state = computer_partition.getState()
# XXX this line breaks 37 tests
......@@ -663,6 +681,8 @@ class Slapgrid(object):
(computer_partition_id, computer_partition_state)
computer_partition.error(error_string, logger=self.logger)
raise NotImplementedError(error_string)
finally:
self.logger.removeHandler(partition_file_handler)
# If partition has been successfully processed, write timestamp
if timestamp:
......@@ -893,7 +913,7 @@ class Slapgrid(object):
try:
computer_partition_id = computer_partition.getId()
#We want to execute all the script in the report folder
# We want to execute all the script in the report folder
instance_path = os.path.join(self.instance_root,
computer_partition.getId())
report_path = os.path.join(instance_path, 'etc', 'report')
......@@ -902,7 +922,7 @@ class Slapgrid(object):
else:
script_list_to_run = []
#We now generate the pseudorandom name for the xml file
# We now generate the pseudorandom name for the xml file
# and we add it in the invocation_list
f = tempfile.NamedTemporaryFile()
name_xml = '%s.%s' % ('slapreport', os.path.basename(f.name))
......@@ -914,13 +934,13 @@ class Slapgrid(object):
invocation_list = []
invocation_list.append(os.path.join(instance_path, 'etc', 'report',
script))
#We add the xml_file name to the invocation_list
# We add the xml_file name to the invocation_list
#f = tempfile.NamedTemporaryFile()
#name_xml = '%s.%s' % ('slapreport', os.path.basename(f.name))
#path_to_slapreport = os.path.join(instance_path, 'var', name_xml)
invocation_list.append(path_to_slapreport)
#Dropping privileges
# Dropping privileges
uid, gid = None, None
stat_info = os.stat(instance_path)
#stat sys call to get statistics informations
......@@ -946,21 +966,25 @@ class Slapgrid(object):
self.logger.exception('Cannot run usage script(s) for %r:' %
computer_partition.getId())
#Now we loop through the different computer partitions to report
# Now we loop through the different computer partitions to report
report_usage_issue_cp_list = []
for computer_partition in computer_partition_list:
try:
filename_delete_list = []
computer_partition_id = computer_partition.getId()
instance_path = os.path.join(self.instance_root, computer_partition_id)
dir_reports = os.path.join(instance_path, 'var', 'xml_report')
#The directory xml_report contain a number of files equal
#to the number of software instance running inside the same partition
dir_report_list = [os.path.join(instance_path, 'var', 'xml_report'),
os.path.join(self.instance_root, 'var', 'xml_report',
computer_partition_id)]
for dir_reports in dir_report_list:
# The directory xml_report contain a number of files equal
# to the number of software instance running inside the same partition
if os.path.isdir(dir_reports):
filename_list = os.listdir(dir_reports)
else:
filename_list = []
#self.logger.debug('name List %s' % filename_list)
# self.logger.debug('name List %s' % filename_list)
for filename in filename_list:
......@@ -968,7 +992,7 @@ class Slapgrid(object):
if os.path.exists(file_path):
usage = open(file_path, 'r').read()
#We check the validity of xml content of each reports
# We check the validity of xml content of each reports
if not self.validateXML(usage, partition_consumption_model):
self.logger.info('WARNING: The XML file %s generated by slapreport is '
'not valid - This report is left as is at %s where you can '
......@@ -984,7 +1008,7 @@ class Slapgrid(object):
else:
self.logger.debug('Usage report %r not found, ignored' % file_path)
#After sending the aggregated file we remove all the valid xml reports
# After sending the aggregated file we remove all the valid xml reports
for filename in filename_delete_list:
os.remove(os.path.join(dir_reports, filename))
......@@ -997,15 +1021,15 @@ class Slapgrid(object):
self.logger.info('computer_partition_usage_list: %s - %s' %
(computer_partition_usage.usage, computer_partition_usage.getId()))
#If there is, at least, one report
# If there is, at least, one report
if computer_partition_usage_list != []:
try:
#We generate the final XML report with asXML method
# We generate the final XML report with asXML method
computer_consumption = self.asXML(computer_partition_usage_list)
self.logger.info('Final xml report: %s' % computer_consumption)
#We test the XML report before sending it
# We test the XML report before sending it
if self.validateXML(computer_consumption, computer_consumption_model):
self.logger.info('XML file generated by asXML is valid')
slap_computer_usage.reportUsage(computer_consumption)
......
##############################################################################
#
# Copyright (c) 2014 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import glob
import unittest
import shutil
import tempfile
import slapos.slap
import psutil
from time import strftime
from slapos.cli.collect import CollectCommand
from slapos.collect import entity, snapshot, db, reporter
from slapos.cli.entry import SlapOSApp
from argparse import Namespace
from ConfigParser import ConfigParser
class FakeDatabase(object):
def __init__(self):
self.invoked_method_list = []
def connect(self):
self.invoked_method_list.append(("connect", ""))
def close(self):
self.invoked_method_list.append(("close", ""))
def commit(self):
self.invoked_method_list.append(("commit", ""))
def insertUserSnapshot(self, *args, **kw):
self.invoked_method_list.append(("insertUserSnapshot", (args, kw)))
def insertSystemSnapshot(self, *args, **kw):
self.invoked_method_list.append(("insertSystemSnapshot", (args, kw)))
def insertComputerSnapshot(self, *args, **kw):
self.invoked_method_list.append(("insertComputerSnapshot", (args, kw)))
def insertDiskPartitionSnapshot(self, *args, **kw):
self.invoked_method_list.append(("insertDiskPartitionSnapshot", (args, kw)))
class TestCollectDatabase(unittest.TestCase):
def setUp(self):
self.instance_root = tempfile.mkdtemp()
def tearDown(self):
if os.path.exists(self.instance_root):
shutil.rmtree(self.instance_root)
def test_database_bootstrap(self):
self.assertFalse(os.path.exists(
"%s/collector.db" % self.instance_root ))
database = db.Database(self.instance_root)
database.connect()
try:
self.assertEquals(
[u'user', u'computer', u'system', u'disk'],
database.getTableList())
finally:
database.close()
self.assertTrue(os.path.exists(
"%s/collector.db" % self.instance_root ))
def test_insert_user_snapshot(self):
database = db.Database(self.instance_root)
database.connect()
try:
database.insertUserSnapshot(
'fakeuser0', 10, '10-12345', '0.1', '10.0', '1',
'10.0', '10.0', '0.1', '0.1', 'DATE', 'TIME')
database.commit()
self.assertEquals([i for i in database.select('user')],
[(u'fakeuser0', 10.0, u'10-12345', 0.1, 10.0,
1.0, 10.0, 10.0, 0.1, 0.1, u'DATE', u'TIME', 0)])
finally:
database.close()
def test_insert_computer_snapshot(self):
database = db.Database(self.instance_root)
database.connect()
try:
database.insertComputerSnapshot(
'1', '0', '0', '100', '0', '/dev/sdx1', 'DATE', 'TIME')
database.commit()
self.assertEquals([i for i in database.select('computer')],
[(1.0, 0.0, u'0', 100.0, u'0', u'/dev/sdx1', u'DATE', u'TIME', 0)])
finally:
database.close()
def test_insert_disk_partition_snapshot(self):
database = db.Database(self.instance_root)
database.connect()
try:
database.insertDiskPartitionSnapshot(
'/dev/sdx1', '10', '20', '/mnt', 'DATE', 'TIME')
database.commit()
self.assertEquals([i for i in database.select('disk')],
[(u'/dev/sdx1', u'10', u'20', u'/mnt', u'DATE', u'TIME', 0)])
finally:
database.close()
def test_insert_system_snapshot(self):
database = db.Database(self.instance_root)
database.connect()
try:
database.insertSystemSnapshot("0.1", '10.0', '100.0', '100.0',
'10.0', '1', '2', '12.0', '1', '1', 'DATE', 'TIME')
database.commit()
self.assertEquals([i for i in database.select('system')],
[(0.1, 10.0, 100.0, 100.0, 10.0, 1.0,
2.0, 12.0, 1.0, 1.0, u'DATE', u'TIME', 0)])
finally:
database.close()
def test_date_scope(self):
database = db.Database(self.instance_root)
database.connect()
try:
database.insertSystemSnapshot("0.1", '10.0', '100.0', '100.0',
'10.0', '1', '2', '12.0', '1', '1', 'EXPECTED-DATE', 'TIME')
database.commit()
self.assertEquals([i for i in database.getDateScopeList()],
[('EXPECTED-DATE', 1)])
self.assertEquals([i for i in \
database.getDateScopeList(ignore_date='EXPECTED-DATE')],
[])
self.assertEquals([i for i in \
database.getDateScopeList(reported=1)],
[])
finally:
database.close()
def test_mark_day_as_reported(self):
database = db.Database(self.instance_root)
database.connect()
try:
database.insertSystemSnapshot("0.1", '10.0', '100.0', '100.0',
'10.0', '1', '2', '12.0', '1', '1', 'EXPECTED-DATE', 'TIME')
database.insertSystemSnapshot("0.1", '10.0', '100.0', '100.0',
'10.0', '1', '2', '12.0', '1', '1', 'NOT-EXPECTED-DATE', 'TIME')
database.commit()
self.assertEquals([i for i in database.select('system')],
[(0.1, 10.0, 100.0, 100.0, 10.0, 1.0,
2.0, 12.0, 1.0, 1.0, u'EXPECTED-DATE', u'TIME', 0),
(0.1, 10.0, 100.0, 100.0, 10.0, 1.0,
2.0, 12.0, 1.0, 1.0, u'NOT-EXPECTED-DATE', u'TIME', 0)])
database.markDayAsReported(date_scope="EXPECTED-DATE",
table_list=["system"])
database.commit()
self.assertEquals([i for i in database.select('system')],
[(0.1, 10.0, 100.0, 100.0, 10.0, 1.0,
2.0, 12.0, 1.0, 1.0, u'EXPECTED-DATE', u'TIME', 1),
(0.1, 10.0, 100.0, 100.0, 10.0, 1.0,
2.0, 12.0, 1.0, 1.0, u'NOT-EXPECTED-DATE', u'TIME', 0)])
finally:
database.close()
class TestCollectReport(unittest.TestCase):
def setUp(self):
self.instance_root = tempfile.mkdtemp()
def tearDown(self):
if os.path.exists(self.instance_root):
shutil.rmtree(self.instance_root)
def test_raw_csv_report(self):
database = db.Database(self.instance_root)
database.connect()
database.insertSystemSnapshot("0.1", '10.0', '100.0', '100.0',
'10.0', '1', '2', '12.0', '1', '1', '1983-01-10', 'TIME')
database.insertDiskPartitionSnapshot(
'/dev/sdx1', '10', '20', '/mnt', '1983-01-10', 'TIME')
database.insertComputerSnapshot(
'1', '0', '0', '100', '0', '/dev/sdx1', '1983-01-10', 'TIME')
database.commit()
database.close()
reporter.RawCSVDumper(database).dump(self.instance_root)
self.assertTrue(os.path.exists("%s/1983-01-10" % self.instance_root))
csv_path_list = ['%s/1983-01-10/dump_disk.csv' % self.instance_root,
'%s/1983-01-10/dump_computer.csv' % self.instance_root,
'%s/1983-01-10/dump_user.csv' % self.instance_root,
'%s/1983-01-10/dump_system.csv' % self.instance_root]
self.assertEquals(glob.glob("%s/1983-01-10/*.csv" % self.instance_root),
csv_path_list)
def test_system_csv_report(self):
database = db.Database(self.instance_root)
database.connect()
database.insertSystemSnapshot("0.1", '10.0', '100.0', '100.0',
'10.0', '1', '2', '12.0', '1', '1', strftime("%Y-%m-%d"), 'TIME')
database.insertDiskPartitionSnapshot(
'/dev/sdx1', '10', '20', '/mnt', strftime("%Y-%m-%d"), 'TIME')
database.insertComputerSnapshot(
'1', '0', '0', '100', '0', '/dev/sdx1', strftime("%Y-%m-%d"), 'TIME')
database.commit()
database.close()
reporter.SystemCSVReporterDumper(database).dump(self.instance_root)
csv_path_list = ['%s/system_memory_used.csv' % self.instance_root,
'%s/system_cpu_percent.csv' % self.instance_root,
'%s/system_net_out_bytes.csv' % self.instance_root,
'%s/system_net_in_bytes.csv' % self.instance_root,
'%s/system_disk_memory_free_sdx1.csv' % self.instance_root,
'%s/system_net_out_errors.csv' % self.instance_root,
'%s/system_disk_memory_used_sdx1.csv' % self.instance_root,
'%s/system_net_out_dropped.csv' % self.instance_root,
'%s/system_memory_free.csv' % self.instance_root,
'%s/system_net_in_errors.csv' % self.instance_root,
'%s/system_net_in_dropped.csv' % self.instance_root,
'%s/system_loadavg.csv' % self.instance_root]
self.assertEquals(glob.glob("%s/*.csv" % self.instance_root), csv_path_list)
class TestCollectSnapshot(unittest.TestCase):
def setUp(self):
self.slap = slapos.slap.slap()
self.app = SlapOSApp()
self.temp_dir = tempfile.mkdtemp()
os.environ["HOME"] = self.temp_dir
self.instance_root = tempfile.mkdtemp()
self.software_root = tempfile.mkdtemp()
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
def test_process_snapshot(self):
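# Snapshot the running test process itself, so every field has real data.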
process = psutil.Process(os.getpid())
process_snapshot = snapshot.ProcessSnapshot(process)
self.assertNotEquals(process_snapshot.username, None)
self.assertEquals(int(process_snapshot.pid), os.getpid())
self.assertEquals(int(process_snapshot.name.split("-")[0]),
os.getpid())
self.assertNotEquals(process_snapshot.cpu_percent, None)
self.assertNotEquals(process_snapshot.cpu_time, None)
self.assertNotEquals(process_snapshot.cpu_num_threads, None)
self.assertNotEquals(process_snapshot.memory_percent, None)
self.assertNotEquals(process_snapshot.memory_rss, None)
self.assertNotEquals(process_snapshot.io_rw_counter, None)
self.assertNotEquals(process_snapshot.io_cycles_counter, None)
def test_process_snapshot_broken_process(self):
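# Passing None instead of a psutil.Process should fail fast with an AssertionError.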
self.assertRaises(AssertionError,
snapshot.ProcessSnapshot, None)
def test_computer_snapshot(self):
computer_snapshot = snapshot.ComputerSnapshot()
self.assertNotEquals(computer_snapshot.cpu_num_core, None)
self.assertNotEquals(computer_snapshot.cpu_frequency, None)
self.assertNotEquals(computer_snapshot.cpu_type, None)
self.assertNotEquals(computer_snapshot.memory_size, None)
self.assertNotEquals(computer_snapshot.memory_type, None)
self.assertEquals(type(computer_snapshot.system_snapshot),
snapshot.SystemSnapshot)
self.assertNotEquals(computer_snapshot.disk_snapshot_list, [])
self.assertNotEquals(computer_snapshot.partition_list, [])
self.assertEquals(type(computer_snapshot.disk_snapshot_list[0]),
snapshot.DiskPartitionSnapshot)
def test_system_snapshot(self):
system_snapshot = snapshot.SystemSnapshot()
self.assertNotEquals(system_snapshot.memory_used, None)
self.assertNotEquals(system_snapshot.memory_free, None)
self.assertNotEquals(system_snapshot.memory_percent, None)
self.assertNotEquals(system_snapshot.cpu_percent, None)
self.assertNotEquals(system_snapshot.load, None)
self.assertNotEquals(system_snapshot.net_in_bytes, None)
self.assertNotEquals(system_snapshot.net_in_errors, None)
self.assertNotEquals(system_snapshot.net_in_dropped, None)
self.assertNotEquals(system_snapshot.net_out_bytes, None)
self.assertNotEquals(system_snapshot.net_out_errors, None)
self.assertNotEquals(system_snapshot.net_out_dropped, None)
class TestCollectEntity(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
os.environ["HOME"] = self.temp_dir
self.instance_root = tempfile.mkdtemp()
self.software_root = tempfile.mkdtemp()
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
def tearDown(self):
pass
def getFakeUser(self):
return entity.User("fakeuser0",
"%s/fakeuser0" % self.instance_root )
def test_get_user_list(self):
config = ConfigParser()
config.add_section('slapformat')
config.set('slapformat', 'partition_amount', '3')
config.set('slapformat', 'user_base_name', 'slapuser')
config.set('slapformat', 'partition_base_name', 'slappart')
config.add_section('slapos')
config.set('slapos', 'instance_root', self.instance_root)
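# get_user_list() should build one User per configured partition, named
# user_base_name + index (slapuser0..slapuser2 here).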
user_dict = entity.get_user_list(config)
username_list = ['slapuser0', 'slapuser1', 'slapuser2']
self.assertItemsEqual(username_list, user_dict.keys())
for name in username_list:
self.assertEquals(user_dict[name].name, name)
self.assertEquals(user_dict[name].snapshot_list, [])
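# Note: str.strip("slapuser") removes those characters from both ends of the
# name, which here leaves just the numeric index (e.g. 'slapuser0' -> '0').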
expected_path = "%s/slappart%s" % (self.instance_root, name.strip("slapuser"))
self.assertEquals(user_dict[name].path, expected_path)
def test_user_add_snapshot(self):
user = self.getFakeUser()
self.assertEquals(user.snapshot_list, [])
user.append("SNAPSHOT")
self.assertEquals(user.snapshot_list, ["SNAPSHOT"])
def test_user_save(self):
user = self.getFakeUser()
process = psutil.Process(os.getpid())
user.append(snapshot.ProcessSnapshot(process))
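# FakeDatabase (a test stub) records each call as a (method name, arguments)
# entry that the assertions below inspect.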
database = FakeDatabase()
user.save(database, "DATE", "TIME")
self.assertEquals(database.invoked_method_list[0], ("connect", ""))
self.assertEquals(database.invoked_method_list[1][0], "insertUserSnapshot")
self.assertEquals(database.invoked_method_list[1][1][0], ("fakeuser0",))
self.assertEquals(database.invoked_method_list[1][1][1].keys(),
['cpu_time', 'cpu_percent', 'process',
'memory_rss', 'pid', 'memory_percent',
'io_rw_counter', 'insertion_date', 'insertion_time',
'io_cycles_counter', 'cpu_num_threads'])
self.assertEquals(database.invoked_method_list[2], ("commit", ""))
self.assertEquals(database.invoked_method_list[3], ("close", ""))
def test_computer_entity(self):
computer = entity.Computer(snapshot.ComputerSnapshot())
database = FakeDatabase()
computer.save(database, "DATE", "TIME")
self.assertEquals(database.invoked_method_list[0], ("connect", ""))
self.assertEquals(database.invoked_method_list[1][0], "insertComputerSnapshot")
self.assertEquals(database.invoked_method_list[1][1][0], ())
self.assertEquals(database.invoked_method_list[1][1][1].keys(),
['insertion_time', 'insertion_date', 'cpu_num_core',
'partition_list', 'cpu_frequency', 'memory_size',
'cpu_type', 'memory_type'])
self.assertEquals(database.invoked_method_list[2][0], "insertSystemSnapshot")
self.assertEquals(database.invoked_method_list[2][1][0], ())
self.assertEquals(set(database.invoked_method_list[2][1][1].keys()),
set([ 'memory_used', 'cpu_percent', 'insertion_date', 'insertion_time',
'loadavg', 'memory_free', 'net_in_bytes', 'net_in_dropped',
'net_in_errors', 'net_out_bytes', 'net_out_dropped',
'net_out_errors']))
self.assertEquals(database.invoked_method_list[3][0], "insertDiskPartitionSnapshot")
self.assertEquals(database.invoked_method_list[3][1][0], ())
self.assertEquals(set(database.invoked_method_list[3][1][1].keys()),
set([ 'used', 'insertion_date', 'partition', 'free',
'mountpoint', 'insertion_time' ]))
self.assertEquals(database.invoked_method_list[-2], ("commit", ""))
self.assertEquals(database.invoked_method_list[-1], ("close", ""))
......@@ -46,7 +46,6 @@ from mock import patch
import slapos.slap.slap
import slapos.grid.utils
from slapos.grid import slapgrid
from slapos.cli_legacy.slapgrid import parseArgumentTupleAndReturnSlapgridObject
from slapos.grid.utils import md5digest
from slapos.grid.watchdog import Watchdog
from slapos.grid import SlapObject
......@@ -543,7 +542,8 @@ class TestSlapgridCPWithMaster(MasterMixin, unittest.TestCase):
self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition), ['buildout.cfg', 'software_release', 'worked'])
self.assertItemsEqual(os.listdir(partition), ['.slapgrid', 'buildout.cfg',
'software_release', 'worked'])
self.assertItemsEqual(os.listdir(self.software_root), [instance.software.software_hash])
self.assertEqual(computer.sequence,
['getFullComputerInformation', 'availableComputerPartition',
......@@ -559,7 +559,8 @@ class TestSlapgridCPWithMaster(MasterMixin, unittest.TestCase):
self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition), ['buildout.cfg', 'software_release', 'worked'])
self.assertItemsEqual(os.listdir(partition), ['.slapgrid', 'buildout.cfg',
'software_release', 'worked'])
self.assertItemsEqual(os.listdir(self.software_root), [instance.software.software_hash])
self.assertEqual(computer.sequence,
['getFullComputerInformation', 'availableComputerPartition',
......@@ -588,7 +589,8 @@ class TestSlapgridCPWithMaster(MasterMixin, unittest.TestCase):
self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(partition.partition_path),
['.0_wrapper.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_wrapper.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
wrapper_log = os.path.join(partition.partition_path, '.0_wrapper.log')
self.assertLogContent(wrapper_log, 'Working')
self.assertItemsEqual(os.listdir(self.software_root), [partition.software.software_hash])
......@@ -623,7 +625,8 @@ chmod 755 etc/run/wrapper
self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(instance.partition_path),
['.0_wrapper.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_wrapper.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
wrapper_log = os.path.join(instance.partition_path, '.0_wrapper.log')
self.assertLogContent(wrapper_log, 'Working')
self.assertItemsEqual(os.listdir(self.software_root), [instance.software.software_hash])
......@@ -637,7 +640,8 @@ chmod 755 etc/run/wrapper
self.assertEqual(self.launchSlapgrid(), slapgrid.SLAPGRID_SUCCESS)
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(instance.partition_path),
['.0_wrapper.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_wrapper.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
self.assertLogContent(wrapper_log, 'Signal handler called with signal 15')
self.assertEqual(computer.sequence,
['getFullComputerInformation', 'availableComputerPartition',
......@@ -675,7 +679,8 @@ chmod 755 etc/run/wrapper
self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(instance.partition_path),
['.0_wrapper.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_wrapper.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
wrapper_log = os.path.join(instance.partition_path, '.0_wrapper.log')
self.assertLogContent(wrapper_log, 'Working')
self.assertItemsEqual(os.listdir(self.software_root),
......@@ -694,7 +699,8 @@ exit 1
self.assertItemsEqual(os.listdir(self.instance_root),
['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(instance.partition_path),
['.0_wrapper.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_wrapper.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
self.assertLogContent(wrapper_log, 'Signal handler called with signal 15')
self.assertEqual(computer.sequence,
['getFullComputerInformation',
......@@ -711,7 +717,7 @@ exit 1
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition),
['buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', 'buildout.cfg', 'etc', 'software_release', 'worked'])
self.assertItemsEqual(os.listdir(self.software_root),
[instance.software.software_hash])
self.assertEqual(computer.sequence,
......@@ -725,7 +731,8 @@ exit 1
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition),
['.0_wrapper.log', 'etc', 'buildout.cfg', 'software_release', 'worked'])
['.slapgrid', '.0_wrapper.log', 'etc',
'buildout.cfg', 'software_release', 'worked'])
self.assertItemsEqual(os.listdir(self.software_root),
[instance.software.software_hash])
wrapper_log = os.path.join(instance.partition_path, '.0_wrapper.log')
......@@ -752,7 +759,7 @@ exit 1
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition), [dummy_file_name])
self.assertItemsEqual(os.listdir(partition), ['.slapgrid', dummy_file_name])
self.assertItemsEqual(os.listdir(self.software_root), [instance.software.software_hash])
self.assertEqual(computer.sequence,
['getFullComputerInformation',
......@@ -796,7 +803,8 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(partition.partition_path),
['.0_daemon.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_daemon.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
daemon_log = os.path.join(partition.partition_path, '.0_daemon.log')
self.assertLogContent(daemon_log, 'Failing')
self.assertIsCreated(self.watchdog_banged)
......@@ -840,7 +848,8 @@ class TestSlapgridCPWithMasterWatchdog(MasterMixin, unittest.TestCase):
self.assertItemsEqual(os.listdir(self.instance_root),
['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(partition.partition_path),
['.0_daemon.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_daemon.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
daemon_log = os.path.join(partition.partition_path, '.0_daemon.log')
self.assertLogContent(daemon_log, 'Failing')
self.assertIsNotCreated(self.watchdog_banged)
......@@ -1127,7 +1136,7 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase):
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition),
['.timestamp', 'buildout.cfg', 'software_release', 'worked'])
['.slapgrid', '.timestamp', 'buildout.cfg', 'software_release', 'worked'])
self.assertItemsEqual(os.listdir(self.software_root), [instance.software.software_hash])
timestamp_path = os.path.join(instance.partition_path, '.timestamp')
self.setSlapgrid()
......@@ -1146,7 +1155,8 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase):
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition),
['.timestamp', 'buildout.cfg', 'software_release', 'worked'])
['.slapgrid', '.timestamp', 'buildout.cfg',
'software_release', 'worked'])
self.assertItemsEqual(os.listdir(self.software_root), [instance.software.software_hash])
self.assertEqual(self.launchSlapgrid(develop=True),
......@@ -1167,7 +1177,7 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase):
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition),
['.timestamp', 'buildout.cfg', 'software_release', 'worked'])
['.slapgrid', '.timestamp', 'buildout.cfg', 'software_release', 'worked'])
self.assertItemsEqual(os.listdir(self.software_root), [instance.software.software_hash])
instance.timestamp = str(int(timestamp) - 1)
self.assertEqual(self.launchSlapgrid(), slapgrid.SLAPGRID_SUCCESS)
......@@ -1184,7 +1194,7 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase):
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition),
['.timestamp', 'buildout.cfg', 'software_release', 'worked'])
['.slapgrid', '.timestamp', 'buildout.cfg', 'software_release', 'worked'])
self.assertItemsEqual(os.listdir(self.software_root), [instance.software.software_hash])
instance.timestamp = str(int(timestamp) + 1)
self.assertEqual(self.launchSlapgrid(), slapgrid.SLAPGRID_SUCCESS)
......@@ -1205,7 +1215,7 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase):
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition),
['.timestamp', 'buildout.cfg', 'software_release', 'worked'])
['.slapgrid', '.timestamp', 'buildout.cfg', 'software_release', 'worked'])
self.assertItemsEqual(os.listdir(self.software_root),
[instance.software.software_hash])
instance.timestamp = None
......@@ -1231,7 +1241,8 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase):
self.launchSlapgrid()
partition = os.path.join(self.instance_root, '0')
self.assertItemsEqual(os.listdir(partition),
['.timestamp', 'buildout.cfg', 'software_release', 'worked'])
['.slapgrid', '.timestamp', 'buildout.cfg',
'software_release', 'worked'])
time.sleep(2)
# dummify install() so that it doesn't actually do anything so that it
......@@ -1240,9 +1251,8 @@ class TestSlapgridCPPartitionProcessing(MasterMixin, unittest.TestCase):
self.launchSlapgrid()
self.assertItemsEqual(os.listdir(partition),
['.timestamp', 'buildout.cfg', 'software_release', 'worked'])
['.slapgrid', '.timestamp', 'buildout.cfg',
'software_release', 'worked'])
def test_one_partition_periodicity_from_file_does_not_disturb_others(self):
"""
......@@ -1514,7 +1524,8 @@ class TestSlapgridUsageReport(MasterMixin, unittest.TestCase):
self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(instance.partition_path),
['.0_wrapper.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_wrapper.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
wrapper_log = os.path.join(instance.partition_path, '.0_wrapper.log')
self.assertLogContent(wrapper_log, 'Working')
self.assertItemsEqual(os.listdir(self.software_root), [instance.software.software_hash])
......@@ -1582,7 +1593,8 @@ class TestSlapgridUsageReport(MasterMixin, unittest.TestCase):
self.assertEqual(self.grid.processComputerPartitionList(), slapgrid.SLAPGRID_SUCCESS)
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(instance.partition_path),
['.0_wrapper.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_wrapper.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
wrapper_log = os.path.join(instance.partition_path, '.0_wrapper.log')
self.assertLogContent(wrapper_log, 'Working')
self.assertItemsEqual(os.listdir(self.software_root), [instance.software.software_hash])
......@@ -1597,12 +1609,14 @@ class TestSlapgridUsageReport(MasterMixin, unittest.TestCase):
self.assertEqual(self.grid.agregateAndSendUsage(), slapgrid.SLAPGRID_SUCCESS)
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(instance.partition_path),
['.0_wrapper.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_wrapper.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
wrapper_log = os.path.join(instance.partition_path, '.0_wrapper.log')
self.assertLogContent(wrapper_log, 'Working')
self.assertItemsEqual(os.listdir(self.instance_root), ['0', 'etc', 'var'])
self.assertItemsEqual(os.listdir(instance.partition_path),
['.0_wrapper.log', 'buildout.cfg', 'etc', 'software_release', 'worked'])
['.slapgrid', '.0_wrapper.log', 'buildout.cfg',
'etc', 'software_release', 'worked'])
wrapper_log = os.path.join(instance.partition_path, '.0_wrapper.log')
self.assertLogContent(wrapper_log, 'Working')
self.assertEqual(computer.sequence,
......@@ -1710,187 +1724,6 @@ buildout = /path/to/buildout/binary
shutil.rmtree(self.certificate_repository_path, True)
class TestSlapgridArgumentTuple(SlapgridInitialization):
"""
Test suite about arguments given to slapgrid command.
"""
def test_empty_argument_tuple(self):
"""
Raises if the argument list is empty and no configuration file is given.
"""
parser = parseArgumentTupleAndReturnSlapgridObject
# XXX: SystemExit is too generic an exception; it only tells us that
# something went wrong
self.assertRaises(SystemExit, parser, *())
def test_default_argument_tuple(self):
"""
Check that the slapgrid object is returned when given only the minimum
arguments.
"""
parser = parseArgumentTupleAndReturnSlapgridObject
return_list = parser(*self.default_arg_tuple)
self.assertEquals(2, len(return_list))
def test_signature_private_key_file_non_exists(self):
"""
Raises if the signature_private_key_file does not exist.
"""
parser = parseArgumentTupleAndReturnSlapgridObject
argument_tuple = ("--signature_private_key_file",
"/non/exists/path") + self.default_arg_tuple
self.assertRaisesRegexp(RuntimeError,
"File '/non/exists/path' does not exist.",
parser, *argument_tuple)
def test_signature_private_key_file(self):
"""
Check that the signature private key argument value is available on the
slapgrid object.
"""
parser = parseArgumentTupleAndReturnSlapgridObject
argument_tuple = ("--signature_private_key_file",
self.signature_key_file_descriptor.name) + self.default_arg_tuple
slapgrid_object = parser(*argument_tuple)[0]
self.assertEquals(self.signature_key_file_descriptor.name,
slapgrid_object.signature_private_key_file)
def test_backward_compatibility_all(self):
"""
Check that giving --all enables the "develop" option.
"""
parser = parseArgumentTupleAndReturnSlapgridObject
slapgrid_object = parser('--all', *self.default_arg_tuple)[0]
self.assertTrue(slapgrid_object.develop)
def test_backward_compatibility_not_all(self):
"""
Check that giving neither --all nor --develop leaves the "develop"
option disabled.
"""
parser = parseArgumentTupleAndReturnSlapgridObject
slapgrid_object = parser(*self.default_arg_tuple)[0]
self.assertFalse(slapgrid_object.develop)
def test_upload_binary_cache_blacklist(self):
"""
Check that giving --upload-to-binary-cache-url-blacklist sets the option.
"""
self.slapos_config_descriptor.write("""
[slapos]
software_root = /opt/slapgrid
instance_root = /srv/slapgrid
master_url = https://slap.vifib.com/
computer_id = your computer id
buildout = /path/to/buildout/binary
[networkcache]
upload-to-binary-cache-url-blacklist =
http://1
http://2/bla
""")
self.slapos_config_descriptor.seek(0)
slapgrid_object = parseArgumentTupleAndReturnSlapgridObject(
*self.default_arg_tuple)[0]
self.assertEqual(
slapgrid_object.upload_to_binary_cache_url_blacklist,
['http://1', 'http://2/bla']
)
self.assertEqual(
slapgrid_object.download_from_binary_cache_url_blacklist,
[]
)
def test_download_binary_cache_blacklist(self):
"""
Check that giving --download-from-binary-cache-url-blacklist sets the option.
"""
self.slapos_config_descriptor.write("""
[slapos]
software_root = /opt/slapgrid
instance_root = /srv/slapgrid
master_url = https://slap.vifib.com/
computer_id = your computer id
buildout = /path/to/buildout/binary
[networkcache]
download-from-binary-cache-url-blacklist =
http://1
http://2/bla
""")
self.slapos_config_descriptor.seek(0)
slapgrid_object = parseArgumentTupleAndReturnSlapgridObject(
*self.default_arg_tuple)[0]
self.assertEqual(
slapgrid_object.upload_to_binary_cache_url_blacklist,
[]
)
self.assertEqual(
slapgrid_object.download_from_binary_cache_url_blacklist,
['http://1', 'http://2/bla']
)
def test_upload_download_binary_cache_blacklist(self):
"""
Check that giving both --download-from-binary-cache-url-blacklist
and --upload-to-binary-cache-url-blacklist sets both options.
"""
self.slapos_config_descriptor.write("""
[slapos]
software_root = /opt/slapgrid
instance_root = /srv/slapgrid
master_url = https://slap.vifib.com/
computer_id = your computer id
buildout = /path/to/buildout/binary
[networkcache]
upload-to-binary-cache-url-blacklist =
http://1
http://2/bla
download-from-binary-cache-url-blacklist =
http://3
http://4/bla
""")
self.slapos_config_descriptor.seek(0)
slapgrid_object = parseArgumentTupleAndReturnSlapgridObject(
*self.default_arg_tuple)[0]
self.assertEqual(
slapgrid_object.upload_to_binary_cache_url_blacklist,
['http://1', 'http://2/bla']
)
self.assertEqual(
slapgrid_object.download_from_binary_cache_url_blacklist,
['http://3', 'http://4/bla']
)
def test_backward_compatibility_download_binary_cache_blacklist(self):
"""
Check that the deprecated binary-cache-url-blacklist option is mapped
to download-from-binary-cache-url-blacklist.
"""
self.slapos_config_descriptor.write("""
[slapos]
software_root = /opt/slapgrid
instance_root = /srv/slapgrid
master_url = https://slap.vifib.com/
computer_id = your computer id
buildout = /path/to/buildout/binary
[networkcache]
binary-cache-url-blacklist =
http://1
http://2/bla
""")
self.slapos_config_descriptor.seek(0)
slapgrid_object = parseArgumentTupleAndReturnSlapgridObject(
*self.default_arg_tuple)[0]
self.assertEqual(
slapgrid_object.upload_to_binary_cache_url_blacklist,
[]
)
self.assertEqual(
slapgrid_object.download_from_binary_cache_url_blacklist,
['http://1', 'http://2/bla']
)
class TestSlapgridCPWithMasterPromise(MasterMixin, unittest.TestCase):
def test_one_failing_promise(self):
computer = ComputerForTest(self.software_root, self.instance_root)
......