Commit 66998e0b authored by Thomas Gambier 🚴🏼

Update Release Candidate

parents d5d69e5e 4d23d2bc
Pipeline #39791 failed in 0 seconds
# Based on https://github.com/apache/httpd/commit/05297d7b00f0cb1f20d3ff14f4409097df2615fe
--- a/configure
+++ b/configure
@@ -6608,7 +6608,7 @@
if test "x$with_pcre" = "x" || test "$with_pcre" = "yes"; then
with_pcre="$PATH"
-else if which $with_pcre 2>/dev/null; then :; else
+else if command -v $with_pcre 2>/dev/null; then :; else
with_pcre="$with_pcre/bin:$with_pcre"
fi
fi
@@ -6710,11 +6710,11 @@
test -n "$ac_ct_PCRE_CONFIG" && break
done
-test -n "$ac_ct_PCRE_CONFIG" || ac_ct_PCRE_CONFIG="`which $with_pcre 2>/dev/null`"
+test -n "$ac_ct_PCRE_CONFIG" || ac_ct_PCRE_CONFIG="`command -v $with_pcre 2>/dev/null`"
PCRE_CONFIG=$ac_ct_PCRE_CONFIG
else
- PCRE_CONFIG="`which $with_pcre 2>/dev/null`"
+ PCRE_CONFIG="`command -v $with_pcre 2>/dev/null`"
fi
fi
\ No newline at end of file
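The patch above replaces the external `which` binary with the POSIX builtin `command -v`, so configure no longer depends on a tool that a minimal build environment may not provide. A rough Python analogue of the check, purely illustrative (shutil.which is the stdlib counterpart of `command -v`):

import shutil

# Locate pcre-config on PATH, as the patched configure test does;
# shutil.which returns None instead of failing when the tool is absent.
pcre_config = shutil.which('pcre-config')
if pcre_config is None:
    raise SystemExit('pcre-config not found in PATH')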
......@@ -47,6 +47,10 @@ shared = true
version = 2.4.62
url = https://archive.apache.org/dist/httpd/httpd-${:version}.tar.bz2
md5sum = cded7afa23c13c4854008d95a69ce016
patch-options = -p1
# XXX to be removed when we upgrade apache
patches =
${:_profile_base_location_}/apache_configure_which.patch#340f8cf5b66a08d90d95b72b85fe7824
configure-options = --disable-static
--enable-authn-alias
--enable-bucketeer
......@@ -100,7 +104,7 @@ configure-options = --disable-static
--with-apr-util=${apr-util:location}
environment =
PATH=${perl:location}/bin:${pkgconfig:location}/bin:%(PATH)s
PATH=${patch:location}/bin:${perl:location}/bin:${pkgconfig:location}/bin:%(PATH)s
PKG_CONFIG_PATH=${openssl:location}/lib/pkgconfig
CPPFLAGS=-I${libuuid:location}/include -I${openssl:location}/include -I${apr:location}/include -I${apr-util:location}/include
LDFLAGS=-Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${openssl:location}/lib -L${libuuid:location}/lib -Wl,-rpath=${libuuid:location}/lib -L${libxml2:location}/lib -Wl,-rpath=${libxml2:location}/lib -Wl,-rpath=${pcre:location}/lib -Wl,-rpath=${sqlite3:location}/lib -Wl,-rpath=${gdbm:location}/lib -L${apr:location}/lib -Wl,-rpath=${apr:location}/lib -L${apr-util:location}/lib -Wl,-rpath=${apr-util:location}/lib -L${libexpat:location}/lib -Wl,-rpath=${libexpat:location}/lib
......
......@@ -62,4 +62,4 @@ environment =
PATH=${pkgconfig:location}/bin:${xz-utils:location}/bin:%(PATH)s
PKG_CONFIG_PATH=${:pkg_config_depends}
CPPFLAGS=-I${zlib:location}/include -I${libexpat:location}/include
LDFLAGS=-L${bzip2:location}/lib -Wl,-rpath=${bzip2:location}/lib -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib -L${libexpat:location}/lib -Wl,-rpath=${libexpat:location}/lib -Wl,-rpath=${glib:location}/lib -Wl,-rpath=${pango:location}/lib
LDFLAGS=-L${bzip2:location}/lib -Wl,-rpath=${bzip2:location}/lib -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib -L${libexpat:location}/lib -Wl,-rpath=${libexpat:location}/lib -Wl,-rpath=${glib:location}/lib -Wl,-rpath=${pango:location}/lib -Wl,-rpath=${harfbuzz:location}/lib
......@@ -5,8 +5,8 @@ parts =
[libtasn1]
recipe = slapos.recipe.cmmi
shared = true
url = https://ftp.gnu.org/gnu/libtasn1/libtasn1-4.17.0.tar.gz
md5sum = c46f6eb3bd1287031ae5d36465094402
url = https://ftp.gnu.org/gnu/libtasn1/libtasn1-4.20.0.tar.gz
md5sum = 930f71d788cf37505a0327c1b84741be
configure-options =
--disable-static
--disable-gtk-doc-html
......@@ -28,7 +28,7 @@ from setuptools import setup, find_packages
import glob
import os
version = '1.0.394'
version = '1.0.397'
name = 'slapos.cookbook'
long_description = open("README.rst").read()
......@@ -145,7 +145,6 @@ setup(name=name,
'slapos.recipe.request:RequestOptionalJSONEncoded',
're6stnet.registry = slapos.recipe.re6stnet:Recipe',
'shell = slapos.recipe.shell:Recipe',
'signalwrapper= slapos.recipe.signal_wrapper:Recipe',
'simplelogger = slapos.recipe.simplelogger:Recipe',
'simplehttpserver = slapos.recipe.simplehttpserver:Recipe',
'slapconfiguration = slapos.recipe.slapconfiguration:Recipe',
......
from __future__ import print_function
import atexit
import errno
import sys
import os
import select
import signal
import subprocess
import time
......@@ -61,12 +63,43 @@ def _libc():
raise OSError(e, os.strerror(e))
return mount, unshare
def parse_signal(sig):
try:
try:
a, b = sig.split('+', 1)
except ValueError:
a = sig
b = 0
else:
b = int(b)
if a[0] != '_':
return getattr(signal, 'SIG' + a) + b
except Exception:
raise ValueError("invalid signal value: %s" % sig)
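# Illustrative usage (assumption: Linux signal names):
#   parse_signal('USR1')    == signal.SIGUSR1
#   parse_signal('RTMIN+1') == signal.SIGRTMIN + 1
# Names starting with '_' are never resolved, so signal module
# attributes such as SIG_IGN or SIG_DFL cannot be looked up by accident.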
def generic_exec(args, extra_environ=None, wait_list=None,
pidfile=None, reserve_cpu=False, private_tmpfs=(),
#shebang_workaround=False, # XXX: still needed ?
# signal name (e.g. "RTMIN+1") that should be specified
# if pidfile (defaults to "USR1" if redirect)
sig_ign=None,
# redirect stdout and/or stderr to files; upon USR1,
# these files are reopened (suitable for log rotation)
# and an optional signal is sent to the spawned process
redirect=None, # (signal, stdout, stderr)
):
"""
All the SIG_IGN stuff is to avoid being killed between:
- the moment some external software (like a logrotate configuration
snippet) can know the PID via pidfile;
- and when the process being started sets up its signal handler.
"""
args = list(args)
if redirect and not sig_ign:
sig_ign = "USR1"
if sig_ign:
sig_ign = parse_signal(sig_ign)
signal.signal(sig_ign, signal.SIG_IGN)
if pidfile:
import psutil
try:
......@@ -80,7 +113,14 @@ def generic_exec(args, extra_environ=None, wait_list=None,
n = len(args)
for i in six.moves.xrange(1+len(running)-n):
if args == running[i:n+i]:
sys.exit("Already running with pid %s." % pid)
return "Already running with pid %s." % pid
@atexit.register # some best effort clean-up, and it is
def _(): # expected that it does nothing upon execv.
try:
os.unlink(pidfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
with open(pidfile, 'w') as f:
f.write(str(os.getpid()))
......@@ -95,27 +135,99 @@ def generic_exec(args, extra_environ=None, wait_list=None,
if wait_list:
_wait_files_creation(wait_list)
if private_tmpfs:
mount, unshare = _libc()
CLONE_NEWNS = 0x00020000
CLONE_NEWUSER = 0x10000000
uid = os.getuid()
gid = os.getgid()
unshare(CLONE_NEWUSER |CLONE_NEWNS)
with open('/proc/self/setgroups', 'w') as f:
f.write('deny')
with open('/proc/self/uid_map', 'w') as f:
f.write('%s %s 1' % (uid, uid))
with open('/proc/self/gid_map', 'w') as f:
f.write('%s %s 1' % (gid, gid))
for size, path in private_tmpfs:
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
    raise
mount(b'tmpfs', path.encode(), b'tmpfs', 0, ('size=' + size).encode())
def preexec_fn():
if private_tmpfs:
mount, unshare = _libc()
CLONE_NEWNS = 0x00020000
CLONE_NEWUSER = 0x10000000
uid = os.getuid()
gid = os.getgid()
unshare(CLONE_NEWUSER |CLONE_NEWNS)
with open('/proc/self/setgroups', 'w') as f:
f.write('deny')
with open('/proc/self/uid_map', 'w') as f:
f.write('%s %s 1' % (uid, uid))
with open('/proc/self/gid_map', 'w') as f:
f.write('%s %s 1' % (gid, gid))
for size, path in private_tmpfs:
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
mount(b'tmpfs', path.encode(), b'tmpfs', 0, ('size=' + size).encode())
if redirect:
if sig != sig_ign:
signal.signal(sig_ign, signal.SIG_DFL)
signal.signal(sig, signal.SIG_IGN)
for fds in dup2:
os.dup2(*fds)
if redirect:
if extra_environ:
env = os.environ.copy()
env.update(extra_environ)
else:
env = None
sig, stdout, stderr = redirect
sig = parse_signal(sig)
r, trigger = os.pipe()
rfds = [r]
logs = []
dup2 = []
def reopen():
new = []
for path, fd in logs:
os.close(fd)
new.append((path,
os.open(path, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o666)))
logs[:] = new
def setup(fd, path):
r, w = os.pipe()
if w != fd:
dup2.append((w, fd))
logs.append((path, w))
rfds.append(r)
if stdout:
setup(1, stdout)
if stderr:
if stderr == stdout:
dup2.append((1, 2))
else:
setup(2, stderr)
# First, preexec_fn is called, then close_fds is processed,
# and at last, Popen returns.
process = subprocess.Popen(args, preexec_fn=preexec_fn, env=env,
close_fds=True) # PY3: this is the default
def sighandler(*_):
if sig:
process.send_signal(sig)
os.write(trigger, b'\0')
signal.signal(sig_ign, sighandler)
reopen()
while True:
try: # PY3: select internally retries on EINTR
r = select.select(rfds, (), ())[0]
except select.error as e:
if e.args[0] != errno.EINTR:
raise
assert six.PY2
continue
for r in r:
d = os.read(r, 1024)
i = rfds.index(r) - 1
if i < 0:
reopen()
elif d:
os.write(logs[i][1], d)
else:
os.close(logs.pop(i)[1])
os.close(rfds.pop(i+1))
if not logs:
signal.signal(sig_ign, signal.SIG_IGN)
return process.wait()
preexec_fn()
if extra_environ:
env = os.environ.copy()
env.update(extra_environ)
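# Hypothetical rotation sequence (illustrative, not part of this change):
# with pidfile=P and redirect=("HUP", LOG, None), sig_ign defaults to
# "USR1", so an external logrotate job could run:
#   os.rename(LOG, LOG + '.1')
#   os.kill(int(open(P).read()), signal.SIGUSR1)
# sighandler() then forwards SIGHUP to the child and writes to `trigger`,
# which wakes the select() loop above and makes it reopen() the log files.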
......
......@@ -153,10 +153,12 @@ class GenericBaseRecipe(object):
private_tmpfs.append(tuple(x))
return private_tmpfs
def createWrapper(self, path, args, env=None, **kw):
def createWrapper(self, path, args, env=None, sig_ign=None, **kw):
"""Create a wrapper script for process replacement"""
assert args
if kw:
if sig_ign:
kw['sig_ign'] = sig_ign
return self.createPythonScript(path,
'slapos.recipe.librecipe.execute.generic_exec',
(args, env) if env else (args,), kw)
......@@ -167,8 +169,10 @@ class GenericBaseRecipe(object):
# here (note that this can't be done correctly with a POSIX shell, because
# the process can't be given a name).
lines = ['#!/bin/sh']
lines = ['#!/bin/sh -e']
if sig_ign:
lines.append("trap '' " + sig_ign)
if env:
for k, v in sorted(six.iteritems(env)):
lines.append('export %s=%s' % (k, shlex.quote(v)))
......
......@@ -98,8 +98,9 @@ class NeoBaseRecipe(GenericBaseRecipe):
environment[k.rstrip()] = v.lstrip()
private_tmpfs = self.parsePrivateTmpfs()
kw = {'private_tmpfs': private_tmpfs} if private_tmpfs else {}
return self.createWrapper(options['wrapper'], args, env=environment, **kw)
return self.createWrapper(
options['wrapper'], args, env=environment, sig_ign="RTMIN+1",
**{'private_tmpfs': private_tmpfs} if private_tmpfs else {})
def _getBindingAddress(self):
options = self.options
......
# -*- coding: utf-8 -*-
import logging
import json
import os
import time
import slapos
import traceback
import logging
from re6st import registry
from pathlib import Path
from re6st import registry
log = logging.getLogger('SLAPOS-RE6STNET')
logging.basicConfig(level=logging.INFO)
logging.trace = logging.debug
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
def loadJsonFile(path):
if os.path.exists(path):
if path.exists():
with open(path, 'r') as f:
return json.load(f)
return {}
......@@ -24,7 +23,7 @@ def writeFile(path, data):
f.write(data)
def readFile(path):
if os.path.exists(path):
if path.exists():
with open(path, 'r') as f:
content = f.read()
return content
......@@ -45,77 +44,81 @@ def getComputerPartition(master_url, key_file, cert_file,
def requestAddToken(client, token_base_path):
time.sleep(3)
path_list = [x for x in os.listdir(token_base_path) if x.endswith('.add')]
path_list = [x for x in token_base_path.iterdir() if x.suffix == '.add']
log.info("Searching tokens to add at %s and found %s." % (token_base_path, path_list))
log.info("Searching tokens to add at %s and found %s.", token_base_path, path_list)
if not path_list:
log.info("No new token to add. Exiting...")
return
for reference_key in path_list:
request_file = os.path.join(token_base_path, reference_key)
request_file = token_base_path / reference_key
token = readFile(request_file)
log.info("Including token %s for %s" % (token, reference_key))
log.info("Including token %s for %s", token, reference_key)
if token :
reference = reference_key.split('.')[0]
reference = reference_key.stem
# email is unique as reference is also unique
email = '%s@slapos' % reference.lower()
try:
result = client.addToken(email, token)
except Exception:
log.info('Request add token fail for %s... \n %s' % (request_file,
traceback.format_exc()))
log.exception('Request add token fail for %s...', request_file)
continue
if result in (token, None):
# update information
log.info("New token added for slave instance %s. Updating file status..." %
log.info("New token added for slave instance %s. Updating file status...",
reference)
status_file = os.path.join(token_base_path, '%s.status' % reference)
status_file = (token_base_path / reference).with_suffix('.status')
updateFile(status_file, 'TOKEN_ADDED')
os.unlink(request_file)
request_file.unlink()
else:
log.debug('Bad token. Request add token fail for %s...' % request_file)
log.debug('Bad token. Request add token fail for %s...', request_file)
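# Pathlib idioms relied on above (illustrative values):
#   Path('t/K.add').suffix == '.add'          # was: x.endswith('.add')
#   Path('t/K.add').stem   == 'K'             # was: reference_key.split('.')[0]
#   (base / 'K').with_suffix('.status')       # was: os.path.join(base, 'K.status')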
def requestRemoveToken(client, token_base_path):
path_list = [x for x in os.listdir(token_base_path) if x.endswith('.remove')]
path_list = [x for x in token_base_path.iterdir() if x.suffix == '.remove']
if not path_list:
log.info("No token to delete. Exiting...")
return
for reference_key in path_list:
request_file = os.path.join(token_base_path, reference_key)
request_file = token_base_path / reference_key
token = readFile(request_file)
if token :
reference = reference_key.split('.')[0]
reference = reference_key.stem
try:
result = client.deleteToken(token)
except Exception:
log.debug('Request delete token fail for %s... \n %s' % (request_file,
traceback.format_exc()))
log.exception('Request delete token fail for %s...', request_file)
continue
else:
# certificate is invalidated, it will be revoked
writeFile(os.path.join(token_base_path, '%s.revoke' % reference), '')
if result in (True, 'True'):
# update information
log.info("Token deleted for slave instance %s. Clean up file status..." %
if not client.isToken(str(token)):
# Token has been destroyed or is already used, we can proceed to revoke the certificate
email = '%s@slapos' % reference.lower()
try:
cn = client.getNodePrefix(str(email))
except Exception:
log.exception('getNodePrefix for email %s failed', email)
continue
if cn:
try:
client.revoke(cn)
except Exception:
log.exception('Revoke cert with cn %s failed...', cn)
continue
log.info("Token deleted for slave instance %s. Clean up file status...",
reference)
if result in ['True', 'False']:
os.unlink(request_file)
status_file = os.path.join(token_base_path, '%s.status' % reference)
if os.path.exists(status_file):
os.unlink(status_file)
ipv6_file = os.path.join(token_base_path, '%s.ipv6' % reference)
if os.path.exists(ipv6_file):
os.unlink(ipv6_file)
request_file.unlink()
status_file = request_file.with_suffix('.status')
status_file.unlink()
ipv6_file = request_file.with_suffix('.ipv6')
ipv6_file.unlink(missing_ok=True)
else:
log.debug('Bad token. Request remove token fail for %s...' % request_file)
log.error('Bad token. Request remove token fail for %s...', request_file)
def checkService(client, token_base_path, token_json, computer_partition):
token_dict = loadJsonFile(token_json)
......@@ -125,9 +128,9 @@ def checkService(client, token_base_path, token_json, computer_partition):
# Check token status
for slave_reference, token in token_dict.items():
log.info("%s %s" % (slave_reference, token))
status_file = os.path.join(token_base_path, '%s.status' % slave_reference)
if not os.path.exists(status_file):
log.info("%s %s", slave_reference, token)
status_file = (token_base_path / slave_reference).with_suffix('.status')
if not status_file.exists():
# This token is not added yet!
log.info("Token %s dont exist yet." % status_file)
continue
......@@ -135,10 +138,10 @@ def checkService(client, token_base_path, token_json, computer_partition):
if not client.isToken(str(token)):
# Token is used to register client
updateFile(status_file, 'TOKEN_USED')
log.info("Token status of %s updated to 'used'." % slave_reference)
log.info("Token status of %s updated to 'used'.", slave_reference)
status = readFile(status_file)
log.info("Token %s has %s State." % (status_file, status))
log.info("Token %s has %s State.", status_file, status)
ipv6 = "::"
ipv4 = "0.0.0.0"
......@@ -153,20 +156,17 @@ def checkService(client, token_base_path, token_json, computer_partition):
try:
ipv6 = client.getIPv6Address(str(email)).decode()
except Exception:
log.info('Error for dump ipv6 for %s... \n %s' % (slave_reference,
traceback.format_exc()))
log.exception('Error for dump ipv6 for %s...', slave_reference)
log.info("%s, IPV6 = %s", slave_reference, ipv6)
log.info("%s, IPV6 = %s" % (slave_reference, ipv6))
try:
ipv4 = client.getIPv4Information(str(email)).decode() or "0.0.0.0"
except Exception:
log.info('Error for dump ipv4 for %s... \n %s' % (slave_reference,
traceback.format_exc()))
log.exception('Error for dump ipv4 for %s...', slave_reference)
log.info("%s, IPV4 = %s" % (slave_reference, ipv4))
try:
log.info("Update parameters for %s" % slave_reference)
log.info("Update parameters for %s", slave_reference)
# Normalise the values as simple strings to be on the same format that
# the values which come from master.
......@@ -176,13 +176,13 @@ def checkService(client, token_base_path, token_json, computer_partition):
'ipv4': str(ipv4)},
slave_reference)
except Exception:
log.fatal("Error while sending slave %s informations: %s",
slave_reference, traceback.format_exc())
log.exception("Error while sending slave %s information",
slave_reference)
def manage(registry_url, token_base_path, token_json,
computer_dict, can_bang=True):
token_base_path = Path(token_base_path)
client = registry.RegistryClient(registry_url)
log.info("ADD TOKEN")
......@@ -197,5 +197,5 @@ def manage(registry_url, token_base_path, token_json,
log.info("Update Services")
# check status of all token
checkService(client, token_base_path, token_json, computer_partition)
checkService(client, token_base_path, Path(token_json), computer_partition)
##############################################################################
#
# Copyright (c) 2012 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from slapos.recipe.librecipe import GenericBaseRecipe
class Recipe(GenericBaseRecipe):
def install(self):
return [
self.createPythonScript(
self.options['wrapper-path'],
'slapos.recipe.librecipe.execute.execute_with_signal_translation',
((self.options['wrapped-path'],),)
)
]
......@@ -27,6 +27,7 @@
import os, shlex
from slapos.recipe.librecipe import GenericBaseRecipe, generateHashFromFiles
from slapos.recipe.librecipe.execute import parse_signal
from zc.buildout import UserError
class Recipe(GenericBaseRecipe):
......@@ -40,6 +41,9 @@ class Recipe(GenericBaseRecipe):
:param str pidfile: path to pidfile ensure exclusivity for the process
:param lines private-tmpfs: list of "<size> <path>" private tmpfs, using user namespaces
:param bool reserve-cpu: command will ask for an exclusive CPU core
:param str sig-ign: see slapos.recipe.librecipe.execute.generic_exec
:param str redirect-{signal,stdout,stderr}: set 'redirect' param
to slapos.recipe.librecipe.execute.generic_exec
"""
_existing = ()
......@@ -60,6 +64,20 @@ class Recipe(GenericBaseRecipe):
hash_files = hash_files.split()
options['__hash_files__'] = generateHashFromFiles(hash_files)
self.hash_files += hash_files
sig = options.get('sig-ign')
if sig:
parse_signal(sig)
self.sig_ign = sig
redirect = tuple(options.get('redirect-' + x)
for x in ('signal', 'stdout', 'stderr',))
if any(redirect):
sig = redirect[0]
if sig:
parse_signal(sig)
if not any(redirect[1:]):
raise UserError(
"redirect-signal without redirecting any output makes no sense")
self.redirect = redirect
def getWrapperPath(self):
wrapper_path = self.options['wrapper-path']
......@@ -94,6 +112,11 @@ class Recipe(GenericBaseRecipe):
kw['private_tmpfs'] = private_tmpfs
if self.isTrueValue(options.get('reserve-cpu')):
kw['reserve_cpu'] = True
for x in 'redirect', 'sig_ign':
try:
kw[x] = getattr(self, x)
except AttributeError:
pass
return self.createWrapper(self.getWrapperPath(),
args, environment, **kw)
......
......@@ -19,4 +19,4 @@ md5sum = 0d6db8da45bbdf311f9c6a2f916045a2
[template-default]
filename = instance-default.cfg.in
md5sum = 339a47644377509a754c9ead038728a6
md5sum = df90202fce312b66c9c8bdfa0b95f90c
......@@ -84,13 +84,14 @@ recipe = slapos.recipe.template
inline =
{
"public":true,
"op": [{"username":"$${admin-password:username}","password":"$${admin-password:passwd}"}],
"other": [],
"presenter": [{"username": "", "password": "{{ presenter_password }}"}],
"users":{
"$${admin-password:username}": {"password":"$${admin-password:passwd}", "permissions": "op"}
},
"wildcard-user": {"password": "{{ presenter_password }}", "permissions": "present"},
{%- if slapparameter_dict.get("allow_subgroups", False) %}
"allow-subgroups": true
"auto-subgroups": true
{%- else %}
"allow-subgroups": false
"auto-subgroups": false
{%- endif %}
}
output = $${directory:groups}/public.json
......
......@@ -40,4 +40,4 @@ output = ${buildout:directory}/template-default.cfg
<= go-git-package
go.importpath = lab.nexedi.com/nexedi/galene
repository = https://lab.nexedi.com/nexedi/galene.git
revision = galene-0.8
revision = galene-0.96.2
......@@ -7418,13 +7418,24 @@ if __name__ == '__main__':
class HTTP6Server(backend.ThreadedHTTPServer):
address_family = socket.AF_INET6
ip, port = sys.argv[1], int(sys.argv[2])
if len(sys.argv) > 3:
ssl_certificate = sys.argv[3]
scheme = 'https'
else:
ssl_certificate = None
scheme = 'http'
if ':' in ip:
klass = HTTP6Server
url_template = 'http://[%s]:%s/'
url_template = '%s://[%s]:%s/'
else:
klass = backend.ThreadedHTTPServer
url_template = 'http://%s:%s/'
url_template = '%s://%s:%s/'
server = klass((ip, port), backend.TestHandler)
print((url_template % server.server_address[:2]))
if ssl_certificate is not None:
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(ssl_certificate)
server.socket = context.wrap_socket(server.socket, server_side=True)
print((url_template % (scheme, *server.server_address[:2])))
server.serve_forever()
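# Illustrative invocations of this test helper (file names made up);
# the optional third argument is a combined certificate+key PEM file,
# without it the server keeps serving plain HTTP:
#   python httpserver.py 127.0.0.1 8080
#   python httpserver.py ::1 8443 /tmp/cert.pem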
......@@ -16,11 +16,11 @@
[template]
filename = instance.cfg
md5sum = 109828c2a97c09b0976d266aaba00328
md5sum = 1bd4ed3240f81ce5ff5a55e79366a3f4
[template-ors]
filename = instance-ors.cfg
md5sum = dae4865ef39ad8fe07ed17b2e170d155
md5sum = 2faa3bd0a56ef11c9537b363dbf9ce07
[slaplte.jinja2]
_update_hash_filename_ = slaplte.jinja2
......@@ -60,11 +60,11 @@ md5sum = 52da9fe3a569199e35ad89ae1a44c30e
[template-enb]
_update_hash_filename_ = instance-enb.jinja2.cfg
md5sum = 22c399fc3a84aeec0f6545fe72fbf2f3
md5sum = dd4dd35e440380286c09c7f2338d07f6
[template-ors-enb]
_update_hash_filename_ = instance-ors-enb.jinja2.cfg
md5sum = 4f0e65d831889a7d0932f106c11856eb
md5sum = 7b2e07ff83106b01659ec57571d80de3
[template-ors-ue]
_update_hash_filename_ = instance-ors-ue.jinja2.cfg
......@@ -138,14 +138,6 @@ md5sum = 330f5f07806f1da11cd05bb8e4b52e55
_update_hash_filename_ = config/ue-ifup
md5sum = f02fbfd31ba89cf243e2752adcae28d9
[frequency_outofbounds_promise]
_update_hash_filename_ = promise/check_frequency_outofbounds.py
md5sum = 7c83eab2df4f5a5d519e3eb16e4077a3
[gps_lock_promise]
_update_hash_filename_ = promise/check_gps_lock.py
md5sum = c79fb837cc32bc0182ebf15078115b10
[nginx_conf.in]
_update_hash_filename_ = config/nginx_conf.in
md5sum = e2496564695fb76b242c3e0f8d0ab6c3
......@@ -67,7 +67,7 @@ parts =
check-xlog-fluentbit-health.py
{%- endif %}
{%- if slapparameter_dict.get('gps_sync', False) %}
gps-lock-promise
check-gps-lock.py
{%- endif %}
nginx-launcher
nginx-graceful
......@@ -376,11 +376,9 @@ config-stats-period = {{ slapparameter_dict.get("enb_stats_fetch_period", 60) }}
config-min-rxtx-delay = {{ slapparameter_dict.get("min_rxtx_delay", 0) }}
{%- if slapparameter_dict.get('gps_sync', False) %}
[gps-lock-promise]
recipe = slapos.cookbook:promise.plugin
eggs = slapos.core
file = {{ gps_lock_promise }}
output = ${directory:plugins}/check-gps-lock.py
[check-gps-lock.py]
<= macro.promise
promise = check_gps_lock
config-amarisoft-rf-info-log = ${ru_amarisoft-rf-info-template:log-output}
config-stats-period = {{ slapparameter_dict.get("enb_stats_fetch_period", 60) }}
{%- endif %}
......@@ -260,11 +260,9 @@ init =
del publish['peer-cell-list']
# Add custom promise to check if /dev/sdr0 is busy
[frequency-outofbounds-promise]
recipe = slapos.cookbook:promise.plugin
eggs = slapos.core
file = {{ frequency_outofbounds_promise }}
output = ${directory:plugins}/check-frequency-outofbounds.py
[check-frequency-outofbounds.py]
<= macro.promise
promise = check_frequency_outofbounds
{%- if enb_mode == 'enb' %}
config-frequency = {{ xearfcn_module.frequency(ors_version['current-earfcn']) }}
{%- elif enb_mode == 'gnb' %}
......@@ -274,4 +272,4 @@ config-range-rating = {{ ors_version['range'] }}
[buildout]
extra-parts +=
frequency-outofbounds-promise
check-frequency-outofbounds.py
......@@ -30,7 +30,6 @@ filename = instance-enb.cfg
extra-context +=
section ors ors-version
section ors_version ors-version
raw frequency_outofbounds_promise ${frequency_outofbounds_promise:target}
import-list +=
rawfile instance-enb-base.jinja2.cfg ${template-enb:target}
......
......@@ -243,7 +243,6 @@ extra-context =
raw fluent_bit_location ${fluent-bit:location}
raw openssh_location ${openssh:location}
raw openssh_output_keygen ${openssh-output:keygen}
raw gps_lock_promise ${gps_lock_promise:target}
[dynamic-template-core-network]
< = jinja2-template-base
......
import os
import errno
from zope.interface import implementer
from slapos.grid.promise import interface
from slapos.grid.promise.generic import GenericPromise
@implementer(interface.IPromise)
class RunPromise(GenericPromise):
def __init__(self, config):
"""
Called when initialising the promise before testing.
Sets the configuration and the periodicity.
"""
super(RunPromise, self).__init__(config)
self.setPeriodicity(minute=1)
def sense(self):
"""
Called every time the promise is tested.
Signals a positive or negative result.
In this case, check whether the file exists.
"""
frequency = self.getConfig('frequency')
range_rating = self.getConfig('range-rating')
try:
min_frequency = int(range_rating.split('MHz')[0].strip())
max_frequency = int(range_rating.split('-')[1].split('MHz')[0].strip())
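# e.g. (illustrative) range_rating = "2300MHz - 2400MHz"
# -> min_frequency = 2300, max_frequency = 2400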
except (IndexError, ValueError) as e:
self.logger.info("Range rating not available, skipping the promise")
return
try:
frequency = int(float(frequency))
except ValueError as e:
self.logger.info("Invalid frequency, skipping the promise")
return
if min_frequency <= frequency <= max_frequency:
self.logger.info("Frequency is in bounds ({} MHz <= {} MHz <= {} MHz)".format(
min_frequency,
frequency,
max_frequency))
elif frequency < min_frequency:
self.logger.error("Frequency is lower than the lowest possible frequency on this hardware, please increase it ({} MHz < {} MHz)".format(
frequency,
min_frequency))
else:
self.logger.error("Frequency is higher than the highest possible frequency on this hardware, please increase it ({} MHz > {} MHz)".format(
frequency,
max_frequency))
def test(self):
"""
Called after sense() if the instance is still converging.
Returns success or failure based on sense results.
In this case, fail if the previous sensor result is negative.
"""
return self._test(result_count=1, failure_amount=1)
def anomaly(self):
"""
Called after sense() if the instance has finished converging.
Returns success or failure based on sense results.
Failure signals the instance has diverged.
In this case, fail if two out of the last three results are negative.
"""
return self._anomaly(result_count=1, failure_amount=1)
import itertools
import json
import logging
import os
import re
from dateutil import parser as dateparser
from datetime import datetime
from zope.interface import implementer
from slapos.grid.promise import interface
from slapos.grid.promise.generic import GenericPromise
def iter_reverse_lines(f):
"""
Read lines from the end of the file
"""
f.seek(0, os.SEEK_END)
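# Each seek(-2)/read(1) pair steps one byte backwards and stops right
# after a b'\n', i.e. at the start of the previous line; seek() returns
# the new offset, so position 0 (falsy) ends the inner loop at the
# first line, and seeking before the start of the file raises OSError.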
while True:
try:
while f.seek(-2, os.SEEK_CUR) and f.read(1) != b'\n':
pass
except OSError:
return
pos = f.tell()
yield f.readline()
f.seek(pos, os.SEEK_SET)
def iter_logrotate_file_handle(path, mode='r'):
"""
Yield successive file handles for rotated logs
(XX.log, XX.log.1, XX.log.2, ...)
"""
for i in itertools.count():
path_i = path + str(i or '')
try:
with open(path_i, mode) as f:
yield f
except OSError:
break
def get_json_log_data_interval(json_log_file, interval):
"""
Get all data in the last "interval" seconds from JSON log
Reads rotated logs too (XX.log, XX.log.1, XX.log.2, ...)
"""
current_time = datetime.now()
data_list = []
for f in iter_logrotate_file_handle(json_log_file, 'rb'):
for line in iter_reverse_lines(f):
l = json.loads(line)
timestamp = dateparser.parse(l['time'])
if (current_time - timestamp).total_seconds() > interval:
return data_list
data_list.append(l['data'])
return data_list
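# Illustrative call: collect the 'data' payloads logged during the last
# two stats periods, newest first (file name made up):
#   get_json_log_data_interval('amarisoft-rf-info.json.log', 120)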
class JSONPromise(GenericPromise):
def __init__(self, config):
self.__name = config.get('name', None)
self.__log_folder = config.get('log-folder', None)
super(JSONPromise, self).__init__(config)
json_log_name = os.path.splitext(self.__name)[0] + '.json.log'
self.__json_log_file = os.path.join(self.__log_folder, json_log_name)
self.json_logger = self.__make_json_logger(self.__json_log_file)
def __make_json_logger(self, json_log_file):
logger = logging.getLogger('json-logger')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(json_log_file)
formatter = logging.Formatter(
'{"time": "%(asctime)s", "log_level": "%(levelname)s"'
', "message": "%(message)s", "data": %(data)s}'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
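# Example of a line this logger emits (values made up):
# {"time": "2024-01-01 12:00:00,000", "log_level": "INFO",
#  "message": "rf info", "data": {"rf_info": "Sync: gps (locked)"}}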
@implementer(interface.IPromise)
class RunPromise(JSONPromise):
def __init__(self, config):
super(RunPromise, self).__init__(config)
self.setPeriodicity(minute=1)
self.amarisoft_rf_info_log = self.getConfig('amarisoft-rf-info-log')
self.stats_period = int(self.getConfig('stats-period'))
def sense(self):
data_list = get_json_log_data_interval(self.amarisoft_rf_info_log, self.stats_period * 2)
if len(data_list) < 1:
self.logger.error("rf_info: stale data")
return
rf_info_text = data_list[0]['rf_info']
if 'Sync: gps (locked)' in rf_info_text:
self.logger.info("GPS locked")
else:
self.logger.error("GPS not locked")
def test(self):
"""
Called after sense() if the instance is still converging.
Returns success or failure based on sense results.
In this case, fail if the previous sensor result is negative.
"""
return self._test(result_count=1, failure_amount=1)
def anomaly(self):
"""
Called after sense() if the instance has finished converging.
Returns success or failure based on sense results.
Failure signals the instance has diverged.
In this case, fail if two out of the last three results are negative.
"""
return self._anomaly(result_count=1, failure_amount=1)
......@@ -34,6 +34,3 @@ output= ${buildout:bin-directory}/${:_buildout_section_name_}
mode = 0755
context =
section bash bash
[frequency_outofbounds_promise]
<= download-base
......@@ -222,9 +222,6 @@ update-command = ${:command}
<= setcap
exe = ${dnsmasq:location}/sbin/dnsmasq
[gps_lock_promise]
<= download-base
[versions]
websocket-client = 1.4.2
ncclient = 0.6.13
......
......@@ -361,7 +361,7 @@ simplegeneric = 0.8.1
singledispatch = 3.4.0.3
six = 1.16.0
slapos.cookbook = 1.0.386
slapos.core = 1.14.3
slapos.core = 1.15.0
slapos.extension.shared = 1.0
slapos.libnetworkcache = 0.25
slapos.rebootstrap = 4.7
......