Commit 89ec7583 authored by Thomas Gambier 🚴🏼

Update Release Candidate

parents 6e110f27 12c244e5
@@ -68,7 +68,7 @@ shared = false
[apache-php]
recipe = slapos.recipe.cmmi
url = https://www.php.net/distributions/php-8.2.24.tar.xz
-md5sum = 3263dbd4846871dd6fabe042f141eb19
+md5sum = fff29ce84f5b4ddfc2063f7b2021fce2
configure-options =
  --disable-static
  --disable-zend-test

@@ -14,7 +14,8 @@ revision = v1.12.1-nxd3
[babeld]
recipe = slapos.recipe.cmmi
path = ${babeld-repository:location}
-make-options = CC='gcc -std=gnu99'
+# Fedora's redhat-hardened-ld forces us to use either -fPIC or -fPIE
+make-options = CC='gcc -std=gnu99 -fPIE'
configure-command =
  echo "No configure.."
environment =

[buildout]
+extends =
+  ../gnu-config/buildout.cfg
parts =
  chrpath
@@ -6,3 +8,4 @@ parts =
recipe = slapos.recipe.cmmi
url = http://http.debian.net/debian/pool/main/c/chrpath/chrpath_0.16.orig.tar.gz
md5sum = 2bf8d1d1ee345fc8a7915576f5649982
+pre-configure = cp -f ${gnu-config:location}/config.sub ${gnu-config:location}/config.guess .

[buildout]
extends =
  ../numpy/buildout.cfg
+  ../cmake/buildout.cfg
  ../curl/buildout.cfg
  ../geos/buildout.cfg
  ../giflib/buildout.cfg
-  ../jasper/buildout.cfg
  ../libexpat/buildout.cfg
+  ../libjpeg/buildout.cfg
+  ../libtiff/buildout.cfg
+  ../libxml2/buildout.cfg
+  ../openjpeg/buildout.cfg
  ../pcre/buildout.cfg
-  ../proj4/buildout.cfg
+  ../pkgconfig/buildout.cfg
+  ../proj/buildout.cfg
  ../sqlite3/buildout.cfg
  ../xz-utils/buildout.cfg
@@ -16,27 +21,30 @@ parts =
[gdal]
recipe = slapos.recipe.cmmi
-version = 1.11.1
+shared = true
+version = 3.2.3
url = http://download.osgeo.org/gdal/${:version}/gdal-${:version}.tar.xz
-md5sum = 2e126d7c6605691d38f3e71b945f5c73
+md5sum = 6c276978d625d23a091bac9fdddb99db
+location = @@LOCATION@@
configure-options =
  --with-curl=${curl:location}/bin/curl-config
  --with-expat=${libexpat:location}
  --with-geos=${geos:location}/bin/geos-config
  --with-gif=${giflib:location}
-  --with-jasper=${jasper:location}
+  --with-openjpeg=${openjpeg:location}
  --with-jpeg=${libjpeg:location}
  --with-libtiff=${libtiff:location}
  --with-libz=${zlib:location}
  --with-png=${libpng:location}
-  --with-static-proj4=${proj4:location}
+  --with-proj=${proj:location}
  --with-sqlite3=${sqlite3:location}
-  --with-xml2=${libxml2:location}/bin/xml2-config
+  --with-xml2=yes
  --without-webp
environment =
-  PATH=${xz-utils:location}/bin:%(PATH)s
+  PATH=${pkgconfig:location}/bin:${xz-utils:location}/bin:%(PATH)s
  CPPFLAGS=-I${pcre:location}/include
-  LDFLAGS=-L${pcre:location}/lib -Wl,-rpath=${buildout:parts-directory}/${:_buildout_section_name_}/lib -Wl,-rpath=${curl:location}/lib -Wl,-rpath=${geos:location}/lib -Wl,-rpath=${giflib:location}/lib -Wl,-rpath=${jasper:location}/lib -Wl,-rpath=${jbigkit:location}/lib -Wl,-rpath=${libexpat:location}/lib -Wl,-rpath=${libjpeg:location}/lib -Wl,-rpath=${libpng:location}/lib -Wl,-rpath=${libtiff:location}/lib -Wl,-rpath=${libxml2:location}/lib -Wl,-rpath=${openssl:location}/lib -Wl,-rpath=${pcre:location}/lib -Wl,-rpath=${sqlite3:location}/lib -Wl,-rpath=${zlib:location}/lib
+  LDFLAGS=-L${pcre:location}/lib -Wl,-rpath=${:location}/lib -Wl,-rpath=${proj:location}/lib -Wl,-rpath=${curl:location}/lib -Wl,-rpath=${geos:location}/lib -Wl,-rpath=${giflib:location}/lib -Wl,-rpath=${openjpeg:location}/lib -Wl,-rpath=${jbigkit:location}/lib -Wl,-rpath=${libexpat:location}/lib -Wl,-rpath=${libjpeg:location}/lib -Wl,-rpath=${libpng:location}/lib -Wl,-rpath=${libtiff:location}/lib -Wl,-rpath=${libxml2:location}/lib -Wl,-rpath=${openssl:location}/lib -Wl,-rpath=${pcre:location}/lib -Wl,-rpath=${sqlite3:location}/lib -Wl,-rpath=${zlib:location}/lib
+  PKG_CONFIG_PATH=${libxml2:location}/lib/pkgconfig

[gdal-python]
recipe = zc.recipe.egg:custom

# SlapOS software release to test GDAL on Nexedi testing infrastructure.
[buildout]
extends =
../../stack/slapos-py2.cfg
buildout.cfg
parts =
gdal-interpreter
[gdal-interpreter]
recipe = zc.recipe.egg
interpreter = python-gdal
eggs =
${gdal-python:egg}
Tests for GDAL component
[project]
name = "slapos.test.gdal"
version = "0.0.1.dev0"
description = "Test for SlapOS' GDAL component."
readme = "README.md"
requires-python = ">=3"
license = {text = "GNU General Public License version 3 or later"}
keywords = [
"SlapOS",
"testing",
"GDAL",
]
authors = [
{name = "Nexedi"},
]
maintainers = [
{name = "Nexedi", email = "info@nexedi.com"},
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: GIS",
"Topic :: Software Development :: Testing",
"Typing :: Typed",
]
dependencies = [
"slapos.core",
"slapos.libnetworkcache",
]
[project.urls]
homepage = "https://slapos.nexedi.com"
documentation = "https://slapos.nexedi.com"
repository = "https://lab.nexedi.com/nexedi/slapos"
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[tool.setuptools]
zip-safe = true
\ No newline at end of file
##############################################################################
#
# Copyright (c) 2024 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
# pyright: strict
from pathlib import Path
import subprocess
from slapos.grid.utils import md5digest
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
Path(__file__).parent.parent / "test.cfg"
)
class GDALTestCase(SlapOSInstanceTestCase):
"""Tests for the GDAL component."""
request_instance = False
def test_noinstance(self):
"""Test that no SlapOS instance is being created for this component."""
self.assertFalse(hasattr(self, "computer_partition"))
def test_import(self):
"""Test that the GDAL Python module is importable."""
software_path = (
Path(self.slap.software_directory)
/ md5digest(self.getSoftwareURL())
)
self.assertTrue(software_path.exists())
self.assertTrue(software_path.is_dir())
bin_path = software_path / "bin"
self.assertTrue(bin_path.exists())
self.assertTrue(bin_path.is_dir())
python_exe = bin_path / "python-gdal"
self.assertTrue(python_exe.exists())
self.assertTrue(python_exe.is_file())
subprocess.check_call([python_exe, "-c", "import osgeo.gdal"])
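The test above only imports osgeo.gdal through the generated interpreter. For a slightly fuller manual smoke check, something like the following sketch could be run with bin/python-gdal (an illustrative addition, not part of the commit; it only uses documented osgeo.gdal calls):

# smoke_gdal.py, to be run as: <software>/bin/python-gdal smoke_gdal.py
from osgeo import gdal

# print the version of the GDAL library the bindings are linked against
print(gdal.VersionInfo("RELEASE_NAME"))

# create a small in-memory raster to exercise a driver end to end
dataset = gdal.GetDriverByName("MEM").Create("", 8, 8, 1, gdal.GDT_Byte)
band = dataset.GetRasterBand(1)
band.Fill(255)
print(band.Checksum())  # a non-zero checksum proves the raster was written
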
[buildout]
+extends =
+  ../cmake/buildout.cfg
parts =
  geos

[geos]
recipe = slapos.recipe.cmmi
-version = 3.4.2
+shared = true
+version = 3.12.2
url = http://download.osgeo.org/geos/geos-${:version}.tar.bz2
-md5sum = fc5df2d926eb7e67f988a43a92683bae
+md5sum = f451aa3884f2ca19ae555f5c7d8de4f8
+location = @@LOCATION@@
+configure-command = ${cmake:location}/bin/cmake
configure-options =
-  --disable-dependency-tracking
-  --disable-static
+  -DCMAKE_BUILD_TYPE=Release
+  -DCMAKE_INSTALL_PREFIX=@@LOCATION@@
+  -DCMAKE_INSTALL_LIBDIR=lib
+  -DCMAKE_INSTALL_RPATH=${:location}/lib

@@ -10,12 +10,13 @@ extends =
  ../pkgconfig/buildout.cfg
  ../zlib/buildout.cfg
  ../libexpat/buildout.cfg
+  ../xz-utils/buildout.cfg

[graphviz]
recipe = slapos.recipe.cmmi
shared = true
-url = https://ftp.osuosl.org/pub/blfs/conglomeration/graphviz/graphviz-2.40.1.tar.gz
-md5sum = 4ea6fd64603536406166600bcc296fc8
+url = https://gitlab.com/api/v4/projects/4207231/packages/generic/graphviz-releases/12.1.2/graphviz-12.1.2.tar.xz
+md5sum = 54cf8e3b60bc137c72395d664fc6121a
pkg_config_depends = ${pango:location}/lib/pkgconfig:${pango:pkg_config_depends}
configure-options =
  --with-included-ltdl
@@ -58,7 +59,7 @@ configure-options =
  --without-libgd
  --without-glut
environment =
-  PATH=${pkgconfig:location}/bin:%(PATH)s
+  PATH=${pkgconfig:location}/bin:${xz-utils:location}/bin:%(PATH)s
  PKG_CONFIG_PATH=${:pkg_config_depends}
  CPPFLAGS=-I${zlib:location}/include -I${libexpat:location}/include
  LDFLAGS=-L${bzip2:location}/lib -Wl,-rpath=${bzip2:location}/lib -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib -L${libexpat:location}/lib -Wl,-rpath=${libexpat:location}/lib -Wl,-rpath=${glib:location}/lib -Wl,-rpath=${pango:location}/lib

[buildout]
extends =
../cmake/buildout.cfg
parts =
openjpeg
[openjpeg]
recipe = slapos.recipe.cmmi
shared = true
version = 2.5.2
url = https://github.com/uclouvain/openjpeg/archive/refs/tags/v${:version}.tar.gz
md5sum = f9ee64845881a15109ed0aa73a12202f
location = @@LOCATION@@
configure-command = ${cmake:location}/bin/cmake
configure-options =
-DCMAKE_BUILD_TYPE=Release
-DCMAKE_INSTALL_PREFIX=@@LOCATION@@
-DCMAKE_INSTALL_LIBDIR=lib
-DCMAKE_INSTALL_RPATH=${:location}/lib
@@ -10,8 +10,8 @@ extends =
[openldap]
recipe = slapos.recipe.cmmi
shared = true
-url = http://www.openldap.org/software/download/OpenLDAP/openldap-release/openldap-2.4.47.tgz
-md5sum = e508f97bfd778fec7799f286e5c07176
+url = https://www.openldap.org/software/download/OpenLDAP/openldap-release/openldap-2.6.8.tgz
+md5sum = a7ca5f245340e478ea18b8f972c89bb1
pre-configure = cp -f ${gnu-config:location}/config.sub ${gnu-config:location}/config.guess build
configure-options =
  --disable-static
@@ -29,3 +29,8 @@ environment =
  CPPFLAGS=-I${openssl:location}/include -I${cyrus-sasl:location}/include
  LDFLAGS=-L${openssl:location}/lib -Wl,-rpath=${openssl:location}/lib -L${cyrus-sasl:location}/lib -Wl,-rpath=${cyrus-sasl:location}/lib
  PATH=${groff:location}/bin:%(PATH)s
+
+# old version for python-ldap-python
+[openldap:python2]
+url = http://www.openldap.org/software/download/OpenLDAP/openldap-release/openldap-2.4.47.tgz
+md5sum = e508f97bfd778fec7799f286e5c07176

@@ -15,12 +15,11 @@ max_version = 0
recipe = slapos.recipe.build:gitclone
git-executable = ${git:location}/bin/git
repository = https://lab.nexedi.com/nexedi/osie.git
-revision = dd9aea8
+revision = 7d6b1af

[osie-coupler]
recipe = slapos.recipe.cmmi
path = ${osie-repository:location}/coupler
-bin_dir = ${:path}/bin/
environment =
  PATH=${gcc:prefix}/bin:/usr/bin
  C_INCLUDE_PATH=${open62541:location}/include:${open62541:location}/deps:${open62541:location}/src/pubsub

[buildout]
extends =
../cmake/buildout.cfg
../curl/buildout.cfg
../libtiff/buildout.cfg
../sqlite3/buildout.cfg
parts =
proj
[proj]
recipe = slapos.recipe.cmmi
shared = true
version = 9.4.0
url = https://download.osgeo.org/proj/proj-${:version}.tar.gz
md5sum = c33fd24cf4e3a3048c330b1b07e86b4f
configure-command = ${cmake:location}/bin/cmake
configure-options =
-DCMAKE_BUILD_TYPE=Release
-DEXE_SQLITE3=${sqlite3:location}/bin/sqlite3
-DSQLite3_INCLUDE_DIR=${sqlite3:location}/include
-DSQLite3_LIBRARY=${sqlite3:location}/lib/libsqlite3.so
-DCURL_INCLUDE_DIR=${curl:location}/include
-DCURL_LIBRARY=${curl:location}/lib/libcurl.so
-DTIFF_INCLUDE_DIR=${libtiff:location}/include
-DTIFF_LIBRARY_RELEASE=${libtiff:location}/lib/libtiff.so
-DCMAKE_INSTALL_PREFIX=@@LOCATION@@
-DCMAKE_INSTALL_LIBDIR=lib
-DCMAKE_INSTALL_RPATH=${curl:location}/lib:${libtiff:location}/lib:${sqlite3:location}/lib
[buildout]
parts =
proj4
[proj4]
recipe = slapos.recipe.cmmi
version = 4.8.0
url = http://download.osgeo.org/proj/proj-${:version}.tar.gz
md5sum = d815838c92a29179298c126effbb1537
configure-options =
--disable-dependency-tracking
@@ -61,23 +61,29 @@ md5sum = dd94cab4541b57b88cf3dab32d6336e3
[python3.8]
<= python3-common
version = 3.8
-package_version = 3.8.19
-md5sum = 2532d25930266546822c144b99652254
+package_version = 3.8.20
+md5sum = 745478c81d6382cf46b5e7ad89e56008

[python3.9]
<= python3-common
version = 3.9
-package_version = 3.9.19
-md5sum = 87d0f8281237b972ff8b23e0e2c8d325
+package_version = 3.9.20
+md5sum = bdcda0fdb99e7e17018f6886fae5e1fd

[python3.10]
<= python3-common
version = 3.10
-package_version = 3.10.14
-md5sum = 05148354ce821ba7369e5b7958435400
+package_version = 3.10.15
+md5sum = 8b1faa1b193e4e90c0f17eb2decd89b5

[python3.11]
<= python3-common
version = 3.11
-package_version = 3.11.9
-md5sum = 22ea467e7d915477152e99d5da856ddc
+package_version = 3.11.10
+md5sum = af59e243df4c7019f941ae51891c10bc
+
+[python3.12]
+<= python3-common
+version = 3.12
+package_version = 3.12.7
+md5sum = c6c933c1a0db52597cb45a7910490f93

@@ -76,50 +76,3 @@ PKG_CONFIG_PATH-rbd =
[qemu:sys.version_info < (3,6)]
environment +=
  PYTHON=${python3:executable}
-
-[debian-netinst-base]
-recipe = slapos.recipe.build:download
-shared = true
-filename = debian-${:version}-${:arch}-netinst.iso
-url = https://cdimage.debian.org/cdimage/archive/${:archive}/${:arch}/iso-cd/${:filename}
-archive = ${:version}
-
-[debian-amd64-netinst-base]
-<= debian-netinst-base
-arch = amd64
-
-[debian-amd64-jessie-netinst.iso]
-<= debian-amd64-netinst-base
-version = 8.11.1
-md5sum = df0ce86d0b1d81e232ad08eef58754ed
-
-[debian-amd64-stretch-netinst.iso]
-<= debian-amd64-netinst-base
-version = 9.13.0
-md5sum = 6097fdb9cbab47c96471274b9044e983
-
-[debian-amd64-buster-netinst.iso]
-<= debian-amd64-netinst-base
-version = 10.11.0
-md5sum = 9d7b9cc850464d60ac174787c53e8f3f
-
-[debian-amd64-bullseye-netinst.iso]
-<= debian-amd64-netinst-base
-version = 11.7.0
-md5sum = b33775a9ab6eae784b6da9f31be48be3
-
-[debian-amd64-bookworm-netinst.iso]
-<= debian-amd64-netinst-base
-version = 12.4.0
-md5sum = a03cf771ba9513d908093101a094ac88
-alternate-url = https://cdimage.debian.org/cdimage/release/current/${:arch}/iso-cd/${:filename}
-
-[debian-amd64-netinst.iso]
-<= debian-amd64-bookworm-netinst.iso
-
-[debian-amd64-testing-netinst.iso]
-<= debian-amd64-netinst-base
-alternate-url = https://cdimage.debian.org/cdimage/${archive}/${:arch}/iso-cd/${:filename}
-archive = bullseye_di_rc3
-version = bullseye-DI-rc3
-md5sum = 405917de7062c58357a3673c9901f0c4

@@ -24,6 +24,6 @@ post-install =
# https://git.archlinux.org/svntogit/packages.git/tree/trunk/PKGBUILD?h=packages/sqlite
# NEO needs SQLITE_ENABLE_UPDATE_DELETE_LIMIT to drop partitions.
environment =
-  CPPFLAGS=-I${zlib:location}/include -DSQLITE_MAX_VARIABLE_NUMBER=250000
+  CPPFLAGS=-I${zlib:location}/include -DSQLITE_MAX_VARIABLE_NUMBER=250000 -DSQLITE_ENABLE_RTREE=1
  LDFLAGS=-Wl,-rpath=@@LOCATION@@/lib -L${readline:location}/lib -Wl,-rpath=${readline:location}/lib -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib
  PATH=${tcl:location}/bin:${xz-utils:location}/bin:%(PATH)s

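The added -DSQLITE_ENABLE_RTREE=1 compiles the R*Tree module into libsqlite3. A quick way to confirm the flag took effect (an illustrative sketch, assuming a Python interpreter whose _sqlite3 module is linked against the library built here):

# rtree_check.py: fails with "no such module: rtree" when the linked
# library was built without SQLITE_ENABLE_RTREE
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE VIRTUAL TABLE demo_index USING rtree(id, minX, maxX, minY, maxY)")
print("R*Tree available")
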
@@ -3,7 +3,6 @@ extends =
  ../file/buildout.cfg
  ../openssh/buildout.cfg
  ../p7zip/buildout.cfg
-  ../qemu-kvm/buildout.cfg
parts = vm-debian
@@ -59,6 +58,54 @@ preseed.apt-setup/enable-source-repositories = false
preseed.recommends = false
preseed.tasks =
+
+[debian-netinst-base]
+recipe = slapos.recipe.build:download
+shared = true
+filename = debian-${:version}-${:arch}-netinst.iso
+url = https://cdimage.debian.org/cdimage/archive/${:archive}/${:arch}/iso-cd/${:filename}
+archive = ${:version}
+
+[debian-amd64-netinst-base]
+<= debian-netinst-base
+arch = amd64
+
+[debian-amd64-jessie-netinst.iso]
+<= debian-amd64-netinst-base
+version = 8.11.1
+md5sum = df0ce86d0b1d81e232ad08eef58754ed
+
+[debian-amd64-stretch-netinst.iso]
+<= debian-amd64-netinst-base
+version = 9.13.0
+md5sum = 6097fdb9cbab47c96471274b9044e983
+
+[debian-amd64-buster-netinst.iso]
+<= debian-amd64-netinst-base
+version = 10.11.0
+md5sum = 9d7b9cc850464d60ac174787c53e8f3f
+
+[debian-amd64-bullseye-netinst.iso]
+<= debian-amd64-netinst-base
+version = 11.7.0
+md5sum = b33775a9ab6eae784b6da9f31be48be3
+
+[debian-amd64-bookworm-netinst.iso]
+<= debian-amd64-netinst-base
+version = 12.4.0
+md5sum = a03cf771ba9513d908093101a094ac88
+alternate-url = https://cdimage.debian.org/cdimage/release/current/${:arch}/iso-cd/${:filename}
+
+[debian-amd64-netinst.iso]
+<= debian-amd64-bookworm-netinst.iso
+
+[debian-amd64-testing-netinst.iso]
+<= debian-amd64-netinst-base
+alternate-url = https://cdimage.debian.org/cdimage/${archive}/${:arch}/iso-cd/${:filename}
+archive = bullseye_di_rc3
+version = bullseye-DI-rc3
+md5sum = 405917de7062c58357a3673c9901f0c4

[debian-stable]
x86_64.iso = debian-amd64-netinst.iso
x86_64.kernel = install.amd/vmlinuz

@@ -43,6 +43,8 @@ inline =
[versions]
freezegun = 1.5.1:whl
+ZopeUndo = 6.0

[versions:python2]
freezegun = 0.3.15
+ZopeUndo = 5.0

@@ -2,5 +2,8 @@
[buildout]
extends =
-  test-zodb5.cfg
+  test-common.cfg
  test-py2.cfg
+
+[ZODB]
+major = 5

-# SlapOS software release to test zodbtools/ZODB5-py3 on Nexedi testing infrastructure.
+# SlapOS software release to test zodbtools/ZODB6-py3 on Nexedi testing infrastructure.
[buildout]
extends = test-common.cfg

[ZODB]
-major = 5
+major = 6

[instance-profile]
filename = instance.cfg.in
-md5sum = 4dc7ebc5f38baca776f520e7f5ccf9a7
+md5sum = eafb0d0c2137516e884cde56b7016270

@@ -29,9 +29,6 @@ key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
configuration.runtime_plc_url =
configuration.runtime_plc_md5sum =
-configuration.autostart = 1
-configuration.interface = 127.0.0.1
-configuration.port = 8009

# Create all needed directories, depending on your needs
[directory]
@@ -47,6 +44,10 @@ log = ${:var}/log
[beremiz-runtime]
logfile = ${directory:log}/beremiz-runtime.log
recipe = slapos.cookbook:wrapper
+# default webport in Beremiz
+webport = 8009
# adding BEREMIZPYTHONPATH is needed so we can override the Beremiz'
# internal code which tries to use sys.executable to spawn processes
# and in the context of SlapOS it's a plain Python without needed modules
@@ -56,6 +57,7 @@ environment =
  BEREMIZPYTHONPATH = {{ buildout['bin-directory'] }}/pythonwitheggs
  PATH={{ gcc_location }}/bin
  LIBRARY_PATH={{ openssl_location }}/lib
+  BEREMIZ_LOCAL_HOST=${instance-parameter:ipv4-random}
command-line =
  {{ buildout['bin-directory'] }}/pythonwitheggs {{ buildout['directory'] }}/parts/beremiz-source/Beremiz_cli.py -k --project-home ${directory:home}/parts/download-plc/ build transfer run
@@ -64,10 +66,9 @@ wrapper-path = ${directory:service}/beremiz-runtime
[http-promise]
recipe = slapos.cookbook:check_port_listening
path = ${directory:promise}/${:_buildout_section_name_}
-hostname = ${instance-parameter:configuration.interface}
-port = ${instance-parameter:configuration.port}
+hostname = ${instance-parameter:ipv4-random}
+port = ${beremiz-runtime:webport}

[publish-connection-parameter]
recipe = slapos.cookbook:publish
-port = ${instance-parameter:configuration.port}
-interface = ${instance-parameter:configuration.interface}
+beremiz_runtime_url = http://${instance-parameter:ipv4-random}:${beremiz-runtime:webport}

@@ -51,6 +51,8 @@ setup(name=name,
      'cryptography',
      'pexpect',
      'pyOpenSSL',
+      'ZEO',
+      'zodburi',
    ],
    test_suite='test',
)

-import glob
import ipaddress
import json
import logging
@@ -267,17 +266,13 @@ class TestLog(BalancerTestCase, CrontabMixin):
    # crontab for apachedex is executed
    self._executeCrontabAtDate('generate-apachedex-report', '23:59')
    # it creates a report for the day
-    apachedex_report, = glob.glob(
-      os.path.join(
-        self.computer_partition_root_path,
-        'srv',
-        'monitor',
-        'private',
-        'apachedex',
-        'ApacheDex-*.html',
-      ))
-    with open(apachedex_report) as f:
-      report_text = f.read()
+    apachedex_report, = (
+      self.computer_partition_root_path
+      / 'srv'
+      / 'monitor'
+      / 'private'
+      / 'apachedex').glob('ApacheDex-*.html')
+    report_text = apachedex_report.read_text()
    self.assertIn('APacheDEX', report_text)
    # having this table means that apachedex could parse some lines.
    self.assertIn('<h2>Hits per status code</h2>', report_text)
@@ -318,8 +313,8 @@ class TestLog(BalancerTestCase, CrontabMixin):
    self.assertEqual(
      requests.get(self.default_balancer_zope_url, verify=False).status_code,
      requests.codes.service_unavailable)
-    with open(os.path.join(self.computer_partition_root_path, 'var', 'log', 'apache-error.log')) as error_log_file:
-      error_line = error_log_file.read().splitlines()[-1]
+    error_log_file = self.computer_partition_root_path / 'var' / 'log' / 'apache-error.log'
+    error_line = error_log_file.read_text().splitlines()[-1]
    self.assertIn('backend default has no server available!', error_line)
    # this log also include a timestamp
    self.assertRegex(error_line, r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}')
@@ -416,7 +411,7 @@ class TestBalancer(BalancerTestCase):
    # real time statistics can be obtained by using the stats socket and there
    # is a wrapper which makes this a bit easier.
    socat_process = subprocess.Popen(
-      [self.computer_partition_root_path + '/bin/haproxy-socat-stats'],
+      [self.computer_partition_root_path / 'bin' / 'haproxy-socat-stats'],
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.STDOUT
@@ -604,14 +599,10 @@ class TestServerTLSEmbeddedCaucase(BalancerTestCase):
        balancer_parsed_url.port)
    # run caucase updater in the future, so that certificate is renewed
-    caucase_updater, = glob.glob(
-      os.path.join(
-        self.computer_partition_root_path,
-        'etc',
-        'service',
-        'caucase-updater-haproxy-certificate-*',
-      ))
-    process = pexpect.spawnu("faketime +90days " + caucase_updater)
+    caucase_updater, = (
+      self.computer_partition_root_path / 'etc' / 'service'
+    ).glob('caucase-updater-haproxy-certificate-*')
+    process = pexpect.spawnu(f"faketime +90days {caucase_updater}")
    logger = self.logger
    class DebugLogFile:
      def write(self, msg):
@@ -953,21 +944,16 @@ class TestClientTLS(BalancerTestCase):
    # We have two services in charge of updating CRL and CA certificates for
    # each frontend CA, plus the one for the balancer's own certificate
-    caucase_updater_list = glob.glob(
-      os.path.join(
-        self.computer_partition_root_path,
-        'etc',
-        'service',
-        'caucase-updater-*',
-      ))
+    caucase_updater_list = list((
+      self.computer_partition_root_path / 'etc' / 'service'
+    ).glob('caucase-updater-*'))
    self.assertEqual(len(caucase_updater_list), 3)
    # find the one corresponding to this caucase
    for caucase_updater_candidate in caucase_updater_list:
-      with open(caucase_updater_candidate) as f:
-        if caucase.url in f.read():
-          caucase_updater = caucase_updater_candidate
-          break
+      if caucase.url in caucase_updater_candidate.read_text():
+        caucase_updater = caucase_updater_candidate
+        break
    else:
      self.fail("Could not find caucase updater script for %s" % caucase.url)

@@ -477,8 +477,8 @@ class TestSeleniumTestRunner(ERP5InstanceTestCase, TestPublishedURLIsReachableMixin):
  }

  def test_test_runner_configuration_json_file(self):
-    runUnitTest_script, = glob.glob(
-      self.computer_partition_root_path + "/../*/bin/runUnitTest.real")
+    runUnitTest_script, = self.computer_partition_root_path.glob(
+      "../*/bin/runUnitTest.real")
    config_file = None
    with open(runUnitTest_script) as f:
      for line in f:
@@ -504,8 +504,8 @@ class TestDisableTestRunner(ERP5InstanceTestCase, TestPublishedURLIsReachableMixin):
    """
    # self.computer_partition_root_path is the path of root partition.
    # we want to assert that no scripts exist in any partition.
-    bin_programs = list(map(os.path.basename,
-      glob.glob(self.computer_partition_root_path + "/../*/bin/*")))
+    bin_programs = [
+      p.name for p in self.computer_partition_root_path.glob("../*/bin/*")]
    self.assertTrue(bin_programs)  # just to check the glob was correct.
    self.assertNotIn('runUnitTest', bin_programs)

import contextlib
import subprocess
import json
import zodburi
from ZODB.DB import DB
from slapos.testing.utils import CrontabMixin
from . import ERP5InstanceTestCase, default, matrix, setUpModule, ERP5PY3
_ = setUpModule
class ZEOTestCase(ERP5InstanceTestCase):
__test_matrix__ = matrix((default,))
@classmethod
def getInstanceSoftwareType(cls) -> str:
return "zodb-zeo"
@classmethod
def _getInstanceParameterDict(cls) -> dict:
return {
"tcpv4-port": 8000,
"computer-memory-percent-threshold": 100,
"name": cls.__name__,
"monitor-passwd": "secret",
"zodb-dict": {"root": {}},
}
@classmethod
def getInstanceParameterDict(cls) -> dict:
return {"_": json.dumps(cls._getInstanceParameterDict())}
def setUp(self) -> None:
self.storage_dict = json.loads(
self.computer_partition.getConnectionParameterDict()["_"]
)["storage-dict"]
def db(self) -> contextlib.AbstractContextManager[DB]:
root = self.storage_dict["root"]
zeo_uri = f"zeo://{root['server']}?storage={root['storage']}"
storage_factory, dbkw = zodburi.resolve_uri(zeo_uri)
return contextlib.closing(DB(storage_factory(), **dbkw))
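# Illustration (assumed values, added for clarity): with storage-dict
# {"root": {"server": "[::1]:8000", "storage": "root"}}, the URI built above
# is "zeo://[::1]:8000?storage=root"; zodburi.resolve_uri() returns a ZEO
# ClientStorage factory plus keyword arguments for ZODB.DB, so db() yields a
# regular DB object that is closed on context exit.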
class TestRepozo(ZEOTestCase, CrontabMixin):
__partition_reference__ = "rpz"
def test_backup_and_restore(self) -> None:
def check_state():
(self.computer_partition_root_path / ".timestamp").unlink()
self.waitForInstance()
if ERP5PY3:
with self.db() as db:
with db.transaction() as cnx:
self.assertEqual(cnx.root.state, "before backup")
if ERP5PY3:
# as it is not possible to connect to a python2 ZEO server
# from a python3 client, we check more when the server is python3
with self.db() as db:
with db.transaction() as cnx:
cnx.root.state = "before backup"
check_state()
self._executeCrontabAtDate("tidstorage", "2000-01-01 UTC")
dat, fsz, index = sorted(
[
p.name
for p in (
self.computer_partition_root_path / "srv" / "backup" / "zodb" / "root"
).glob("*")
]
)
self.assertRegex(dat, r'2000-01-01-00-\d\d-\d\d.dat')
self.assertRegex(fsz, r'2000-01-01-00-\d\d-\d\d.fsz')
self.assertRegex(index, r'2000-01-01-00-\d\d-\d\d.index')
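# The three files asserted above are what repozo writes for a full backup:
# a gzipped copy of the storage (.fsz), a catalog used to verify and chain
# incremental backups (.dat), and the saved object index (.index), all named
# after the backup timestamp.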
if ERP5PY3:
with self.db() as db:
with db.transaction() as cnx:
cnx.root.state = "after backup"
db.close()
restore_script = self.computer_partition_root_path / "srv" / "runner-import-restore"
self.assertTrue(restore_script.exists())
status, restore_output = subprocess.getstatusoutput(str(restore_script))
self.assertEqual(status, 1)
self.assertIn("Zeo is already running", restore_output)
with self.slap.instance_supervisor_rpc as supervisor:
supervisor.stopAllProcesses()
restore_output = subprocess.check_output(restore_script)
check_state()
@@ -15,28 +15,27 @@
[instance-profile]
filename = instance.cfg.in
-md5sum = 8c9dc41c176ba01116de5b71aaa704de
+md5sum = 32c772c593d2c3c38c26186b91b78cf8

+[instance-default]
+filename = instance-default.cfg.in
+md5sum = b4330fbe0c9c3631f4f477c06d3460b3
+
+[instance-agent]
+filename = instance-agent.cfg.in
+md5sum = 6bbc97cf8e752d22773d5f23ecdda37d
+
[influxdb-config-file]
filename = influxdb-config-file.cfg.in
md5sum = a28972ced3e0f4aa776e43a9c44717c0

-[telegraf-config-file]
-filename = telegraf-config-file.cfg.in
-md5sum = a1a9c22c2a7829c66a49fc2504604d21
-
[grafana-config-file]
filename = grafana-config-file.cfg.in
-md5sum = e255dcca466f5de51698d24cbd114577
+md5sum = 2b75d6b1984d9d154303ec773aa88474

-[grafana-provisioning-config-file]
-filename = grafana-provisioning-config-file.cfg.in
-md5sum = 3aa0f1ed752b2a59ea2b5e7c1733daf3
+[grafana-provisioning-dashboards-config-file]
+filename = grafana-provisioning-dashboards-config-file.cfg.in
+md5sum = 5616679a9c5c2757540175ead3f5500a

-[loki-config-file]
-filename = loki-config-file.cfg.in
-md5sum = ad2baf4599a937d7352034a41fa24814
-
-[promtail-config-file]
-filename = promtail-config-file.cfg.in
-md5sum = 5f1b3a1a3d3f98daeab4780106452d71

@@ -154,7 +154,7 @@ reporting_enabled = true
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to https://grafana.com to get latest versions
-check_for_updates = true
+check_for_updates = false

# Google Analytics universal tracking code, only enabled if you specify an id here
google_analytics_ua_id =
@@ -334,23 +334,21 @@ allow_sign_up = true
#################################### SMTP / Emailing #####################
[smtp]
+{% set email = slapparameter_dict.get('email', {}) %}
#enabled = false
-enabled = {{ slapparameter_dict.get('smtp-server') and 'true' or 'false' }}
+enabled = {{ email.get('smtp-server') and 'true' or 'false' }}
#host = locahost:25
-host = {{ slapparameter_dict.get('smtp-server', '') }}
+host = {{ email.get('smtp-server', '') }}
#user =
-user = {{ slapparameter_dict.get('smtp-username', '') }}
+user = {{ email.get('smtp-username', '') }}
# If the password contains # or ; you have to wrap it with trippel quotes. Ex """#password;"""
#password =
-password = {{ slapparameter_dict.get('smtp-password', '') and '"""%s"""' % slapparameter_dict['smtp-password'] or ""}}
+password = {{ email.get('smtp-password', '') and '"""%s"""' % email['smtp-password'] or ""}}
cert_file =
key_file =
-#skip_verify = false
-skip_verify = {{ slapparameter_dict.get('smtp-verify-ssl', 'true').lower() == 'true' and 'false' or 'true' }}
-#from_address = admin@grafana.localhost
-from_address = {{ slapparameter_dict.get('email-from-address', '') }}
-#from_name = Grafana
-from_name = {{ slapparameter_dict.get('email-from-name', 'Grafana') }}
+skip_verify = {{ email.get('smtp-verify-ssl') and 'false' or 'true' }}
+from_address = {{ email.get('email-from-address', '') }}
+from_name = {{ email.get('email-from-name', 'Grafana') }}
ehlo_identity =

[emails]

# https://grafana.com/docs/administration/provisioning/#example-datasource-config-file
apiVersion: 1
datasources:
- name: telegraf
type: influxdb
access: proxy
url: {{ influxdb['url'] }}
user: {{ influxdb['auth-username'] }}
database: telegraf
isDefault: true
jsonData:
tlsSkipVerify: true
secureJsonData:
password: {{ influxdb['auth-password'] }}
version: 1
editable: false
- name: loki
type: loki
access: proxy
url: {{ loki['url'] }}
version: 1
editable: false
# https://grafana.com/docs/grafana/latest/administration/provisioning/#dashboards
apiVersion: 1
providers:
- name: SlapOS
folder: ''
updateIntervalSeconds: 10
allowUiUpdates: false
options:
path: {{ dashboards_dir }}
{
"$schema": "https://json-schema.org/draft/2019-09/schema",
"description": "Parameters to instantiate an agent collecting logs and metrics",
"type": "object",
"additionalProperties": false,
"unevaluatedProperties": false,
"$defs": {
"type": {
"description": "Type of the application. With `SlapOS` type, some metrics are collected from supervisor and from some known partition types (for example: ERP5's mariadb or ERP5's zopes). With `system` type, only log files are ingested.",
"type": "string",
"default": "SlapOS",
"enum": [
"SlapOS",
"system"
]
},
"name": {
"description": "Name of this application",
"type": "string"
},
"urls": {
"description": "URLs to monitor for availability and certificate lifetime",
"type": "array",
"items": {
"type": "string"
}
},
"log-file-patterns": {
"type": "array",
"items": {
"type": "string"
},
"description": "Glob patterns for watched log files."
},
"static-tags": {
"type": "object",
"description": "Static tags for this partition",
"examples": [
{
"service-level": "production",
"data-center": "abc123"
}
]
}
},
"required": [
"applications",
"influxdb",
"loki"
],
"properties": {
"applications": {
"description": "Applications to monitor",
"type": "array",
"items": {
"oneOf": [
{
"type": "object",
"additionalProperties": false,
"description": "Configuration for SlapOS type application",
"required": [
"type",
"name",
"instance-root",
"partitions"
],
"properties": {
"type": {
"$ref": "#/$defs/type",
"const": "SlapOS"
},
"name": {
"$ref": "#/$defs/name"
},
"urls": {
"$ref": "#/$defs/urls"
},
"instance-root": {
"description": "Directory containing SlapOS partitions.",
"type": "string",
"examples": [
"/srv/slapgrid/",
"/srv/slapgrid/slappart30/srv/runner/instance/"
]
},
"partitions": {
"description": "SlapOS partitions to monitor",
"type": "array",
"items": {
"type": "object",
"required": [
"name",
"reference"
],
"unevaluatedProperties": false,
"properties": {
"name": {
"type": "string",
"description": "Friendly name of the partition",
"examples": [
"mariadb",
"zope-activity"
]
},
"reference": {
"type": "string",
"description": "Reference of the partition",
"examples": [
"slappart1",
"slappart2"
]
},
"type": {
"type": "string",
"description": "Type of the partition. Known types have metrics and logs collected",
"enum": [
"erp5/mariadb",
"erp5/balancer",
"erp5/zope-activity",
"erp5/zope-front",
"erp5/zeo",
"mariadb",
"default"
],
"default": "default"
},
"log-file-patterns": {
"$ref": "#/$defs/log-file-patterns",
"description": "Glob pattern for log files to watch. This mostly makes sense for `default` partition type. `{partition_root_directory}` python `.format`-style substitution variable is supported."
},
"static-tags": {
"$ref": "#/$defs/static-tags"
}
},
"allOf": [
{
"if": {
"properties": {
"type": {
"enum": [
"mariadb",
"erp5/mariadb"
]
}
}
},
"then": {
"properties": {
"dbname": {
"type": "string",
"description": "Database name"
},
"username": {
"type": "string",
"description": "Username to connect to database"
}
}
}
}
],
"examples": [
{
"name": "zope-backoffice",
"type": "erp5/zope-front",
"reference": "slappart1",
"static-tags": {
"instance": "instance-name"
}
},
{
"name": "mariadb",
"type": "erp5/mariadb",
"reference": "slappart2"
},
{
"name": "Theia",
"type": "default",
"log-file-patterns": [
"{partition_root_directory}/.slappart*log"
]
}
]
}
}
}
},
{
"type": "object",
"additionalProperties": false,
"description": "Configuration for `system` type application",
"required": [
"type",
"name"
],
"properties": {
"type": {
"$ref": "#/$defs/type",
"const": "system"
},
"name": {
"$ref": "#/$defs/name"
},
"urls": {
"$ref": "#/$defs/urls"
},
"partitions": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "string",
"description": "Friendly name of the partition",
"examples": [
"syslog",
"email"
]
},
"log-file-patterns": {
"$ref": "#/$defs/log-file-patterns"
},
"static-tags": {
"$ref": "#/$defs/static-tags"
}
},
"examples": [
{
"name": "syslog",
"log-file-patterns": [
"/var/log/syslog"
]
},
{
"name": "kernel",
"log-file-patterns": [
"/var/log/kern.log",
"/var/log/messages"
]
},
{
"name": "re6stnet",
"log-file-patterns": [
"/var/log/re6stnet/*.log"
]
}
]
}
}
}
}
]
}
},
"influxdb": {
"description": "Connection information for influxdb",
"type": "object",
"additionalProperties": false,
"required": [
"url",
"database",
"username",
"password"
],
"properties": {
"url": {
"description": "IPv6 URL of influxdb HTTP endpoint",
"format": "uri",
"type": "string"
},
"database": {
"description": "database created in influxdb",
"type": "string"
},
"username": {
"description": "username for influxdb",
"type": "string"
},
"password": {
"description": "password for influxdb user",
"type": "string"
}
}
},
"loki": {
"description": "Connection information for loki",
"type": "object",
"additionalProperties": false,
"required": [
"url",
"caucase-url"
],
"properties": {
"url": {
"description": "Base URL of Loki",
"format": "uri",
"type": "string"
},
"caucase-url": {
"description": "URL caucase service used by Loki",
"format": "uri",
"type": "string"
}
}
}
}
}
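For reference, here is a short sketch (an illustrative addition, not part of the commit) of a minimal parameter document that satisfies the schema above; the file name and every value are assumptions, and validation uses the python-jsonschema API:

import json
import jsonschema  # python-jsonschema, with 2019-09 draft support

with open("instance-agent-input-schema.json") as f:  # assumed file name
    schema = json.load(f)

parameters = {
    "applications": [{
        "type": "SlapOS",
        "name": "ERP5",
        "instance-root": "/srv/slapgrid/",
        "partitions": [
            {"name": "mariadb", "reference": "slappart2", "type": "erp5/mariadb"},
        ],
    }],
    "influxdb": {
        "url": "https://[fd00::1]:8086",
        "database": "telegraf",
        "username": "agent",
        "password": "secret",
    },
    "loki": {
        "url": "https://[fd00::1]:3100",
        "caucase-url": "https://[fd00::1]:9400",
    },
}
jsonschema.validate(parameters, schema, cls=jsonschema.Draft201909Validator)
print("parameters are valid")
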
{
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "Values returned by agent instantiation",
"additionalProperties": false,
"properties": {
"telegraf-extra-config-dir": {
"description": "Directory in telegraf partition where extra configuration file will be loaded. These files must match *.conf pattern",
"type": "string"
},
"promtail-url": {
"description": "URL of embedded server from promtail",
"format": "uri",
"type": "string"
},
"facl-script": {
"description": "Path of a generated script to set ACL for the agent to access files and sockets. This might be needed depending on how slapos partitions were formatted",
"type": "string"
}
},
"type": "object"
}
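The published telegraf-extra-config-dir makes the agent extensible: any *.conf file dropped there is loaded through telegraf's --config-directory option (see the [telegraf] wrapper later in instance-agent.cfg.in). A sketch of that usage, with an assumed directory value:

import pathlib

# assumed value of the published "telegraf-extra-config-dir" parameter
extra_dir = pathlib.Path("/srv/slapgrid/slappart0/srv/telegraf/extra-config")

# add an extra telegraf input; the agent's wrapper passes this directory
# via --config-directory, so the file is picked up when telegraf restarts
(extra_dir / "ping.conf").write_text(
    '[[inputs.ping]]\n  urls = ["example.org"]\n'
)
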
{% import "caucase" as caucase with context %}
[buildout]
parts =
promises
publish-connection-parameter
eggs-directory = {{ buildout_eggs_directory }}
develop-eggs-directory = {{ buildout_develop_eggs_directory }}
offline = true
[instance-parameter]
recipe = slapos.cookbook:slapconfiguration
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[slap-configuration]
# apache-frontend reads from a part named [slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc
var = ${:home}/var
tmp = ${:home}/tmp
srv = ${:home}/srv
service = ${:etc}/service
promise = ${:etc}/promise
telegraf-dir = ${:srv}/telegraf
telegraf-extra-config-dir = ${:telegraf-dir}/extra-config
caucase-updater-loki-promtail-client = ${:srv}/caucase-updater/loki-client-promtail
promtail-dir = ${:srv}/promtail
# macros
[config-file]
recipe = slapos.recipe.template:jinja2
url = {{ buildout_parts_directory }}/${:_buildout_section_name_}/${:_buildout_section_name_}.cfg.in
output = ${directory:etc}/${:_buildout_section_name_}.cfg
extensions = jinja2.ext.do
[check-port-listening-promise]
recipe = slapos.cookbook:check_port_listening
path = ${directory:promise}/${:_buildout_section_name_}
[check-url-available-promise]
recipe = slapos.cookbook:check_url_available
path = ${directory:promise}/${:_buildout_section_name_}
dash_path = {{ dash_bin }}
curl_path = {{ curl_bin }}
[influxdb-server]
recipe = slapos.recipe.build
slapparameter-dict = ${slap-configuration:configuration}
init =
import urllib.parse
influxdb = options['slapparameter-dict']['influxdb']
options['url'] = influxdb['url']
options['database'] = influxdb['database']
options['auth-username'] = influxdb['username']
options['auth-password'] = influxdb['password']
parsed_url = urllib.parse.urlparse(options['url'])
options['hostname'] = parsed_url.hostname
options['port'] = str(parsed_url.port)
[influxdb-listen-promise]
<= check-port-listening-promise
hostname = ${influxdb-server:hostname}
port = ${influxdb-server:port}
[telegraf]
recipe = slapos.cookbook:wrapper
extra-config-dir = ${directory:telegraf-extra-config-dir}
# telegraf needs influxdb to be already listening before starting
command-line =
bash -c '${influxdb-listen-promise:path} && ${:nice} {{ telegraf_bin }} --config ${telegraf-config-file:output} --config-directory ${:extra-config-dir}'
wrapper-path = ${directory:service}/telegraf
hash-files = ${telegraf-config-file:output}
# TODO: control nice of the agent ?
{% if 0 %}
nice = nice -19 chrt --idle 0 ionice -c3
{% else %}
nice =
{% endif %}
[telegraf-config-file]
recipe = slapos.recipe.build
output = ${directory:etc}/${:_buildout_section_name_}.toml
telegraf-input-slapos-bin = {{ telegraf_input_slapos_bin }}
slapparameter-dict = ${slap-configuration:configuration}
input-socket = ${directory:var}/tg.sock
init =
import zc.buildout
import pkg_resources
buildout_options = self.buildout["buildout"]
zc.buildout.easy_install.install(
["toml"],
dest=None,
working_set=pkg_resources.working_set,
path=[
buildout_options["develop-eggs-directory"],
buildout_options["eggs-directory"]])
import collections
import pathlib
import urllib.parse
import toml
slapparameter_dict = self.options["slapparameter-dict"]
slap_connection = self.buildout["slap-connection"]
influxdb = self.buildout['influxdb-server']
self._config_files = {} # files to create during install step
access_path_dict = {}
inputs = collections.defaultdict(list)
processors = collections.defaultdict(list)
config = {
"agent": {
"debug": False,
"flush_interval": "10s",
"flush_jitter": "0s",
"hostname": "",
"interval": "10s",
"round_interval": True,
},
"tags": {
"computer_id": slap_connection['computer-id'],
},
# built-in inputs
"cpu": {
"drop": ["cpu_time"],
"percpu": True,
"totalcpu": True,
},
"disk": {},
"io": {},
"mem": {},
"system": {},
"inputs": inputs,
"processors": processors,
"outputs": {
"influxdb": {
"database": influxdb["database"],
"insecure_skip_verify": True, # TODO
"username": influxdb["auth-username"],
"password": influxdb["auth-password"],
"precision": "s",
"urls": [
influxdb["url"],
],
},
},
}
for application in slapparameter_dict.get("applications", []):
partition_mapping = {}
partition_root_directory = ''
for partition in application.get("partitions", []):
partition.setdefault("type", "default")
if "reference" in partition:
partition_mapping[partition["reference"]] = partition["name"]
if application.get("instance-root"):
partition_root_directory = pathlib.Path(application["instance-root"]) / partition['reference']
if partition["type"] in ("erp5/mariadb", "mariadb"):
partition.setdefault("username", "root")
partition.setdefault("dbname", "erp5")
mariadb_socket = f"{partition_root_directory}/var/run/mariadb.sock"
dsn = f"{partition['username']}@unix({mariadb_socket})/{partition['dbname']}"
access_path_dict[mariadb_socket] = 'rw'
inputs["mysql"].append(
{
"name_override": "mariadb",
"servers": [dsn],
"gather_innodb_metrics": True,
"gather_slave_status": True,
"mariadb_dialect": True,
"tags": dict(
partition.get("static-tags", {}),
app=application["name"],
name=partition["name"],
partition_reference=partition["reference"],
),
}
)
if partition["type"] == "erp5/mariadb":
inputs["sql"].append(
{
"name_override": "mariadb_activities",
"driver": "mysql",
"dsn": dsn,
"query": [
{
"query": """
select 'message' as cmf_activity_queue, count(*) as message_count from message
union all select 'message_queue' as cmf_activity_queue, count(*) as message_count from message_queue
""",
"field_columns_include": ["message_count"],
"tag_columns_include": ["cmf_activity_queue"],
},
{
"query": """
select 'message' as cmf_activity_queue, count(*) as failed_message_count
from message where processing_node between -2 and -10
union all select 'message_queue' as cmf_activity_queue, count(*) as failed_message_count
from message_queue where processing_node between -2 and -10
""",
"field_columns_include": ["failed_message_count"],
"tag_columns_include": ["cmf_activity_queue"],
},
{
"query": """
select cast(coalesce(max(UNIX_TIMESTAMP(now()) - UNIX_TIMESTAMP(message.date)), 0) as int)
as waiting_time, 'message' as cmf_activity_queue
from message where processing_node in (-1, 0) and message.message not like '%after_tag%'
union all
select cast(coalesce(max(UNIX_TIMESTAMP(now()) - UNIX_TIMESTAMP(message_queue.date)), 0) as int)
as waiting_time, 'message_queue' as cmf_activity_queue
from message_queue where processing_node in (-1, 0) and message_queue.message not like '%after_tag%'
""",
"field_columns_include": ["waiting_time"],
"tag_columns_include": ["cmf_activity_queue"],
},
],
"tags": dict(
partition.get("static-tags", {}),
app=application["name"],
name=partition["name"],
partition_reference=partition["reference"],
),
}
)
if partition["type"] == "erp5/balancer":
# XXX this produces many measurements
haproxy_socket = f"{partition_root_directory}/var/run/ha.sock"
access_path_dict[haproxy_socket] = 'rw'
inputs["haproxy"].append(
{
"servers": [haproxy_socket],
"tags": dict(
partition.get("static-tags", {}),
app=application["name"],
name=partition["name"],
partition_reference=partition["reference"],
),
})
urls = application.get("urls", [])
if urls:
inputs["http_response"].append({
"interval": "5m",
"urls": urls,
"tags": {"app": application["name"]},
})
for url in urls:
x509_url = url
parsed_url = urllib.parse.urlparse(url)
if parsed_url.scheme == 'https':
# x509_cert wants a port
if not parsed_url.port:
x509_url = parsed_url._replace(netloc=parsed_url.hostname+':443').geturl()
inputs["x509_cert"].append({
"sources": [x509_url],
"tags": {"url": url},
"interval": "5h",
"tags": {"app": application["name"]},
})
if application.get("type") == "SlapOS":
telegraf_slapos_input_config_file = str(
pathlib.Path(self.options['location'])
/ f"telegraf-input-slapos-{application['name']}.cfg"
)
self._config_files[telegraf_slapos_input_config_file] = toml.dumps({
"inputs": {
"slapos": [{
"instance_root": application["instance-root"]}]}})
access_path_dict[f"{application['instance-root']}/sv.sock"] = 'rw'
telegraf_slapos_input_command = self.options['telegraf-input-slapos-bin']
inputs["execd"].append({
"name_override": "slapos_services",
"command": [telegraf_slapos_input_command, '-config', telegraf_slapos_input_config_file],
"tags": {"app": application["name"]},
})
# drop measurements for not monitored partitions.
processors["starlark"].append({
"namepass": ["slapos_services"],
"tagpass": {"app": [application["name"]]},
"order": 1,
"source": f'''
def apply(metric):
if metric.tags.get('reference') in {list(partition_mapping)!r}:
return metric
'''
})
# telegraf-input-slapos outputs the process name as "name", but we rename
# this to "process_name", so that it is more understandable in a global
# context and because we use the name of the partition as "name" everywhere
# else.
processors["rename"].append({
"namepass": ["slapos_services"],
"tagpass": {"app": [application["name"]]},
"order": 2,
"replace": [{
"tag": "name",
"dest": "process_name",
}]})
# "normalize" slapos process names, remove hash from hash-files and -on-watch suffix
processors["regex"].append({
"namepass": ["slapos_services"],
"tagpass": {"app": [application["name"]]},
"order": 3,
"tags": [{
"key": "process_name",
"pattern": "^(.*)-on-watch$",
"replacement": "$" + "{1}",
}]})
processors["regex"].append({
"namepass": ["slapos_services"],
"tagpass": {"app": [application["name"]]},
"order": 4,
"tags": [{
"key": "process_name",
"pattern": "^(.*)-\\w{32}",
# XXX we concatenate strings so that we don't have to escape them for buildout
"replacement": "$" + "{1}",
}]})
# use consistent `partition_reference` for slappart
processors["rename"].append({
"namepass": ["slapos_services"],
"tagpass": {"app": [application["name"]]},
"order": 5,
"replace": [{
"tag": "reference",
"dest": "partition_reference",
}]})
processors["enum"].append({
"namepass": ["slapos_services"],
"tagpass": {"app": [application["name"]]},
"order": 6,
"mapping": [{
"tag": "partition_reference",
"dest": "name",
"value_mappings": partition_mapping,
}]})
# add a socket input so that we can have a promise verifying that telegraf is listening
inputs['socket_listener'].append({"service_address": f"unix://{self.options['input-socket']}"})
options['access-path-dict'] = access_path_dict
self._config_files[options['output']] = toml.dumps(config)
install =
import os
os.mkdir(self.options['location'])
for fname, content in self._config_files.items():
with open(fname, 'w') as f:
f.write(content)
[loki-server]
recipe = slapos.recipe.build
slapparameter-dict = ${slap-configuration:configuration}
init =
loki = options['slapparameter-dict']['loki']
options['url'] = loki['url']
options['caucase-url'] = loki['caucase-url']
[loki-client-certificate]
key-file = ${directory:etc}/${:_buildout_section_name_}.key
cert-file = ${directory:etc}/${:_buildout_section_name_}.crt
common-name = ${:_buildout_section_name_}
ca-file = ${directory:etc}/${:_buildout_section_name_}.ca.crt
crl-file = ${directory:etc}/${:_buildout_section_name_}.crl
[loki-client-certificate-csr-config]
recipe = slapos.recipe.template
inline =
[req]
prompt = no
distinguished_name = dn
[ dn ]
CN = ${:cn}
L = ${slap-connection:computer-id}
O = ${slap-connection:partition-id}
output = ${buildout:parts-directory}/${:_buildout_section_name_}/${:_buildout_section_name_}
[loki-client-certificate-prepare-csr]
# variable
config =
recipe = plone.recipe.command
command =
if [ ! -f '${:csr}' ] ; then
{{ openssl_bin }} req \
-newkey rsa \
-batch \
-new \
-sha256 \
-nodes \
-keyout /dev/null \
-config '${:config}' \
-out '${:csr}'
fi
stop-on-error = true
csr = ${directory:srv}/${:_buildout_section_name_}.csr.pem
[loki-promtail-client-certificate]
<= loki-client-certificate
[loki-promtail-client-certificate-csr-config]
<= loki-client-certificate-csr-config
cn = loki ${slap-connection:partition-id}@${slap-connection:computer-id}
[loki-promtail-client-certificate-prepare-csr]
<= loki-client-certificate-prepare-csr
config = ${loki-promtail-client-certificate-csr-config:output}
{{
caucase.updater(
prefix='loki-promtail-client-certificate',
buildout_bin_directory=buildout_bin_directory,
updater_path='${directory:service}/loki-promtail-client-certificate-updater',
url='${loki-server:caucase-url}',
data_dir='${directory:caucase-updater-loki-promtail-client}',
crt_path='${loki-promtail-client-certificate:cert-file}',
ca_path='${loki-promtail-client-certificate:ca-file}',
crl_path='${loki-promtail-client-certificate:crl-file}',
key_path='${loki-promtail-client-certificate:key-file}',
template_csr='${loki-promtail-client-certificate-prepare-csr:csr}',
openssl=openssl_bin,
)}}
[promtail]
recipe = slapos.cookbook:wrapper
command-line = ${:nice} {{ promtail_bin }} -config.file=${promtail-config-file:location}
wrapper-path = ${directory:service}/promtail
hash-files =
${promtail-config-file:location}
# TODO: should the niceness of the agent be configurable?
{% if 0 %}
nice = nice -19 chrt --idle 0 ionice -c3
{% else %}
nice =
{% endif %}
dir = ${directory:promtail-dir}
http-port = 19080
grpc-port = 19095
ip = ${instance-parameter:ipv4-random}
url = http://${:ip}:${:http-port}
[promtail-config-file]
recipe = slapos.recipe.build
location = ${directory:etc}/${:_buildout_section_name_}.yaml
slapparameter-dict = ${slap-configuration:configuration}
depends = ${loki-promtail-client-certificate:recipe}
{% raw %}
init =
import pathlib
import json
slapparameter_dict = self.options['slapparameter-dict']
slap_connection = self.buildout["slap-connection"]
loki_certificate = self.buildout['loki-promtail-client-certificate']
self._config_files = {} # files to create during install step
access_path_dict = {}
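# access_path_dict maps each scraped file path to an access mode; it is
# published as access-path-dict and consumed by [facl-script] to grant the
# agent read access.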
cfg = {
"server": {
"http_listen_address": self.buildout['promtail']['ip'],
"http_listen_port": int(self.buildout['promtail']['http-port']),
"grpc_listen_address": self.buildout['promtail']['ip'],
"grpc_listen_port": int(self.buildout['promtail']['grpc-port']),
"graceful_shutdown_timeout": 5,
"external_url": self.buildout['promtail']['url'],
},
"positions": {
"filename": "{}/positions.yaml".format(self.buildout['promtail']['dir']),
},
"clients": [
{
"url": "{}/loki/api/v1/push".format(self.buildout['loki-server']['url']),
"tls_config": {
"ca_file": loki_certificate['ca-file'],
"cert_file": loki_certificate['cert-file'],
"key_file": loki_certificate['key-file'],
},
# this might not be good for copytruncate option of logrotate
# see https://grafana.com/docs/loki/latest/send-data/promtail/logrotation/
"batchwait": "5s"
}
],
"scrape_configs": []
}
def get_job_selector(partition, job_name, application_name):
# make a selector in LogQL, like '{job="job_name",key="value"}'
selector_parts = []
for k, v in dict(
partition.get('static-tags', {}),
app=application_name,
job=job_name
).items():
selector_parts.append(f'{k}="{v}"')
return "{%s}" % ",".join(selector_parts)
def get_static_configs(partition, job_name, path_list, application):
if not isinstance(path_list, list):
raise ValueError(f'{path_list!r} is not a list')
partition_root_directory = ''
if partition.get('reference') and 'instance-root' in application:
instance_root = pathlib.Path(application['instance-root'])
partition_root_directory = instance_root / partition['reference']
path_list = [path.format(partition_root_directory=partition_root_directory) for path in path_list]
for path in path_list:
access_path_dict[path] = 'r'
partition_kw = {}
if partition.get('reference'):
partition_kw['partition_reference'] = partition['reference']
return [
{
"targets": [
"localhost"
],
"labels": dict(
partition.get('static-tags', {}),
job=job_name,
app=application['name'],
name=partition['name'],
computer_id=slap_connection['computer-id'],
__path__=path,
**partition_kw
)
} for path in path_list
]
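# build one scrape config per known log type for every declared application partition.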
for application in slapparameter_dict.get('applications', []):
for partition in application.get('partitions', []):
partition.setdefault("type", "default")
if partition['type'] in ('erp5/zope-activity', 'erp5/zope-front'):
# job names include the app name because they need to be unique
job_name = f"{application['name']}-{partition['name']}-event-log"
cfg['scrape_configs'].append({
"job_name": job_name,
"pipeline_stages": [
{
"match": {
"selector": get_job_selector(partition, job_name, application['name']),
"stages": [
{
"multiline": {
# TODO this does not seem to work well
"firstline": "^------",
"max_wait_time": "5s"
}
},
{
"regex": {
"expression": "^------\\n(?P<timestamp>\\d{4}-\\d{2}-\\d{2}\\s\\d{1,2}\\:\\d{2}\\:\\d{2}\\,\\d{3}) (?P<level>\\S+) (?P<component>\\S+) (?P<message>.*)"
}
},
{
"timestamp": {
"format": "2021-04-04 03:57:11,242",
"source": "timestamp"
}
},
{
"labels": {
"level": None
}
}
]
}
}
],
"static_configs": get_static_configs(
partition,
job_name,
["{partition_root_directory}/var/log/zope-*-event.log"],
application,
)})
if partition['type'] == 'erp5/zope-front':
job_name = f"{application['name']}-{partition['name']}-access-log"
cfg['scrape_configs'].append({
"job_name": job_name,
# drop requests from haproxy health checks
"pipeline_stages": [
{
"drop": {
"expression": '.* "GET / HTTP/1.0" 200 .*'
}
}
],
"static_configs": get_static_configs(
partition,
job_name,
["{partition_root_directory}/var/log/zope-*-Z2.log"],
application,
)})
job_name = f"{application['name']}-{partition['name']}-long-request-log"
cfg['scrape_configs'].append({
"job_name": job_name,
"pipeline_stages": [
{
"match": {
"selector": get_job_selector(partition, job_name, application['name']),
"stages": [
{
"multiline": {
"firstline": "^\\d{4}-\\d{2}-\\d{2}\\s\\d{1,2}\\:\\d{2}\\:\\d{2}\\,\\d{3}",
"max_wait_time": "5s"
}
},
{
"regex": {
"expression": "^(?P<timestamp>.*) .*"
}
},
{
"timestamp": {
"format": "2021-04-04 03:57:11,242",
"source": "timestamp"
}
}
]
}
}
],
"static_configs": get_static_configs(
partition,
job_name,
["{partition_root_directory}/var/log/longrequest_logger_zope-*.log"],
application,
)})
if partition['type'] in ('erp5/mariadb', 'mariadb'):
job_name = f"{application['name']}-{partition['name']}-mariadb-slow-queries"
cfg['scrape_configs'].append({
"job_name": job_name,
"pipeline_stages": [
{
"match": {
"selector": get_job_selector(partition, job_name, application['name']),
"stages": [
{
"multiline": {
# between each slow query, slow query log has a first line like:
# # Time: 231008 16:29:01
# and then a second like:
# # User@Host: user[user] @ [10.0.71.207]
# but the first line is not repeated for subsequent queries that happen
# during the same second
"firstline": r"(^# Time: \d{2}.*\n^# User@Host:.*|^# User@Host:.*)",
"max_wait_time": "5s"
}
},
{
"regex": {
"expression": ".*SET timestamp=(?P<timestamp>\\d+);.*"
}
},
{
"timestamp": {
"format": "Unix",
"source": "timestamp"
}
}
]
}
}
],
"static_configs": get_static_configs(
partition,
job_name,
["{partition_root_directory}/var/log/mariadb_slowquery.log"],
application,
)})
job_name = f"{application['name']}-{partition['name']}-mariadb-error-log"
cfg['scrape_configs'].append({
"job_name": job_name,
"pipeline_stages": [
{
"match": {
"selector": get_job_selector(partition, job_name, application['name']),
"stages": [
{
"timestamp": {
"format": "2021-06-05 3:55:31",
"source": "timestamp"
}
}
]
}
}
],
"static_configs": get_static_configs(
partition,
job_name,
["{partition_root_directory}/var/log/mariadb_error.log"],
application,
)})
if partition['type'] == 'erp5/zeo':
job_name = f"{application['name']}-{partition['name']}-zeo-log"
cfg['scrape_configs'].append({
"job_name": job_name,
"pipeline_stages": [
{
"match": {
"selector": get_job_selector(partition, job_name, application['name']),
"stages": [
{
"multiline": {
"firstline": "^------",
"max_wait_time": "5s"
}
},
{
"regex": {
"expression": "^------\\n(?P<timestamp>\\d{4}-\\d{2}-\\d{2}\\s\\d{1,2}\\:\\d{2}\\:\\d{2}\\,\\d{3}) (?P<level>\\S+) (?P<component>\\S+) (?P<message>.*)"
}
},
{
"timestamp": {
"format": "2021-04-04 03:57:11,242",
"source": "timestamp"
}
},
{
"labels": {
"level": None,
}
}
]
}
}
],
"static_configs": get_static_configs(
partition,
job_name,
["{partition_root_directory}/var/log/zeo-*.log"],
application,
)})
if partition['type'] == 'erp5/balancer':
job_name = f"{application['name']}-{partition['name']}-balancer-access-log"
cfg['scrape_configs'].append({
"job_name": job_name,
"static_configs": get_static_configs(
partition,
job_name,
["{partition_root_directory}/var/log/apache-access.log"],
application,
)})
job_name = f"{application['name']}-{partition['name']}-balancer-error-log"
cfg['scrape_configs'].append({
"job_name": job_name,
"static_configs": get_static_configs(
partition,
job_name,
["{partition_root_directory}/var/log/apache-error.log"],
application,
)})
if partition.get('log-file-patterns'):
job_name = f"{application['name']}-{partition['name']}"
cfg['scrape_configs'].append({
"job_name": job_name,
"static_configs": get_static_configs(
partition,
job_name,
partition['log-file-patterns'],
application,
)})
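# the config file has a .yaml extension, but since JSON is valid YAML, promtail loads the JSON dump fine.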
self._config_files[options['location']] = json.dumps(cfg, indent=2)
options['access-path-dict'] = access_path_dict
install =
for fname, content in self._config_files.items():
with open(fname, 'w') as f:
f.write(content)
{% endraw %}
[promtail-listen-promise]
<= check-port-listening-promise
hostname = ${promtail:ip}
port = ${promtail:http-port}
[telegraf-listen-promise]
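# "test -S" succeeds once telegraf has created the unix socket declared as its socket_listener input.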
recipe = slapos.cookbook:wrapper
command-line =
test -S ${telegraf-config-file:input-socket}
wrapper-path = ${directory:promise}/${:_buildout_section_name_}
[facl-script]
recipe = slapos.recipe.build
promtail-access-path-dict = ${promtail-config-file:access-path-dict}
telegraf-access-path-dict = ${telegraf-config-file:access-path-dict}
install =
import itertools
import os
import pathlib
import pwd
import shlex
user = pwd.getpwuid(os.getuid()).pw_name
script_code = ''
def quote_path(p):
# quote, but preserve *
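# shlex.quote would quote any path containing '*', which would prevent the
# shell from globbing, so '*' is shielded as __STAR__ during quoting and
# restored afterwards.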
p = str(p)
assert '__STAR__' not in p
p = p.replace('*', '__STAR__')
p = shlex.quote(p)
p = p.replace('__STAR__', '*')
return p
# make sure we can access the parent folders
parent_access = {}
def check_parent_access(path):
parent = path.parent
if parent != path:
parent_access[str(parent)] = 'x'
check_parent_access(parent)
for path_spec, access in itertools.chain(
options['promtail-access-path-dict'].items(),
options['telegraf-access-path-dict'].items()):
path = pathlib.Path(path_spec)
check_parent_access(path)
for path_spec, access in sorted(itertools.chain(
options['promtail-access-path-dict'].items(),
options['telegraf-access-path-dict'].items(),
parent_access.items())):
path = pathlib.Path(path_spec)
if '*' in path_spec:
script_code += f'setfacl --modify=u:{user}:rx {quote_path(path.parent)}\n'
script_code += f'setfacl --modify=u:{user}:{access} {quote_path(path)}\n'
pathlib.Path(location).write_text(script_code)
[promises]
recipe =
instance-promises =
${promtail-listen-promise:path}
${telegraf-listen-promise:wrapper-path}
[publish-connection-parameter]
recipe = slapos.cookbook:publish.serialised
telegraf-extra-config-dir = ${telegraf:extra-config-dir}
facl-script = ${facl-script:location}
promtail-url = ${promtail:url}
{
"$schema": "https://json-schema.org/draft/2019-09/schema",
"description": "Parameters to instantiate Grafana",
"type": "object",
"additionalProperties": false,
"properties": {
"email": {
"type": "object",
"description": "Email configuration",
"additionalProperties": false,
"properties": {
"smtp-server": {
"description": "SMTP server used by Grafana to send emails (in host:port format). Leaving this empty will disable email sending.",
"type": "string"
},
"smtp-username": {
"description": "Username to connect to SMTP server",
"type": "string"
},
"smtp-password": {
"description": "Password to connect to SMTP server",
"type": "string"
},
"smtp-verify-ssl": {
"description": "Verify certificate of SMTP server",
"type": "boolean",
"default": true
},
"email-from-address": {
"description": "Email address used in `From:` header of emails",
"type": "string"
},
"email-from-name": {
"description": "Name used in `From:` header of emails",
"default": "Grafana",
"type": "string"
}
}
},
"frontend": {
"type": "object",
"additionalProperties": false,
"properties": {
"custom-domain": {
"description": "Custom domain to use when requesting a rapid-cdn frontend",
"type": "string",
"format": "hostname"
}
}
},
"caucase": {
"type": "object",
"description": "Caucase configuration. To connect external agents, it's required to approve their client certificates, either using an external caucase referenced as `external-caucase-url` or registering a user with `user-auto-approve-count`",
"additionalProperties": false,
"properties": {
"external-caucase-url": {
"description": "URL of a caucase instance to manage all server and clients certificates, to use instead of embedding caucase",
"type": "string",
"format": "uri"
},
"user-auto-approve-count": {
"description": "Number of users to automatically approve in the embedded caucase",
"type": "integer",
"default": 0
}
}
},
"influxdb": {
"description": "Fine tuning influxdb parameters",
"type": "object",
"additionalProperties": false,
"properties": {
"default-retention-policy-days": {
"description": "Number of days to keep metrics data",
"default": 720,
"type": "integer"
}
}
},
"loki": {
"description": "Fine tuning loki parameters",
"type": "object",
"additionalProperties": false,
"properties": {
"retention-period-days": {
"description": "Number of days to keep log data",
"default": 60,
"type": "integer"
}
}
},
"agent": {
"type": "object",
"properties": {
"applications": {
"$ref": "./instance-agent-input-schema.json#properties/applications"
}
}
}
}
}
{ {
"$schema": "http://json-schema.org/draft-04/schema#", "$schema": "http://json-schema.org/draft-07/schema#",
"description": "Values returned by Grafana instantiation", "description": "Values returned by Grafana instantiation",
"additionalProperties": false,
"properties": { "properties": {
"url": { "url": {
"description": "Shared frontend for this Grafana instance", "description": "Shared frontend for this Grafana instance",
"pattern": "^https://", "format": "uri",
"type": "string" "type": "string"
}, },
"grafana-username": { "grafana-username": {
...@@ -18,12 +17,12 @@ ...@@ -18,12 +17,12 @@
}, },
"grafana-url": { "grafana-url": {
"description": "IPv6 URL to access grafana", "description": "IPv6 URL to access grafana",
"pattern": "^https://", "format": "uri",
"type": "string" "type": "string"
}, },
"influxdb-url": { "influxdb-url": {
"description": "IPv6 URL of influxdb HTTP endpoint", "description": "IPv6 URL of influxdb HTTP endpoint",
"pattern": "^https://", "format": "uri",
"type": "string" "type": "string"
}, },
"influxdb-database": { "influxdb-database": {
...@@ -38,8 +37,23 @@ ...@@ -38,8 +37,23 @@
"description": "password for influxdb user", "description": "password for influxdb user",
"type": "string" "type": "string"
}, },
"telegraf-extra-config-dir": { "loki-url": {
"description": "Directory in telegraf partition where extra configuration file will be loaded. These files must match *.conf pattern", "description": "Base URL of Loki",
"format": "uri",
"type": "string"
},
"loki-caucase-url": {
"description": "URL caucase service used by Loki",
"format": "uri",
"type": "string"
},
"agent-promtail-url": {
"description": "URL of embedded server from promtail",
"format": "uri",
"type": "string"
},
"agent-facl-script": {
"description": "Path of a generated script to set ACL for the agent to access files and sockets. This might be needed depending on how slapos partitions were formatted",
"type": "string" "type": "string"
} }
}, },
......
{% import "caucase" as caucase with context %}
[buildout]
parts =
promises
publish-connection-parameter
eggs-directory = {{ buildout_eggs_directory }}
develop-eggs-directory = {{ buildout_develop_eggs_directory }}
offline = true
[instance-parameter]
recipe = slapos.cookbook:slapconfiguration
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[slap-configuration]
# apache-frontend reads from a part named [slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc
var = ${:home}/var
tmp = ${:home}/tmp
srv = ${:home}/srv
service = ${:etc}/service
promise = ${:etc}/promise
influxdb-data-dir = ${:srv}/influxdb
grafana-dir = ${:srv}/grafana
grafana-data-dir = ${:grafana-dir}/data
grafana-logs-dir = ${:var}/log
grafana-plugins-dir = ${:grafana-dir}/plugins
grafana-provisioning-config-dir = ${:grafana-dir}/provisioning-config
grafana-provisioning-datasources-dir = ${:grafana-provisioning-config-dir}/datasources
grafana-provisioning-dashboards-dir = ${:grafana-provisioning-config-dir}/dashboards
grafana-dashboards-dir = ${:grafana-dir}/dashboards
loki-dir = ${:srv}/loki
loki-storage-filesystem-directory = ${:loki-dir}/chunks
loki-compactor-working-directory = ${:loki-dir}/compactor
srv-caucased-loki = ${:srv}/caucased/loki
backup-caucased-loki = ${:srv}/backup/caucased/loki
caucase-updater-loki-server = ${:srv}/caucase-updater/loki-server
caucase-updater-loki-promise-client = ${:srv}/caucase-updater/loki-client-promise
caucase-updater-loki-grafana-client = ${:srv}/caucase-updater/loki-client-grafana
# macros
[generate-insecure-self-signed-certificate]
# TODO: stop using this, use caucase
recipe = plone.recipe.command
command =
if [ ! -e ${:key-file} ]
then
{{ openssl_bin }} req -x509 -nodes -sha256 -days 3650 \
-subj "/C=AA/ST=X/L=X/O=Dis/CN=${:common-name}" \
-newkey rsa -keyout ${:key-file} \
-out ${:cert-file}
fi
update-command = ${:command}
key-file = ${directory:etc}/${:_buildout_section_name_}.key
cert-file = ${directory:etc}/${:_buildout_section_name_}.crt
common-name = ${:_buildout_section_name_}
[config-file]
recipe = slapos.recipe.template:jinja2
url = {{ buildout_parts_directory }}/${:_buildout_section_name_}/${:_buildout_section_name_}.cfg.in
output = ${directory:etc}/${:_buildout_section_name_}.cfg
extensions = jinja2.ext.do
[check-port-listening-promise]
recipe = slapos.cookbook:check_port_listening
path = ${directory:promise}/${:_buildout_section_name_}
[check-url-available-promise]
recipe = slapos.cookbook:check_url_available
path = ${directory:promise}/${:_buildout_section_name_}
dash_path = {{ dash_bin }}
curl_path = {{ curl_bin }}
[influxdb]
ipv6 = ${instance-parameter:ipv6-random}
ipv4 = ${instance-parameter:ipv4-random}
host = ${:ipv6}
local-host = ${:ipv4}
rpc-port = 8088
http-port = 8086
url = https://[${:host}]:${:http-port}
data-dir = ${directory:influxdb-data-dir}
auth-username = ${influxdb-password:username}
auth-password = ${influxdb-password:passwd}
unix-socket = ${directory:var}/influxdb.socket
ssl-cert-file = ${influxdb-certificate:cert-file}
ssl-key-file = ${influxdb-certificate:key-file}
database = telegraf
recipe = slapos.cookbook:wrapper
command-line =
{{ influxd_bin }} -config ${influxdb-config-file:output}
wrapper-path = ${directory:service}/influxdb
[influxdb-config-file]
<= config-file
context =
section influxdb influxdb
[influxdb-password]
recipe = slapos.cookbook:generate.password
username = influxdb
[influxdb-certificate]
<= generate-insecure-self-signed-certificate
[influxdb-listen-promise]
<= check-port-listening-promise
hostname = ${influxdb:ipv6}
port = ${influxdb:http-port}
[influxdb-password-promise]
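# bootstrapping promise: creating the admin user through the unix socket also verifies that influxdb is up and accepts commands.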
recipe = slapos.cookbook:wrapper
command-line =
{{ influx_bin }} -username ${influxdb:auth-username} -password ${influxdb:auth-password} -socket ${influxdb:unix-socket} -execute "CREATE USER ${influxdb:auth-username} WITH PASSWORD '${influxdb:auth-password}' WITH ALL PRIVILEGES"
wrapper-path = ${directory:promise}/${:_buildout_section_name_}
[influxdb-database-ready-promise]
recipe = slapos.cookbook:wrapper
command-line =
bash -c "{{ influx_bin }} \
-username ${influxdb:auth-username} \
-password ${influxdb:auth-password} \
-host [${influxdb:host}] \
-port ${influxdb:http-port} \
-unsafeSsl \
-ssl \
-execute 'show databases' | grep '${influxdb:database}'"
wrapper-path = ${directory:promise}/${:_buildout_section_name_}
[influxdb-create-default-data-retention-policy-promise]
recipe = slapos.cookbook:wrapper
command-line =
{{ influx_bin }}
-username ${influxdb:auth-username}
-password ${influxdb:auth-password}
-socket ${influxdb:unix-socket}
-execute 'CREATE RETENTION POLICY "slapos-default-policy" ON "${influxdb:database}" DURATION {{ slapparameter_dict.get('influxdb', {}).get('default-retention-policy-days', 720) }}d REPLICATION 1 DEFAULT'
wrapper-path = ${directory:promise}/${:_buildout_section_name_}
[grafana]
ipv6 = ${instance-parameter:ipv6-random}
port = 8180
url = https://[${:ipv6}]:${:port}
data-dir = ${directory:grafana-data-dir}
logs-dir = ${directory:grafana-logs-dir}
plugins-dir = ${directory:grafana-plugins-dir}
provisioning-config-dir = ${directory:grafana-provisioning-config-dir}
provisioning-datasources-dir = ${directory:grafana-provisioning-datasources-dir}
provisioning-dashboards-dir = ${directory:grafana-provisioning-dashboards-dir}
admin-user = ${grafana-password:username}
admin-password = ${grafana-password:passwd}
secret-key = ${grafana-secret-key:passwd}
ssl-key-file = ${grafana-certificate:key-file}
ssl-cert-file = ${grafana-certificate:cert-file}
recipe = slapos.cookbook:wrapper
command-line =
{{ grafana_bin }}
server
-config ${grafana-config-file:output}
-homepath {{ grafana_homepath }}
wrapper-path = ${directory:service}/grafana
hash-files =
${grafana-config-file:output}
hash-existing-files =
${grafana-provisioning-datasources-config-file:location}
[grafana-certificate]
<= generate-insecure-self-signed-certificate
[grafana-password]
recipe = slapos.cookbook:generate.password
username = admin
[grafana-secret-key]
recipe = slapos.cookbook:generate.password
[grafana-config-file]
<= config-file
context =
section grafana grafana
section apache_frontend apache-frontend
key slapparameter_dict slap-configuration:configuration
depends =
${grafana-provisioning-datasources-config-file:location}
${grafana-provisioning-dashboards-config-file:output}
[grafana-provisioning-datasources-config-file]
recipe = slapos.recipe.build
init =
# pre-create location, so that we can use hash-existing-files
import pathlib
datasource_file = pathlib.Path(location)
if not datasource_file.parent.exists():
datasource_file.parent.mkdir(parents=True)
if not datasource_file.exists():
datasource_file.touch()
# make sure this part is reinstalled when the certificate is updated
import os
cert_mtime = -1
try:
cert_mtime = (
os.stat(options['loki-grafana-client-certificate-cert-file']).st_mtime
+ os.stat(options['loki-server-certificate-ca-file']).st_mtime
)
except FileNotFoundError:
pass
options['loki-grafana-client-certificate-cert-mtime'] = str(int(cert_mtime))
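# storing the mtime in options makes it part of the installed options, so
# buildout reinstalls this part (and re-reads the certificates) whenever they change.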
install =
import json
import os
def safe_read_file(path):
if os.path.exists(path):
with open(path) as f:
return f.read()
influxdb_data_source = {
"name": "telegraf",
"type": "influxdb",
"access": "proxy",
"url": options['influxdb-url'],
"user": options['influxdb-auth-username'],
"database": options['influxdb-database'],
"isDefault": True,
"jsonData": {
"tlsSkipVerify": True # TODO
},
"secureJsonData": {
"password": options['influxdb-auth-password'],
},
"version": int(options['loki-grafana-client-certificate-cert-mtime']),
"editable": False
}
loki_data_source = {
"name": "loki",
"type": "loki",
"access": "proxy",
"url": options['loki-server-url'],
"jsonData": {
"tlsAuth": True,
"tlsAuthWithCACert": True,
"maxLines": 5000, # XXX see max_entries_limit_per_query in loki config
},
"secureJsonData": {
# XXX maybe we can use the files directly?
# see https://github.com/grafana/grafana/discussions/44296#discussioncomment-2515929
"tlsCACert": safe_read_file(options['loki-server-certificate-ca-file']),
"tlsClientCert": safe_read_file(options['loki-grafana-client-certificate-cert-file']),
"tlsClientKey": safe_read_file(options['loki-grafana-client-certificate-key-file']),
},
"version": int(options['loki-grafana-client-certificate-cert-mtime']),
"editable": False,
}
config = {
"apiVersion": 1,
"datasources": [
influxdb_data_source,
loki_data_source,
],
}
with open(options['location'], 'w') as f:
json.dump(config, f, indent=2)
location = ${grafana:provisioning-datasources-dir}/datasources.yaml
loki-server-url = ${loki-server:url}
loki-server-certificate-ca-file = ${loki-server-certificate:ca-file}
loki-grafana-client-certificate-cert-file = ${loki-grafana-client-certificate:cert-file}
loki-grafana-client-certificate-key-file = ${loki-grafana-client-certificate:key-file}
influxdb-url = ${influxdb:url}
influxdb-database = ${influxdb:database}
influxdb-auth-username = ${influxdb:auth-username}
influxdb-auth-password = ${influxdb:auth-password}
[grafana-provisioning-dashboards-config-file]
<= config-file
rendered = ${grafana:provisioning-dashboards-dir}/dashboard.yaml
context =
key dashboards_dir directory:grafana-dashboards-dir
[grafana-listen-promise]
<= check-port-listening-promise
hostname = ${grafana:ipv6}
port = ${grafana:port}
[grafana-provisioning-datasources-config-file-promise]
recipe = slapos.cookbook:wrapper
command-line =
{{ jq_bin }} -e
"if .datasources[1].secureJsonData.tlsClientCert != null and .datasources[1].secureJsonData.tlsCACert != null then true else false end"
${grafana-provisioning-datasources-config-file:location}
wrapper-path = ${directory:promise}/${:_buildout_section_name_}
[loki-server]
storage-filesystem-directory = ${directory:loki-storage-filesystem-directory}
compactor-working-directory = ${directory:loki-compactor-working-directory}
path-prefix = ${directory:loki-dir}
http-port = 3100
url = https://[${:ipv6}]:${:http-port}
ipv4 = ${instance-parameter:ipv4-random}
ipv6 = ${instance-parameter:ipv6-random}
ca-file = ${loki-server-certificate:ca-file}
cert-file = ${loki-server-certificate:cert-file}
key-file = ${loki-server-certificate:key-file}
# TODO: CRL
[loki-service]
recipe = slapos.cookbook:wrapper
command-line =
{{ loki_bin }} -config.file=${loki-server-config-file:location}
wrapper-path = ${directory:service}/${:_buildout_section_name_}
ready-url = ${loki-server:url}/ready
hash-files =
${loki-server-config-file:location}
hash-existing-files =
${loki-server-certificate:cert-file}
[loki-server-config-file]
location = ${directory:etc}/${:_buildout_section_name_}.yaml
recipe = slapos.recipe.build
install =
import json
loki_server = self.buildout['loki-server']
slapparameter_dict = self.buildout['slap-configuration']['configuration']
config = {
"auth_enabled": False,
"server": {
"http_listen_address": loki_server['ipv6'],
"http_listen_port": int(loki_server['http-port']),
"http_tls_config": {
"client_ca_file": loki_server['ca-file'],
"cert_file": loki_server['cert-file'],
"key_file": loki_server['key-file'],
"client_auth_type": "RequireAndVerifyClientCert",
},
"grpc_listen_address": loki_server['ipv4'],
"grpc_server_max_recv_msg_size": 104857600,
"grpc_server_max_send_msg_size": 104857600
},
"common": {
"instance_addr": loki_server['ipv4'],
"replication_factor": 1,
"ring": {
"instance_addr": loki_server['ipv4'],
"kvstore": {
"store": "inmemory"
}
},
"path_prefix": loki_server['path-prefix'],
},
"schema_config": {
"configs": [
{
"from": "2020-05-15",
"store": "tsdb",
"object_store": "filesystem",
"schema": "v13",
"index": {
"prefix": "index_",
"period": "24h"
}
}
]
},
"storage_config": {
"filesystem": {
"directory": loki_server['storage-filesystem-directory'],
}
},
"limits_config": {
"ingestion_rate_mb": 1024,
"ingestion_burst_size_mb": 1024,
# TODO: do we want this? Too large queries make the browser slow.
# default is 1000, but it seems we can raise it to at least 5000
"max_entries_limit_per_query": 5001,
"reject_old_samples": False,
"retention_period": '{}d'.format(
slapparameter_dict.get('loki', {}).get('retention-period-days', 60))
},
"frontend_worker": {
"grpc_client_config": {
# TODO: check whether this is still needed
# https://github.com/grafana/loki/issues/5143#issuecomment-1697196679
"max_send_msg_size": 268435456
}
},
"compactor": {
"working_directory": loki_server['compactor-working-directory'],
"delete_request_store": "filesystem",
"retention_enabled": True,
"retention_delete_delay": "2h",
}
}
with open(options['location'], 'w') as f:
json.dump(config, f, indent=2)
[loki-server-certificate-init-certificate]
recipe = slapos.recipe.build
init =
# pre-create a file at the path of the certificate,
# so that we can use the hash-existing-files option
import pathlib
cert_file = pathlib.Path(self.buildout['loki-server-certificate']['cert-file'])
if not cert_file.parent.exists():
cert_file.parent.mkdir()
if not cert_file.exists():
cert_file.touch()
[loki-server-certificate]
init = ${loki-server-certificate-init-certificate:init}
key-file = ${directory:etc}/${:_buildout_section_name_}.key
cert-file = ${directory:etc}/${:_buildout_section_name_}.crt
common-name = ${:_buildout_section_name_}
ca-file = ${directory:etc}/${:_buildout_section_name_}.ca.crt
crl-file = ${directory:etc}/${:_buildout_section_name_}.crl
{{
caucase.updater(
prefix='loki-server-certificate',
buildout_bin_directory=buildout_bin_directory,
updater_path='${directory:service}/loki-server-certificate-updater',
url='${loki-caucased:url}',
data_dir='${directory:caucase-updater-loki-server}',
crt_path='${loki-server-certificate:cert-file}',
ca_path='${loki-server-certificate:ca-file}',
crl_path='${loki-server-certificate:crl-file}',
key_path='${loki-server-certificate:key-file}',
template_csr='${loki-server-certificate-prepare-csr:csr}',
openssl=openssl_bin,
)}}
[loki-server-certificate-csr-config]
recipe = slapos.recipe.template
inline =
[req]
prompt = no
req_extensions = req_ext
distinguished_name = dn
[ dn ]
CN = loki-server
[ req_ext ]
subjectAltName = @alt_names
[ alt_names ]
IP.1 = ${loki-server:ipv4}
IP.2 = ${loki-server:ipv6}
output = ${buildout:parts-directory}/${:_buildout_section_name_}/${:_buildout_section_name_}
[loki-server-certificate-prepare-csr]
recipe = plone.recipe.command
command =
if [ ! -f '${:csr}' ] ; then
{{ openssl_bin }} req \
-newkey rsa \
-batch \
-new \
-sha256 \
-nodes \
-keyout /dev/null \
-config '${loki-server-certificate-csr-config:output}' \
-out '${:csr}'
fi
stop-on-error = true
csr = ${directory:srv}/${:_buildout_section_name_}.csr.pem
[loki-server-listen-promise]
<= check-url-available-promise
url = ${loki-service:ready-url}
ca-cert-file = ${loki-server:ca-file}
cert-file = ${loki-promise-client-certificate:cert-file}
key-file = ${loki-promise-client-certificate:key-file}
[loki-client-certificate]
key-file = ${directory:etc}/${:_buildout_section_name_}.key
cert-file = ${directory:etc}/${:_buildout_section_name_}.crt
common-name = ${:_buildout_section_name_}
ca-file = ${directory:etc}/${:_buildout_section_name_}.ca.crt
crl-file = ${directory:etc}/${:_buildout_section_name_}.crl
[loki-client-certificate-csr-config]
recipe = slapos.recipe.template
inline =
[req]
prompt = no
distinguished_name = dn
[ dn ]
CN = ${:_buildout_section_name_}
output = ${buildout:parts-directory}/${:_buildout_section_name_}/${:_buildout_section_name_}
[loki-client-certificate-prepare-csr]
# variable, to be set by extending sections
config =
recipe = plone.recipe.command
command =
if [ ! -f '${:csr}' ] ; then
{{ openssl_bin }} req \
-newkey rsa \
-batch \
-new \
-sha256 \
-nodes \
-keyout /dev/null \
-config '${:config}' \
-out '${:csr}'
fi
stop-on-error = true
csr = ${directory:srv}/${:_buildout_section_name_}.csr.pem
[loki-promise-client-certificate]
<= loki-client-certificate
[loki-promise-client-certificate-csr-config]
<= loki-client-certificate-csr-config
[loki-promise-client-certificate-prepare-csr]
<= loki-client-certificate-prepare-csr
config = ${loki-promise-client-certificate-csr-config:output}
{{
caucase.updater(
prefix='loki-promise-client-certificate',
buildout_bin_directory=buildout_bin_directory,
updater_path='${directory:service}/loki-promise-client-certificate-updater',
url='${loki-caucased:url}',
data_dir='${directory:caucase-updater-loki-promise-client}',
crt_path='${loki-promise-client-certificate:cert-file}',
ca_path='${loki-promise-client-certificate:ca-file}',
crl_path='${loki-promise-client-certificate:crl-file}',
key_path='${loki-promise-client-certificate:key-file}',
template_csr='${loki-promise-client-certificate-prepare-csr:csr}',
openssl=openssl_bin,
)}}
[loki-grafana-client-certificate]
<= loki-client-certificate
[loki-grafana-client-certificate-csr-config]
<= loki-client-certificate-csr-config
[loki-grafana-client-certificate-prepare-csr]
<= loki-client-certificate-prepare-csr
config = ${loki-grafana-client-certificate-csr-config:output}
{{
caucase.updater(
prefix='loki-grafana-client-certificate',
buildout_bin_directory=buildout_bin_directory,
updater_path='${directory:service}/loki-grafana-client-certificate-updater',
url='${loki-caucased:url}',
data_dir='${directory:caucase-updater-loki-grafana-client}',
crt_path='${loki-grafana-client-certificate:cert-file}',
ca_path='${loki-grafana-client-certificate:ca-file}',
crl_path='${loki-grafana-client-certificate:crl-file}',
key_path='${loki-grafana-client-certificate:key-file}',
template_csr='${loki-grafana-client-certificate-prepare-csr:csr}',
openssl=openssl_bin,
)}}
{% if slapparameter_dict.get('caucase', {}).get('external-caucase-url') %}
[loki-caucased]
url = {{ slapparameter_dict.get('caucase', {}).get('external-caucase-url') }}
{% else %}
[loki-caucased]
port = 18080
ip = ${instance-parameter:ipv6-random}
netloc = [${:ip}]:${:port}
url = http://${:netloc}/
# service_auto_approve_count is 4, covering the default certificates:
# - server: loki
# - clients: loki promise, grafana, promtail
{{
caucase.caucased(
prefix='loki-caucased',
buildout_bin_directory=buildout_bin_directory,
caucased_path='${directory:service}/loki-caucased',
backup_dir='${directory:backup-caucased-loki}',
data_dir='${directory:srv-caucased-loki}',
netloc='${loki-caucased:netloc}',
tmp='${directory:tmp}',
service_auto_approve_count=4,
user_auto_approve_count='${loki-caucased-user-auto-approve-count:user-auto-approve-count}',
key_len=2048,
)}}
[loki-caucased-user-auto-approve-count]
user-auto-approve-count = {{ slapparameter_dict.get('caucase', {}).get('user-auto-approve-count', 0) }}
{% endif %}
[apache-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
name = Grafana Frontend
# XXX We have hardcoded SR URL here.
software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
shared = true
config-url = ${grafana:url}
{% if slapparameter_dict.get('frontend', {}).get('custom-domain') %}
config-custom_domain = {{ slapparameter_dict['frontend']['custom-domain'] }}
{% endif %}
return = domain secure_access
[apache-frontend-available-promise]
<= check-url-available-promise
url = ${apache-frontend:connection-secure_access}
[request-agent-config]
recipe = slapos.recipe.build
init =
slap_connection = self.buildout["slap-connection"]
configuration = self.buildout['slap-configuration']['configuration']
applications = configuration.get('agent', {}).get('applications', [])
applications.append(
# Add a default config ingesting grafana's and influxdb's logs
{
"name": "Grafana",
"type": "system",
"partitions": [
{
"name": "grafana",
"static-tags": {
"partition_reference": slap_connection['partition-id'],
},
"log-file-patterns": [
f"{self.buildout['directory']['home']}/.*_influxdb*.log",
]
},
{
"name": "influxdb",
"static-tags": {
"partition_reference": slap_connection['partition-id'],
},
"log-file-patterns": [
f"{self.buildout['directory']['home']}/.*_grafana*.log",
]
},
]
}
)
options['applications'] = applications
options['loki'] = {
'url': self.buildout['loki-server']['url'],
'caucase-url': self.buildout['loki-caucased']['url'],
}
options['influxdb'] = {
"url": self.buildout['influxdb']['url'],
"database": self.buildout['influxdb']['database'],
"username": self.buildout['influxdb']['auth-username'],
"password": self.buildout['influxdb']['auth-password'],
}
[request-slapos-partition-base]
recipe = slapos.cookbook:request.serialised
software-url = ${slap-connection:software-release-url}
server-url = ${slap-connection:server-url}
key-file = ${slap-connection:key-file}
cert-file = ${slap-connection:cert-file}
computer-id = ${slap-connection:computer-id}
partition-id = ${slap-connection:partition-id}
[request-agent]
<= request-slapos-partition-base
software-type = agent
name = agent
return = facl-script promtail-url
config-applications = ${request-agent-config:applications}
config-loki = ${request-agent-config:loki}
config-influxdb = ${request-agent-config:influxdb}
[agent-promtail-url]
recipe = slapos.cookbook:urlparse
url = ${request-agent:connection-promtail-url}
[agent-promtail-listen-promise]
<= check-port-listening-promise
hostname = ${agent-promtail-url:host}
port = ${agent-promtail-url:port}
[promises]
recipe =
instance-promises =
${influxdb-listen-promise:path}
${influxdb-password-promise:wrapper-path}
${influxdb-database-ready-promise:wrapper-path}
${influxdb-create-default-data-retention-policy-promise:wrapper-path}
${grafana-listen-promise:path}
${grafana-provisioning-datasources-config-file-promise:wrapper-path}
${loki-server-listen-promise:path}
${apache-frontend-available-promise:path}
${agent-promtail-listen-promise:path}
[publish-connection-parameter]
recipe = slapos.cookbook:publish.serialised
influxdb-url = ${influxdb:url}
influxdb-database = ${influxdb:database}
influxdb-username = ${influxdb:auth-username}
influxdb-password = ${influxdb:auth-password}
grafana-url = ${grafana:url}
grafana-username = ${grafana:admin-user}
grafana-password = ${grafana:admin-password}
loki-url = ${loki-server:url}
loki-caucase-url = ${loki-caucased:url}
url = ${apache-frontend:connection-secure_access}
agent-facl-script = ${request-agent:connection-facl-script}
agent-promtail-url = ${request-agent:connection-promtail-url}
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Parameters to instantiate Grafana",
"type": "object",
"additionalProperties": false,
"properties": {
"smtp-server": {
"description": "SMTP server used by grafana to send emails (in host:port format). Leaving this empty will disable email sending.",
"type": "string"
},
"smtp-username": {
"description": "Username to connect to SMTP server",
"type": "string"
},
"smtp-password": {
"description": "Password to connect to SMTP server",
"type": "string"
},
"smtp-verify-ssl": {
"description": "Verify SSL certificate of SMTP server",
"type": "string",
"enum": [
"true",
"false"
]
},
"email-from-address": {
"description": "Email address used in From: header of emails",
"type": "string"
},
"email-from-name": {
"description": "Name used in From: header of emails",
"default": "Grafana",
"type": "string"
},
"promtail-extra-scrape-config": {
"description": "Raw promtail config (experimental parameter, see https://github.com/grafana/loki/blob/v0.3.0/docs/promtail.md#scrape-configs for detail)",
"default": "",
"type": "string"
}
}
}
[buildout] [buildout]
parts = parts = switch-softwaretype
promises
publish-connection-parameter
eggs-directory = {{ buildout['eggs-directory'] }} eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }} develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true offline = true
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
[instance-parameter] filename = ${:_buildout_section_name_}.cfg
recipe = slapos.cookbook:slapconfiguration output = ${buildout:parts-directory}/${:_buildout_section_name_}/${:filename}
computer = ${slap-connection:computer-id} extensions =
partition = ${slap-connection:partition-id} jinja2.ext.do
url = ${slap-connection:server-url} extra-context =
key = ${slap-connection:key-file} context =
cert = ${slap-connection:cert-file} raw buildout_bin_directory {{ buildout['bin-directory'] }}
raw buildout_parts_directory {{ buildout['parts-directory'] }}
raw buildout_eggs_directory {{ buildout['eggs-directory'] }}
raw buildout_develop_eggs_directory {{ buildout['develop-eggs-directory'] }}
key slapparameter_dict slap-configuration:configuration
raw instance_default {{ instance_default }}
raw instance_agent {{ instance_agent }}
raw openssl_bin {{ openssl_bin }}
raw telegraf_bin {{ telegraf_bin }}
raw telegraf_input_slapos_bin {{ telegraf_input_slapos_bin }}
raw influxd_bin {{ influxd_bin }}
raw influx_bin {{ influx_bin }}
raw grafana_bin {{ grafana_bin }}
raw grafana_homepath {{ grafana_homepath }}
raw loki_bin {{ loki_bin }}
raw promtail_bin {{ promtail_bin }}
raw curl_bin {{ curl_bin }}
raw dash_bin {{ dash_bin }}
raw jq_bin {{ jq_bin }}
import-list =
file caucase context:caucase-jinja2-library
[context]
caucase-jinja2-library = {{ caucase_jinja2_library }}
[instance-default]
<= jinja2-template-base
url = {{ instance_default }}
[instance-agent]
<= jinja2-template-base
url = {{ instance_agent }}
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
default = instance-default:output
RootSoftwareInstance = ${:default}
agent = instance-agent:output
[slap-configuration] [slap-configuration]
# apache-frontend reads from a part named [slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id} computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id} partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url} url = ${slap-connection:server-url}
key = ${slap-connection:key-file} key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file} cert = ${slap-connection:cert-file}
[directory]
recipe = slapos.cookbook:mkdirectory
home = ${buildout:directory}
etc = ${:home}/etc
var = ${:home}/var
srv = ${:home}/srv
service = ${:etc}/service
promise = ${:etc}/promise
influxdb-data-dir = ${:srv}/influxdb
grafana-dir = ${:srv}/grafana
grafana-data-dir = ${:grafana-dir}/data
grafana-logs-dir = ${:var}/log
grafana-plugins-dir = ${:grafana-dir}/plugins
grafana-provisioning-config-dir = ${:grafana-dir}/provisioning-config
grafana-provisioning-datasources-dir = ${:grafana-provisioning-config-dir}/datasources
grafana-provisioning-dashboards-dir = ${:grafana-provisioning-config-dir}/dashboards
telegraf-dir = ${:srv}/telegraf
telegraf-extra-config-dir = ${:telegraf-dir}/extra-config
loki-dir = ${:srv}/loki
loki-storage-boltdb-dir = ${:loki-dir}/index/
loki-storage-filesystem-dir = ${:loki-dir}/chunks/
promtail-dir = ${:srv}/promtail
# macros
[generate-certificate]
recipe = plone.recipe.command
command =
if [ ! -e ${:key-file} ]
then
{{ openssl_bin }} req -x509 -nodes -sha256 -days 3650 \
-subj "/C=AA/ST=X/L=X/O=Dis/CN=${:common-name}" \
-newkey rsa -keyout ${:key-file} \
-out ${:cert-file}
fi
update-command = ${:command}
key-file = ${directory:etc}/${:_buildout_section_name_}.key
cert-file = ${directory:etc}/${:_buildout_section_name_}.crt
common-name = ${:_buildout_section_name_}
[config-file]
recipe = slapos.recipe.template:jinja2
url = {{ buildout['parts-directory'] }}/${:_buildout_section_name_}/${:_buildout_section_name_}.cfg.in
output = ${directory:etc}/${:_buildout_section_name_}.cfg
extensions = jinja2.ext.do
[check-port-listening-promise]
recipe = slapos.cookbook:check_port_listening
path = ${directory:promise}/${:_buildout_section_name_}
[check-url-available-promise]
recipe = slapos.cookbook:check_url_available
path = ${directory:promise}/${:_buildout_section_name_}
dash_path = {{ dash_bin }}
curl_path = {{ curl_bin }}
[influxdb]
ipv6 = ${instance-parameter:ipv6-random}
ipv4 = ${instance-parameter:ipv4-random}
host = ${:ipv6}
local-host = ${:ipv4}
rpc-port = 8088
http-port = 8086
url = https://[${:host}]:${:http-port}
data-dir = ${directory:influxdb-data-dir}
auth-username = ${influxdb-password:username}
auth-password = ${influxdb-password:passwd}
unix-socket = ${directory:var}/influxdb.socket
ssl-cert-file = ${influxdb-certificate:cert-file}
ssl-key-file = ${influxdb-certificate:key-file}
database = telegraf
recipe = slapos.cookbook:wrapper
command-line =
nice -19 chrt --idle 0 ionice -c3 {{ influxd_bin }} -config ${influxdb-config-file:output}
wrapper-path = ${directory:service}/influxdb
[influxdb-config-file]
<= config-file
context =
section influxdb influxdb
[influxdb-password]
recipe = slapos.cookbook:generate.password
username = influxdb
[influxdb-certificate]
<= generate-certificate
[influxdb-listen-promise]
<= check-port-listening-promise
hostname = ${influxdb:ipv6}
port = ${influxdb:http-port}
[influxdb-password-promise]
recipe = slapos.cookbook:wrapper
command-line =
{{ influx_bin }} -username ${influxdb:auth-username} -password ${influxdb:auth-password} -socket ${influxdb:unix-socket} -execute "CREATE USER ${influxdb:auth-username} WITH PASSWORD '${influxdb:auth-password}' WITH ALL PRIVILEGES"
wrapper-path = ${directory:promise}/${:_buildout_section_name_}
[influxdb-database-ready-promise]
recipe = slapos.cookbook:wrapper
command-line =
bash -c "{{ influx_bin }} \
-username ${influxdb:auth-username} \
-password ${influxdb:auth-password} \
-host [${influxdb:host}] \
-port ${influxdb:http-port} \
-unsafeSsl \
-ssl \
-execute 'show databases' | grep '${influxdb:database}'"
wrapper-path = ${directory:promise}/${:_buildout_section_name_}
[grafana]
ipv6 = ${instance-parameter:ipv6-random}
port = 8180
url = https://[${:ipv6}]:${:port}
data-dir = ${directory:grafana-data-dir}
logs-dir = ${directory:grafana-logs-dir}
plugins-dir = ${directory:grafana-plugins-dir}
provisioning-config-dir = ${directory:grafana-provisioning-config-dir}
provisioning-datasources-dir = ${directory:grafana-provisioning-datasources-dir}
admin-user = ${grafana-password:username}
admin-password = ${grafana-password:passwd}
secret-key = ${grafana-secret-key:passwd}
ssl-key-file = ${grafana-certificate:key-file}
ssl-cert-file = ${grafana-certificate:cert-file}
recipe = slapos.cookbook:wrapper
command-line =
{{ grafana_bin }} -config ${grafana-config-file:output} -homepath {{ grafana_homepath }}
wrapper-path = ${directory:service}/grafana
[grafana-certificate]
<= generate-certificate
[grafana-password]
recipe = slapos.cookbook:generate.password
username = admin
[grafana-secret-key]
recipe = slapos.cookbook:generate.password
[grafana-config-file]
<= config-file
context =
section grafana grafana
section apache_frontend apache-frontend
key slapparameter_dict slap-configuration:configuration
depends =
${grafana-provisioning-config-file:output}
[grafana-provisioning-config-file]
<= config-file
output = ${grafana:provisioning-datasources-dir}/datasource.yaml
context =
section influxdb influxdb
section loki loki
[grafana-listen-promise]
<= check-port-listening-promise
hostname = ${grafana:ipv6}
port = ${grafana:port}
[telegraf]
recipe = slapos.cookbook:wrapper
extra-config-dir = ${directory:telegraf-extra-config-dir}
# telegraf needs influxdb to be already listening before starting
command-line =
bash -c '${influxdb-listen-promise:path} && nice -19 chrt --idle 0 ionice -c3 {{ telegraf_bin }} --config ${telegraf-config-file:output} --config-directory ${:extra-config-dir}'
wrapper-path = ${directory:service}/telegraf
[telegraf-config-file]
<= config-file
context =
section influxdb influxdb
section telegraf telegraf
[loki]
recipe = slapos.cookbook:wrapper
command-line =
bash -c 'nice -19 chrt --idle 0 ionice -c3 {{ loki_bin }} -config.file=${loki-config-file:output}'
wrapper-path = ${directory:service}/loki
storage-boltdb-dir = ${directory:loki-storage-boltdb-dir}
storage-filesystem-dir = ${directory:loki-storage-filesystem-dir}
ip = ${instance-parameter:ipv4-random}
port = 3100
grpc-port = 9095
url = http://${:ip}:${:port}
[loki-config-file]
<= config-file
context =
section loki loki
[loki-listen-promise]
<= check-url-available-promise
url = ${loki:url}/ready
[promtail]
recipe = slapos.cookbook:wrapper
command-line =
bash -c 'nice -19 chrt --idle 0 ionice -c3 {{ promtail_bin }} -config.file=${promtail-config-file:output}'
wrapper-path = ${directory:service}/promtail
dir = ${directory:promtail-dir}
http-port = 19080
grpc-port = 19095
ip = ${instance-parameter:ipv4-random}
url = http://${:ip}:${:http-port}
[promtail-config-file]
<= config-file
context =
section promtail promtail
section loki loki
key slapparameter_dict slap-configuration:configuration
[promtail-listen-promise]
<= check-port-listening-promise
hostname = ${promtail:ip}
port = ${promtail:http-port}
[apache-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
name = Grafana Frontend
# XXX We have hardcoded SR URL here.
software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
shared = true
config-url = ${grafana:url}
config-https-only = true
return = domain secure_access
[apache-frontend-available-promise]
<= check-url-available-promise
url = ${apache-frontend:connection-secure_access}
[promises]
recipe =
instance-promises =
${influxdb-listen-promise:path}
${influxdb-password-promise:wrapper-path}
${influxdb-database-ready-promise:wrapper-path}
${grafana-listen-promise:path}
${loki-listen-promise:path}
${promtail-listen-promise:path}
${apache-frontend-available-promise:path}
[publish-connection-parameter]
recipe = slapos.cookbook:publish
influxdb-url = ${influxdb:url}
influxdb-database = ${influxdb:database}
influxdb-username = ${influxdb:auth-username}
influxdb-password = ${influxdb:auth-password}
telegraf-extra-config-dir = ${telegraf:extra-config-dir}
grafana-url = ${grafana:url}
grafana-username = ${grafana:admin-user}
grafana-password = ${grafana:admin-password}
loki-url = ${loki:url}
promtail-url = ${promtail:url}
url = ${apache-frontend:connection-secure_access}
auth_enabled: false
server:
http_listen_address: {{ loki['ip'] }}
http_listen_port: {{ loki['port'] }}
grpc_listen_address: {{ loki['ip'] }}
grpc_listen_port: {{ loki['grpc-port'] }}
ingester:
lifecycler:
address: {{ loki['ip'] }}
ring:
kvstore:
store: inmemory
replication_factor: 1
chunk_idle_period: 15m
schema_config:
configs:
- from: 2018-04-15
store: boltdb
object_store: filesystem
schema: v9
index:
prefix: index_
period: 168h
storage_config:
boltdb:
directory: {{ loki['storage-boltdb-dir'] }}
filesystem:
directory: {{ loki['storage-filesystem-dir'] }}
limits_config:
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0
table_manager:
chunk_tables_provisioning:
inactive_read_throughput: 0
inactive_write_throughput: 0
provisioned_read_throughput: 0
provisioned_write_throughput: 0
index_tables_provisioning:
inactive_read_throughput: 0
inactive_write_throughput: 0
provisioned_read_throughput: 0
provisioned_write_throughput: 0
retention_deletes_enabled: false
retention_period: 0
server:
http_listen_address: {{ promtail['ip'] }}
http_listen_port: {{ promtail['http-port'] }}
grpc_listen_address: {{ promtail['ip'] }}
grpc_listen_port: {{ promtail['grpc-port'] }}
external_url: {{ promtail['url'] }}
positions:
filename: {{ promtail['dir'] }}/positions.yaml
clients:
- url: {{ loki['url'] }}/api/prom/push
scrape_configs:
- job_name: test
static_configs:
- targets:
- localhost
labels:
job: grafanalogs
__path__: ./var/log/*log
{{ slapparameter_dict.get('promtail-extra-scrape-config', '') }}
[buildout] [buildout]
extends = extends =
../../stack/slapos.cfg ../../stack/slapos.cfg
../../stack/caucase/buildout.cfg
../../stack/nodejs.cfg ../../stack/nodejs.cfg
../../component/make/buildout.cfg ../../component/make/buildout.cfg
../../component/golang/buildout.cfg ../../component/golang/buildout.cfg
../../component/openssl/buildout.cfg ../../component/openssl/buildout.cfg
../../component/curl/buildout.cfg ../../component/curl/buildout.cfg
../../component/dash/buildout.cfg ../../component/dash/buildout.cfg
../../component/jq/buildout.cfg
../../component/systemd/buildout.cfg
../../component/fluent-bit/buildout.cfg
buildout.hash.cfg buildout.hash.cfg
versions = versions
parts = parts =
slapos-cookbook slapos-cookbook
instance-profile instance-profile
gowork gowork
influxdb-config-file influxdb-config-file
telegraf-config-file
grafana-config-file grafana-config-file
grafana-provisioning-config-file grafana-provisioning-dashboards-config-file
loki-config-file fluent-bit
promtail-config-file post-install-cleanup
[nodejs]
<= nodejs-14.16.0
[go_github.com_grafana_grafana] [go_github.com_grafana_grafana]
<= go-git-package <= go-git-package
go.importpath = github.com/grafana/grafana go.importpath = github.com/grafana/grafana
repository = https://github.com/grafana/grafana repository = https://github.com/grafana/grafana
revision = v7.5.2-0-gca413c612f revision = v10.1.2-0-g8e428858dd
[go_github.com_grafana_loki] [go_github.com_grafana_loki]
<= go-git-package <= go-git-package
go.importpath = github.com/grafana/loki go.importpath = github.com/grafana/loki
repository = https://github.com/perrinjerome/loki repository = https://github.com/grafana/loki
revision = v2.2.1-1-gda6d45f2 revision = v3.1.0-0-g935aee77e
[go_github.com_influxdata_influxdb] [go_github.com_influxdata_influxdb]
<= go-git-package <= go-git-package
...@@ -46,49 +46,80 @@ revision = v1.8.4-0-gbc8ec4384e ...@@ -46,49 +46,80 @@ revision = v1.8.4-0-gbc8ec4384e
<= go-git-package <= go-git-package
go.importpath = github.com/influxdata/telegraf go.importpath = github.com/influxdata/telegraf
repository = https://github.com/influxdata/telegraf repository = https://github.com/influxdata/telegraf
revision = v1.17.3-0-g24a552b90b revision = v1.28.1-0-g3ea9ffbe2
[go_github.com_perrinjerome_slapos_telegraf_input]
<= go-git-package
go.importpath = github.com/perrinjerome/telegraf-input-slapos
repository = https://github.com/perrinjerome/telegraf-input-slapos
revision = v0.0.2-0-gd4c5221
[go_github.com_prometheus_prometheus]
<= go-git-package
go.importpath = github.com/prometheus/prometheus
repository = https://github.com/prometheus/prometheus
revision = v0.41.0-0-gc0d8a56c6
# [go_github.com_jaegertracing_jaeger]
# <= go-git-package
# go.importpath = github.com/jaegertracing/jaeger
# repository = https://github.com/jaegertracing/jaeger
# revision = v1.20.0-623-gcac21f82
[gowork] [gowork]
# Fails with current default golang1.18
golang = ${golang1.17:location}
install = install =
${go_github.com_grafana_loki:location}:./cmd/loki ${go_github.com_grafana_loki:location}:./cmd/loki
${go_github.com_grafana_loki:location}:./cmd/promtail ${go_github.com_grafana_loki:location}:./clients/cmd/promtail
${go_github.com_grafana_loki:location}:./cmd/logcli ${go_github.com_grafana_loki:location}:./cmd/logcli
${go_github.com_influxdata_telegraf:location}:./cmd/... ${go_github.com_influxdata_telegraf:location}:./cmd/...
${go_github.com_influxdata_influxdb:location}:./cmd/... ${go_github.com_influxdata_influxdb:location}:./cmd/...
${go_github.com_perrinjerome_slapos_telegraf_input:location}:./...
${go_github.com_prometheus_prometheus:location}:./cmd/...
# disable cgo, to prevent loki/promtail from using go-systemd
environment = environment =
CGO_ENABLED = 0 CGO_ENABLED=1
CGO_CFLAGS=-I${systemd:location}/include
buildflags =
-tags promtail_journal_enabled
cpkgpath =
${systemd:location}
telegraf-bin = ${:bin}/telegraf telegraf-bin = ${:bin}/telegraf
telegraf-input-slapos-bin = ${:bin}/telegraf-input-slapos
influx-bin = ${:bin}/influx influx-bin = ${:bin}/influx
influxd-bin = ${:bin}/influxd influxd-bin = ${:bin}/influxd
grafana-bin = ${:bin}/grafana-server grafana-bin = ${grafana:binpath}/grafana
grafana-homepath = ${grafana:homepath} grafana-homepath = ${grafana:homepath}
loki-bin = ${:bin}/loki loki-bin = ${:bin}/loki
promtail-bin = ${:bin}/promtail promtail-bin = ${:bin}/promtail
[post-install-cleanup]
recipe = plone.recipe.command
stop-on-error = true
# remove caches and binary files that confuse the software check
command =
chmod +w ${gowork.dir:directory}/pkg/mod/github.com/gabriel-vasile/mimetype@v1.4.2/testdata/ \
&& rm -rf ${gowork.dir:directory}/pkg/mod/github.com/gabriel-vasile/mimetype@v1.4.2/testdata/so.so \
&& chmod -w ${gowork.dir:directory}/pkg/mod/github.com/gabriel-vasile/mimetype@v1.4.2/testdata/ \
&& rm -rf ${buildout:directory}/.cache/
[grafana] [grafana]
recipe = plone.recipe.command recipe = plone.recipe.command
command = bash -c " command = bash -ce "
cd ${:homepath} && cd ${:homepath} && \
. ${gowork:env.sh} && . ${gowork:env.sh} && \
# Unlike the loki, grafana _needs_ CGO_ENABLED, so we override here go install github.com/google/wire/cmd/wire@v0.5.0 && \
export CGO_ENABLED=1 && wire gen -tags oss ./pkg/server ./pkg/cmd/grafana-cli/runner && \
go run build.go setup && \ go run build.go setup && \
go run build.go build && \ go run build.go build && \
${yarn:location}/bin/yarn install --pure-lockfile && \ export NODE_OPTIONS=--max_old_space_size=8192 && \
${yarn:location}/bin/yarn install --immutable && \
${yarn:location}/bin/yarn run themes:generate && \
${yarn:location}/bin/yarn run build && \ ${yarn:location}/bin/yarn run build && \
${yarn:location}/bin/yarn run plugins:build-bundled && \ ${yarn:location}/bin/yarn run plugins:build-bundled"
# Cleanup yarn and Cypress caches
rm -rf ${buildout:directory}/.cache/Cypress/ && \
rm -rf ${buildout:directory}/.cache/yarn/
"
homepath = ${go_github.com_grafana_grafana:location} homepath = ${go_github.com_grafana_grafana:location}
# XXX "linux-amd64" is not portable here
binpath = ${go_github.com_grafana_grafana:location}/bin/linux-amd64
stop-on-error = true stop-on-error = true
[download-file-base] [download-file-base]
...@@ -98,19 +129,22 @@ url = ${:_profile_base_location_}/${:filename} ...@@ -98,19 +129,22 @@ url = ${:_profile_base_location_}/${:filename}
[influxdb-config-file] [influxdb-config-file]
<= download-file-base <= download-file-base
[telegraf-config-file]
<= download-file-base
[grafana-config-file] [grafana-config-file]
<= download-file-base <= download-file-base
[grafana-provisioning-config-file] [grafana-provisioning-dashboards-config-file]
<= download-file-base <= download-file-base
[loki-config-file] [instance-eggs]
recipe = zc.recipe.egg
eggs =
toml
[instance-agent]
<= download-file-base <= download-file-base
[promtail-config-file] [instance-default]
<= download-file-base <= download-file-base
[instance-profile] [instance-profile]
...@@ -120,8 +154,11 @@ output = ${buildout:directory}/instance.cfg ...@@ -120,8 +154,11 @@ output = ${buildout:directory}/instance.cfg
extensions = jinja2.ext.do extensions = jinja2.ext.do
context = context =
section buildout buildout section buildout buildout
key instance_default instance-default:target
key instance_agent instance-agent:target
key openssl_bin openssl-output:openssl key openssl_bin openssl-output:openssl
key telegraf_bin gowork:telegraf-bin key telegraf_bin gowork:telegraf-bin
key telegraf_input_slapos_bin gowork:telegraf-input-slapos-bin
key influxd_bin gowork:influxd-bin key influxd_bin gowork:influxd-bin
key influx_bin gowork:influx-bin key influx_bin gowork:influx-bin
key grafana_bin gowork:grafana-bin key grafana_bin gowork:grafana-bin
...@@ -130,8 +167,13 @@ context = ...@@ -130,8 +167,13 @@ context =
key promtail_bin gowork:promtail-bin key promtail_bin gowork:promtail-bin
key curl_bin :curl-bin key curl_bin :curl-bin
key dash_bin :dash-bin key dash_bin :dash-bin
key jq_bin :jq-bin
key caucase_jinja2_library caucase-jinja2-library:target
curl-bin = ${curl:location}/bin/curl curl-bin = ${curl:location}/bin/curl
dash-bin = ${dash:location}/bin/dash dash-bin = ${dash:location}/bin/dash
jq-bin = ${jq:location}/bin/jq
depends = ${instance-eggs:eggs} ${caucase-eggs:eggs}
[versions] [versions]
inotifyx = 0.2.2 toml = 0.10.2
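The toml egg pinned above is pulled in through [instance-eggs] and the depends line of [instance-profile]; presumably it lets the instance profile serialize generated Telegraf settings. A minimal sketch, assuming illustrative plugin values rather than the profile's real variables:

import toml

# Hypothetical settings; the real instance profile derives these from the
# "agent" instance parameters.
telegraf_conf = {
    "agent": {"interval": "10s", "round_interval": True},
    "outputs": {
        "influxdb": {
            "urls": ["https://example.invalid:8086"],  # placeholder URL
            "database": "telegraf",
        }
    },
}
with open("telegraf.toml", "w") as f:
    toml.dump(telegraf_conf, f)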
{ {
"name": "Grafana", "name": "Grafana",
"description": "Grafana, Telegraf and Influxdb", "description": "Grafana, Influxdb, Loki and Telegraf",
"serialisation": "xml", "serialisation": "json-in-xml",
"software-type": { "software-type": {
"default": { "default": {
"title": "Default", "title": "Default",
"description": "Grafana, Telegraf and Influxdb in same partition", "description": "Grafana, Influxdb and Loki",
"request": "instance-input-schema.json", "request": "instance-default-input-schema.json",
"response": "instance-output-schema.json", "response": "instance-default-output-schema.json",
"index": 0
},
"agent": {
"title": "Agent",
"description": "Telegraf agent sending metrics to Influxdb and Promtail agent sending logs to Loki",
"request": "instance-agent-input-schema.json",
"response": "instance-agent-output-schema.json",
"index": 0 "index": 0
} }
} }
......
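The switch to "json-in-xml" serialisation means instance parameters travel as a single JSON document under the "_" key, which is also how the tests below pass and read parameters. A hedged request sketch, where slap and software_url are assumed to be an initialized slapos.slap connection and the software release URL:

import json

# Request the "agent" software type; parameters are JSON-encoded under "_".
partition = slap.registerOpenOrder().request(
    software_release=software_url,
    partition_reference='my-agent',
    software_type='agent',
    partition_parameter_kw={'_': json.dumps({'agent': {'applications': []}})},
)
# Connection parameters come back the same way.
connection = json.loads(partition.getConnectionParameterDict()['_'])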
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.
# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use its own builtin configuration to
# connect to the local system.
# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.
# Tags can also be specified via a normal map, but only one form at a time:
[tags]
# dc = "us-east-1"
# Configuration for telegraf agent
[agent]
# Default data collection interval for all plugins
interval = "10s"
# Rounds collection interval to 'interval'
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
# Default data flushing interval for all outputs. You should not set this below
# interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
# Jitter the flush interval by a random amount. This is primarily to avoid
# large write spikes for users running a large number of telegraf instances.
# ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
# Run telegraf in debug mode
debug = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
[outputs]
# Configuration for influxdb server to send metrics to
[outputs.influxdb]
# The full HTTP or UDP endpoint URL for your InfluxDB instance
# Multiple urls can be specified for InfluxDB cluster support.
# urls = ["udp://localhost:8089"] # UDP endpoint example
# XXX XXX XXX
#urls = ["http://localhost:8086"] # required
urls = ["{{ influxdb['url'] }}"]
insecure_skip_verify = true # because we are using a self signed certificate
# The target database for metrics (telegraf will create it if not exists)
database = "{{ influxdb['database'] }}" # required
# Precision of writes, valid values are n, u, ms, s, m, and h
# note: using second precision greatly helps InfluxDB compression
precision = "s"
# Connection timeout (for the connection with InfluxDB), formatted as a string.
# If not provided, will default to 0 (no timeout)
# timeout = "5s"
username = "{{ influxdb['auth-username'] }}"
password = "{{ influxdb['auth-password'] }}"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# PLUGINS #
###############################################################################
# Read metrics about cpu usage
[cpu]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
drop = ["cpu_time"]
# Read metrics about memory usage
[mem]
# no configuration
[disk]
[io]
[system]
###############################################################################
# ERP5 - PLUGINS #
###############################################################################
#
# Left here as an example; don't edit this file directly, but place your config
# files in {{ telegraf['extra-config-dir'] }}
#
#[mysql]
# servers = ["root@unix(/srv/slapgrid/slappart12/srv/runner/instance/slappart1/var/run/mariadb.sock)/erp5"]
#[memcached]
# # XXX kumofs does not support memcached's stat command
# servers = ["10.0.248.233:2013", "10.0.248.233:2003"]
#[haproxy]
# servers = ["http://10.0.121.162:2150/haproxy", "http://10.0.121.162:2152/haproxy"]
#[[inputs.exec]]
# commands = ["/srv/slapgrid/slappart0/bin/slapsensor /srv/slapgrid/slappart0/srv/runner/instance/etc/supervisord.conf"]
# name_suffix = "_slapos"
# interval = "5s"
###############################################################################
# SERVICE PLUGINS #
###############################################################################
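As the header of this template suggests, a rendered configuration can be dry-run before deployment. A small wrapper sketch, assuming the telegraf binary is on PATH (in this profile it actually lives at gowork:telegraf-bin):

import subprocess

# Dry-run the configuration: telegraf gathers each input once, prints the
# metrics it would emit, and exits without writing to any output.
result = subprocess.run(
    ['telegraf', '-config', 'telegraf.toml', '-test'],
    capture_output=True, text=True,
)
print(result.stdout or result.stderr)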
...@@ -25,13 +25,15 @@ ...@@ -25,13 +25,15 @@
# #
############################################################################## ##############################################################################
from __future__ import unicode_literals import functools
import io import io
import json
import logging import logging
import os import pathlib
import re
import tempfile import tempfile
import textwrap
import time import time
import urllib.parse
import psutil import psutil
import requests import requests
...@@ -39,10 +41,8 @@ from six.moves import configparser ...@@ -39,10 +41,8 @@ from six.moves import configparser
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass( setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath( pathlib.Path(__file__).parent.parent / 'software.cfg')
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
class GrafanaTestCase(SlapOSInstanceTestCase): class GrafanaTestCase(SlapOSInstanceTestCase):
...@@ -57,78 +57,97 @@ class GrafanaTestCase(SlapOSInstanceTestCase): ...@@ -57,78 +57,97 @@ class GrafanaTestCase(SlapOSInstanceTestCase):
class TestGrafana(GrafanaTestCase): class TestGrafana(GrafanaTestCase):
def setUp(self): def setUp(self):
self.grafana_url = self.computer_partition.getConnectionParameterDict( self.connection_params = json.loads(
)['grafana-url'] self.computer_partition.getConnectionParameterDict()['_']
)
self.grafana_url = self.connection_params['grafana-url']
def test_grafana_available(self): def test_grafana_available(self):
resp = requests.get(self.grafana_url, verify=False) resp = requests.get(self.grafana_url, verify=False)
self.assertEqual(requests.codes.ok, resp.status_code) self.assertEqual(resp.status_code, requests.codes.ok)
def test_grafana_api(self): def test_grafana_api(self):
# check API is usable # check API is usable
api_org_url = '{self.grafana_url}/api/org'.format(**locals()) api_org_url = f'{self.grafana_url}/api/org'
resp = requests.get(api_org_url, verify=False) resp = requests.get(api_org_url, verify=False)
self.assertEqual(requests.codes.unauthorized, resp.status_code) self.assertEqual(resp.status_code, requests.codes.unauthorized)
connection_params = self.computer_partition.getConnectionParameterDict()
resp = requests.get( resp = requests.get(
api_org_url, api_org_url,
verify=False, verify=False,
auth=requests.auth.HTTPBasicAuth( auth=requests.auth.HTTPBasicAuth(
connection_params['grafana-username'], self.connection_params['grafana-username'],
connection_params['grafana-password'], self.connection_params['grafana-password'],
)) ))
self.assertEqual(requests.codes.ok, resp.status_code) self.assertEqual(resp.status_code, requests.codes.ok)
self.assertEqual(1, resp.json()['id']) self.assertEqual(resp.json()['id'], 1)
def test_grafana_datasource_povisinonned(self): def test_grafana_datasource_provisioned(self):
# data sources are provisioned # data sources are provisioned
connection_params = self.computer_partition.getConnectionParameterDict() get = functools.partial(
resp = requests.get( requests.get,
'{self.grafana_url}/api/datasources'.format(**locals()), verify=False,
verify=False, auth=requests.auth.HTTPBasicAuth(
auth=requests.auth.HTTPBasicAuth( self.connection_params['grafana-username'],
connection_params['grafana-username'], self.connection_params['grafana-password'],
connection_params['grafana-password'], )
)) )
self.assertEqual(requests.codes.ok, resp.status_code) datasources_resp = get(f'{self.grafana_url}/api/datasources')
self.assertEqual(datasources_resp.status_code, requests.codes.ok)
self.assertEqual( self.assertEqual(
sorted(['influxdb', 'loki']), sorted([ds['type'] for ds in datasources_resp.json()]),
sorted([ds['type'] for ds in resp.json()])) sorted(['influxdb', 'loki']))
# data sources are usable
# for this we need to wait a bit, because they are only usable once
# some data has been ingested
influxdb, = [ds for ds in datasources_resp.json() if ds['type'] == 'influxdb']
loki, = [ds for ds in datasources_resp.json() if ds['type'] == 'loki']
for retry in range(16):
influxdb_health = get(f'{self.grafana_url}/api/datasources/uid/{influxdb["uid"]}/health').json()
if influxdb_health.get('status') == "OK":
break
time.sleep(retry)
self.assertEqual(influxdb_health['status'], "OK")
for retry in range(16):
loki_health = get(f'{self.grafana_url}/api/datasources/uid/{loki["uid"]}/resources/labels?start={time.time() - 1000}').json()
if loki_health.get('data'):
break
time.sleep(retry)
else:
self.fail(loki_health)
self.assertEqual(loki_health['status'], "success")
self.assertIn("app", loki_health['data'])
def test_email_disabled(self): def test_email_disabled(self):
config = configparser.ConfigParser() config = configparser.ConfigParser()
# grafana config file is like an ini file with an implicit default section # grafana config file is like an ini file with an implicit default section
with open( f = self.computer_partition_root_path / 'etc' / 'grafana-config-file.cfg'
os.path.join(self.computer_partition_root_path, 'etc', config.read_file(io.StringIO('[default]\n' + f.read_text()))
'grafana-config-file.cfg')) as f:
config.readfp(io.StringIO('[default]\n' + f.read()))
self.assertEqual(config.get('smtp', 'enabled'), 'false') self.assertEqual(config.get('smtp', 'enabled'), 'false')
class TestGrafanaEmailEnabled(GrafanaTestCase): class TestGrafanaEmailEnabled(GrafanaTestCase):
__partition_reference__ = 'mail' __partition_reference__ = 'mail'
smtp_verify_ssl = "true" smtp_verify_ssl = True
smtp_skip_verify = "false" smtp_skip_verify = "false"
@classmethod @classmethod
def getInstanceParameterDict(cls): def getInstanceParameterDict(cls):
return { return {"_": json.dumps({
"email": {
"smtp-server": "smtp.example.com:25", "smtp-server": "smtp.example.com:25",
"smtp-username": "smtp_username", "smtp-username": "smtp_username",
"smtp-password": "smtp_password", "smtp-password": "smtp_password",
'smtp-verify-ssl': cls.smtp_verify_ssl, 'smtp-verify-ssl': cls.smtp_verify_ssl,
"email-from-address": "grafana@example.com", "email-from-address": "grafana@example.com",
"email-from-name": "Grafana From Name", "email-from-name": "Grafana From Name",
} }})}
def test_email_enabled(self): def test_email_enabled(self):
config = configparser.ConfigParser() config = configparser.ConfigParser()
with open( f = self.computer_partition_root_path / 'etc' / 'grafana-config-file.cfg'
os.path.join(self.computer_partition_root_path, 'etc', config.read_file(io.StringIO('[default]\n' + f.read_text()))
'grafana-config-file.cfg')) as f:
config.readfp(io.StringIO('[default]\n' + f.read()))
self.assertEqual(config.get('smtp', 'enabled'), 'true') self.assertEqual(config.get('smtp', 'enabled'), 'true')
self.assertEqual(config.get('smtp', 'host'), 'smtp.example.com:25') self.assertEqual(config.get('smtp', 'host'), 'smtp.example.com:25')
self.assertEqual(config.get('smtp', 'user'), 'smtp_username') self.assertEqual(config.get('smtp', 'user'), 'smtp_username')
...@@ -139,98 +158,169 @@ class TestGrafanaEmailEnabled(GrafanaTestCase): ...@@ -139,98 +158,169 @@ class TestGrafanaEmailEnabled(GrafanaTestCase):
class TestGrafanaEmailEnabledSkipVerify(TestGrafanaEmailEnabled): class TestGrafanaEmailEnabledSkipVerify(TestGrafanaEmailEnabled):
smtp_verify_ssl = "false" smtp_verify_ssl = False
smtp_skip_verify = "true" smtp_skip_verify = "true"
class TestInfluxDb(GrafanaTestCase): class TestInfluxDb(GrafanaTestCase):
def setUp(self): def setUp(self):
self.influxdb_url = self.computer_partition.getConnectionParameterDict( self.connection_params = json.loads(self.computer_partition.getConnectionParameterDict()['_'])
)['influxdb-url'] self.influxdb_url = self.connection_params['influxdb-url']
def test_influxdb_available(self): def test_influxdb_available(self):
ping_url = '{self.influxdb_url}/ping'.format(**locals()) ping_url = f'{self.influxdb_url}/ping'
resp = requests.get(ping_url, verify=False) resp = requests.get(ping_url, verify=False)
self.assertEqual(requests.codes.no_content, resp.status_code) self.assertEqual(resp.status_code, requests.codes.no_content)
def test_influxdb_api(self): def test_influxdb_api(self):
query_url = '{self.influxdb_url}/query'.format(**locals()) query_url = f'{self.influxdb_url}/query'
connection_params = self.computer_partition.getConnectionParameterDict()
for i in range(10): for i in range(16):
# retry, as it may take a little delay to create databases # retry, as it may take a little delay to create databases
resp = requests.get( resp = requests.get(
query_url, query_url,
verify=False, verify=False,
params=dict( params=dict(
q='SHOW DATABASES', q='SHOW DATABASES',
u=connection_params['influxdb-username'], u=self.connection_params['influxdb-username'],
p=connection_params['influxdb-password'])) p=self.connection_params['influxdb-password']))
self.assertEqual(requests.codes.ok, resp.status_code) self.assertEqual(resp.status_code, requests.codes.ok)
result, = resp.json()['results'] result, = resp.json()['results']
if result['series'] and 'values' in result['series'][0]: if result['series'] and 'values' in result['series'][0]:
break break
time.sleep(0.5 * i) time.sleep(0.5 * i)
self.assertIn( self.assertIn(
[connection_params['influxdb-database']], result['series'][0]['values']) [self.connection_params['influxdb-database']], result['series'][0]['values'])
class TestTelegraf(GrafanaTestCase): class TestTelegraf(GrafanaTestCase):
__partition_reference__ = 'G'
@classmethod
def getInstanceParameterDict(cls):
parameter_dict = {
"agent": {
"applications": [
{
"name": "slapos-standalone-from-test",
"type": "SlapOS",
"instance-root": cls.slap._instance_root,
"partitions": [
{
"name": "test grafana - default partition",
"type": "default",
"reference": "G0", # XXX assumes partitions will be allocated in order
},
{
"name": "test grafana - agent partition",
"type": "default",
"reference": "G1"
},
],
},
],
},
}
return {'_': json.dumps(parameter_dict)}
def setUp(self):
self.connection_params = json.loads(self.computer_partition.getConnectionParameterDict()['_'])
self.influxdb_url = self.connection_params['influxdb-url']
def test_telegraf_running(self): def test_telegraf_running(self):
with self.slap.instance_supervisor_rpc as supervisor: with self.slap.instance_supervisor_rpc as supervisor:
all_process_info = supervisor.getAllProcessInfo() all_process_info = supervisor.getAllProcessInfo()
process_info, = [p for p in all_process_info if 'telegraf' in p['name']] process_info, = [p for p in all_process_info if 'telegraf' in p['name']]
self.assertEqual('RUNNING', process_info['statename']) self.assertEqual(process_info['statename'], 'RUNNING')
def test_telegraf_ingest_slapos_metrics(self):
# wait for data to be ingested
time.sleep(16)
query_url = f'{self.influxdb_url}/query'
query = """
SELECT max("state")
FROM "slapos_services"
WHERE time >= now() - 5m and time <= now()
GROUP BY time(5m),
"partition_reference"::tag,
"name"::tag,
"computer_id"::tag,
"process_name"::tag
fill(null)
"""
get = functools.partial(
requests.get,
verify=False,
params=dict(
q=query,
db=self.connection_params['influxdb-database'],
u=self.connection_params['influxdb-username'],
p=self.connection_params['influxdb-password'],
),
)
for i in range(16):
resp = get(query_url)
if resp.ok and resp.json()['results'][0].get('series'):
break
time.sleep(i)
else:
self.fail(resp.text)
series = resp.json()['results'][0].get('series')
# hashes and "-on-watch" is removed from process_name
self.assertIn('grafana', [s['tags']['process_name'] for s in series])
self.assertIn('telegraf', [s['tags']['process_name'] for s in series])
self.assertIn('loki-service', [s['tags']['process_name'] for s in series])
self.assertIn('loki-grafana-client-certificate-updater', [s['tags']['process_name'] for s in series])
tags = [s['tags'] for s in series if s['tags']['partition_reference'] == 'G0'][0]
self.assertEqual(tags['name'], 'test grafana - default partition')
self.assertEqual(tags['computer_id'], self.slap._computer_id)
self.assertEqual(tags['partition_reference'], 'G0')
self.assertEqual(
{s['tags']['partition_reference'] for s in series},
{'G0', 'G1'},
)
class TestLoki(GrafanaTestCase): class TestLoki(GrafanaTestCase):
@classmethod @classmethod
def getInstanceParameterDict(cls): def getInstanceParameterDict(cls):
cls._logfile = tempfile.NamedTemporaryFile(suffix='log') cls._logfile = tempfile.NamedTemporaryFile(suffix='log')
return { cls.addClassCleanup(cls._logfile.close)
'promtail-extra-scrape-config': parameter_dict = {
textwrap.dedent( "agent": {
r''' "applications": [
- job_name: {cls.__name__} {
pipeline_stages: "name": "TestLoki",
- match: "type": "system",
selector: '{{job="{cls.__name__}"}}' "partitions": [
stages: {
- multiline: "name": "test log file",
firstline: '^\d{{4}}-\d{{2}}-\d{{2}}\s\d{{1,2}}\:\d{{2}}\:\d{{2}}\,\d{{3}}' "log-file-patterns": [cls._logfile.name],
max_wait_time: 3s "static-tags": {
- regex: "testtag": "foo",
expression: '^(?P<timestamp>.*) - (?P<name>\S+) - (?P<level>\S+) - (?P<message>.*)' },
- timestamp: },
format: 2006-01-02T15:04:05Z00:00 ],
source: timestamp },
- labels: ],
level: },
name:
static_configs:
- targets:
- localhost
labels:
job: {cls.__name__}
__path__: {cls._logfile.name}
''').format(**locals())
} }
return {'_': json.dumps(parameter_dict)}
@classmethod
def tearDownClass(cls):
cls._logfile.close()
super(TestLoki, cls).tearDownClass()
def setUp(self): def setUp(self):
self.loki_url = self.computer_partition.getConnectionParameterDict( self.loki_url = json.loads(
self.computer_partition.getConnectionParameterDict()['_']
)['loki-url'] )['loki-url']
def test_loki_available(self): def test_loki_certificate_required(self):
self.assertEqual( with self.assertRaisesRegex(requests.exceptions.SSLError, 'certificate required'):
requests.codes.ok, requests.get(f'{self.loki_url}/ready', verify=False)
requests.get('{self.loki_url}/ready'.format(**locals()),
verify=False).status_code)
def test_log_ingested(self): def test_log_ingested(self):
# create a logger logging to the file that we have # create a logger logging to the file that we have
...@@ -239,68 +329,47 @@ class TestLoki(GrafanaTestCase): ...@@ -239,68 +329,47 @@ class TestLoki(GrafanaTestCase):
test_logger.propagate = False test_logger.propagate = False
test_logger.setLevel(logging.INFO) test_logger.setLevel(logging.INFO)
test_handler = logging.FileHandler(filename=self._logfile.name) test_handler = logging.FileHandler(filename=self._logfile.name)
test_handler.setFormatter(
logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
test_logger.addHandler(test_handler) test_logger.addHandler(test_handler)
test_logger.info("testing message") test_logger.info("testing info message")
test_logger.info("testing another message") get = functools.partial(
test_logger.warning("testing warn") requests.get,
# log an exception, which will be multi line in log file. cert=(
def nested1(): self.computer_partition_root_path / 'etc' / 'loki-promise-client-certificate.crt',
def nested2(): self.computer_partition_root_path / 'etc' / 'loki-promise-client-certificate.key',
raise ValueError('boom') ),
nested2() verify=self.computer_partition_root_path / 'etc' / 'loki-server-certificate.ca.crt',
try: )
nested1() url = urllib.parse.urlparse(
except ValueError: self.loki_url
test_logger.exception("testing exception") )._replace(
path="/loki/api/v1/query_range",
# Check our messages have been ingested query=urllib.parse.urlencode({'query': '{app="TestLoki"} |= ""'}),
# we retry a few times, because there's a short delay until messages are ).geturl()
# ingested and returned. for i in range(16):
for i in range(60): resp = get(url)
resp = requests.get( if resp.ok:
'{self.loki_url}/api/prom/query?query={{job="TestLoki"}}'.format( if result := resp.json().get('data', {}).get('result', []):
**locals()), break
verify=False).json() time.sleep(i)
if len(resp.get('streams', [])) < 3: else:
time.sleep(0.5 * i) self.fail(resp.text)
continue self.assertEqual(
result[0]['stream'],
warn_stream_list = [stream for stream in resp['streams'] if 'level="WARNING"' in stream['labels']] {
self.assertEqual(1, len(warn_stream_list), resp['streams']) 'app': 'TestLoki',
warn_stream, = warn_stream_list 'computer_id': self.slap._computer_id,
self.assertIn("testing warn", warn_stream['entries'][0]['line']) 'detected_level': 'info',
'filename': self._logfile.name,
info_stream_list = [stream for stream in resp['streams'] if 'level="INFO"' in stream['labels']] 'job': 'TestLoki-test log file',
self.assertEqual(1, len(info_stream_list), resp['streams']) 'name': 'test log file',
info_stream, = info_stream_list 'service_name': 'TestLoki',
self.assertTrue( 'testtag': 'foo',
[ }
line for line in info_stream['entries'] )
if "testing message" in line['line'] self.assertEqual(
]) [v[1] for v in result[0]['values']],
self.assertTrue( ['testing info message'])
[ self.assertEqual(len(result), 1)
line for line in info_stream['entries']
if "testing another message" in line['line']
])
error_stream_list = [stream for stream in resp['streams'] if 'level="ERROR"' in stream['labels']]
self.assertEqual(1, len(error_stream_list), resp['streams'])
error_stream, = error_stream_list
line, = [line['line'] for line in error_stream['entries']]
# this entry is multi-line
self.assertIn('testing exception\nTraceback (most recent call last):\n', line)
self.assertIn('ValueError: boom', line)
# The labels we have configured are also available
resp = requests.get(
'{self.loki_url}/api/prom/label'.format(**locals()),
verify=False).json()
self.assertIn('level', resp['values'])
self.assertIn('name', resp['values'])
class TestListenInPartition(GrafanaTestCase): class TestListenInPartition(GrafanaTestCase):
...@@ -308,9 +377,18 @@ class TestListenInPartition(GrafanaTestCase): ...@@ -308,9 +377,18 @@ class TestListenInPartition(GrafanaTestCase):
with self.slap.instance_supervisor_rpc as supervisor: with self.slap.instance_supervisor_rpc as supervisor:
all_process_info = supervisor.getAllProcessInfo() all_process_info = supervisor.getAllProcessInfo()
def canonical_process_name(process):
"""remove hash from hash-files and "on-watch"
"""
return re.sub(
r'-([a-f0-9]{32})$',
'',
process['name'].replace('-on-watch', ''),
)
self.process_dict = { self.process_dict = {
p['name'].replace('-on-watch', ''): psutil.Process(p['pid']) canonical_process_name(p): psutil.Process(p['pid'])
for p in all_process_info if p['name'] != 'watchdog' for p in all_process_info if p['name'] != 'watchdog'
} }
def test_grafana_listen(self): def test_grafana_listen(self):
...@@ -328,13 +406,13 @@ class TestListenInPartition(GrafanaTestCase): ...@@ -328,13 +406,13 @@ class TestListenInPartition(GrafanaTestCase):
c.laddr for c in self.process_dict['influxdb'].connections() c.laddr for c in self.process_dict['influxdb'].connections()
if c.status == 'LISTEN' if c.status == 'LISTEN'
]), ]),
[ sorted([
(self._ipv4_address, 8088), (self._ipv4_address, 8088),
(self.computer_partition_ipv6_address, 8086), (self.computer_partition_ipv6_address, 8086),
], ]),
) )
def test_telegraph_listen(self): def test_telegraf_listen(self):
self.assertEqual( self.assertEqual(
[ [
c.laddr for c in self.process_dict['telegraf'].connections() c.laddr for c in self.process_dict['telegraf'].connections()
...@@ -346,13 +424,13 @@ class TestListenInPartition(GrafanaTestCase): ...@@ -346,13 +424,13 @@ class TestListenInPartition(GrafanaTestCase):
def test_loki_listen(self): def test_loki_listen(self):
self.assertEqual( self.assertEqual(
sorted([ sorted([
c.laddr for c in self.process_dict['loki'].connections() c.laddr for c in self.process_dict['loki-service'].connections()
if c.status == 'LISTEN' if c.status == 'LISTEN'
]), ]),
[ sorted([
(self._ipv4_address, 3100),
(self._ipv4_address, 9095), (self._ipv4_address, 9095),
], (self.computer_partition_ipv6_address, 3100),
]),
) )
def test_promtail_listen(self): def test_promtail_listen(self):
......
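The listen tests above all follow the same psutil pattern; a standalone version of the probe, assuming only a pid:

import psutil

# Sketch of the probe used by the listen tests: every address a process
# holds in LISTEN state, sorted for stable comparison.
def listening_addresses(pid):
    return sorted(
        c.laddr for c in psutil.Process(pid).connections()
        if c.status == 'LISTEN')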
...@@ -4,7 +4,7 @@ kvm ...@@ -4,7 +4,7 @@ kvm
Introduction Introduction
------------ ------------
This software release is used to deploy KVM and NBD instances. This software release is used to deploy KVM.
For extensive parameters definition, please look at parameter-input-schema.json. For extensive parameters definition, please look at parameter-input-schema.json.
...@@ -24,7 +24,6 @@ to be accessible from IPv4:: ...@@ -24,7 +24,6 @@ to be accessible from IPv4::
software_release=kvm, software_release=kvm,
partition_reference="My awesome KVM", partition_reference="My awesome KVM",
partition_parameter_kw={ partition_parameter_kw={
"nbd-host":"ubuntu-1204.nbd.vifib.net",
} }
) )
...@@ -38,10 +37,6 @@ KVM instance parameters: ...@@ -38,10 +37,6 @@ KVM instance parameters:
- frontend-instance-guid - frontend-instance-guid
- frontend-additional-instance-guid - frontend-additional-instance-guid
- frontend-instance-name (default: VNC Frontend) - frontend-instance-name (default: VNC Frontend)
- nbd-port (default: 1024)
- nbd-host
- nbd2-port (default: 1024)
- nbd2-host
- ram-size (default: 4096) - ram-size (default: 4096)
- disk-size (default: 40) - disk-size (default: 40)
......
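With the NBD parameters gone, a request relies on the remaining parameters listed above. A hedged sketch mirroring the README example, where slap is assumed to be an initialized slapos.slap connection and kvm the software release URL; the values shown are the documented defaults:

# Sketch of a post-NBD request.
kvm_instance = slap.registerOpenOrder().request(
    software_release=kvm,
    partition_reference="My awesome KVM",
    partition_parameter_kw={
        "ram-size": 4096,  # MB, documented default
        "disk-size": 40,   # GB, documented default
    },
)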
...@@ -15,15 +15,15 @@ ...@@ -15,15 +15,15 @@
[template] [template]
filename = instance.cfg.in filename = instance.cfg.in
md5sum = ee1fe10d8db4d3c39e3a3f1b53d12883 md5sum = 9ae66fb63a3cdd8072582622aa1bb36c
[template-kvm] [template-kvm]
filename = instance-kvm.cfg.jinja2 filename = instance-kvm.cfg.jinja2
md5sum = 9916c160b1c9711145d7e10506a9fca8 md5sum = bd3a7229e4fdfa9372ee61b6054acf78
[template-kvm-cluster] [template-kvm-cluster]
filename = instance-kvm-cluster.cfg.jinja2.in filename = instance-kvm-cluster.cfg.jinja2.in
md5sum = 6e6f6748ec466eb49a4f872aec7563fa md5sum = 8ce14c5ae114dcfa6e9aff0511b218d4
[template-kvm-resilient] [template-kvm-resilient]
filename = instance-kvm-resilient.cfg.jinja2 filename = instance-kvm-resilient.cfg.jinja2
...@@ -45,10 +45,6 @@ md5sum = 34d1b7cc8ca62bfdfce759a1dfbbaccd ...@@ -45,10 +45,6 @@ md5sum = 34d1b7cc8ca62bfdfce759a1dfbbaccd
filename = template/kvm-export.sh.jinja2 filename = template/kvm-export.sh.jinja2
md5sum = 64aa1ce8785f6b94aabd787fa3443082 md5sum = 64aa1ce8785f6b94aabd787fa3443082
[template-nbd]
filename = instance-nbd.cfg.jinja2
md5sum = e041e8011ad2ec7f104be173ef76f5e9
[template-nginx] [template-nginx]
filename = template/nginx_conf.in filename = template/nginx_conf.in
md5sum = 9ca886120a99befe25ca761ddc54753c md5sum = 9ca886120a99befe25ca761ddc54753c
...@@ -59,7 +55,7 @@ md5sum = 6328f99728284847b8dd1146aadeae1b ...@@ -59,7 +55,7 @@ md5sum = 6328f99728284847b8dd1146aadeae1b
[template-kvm-run] [template-kvm-run]
filename = template/template-kvm-run.in filename = template/template-kvm-run.in
md5sum = f0190843e3979742fe9e29b8a607539f md5sum = 729bc484c8c1a82b827cc4bcdff87f95
[template-kvm-controller] [template-kvm-controller]
filename = template/kvm-controller-run.in filename = template/kvm-controller-run.in
...@@ -96,3 +92,7 @@ md5sum = b4f6ffef08685bace1b9c01a3bd2620d ...@@ -96,3 +92,7 @@ md5sum = b4f6ffef08685bace1b9c01a3bd2620d
[whitelist-domains-default] [whitelist-domains-default]
filename = template/whitelist-domains-default filename = template/whitelist-domains-default
md5sum = e9d40162ba77472775256637a2617d14 md5sum = e9d40162ba77472775256637a2617d14
[boot-image-select-source-config]
filename = template/boot-image-select-source-config.json.in
md5sum = 5dc0cbb8f8dccfdd5c52d0af4a2b2c48
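Each filename/md5sum pair above must be kept in sync with the template it names. A minimal helper to recompute a checksum after editing a template (plain hashlib, not the repository's own update tooling):

import hashlib
import sys

# Print the md5 of a file, to paste into buildout.hash.cfg.
# Usage: python md5sum.py template/template-kvm-run.in
with open(sys.argv[1], 'rb') as f:
    print(hashlib.md5(f.read()).hexdigest())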
...@@ -354,20 +354,6 @@ ...@@ -354,20 +354,6 @@
"vmxnet3" "vmxnet3"
] ]
}, },
"nbd-host": {
"title": "NBD hostname or IP",
"description": "hostname (or IP) of the NBD server containing the boot image.",
"type": "string",
"format": "internet-address"
},
"nbd-port": {
"title": "NBD port",
"description": "Port of the NBD server containing the boot image.",
"type": "integer",
"default": 1024,
"minimum": 1,
"maximum": 65535
},
"virtual-hard-drive-url": { "virtual-hard-drive-url": {
"title": "Existing disk image URL", "title": "Existing disk image URL",
"description": "If specified, will download an existing disk image (qcow2, raw, ...), and will use it as main virtual hard drive. Can be used to download and use an already installed and customized virtual hard drive.", "description": "If specified, will download an existing disk image (qcow2, raw, ...), and will use it as main virtual hard drive. Can be used to download and use an already installed and customized virtual hard drive.",
...@@ -446,84 +432,31 @@ ...@@ -446,84 +432,31 @@
"type": "boolean", "type": "boolean",
"default": false "default": false
}, },
"boot-image-url-list": {
"title": "Boot image list",
"description": "The list shall be list of direct URLs to images, followed by hash (#), then by image MD5SUM. Each image shall appear on newline, like: \"https://example.com/image.iso#06226c7fac5bacfa385872a19bb99684<newline>https://example.com/another-image.iso#31b40d58b18e038498ddb46caea1361c\". They will be provided in KVM image list according to the order on the list. After updating the list, the instance has to be restarted to refresh it. Amount of images is limited to 4, and one image can be maximum 20GB. Image will be downloaded and checked against its MD5SUM 4 times, then it will be considered as impossible to download with given MD5SUM. Each image has to be downloaded in time shorter than 4 hours, so in case of very slow images to access, it can take up to 16 hours to download all of them. Note: The instance has to be restarted in order to update the list of available images in the VM. Note: Maximum 3 ISOs are supported.",
"type": "string",
"textarea": true
},
"boot-image-url-select": { "boot-image-url-select": {
"title": "Boot image", "title": "Boot image",
"type": "array", "type": "string",
"oneOf": [ "description": "Selectable list of provided ISO images.",
{ "default": "Debian Bookworm 12 netinst x86_64",
"const": [ "enum": [
"https://shacache.nxdcdn.com/33c08e56c83d13007e4a5511b9bf2c4926c4aa12fd5dd56d493c0653aecbab380988c5bf1671dbaea75c582827797d98c4a611f7fb2b131fbde2c677d5258ec9#326b7737c4262e8eb09cd26773f3356a" "Debian Bookworm 12 netinst x86_64",
], "Debian Bullseye 11 netinst x86_64",
"title": "Debian Bookworm 12 netinst x86_64" "Centos 8.2004 Minimal x86_64",
}, "Ubuntu Noble 24.04 Live Server x86_64",
{ "Ubuntu Jammy 22.04 Live Server x86_64",
"const": [ "Ubuntu Focal 20.04 Live Server x86_64",
"https://shacache.nxdcdn.com/02257c3ec27e45d9f022c181a69b59da67e5c72871cdb4f9a69db323a1fad58093f2e69702d29aa98f5f65e920e0b970d816475a5a936e1f3bf33832257b7e92#b710c178eb434d79ce40ce703d30a5f0" "openSUSE Leap 15 NET x86_64",
], "Arch Linux 2020.09.01 x86_64",
"title": "Debian Bullseye 11 netinst x86_64" "Fedora Server 32 netinst x86_64",
}, "FreeBSD 12.1 RELEASE bootonly x86_64",
{ "SUSE Linux Enterprise Server 15 SP6 x86_64"
"const": [
"https://shacache.nxdcdn.com/ce5ddfdbdaccdf929b7fe321212356347d82a02f6b7733427282b416f113d91e587682b003e9d376ac189c3b731595c50c236962aadf2720c16d9f36913577c0#23bf2a2d60271e553e63525e794415f1"
],
"title": "Centos 8.2004 Minimal x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/3d518612aabbdb77fd6b49cb55b824fed11e40540e4af52f5f26174257715c93740f83079ea618b4d933081f0b1bc69d32b7885b7c75bc90da5ad3fe1814cfd4#c53b2d7c3269c3b91a2d941ceaa8ab9b"
],
"title": "Ubuntu Jammy 24.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/8017c532ed74586b718662d8b11cf8c34fa638b0affd0413ed38623989b8f98ffd0bcb475246e279ea2f3c194a3e33c55e0f376a9727de13e4bfd87e75e47b5d#e8d2a77c51b599c10651608a5d8c286f"
],
"title": "Ubuntu Jammy 22.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/302c990c6d69575ff24c96566e5c7e26bf36908abb0cd546e22687c46fb07bf8dba595bf77a9d4fd9ab63e75c0437c133f35462fd41ea77f6f616140cd0e5e6a#f3a306f40e4a313fb5a584d73b3dee8f"
],
"title": "Ubuntu Focal 20.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/6635269a7eb6fbd6b85fda40cd94f14a27bf53cb1fc82ffcce9fe386a025a43e1ab681db7e8cec50416bfbfc90262f0d95273686a101c74b3f17646f0a34c85b#3708a59af6cf820a95cafe0ae73ac399"
],
"title": "openSUSE Leap 15.2 NET x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/fc17e8c6ae0790162f4beb8fa6226d945cff638429588999b3a08493ff27b280dc2939fba825ae04be1d9082ea8d7c3c002c5e4c39fbbcf88b8ab5104619e28a#ebcdb2223a77f098af3923fe1fa180aa"
],
"title": "Arch Linux 2020.09.01 x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/c5a511f349a1146b615e6fab9c24f9be4362046adcf24f0ff82c470d361fac5f6628895e2110ebf8ff87db49d4c413a0a332699da6b1bec64275e0c17a15b999#ca7a1e555c04b4d9a549065fa2ddf713"
],
"title": "Fedora Server 32-1.6 netinst x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/6c355def68b3c0427f21598cb054ffc893568902f205601ac60f192854769b31bc9cff8eeb6ce99ef975a8fb887d8d3e56fc6cd5ea5cb4b3bba1175c520047cb#57088b77f795ca44b00971e44782ee23"
],
"title": "FreeBSD 12.1 RELEASE bootonly x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/e72e03bbcc4c54ce4b8d5f360b47dab9ee514d754e8d78c403626cf000d6ae98d808b3bcff2201e3cf49c1be1b0f308f1cb5ed81676adcb1837dfc811d2451ac"
],
"title": "SUSE Linux Enterprise Server 15 SP6 x86_64"
}
] ]
}, },
"boot-image-url-list": {
"title": "[EXPERT] Boot image list",
"description": "The list shall be list of direct URLs to images, followed by hash (#), then by image MD5SUM. Each image shall appear on newline, like: \"https://example.com/image.iso#06226c7fac5bacfa385872a19bb99684<newline>https://example.com/another-image.iso#31b40d58b18e038498ddb46caea1361c\". They will be provided in KVM image list according to the order on the list. Maximum images: 4. Maximum image size: 20GB. Download tires: 4. Maximum download time: 4h.",
"type": "string",
"textarea": true
},
"whitelist-domains": { "whitelist-domains": {
"title": "Whitelist domains", "title": "Whitelist domains",
"description": "List of whitelisted domain names to be accessed from the VM. They will be resolved to IPs depending on where the VM end up. IPs can be used too.", "description": "List of whitelisted domain names to be accessed from the VM. They will be resolved to IPs depending on where the VM end up. IPs can be used too.",
......
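The [EXPERT] boot-image-url-list description above specifies one "URL#MD5SUM" entry per line; a hedged parser sketch of that format:

# Split a boot-image-url-list value into (url, md5sum) pairs, following the
# "URL#MD5SUM" per-line format described in the schema above.
def parse_boot_image_url_list(value):
    images = []
    for line in value.splitlines():
        line = line.strip()
        if not line:
            continue
        url, _, md5sum = line.partition('#')
        images.append((url, md5sum))
    return images

print(parse_boot_image_url_list(
    "https://example.com/image.iso#06226c7fac5bacfa385872a19bb99684"))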
...@@ -75,8 +75,6 @@ config-name = {{ instance_name }} ...@@ -75,8 +75,6 @@ config-name = {{ instance_name }}
{% if slapparameter_dict.get('authorized-keys', []) -%} {% if slapparameter_dict.get('authorized-keys', []) -%}
config-authorized-key = {{ dumps(slapparameter_dict.get('authorized-keys') | join('\n')) }} config-authorized-key = {{ dumps(slapparameter_dict.get('authorized-keys') | join('\n')) }}
{% endif -%} {% endif -%}
config-nbd-port = {{ dumps(kvm_parameter_dict.get('nbd-port', 1024)) }}
config-nbd2-port = {{ dumps(kvm_parameter_dict.get('nbd-port2', 1024)) }}
config-ram-size = {{ dumps(kvm_parameter_dict.get('ram-size', 4096)) }} config-ram-size = {{ dumps(kvm_parameter_dict.get('ram-size', 4096)) }}
config-ram-max-size = {{ dumps(kvm_parameter_dict.get('ram-max-size', int(kvm_parameter_dict.get('ram-size', 4096)) + 512)) }} config-ram-max-size = {{ dumps(kvm_parameter_dict.get('ram-max-size', int(kvm_parameter_dict.get('ram-size', 4096)) + 512)) }}
config-enable-device-hotplug = {{ dumps(kvm_parameter_dict.get('enable-device-hotplug', False)) }} config-enable-device-hotplug = {{ dumps(kvm_parameter_dict.get('enable-device-hotplug', False)) }}
...@@ -89,7 +87,6 @@ config-cpu-max-count = {{ dumps(kvm_parameter_dict.get('cpu-max-count', int(kvm_ ...@@ -89,7 +87,6 @@ config-cpu-max-count = {{ dumps(kvm_parameter_dict.get('cpu-max-count', int(kvm_
config-network-adapter = {{ dumps(kvm_parameter_dict.get('network-adapter', 'virtio-net-pci')) }} config-network-adapter = {{ dumps(kvm_parameter_dict.get('network-adapter', 'virtio-net-pci')) }}
{{ setconfig('numa', kvm_parameter_dict.get('numa', '')) }} {{ setconfig('numa', kvm_parameter_dict.get('numa', '')) }}
{{ setconfig('machine-options', kvm_parameter_dict.get('machine-options', '')) }} {{ setconfig('machine-options', kvm_parameter_dict.get('machine-options', '')) }}
{{ setconfig('nbd-host', kvm_parameter_dict.get('nbd-host', '')) }}
{{ setconfig('host2', kvm_parameter_dict.get('host2', '')) }} {{ setconfig('host2', kvm_parameter_dict.get('host2', '')) }}
config-auto-ballooning = {{ dumps(kvm_parameter_dict.get('auto-ballooning', True)) }} config-auto-ballooning = {{ dumps(kvm_parameter_dict.get('auto-ballooning', True)) }}
......
...@@ -145,33 +145,6 @@ ...@@ -145,33 +145,6 @@
"vmxnet3" "vmxnet3"
] ]
}, },
"nbd-host": {
"title": "NBD hostname",
"description": "hostname (or IP) of the NBD server containing the boot image.",
"type": "string",
"format": "internet-address"
},
"nbd-port": {
"title": "NBD port",
"description": "Port of the NBD server containing the boot image.",
"type": "integer",
"default": 1024,
"minimum": 1,
"maximum": 65535
},
"nbd2-host": {
"title": "Second NBD hostname",
"description": "hostname (or IP) of the second NBD server (containing drivers for example).",
"type": "string",
"format": "internet-address"
},
"nbd2-port": {
"title": "Second NBD port",
"description": "Port of the second NBD server containing the boot image.",
"type": "integer",
"minimum": 1,
"maximum": 65535
},
"virtual-hard-drive-url": { "virtual-hard-drive-url": {
"title": "Existing disk image URL", "title": "Existing disk image URL",
"description": "If specified, will download an existing disk image (qcow2, raw, ...), and will use it as main virtual hard drive. Can be used to download and use an already installed and customized virtual hard drive.", "description": "If specified, will download an existing disk image (qcow2, raw, ...), and will use it as main virtual hard drive. Can be used to download and use an already installed and customized virtual hard drive.",
...@@ -312,84 +285,31 @@ ...@@ -312,84 +285,31 @@
"format": "uri", "format": "uri",
"default": "http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg" "default": "http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg"
}, },
"boot-image-url-list": {
"title": "Boot image list",
"description": "The list shall be list of direct URLs to images, followed by hash (#), then by image MD5SUM. Each image shall appear on newline, like: \"https://example.com/image.iso#06226c7fac5bacfa385872a19bb99684<newline>https://example.com/another-image.iso#31b40d58b18e038498ddb46caea1361c\". They will be provided in KVM image list according to the order on the list. After updating the list, the instance has to be restarted to refresh it. Amount of images is limited to 4, and one image can be maximum 20GB. Image will be downloaded and checked against its MD5SUM 4 times, then it will be considered as impossible to download with given MD5SUM. Each image has to be downloaded in time shorter than 4 hours, so in case of very slow images to access, it can take up to 16 hours to download all of them. Note: The instance has to be restarted in order to update the list of available images in the VM. Note: Maximum 3 ISOs are supported.",
"type": "string",
"textarea": true
},
"boot-image-url-select": { "boot-image-url-select": {
"title": "Boot image", "title": "Boot image",
"type": "array", "description": "Selectable list of provided ISO images.",
"oneOf": [ "type": "string",
{ "default": "Debian Bookworm 12 netinst x86_64",
"const": [ "enum": [
"https://shacache.nxdcdn.com/33c08e56c83d13007e4a5511b9bf2c4926c4aa12fd5dd56d493c0653aecbab380988c5bf1671dbaea75c582827797d98c4a611f7fb2b131fbde2c677d5258ec9#326b7737c4262e8eb09cd26773f3356a" "Debian Bookworm 12 netinst x86_64",
], "Debian Bullseye 11 netinst x86_64",
"title": "Debian Bookworm 12 netinst x86_64" "Centos 8.2004 Minimal x86_64",
}, "Ubuntu Noble 24.04 Live Server x86_64",
{ "Ubuntu Jammy 22.04 Live Server x86_64",
"const": [ "Ubuntu Focal 20.04 Live Server x86_64",
"https://shacache.nxdcdn.com/02257c3ec27e45d9f022c181a69b59da67e5c72871cdb4f9a69db323a1fad58093f2e69702d29aa98f5f65e920e0b970d816475a5a936e1f3bf33832257b7e92#b710c178eb434d79ce40ce703d30a5f0" "openSUSE Leap 15 NET x86_64",
], "Arch Linux 2020.09.01 x86_64",
"title": "Debian Bullseye 11 netinst x86_64" "Fedora Server 32 netinst x86_64",
}, "FreeBSD 12.1 RELEASE bootonly x86_64",
{ "SUSE Linux Enterprise Server 15 SP6 x86_64"
"const": [
"https://shacache.nxdcdn.com/ce5ddfdbdaccdf929b7fe321212356347d82a02f6b7733427282b416f113d91e587682b003e9d376ac189c3b731595c50c236962aadf2720c16d9f36913577c0#23bf2a2d60271e553e63525e794415f1"
],
"title": "Centos 8.2004 Minimal x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/3d518612aabbdb77fd6b49cb55b824fed11e40540e4af52f5f26174257715c93740f83079ea618b4d933081f0b1bc69d32b7885b7c75bc90da5ad3fe1814cfd4#c53b2d7c3269c3b91a2d941ceaa8ab9b"
],
"title": "Ubuntu Jammy 24.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/8017c532ed74586b718662d8b11cf8c34fa638b0affd0413ed38623989b8f98ffd0bcb475246e279ea2f3c194a3e33c55e0f376a9727de13e4bfd87e75e47b5d#e8d2a77c51b599c10651608a5d8c286f"
],
"title": "Ubuntu Jammy 22.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/302c990c6d69575ff24c96566e5c7e26bf36908abb0cd546e22687c46fb07bf8dba595bf77a9d4fd9ab63e75c0437c133f35462fd41ea77f6f616140cd0e5e6a#f3a306f40e4a313fb5a584d73b3dee8f"
],
"title": "Ubuntu Focal 20.04.1 Live Server x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/6635269a7eb6fbd6b85fda40cd94f14a27bf53cb1fc82ffcce9fe386a025a43e1ab681db7e8cec50416bfbfc90262f0d95273686a101c74b3f17646f0a34c85b#3708a59af6cf820a95cafe0ae73ac399"
],
"title": "openSUSE Leap 15.2 NET x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/fc17e8c6ae0790162f4beb8fa6226d945cff638429588999b3a08493ff27b280dc2939fba825ae04be1d9082ea8d7c3c002c5e4c39fbbcf88b8ab5104619e28a#ebcdb2223a77f098af3923fe1fa180aa"
],
"title": "Arch Linux 2020.09.01 x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/c5a511f349a1146b615e6fab9c24f9be4362046adcf24f0ff82c470d361fac5f6628895e2110ebf8ff87db49d4c413a0a332699da6b1bec64275e0c17a15b999#ca7a1e555c04b4d9a549065fa2ddf713"
],
"title": "Fedora Server 32-1.6 netinst x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/6c355def68b3c0427f21598cb054ffc893568902f205601ac60f192854769b31bc9cff8eeb6ce99ef975a8fb887d8d3e56fc6cd5ea5cb4b3bba1175c520047cb#57088b77f795ca44b00971e44782ee23"
],
"title": "FreeBSD 12.1 RELEASE bootonly x86_64"
},
{
"const": [
"https://shacache.nxdcdn.com/e72e03bbcc4c54ce4b8d5f360b47dab9ee514d754e8d78c403626cf000d6ae98d808b3bcff2201e3cf49c1be1b0f308f1cb5ed81676adcb1837dfc811d2451ac"
],
"title": "SUSE Linux Enterprise Server 15 SP6 x86_64"
}
] ]
}, },
"boot-image-url-list": {
"title": "[EXPERT] Boot image list",
"description": "The list shall be list of direct URLs to images, followed by hash (#), then by image MD5SUM. Each image shall appear on newline, like: \"https://example.com/image.iso#06226c7fac5bacfa385872a19bb99684<newline>https://example.com/another-image.iso#31b40d58b18e038498ddb46caea1361c\". They will be provided in KVM image list according to the order on the list. Maximum images: 4. Maximum image size: 20GB. Download tires: 4. Maximum ownload time: 4h.",
"type": "string",
"textarea": true
},
"whitelist-domains": { "whitelist-domains": {
"title": "Whitelist domains", "title": "Whitelist domains",
"description": "List of whitelisted domain names to be accessed from the VM. They will be resolved to IPs depending on where the VM end up. IPs can be used too.", "description": "List of whitelisted domain names to be accessed from the VM. They will be resolved to IPs depending on where the VM end up. IPs can be used too.",
......
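Under the reworked schema, selecting a provided image is a plain string matching one of the enum titles, instead of the old array-of-const URLs; for instance:

import json

# Instance parameters selecting a provided ISO under the new string enum;
# the value must match one of the titles listed in the schema above.
parameters = {"boot-image-url-select": "Debian Bookworm 12 netinst x86_64"}
print(json.dumps(parameters, indent=2))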
{
"type": "object",
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Input Parameters For NDB Server",
"properties": {}
}
{
"$schema": "http://json-schema.org/draft-07/schema#",
"name": "Output Parameters",
"properties": {
"nbd_url": {
"title": "NBD server URL",
"description": "URL to be used to boot another VM. Requires IPv6. IPv6 should be used as \"NBD hostname\" and port as \"NBD port\"",
"type": "string",
"format": "uri"
},
"upload_url": {
"title": "Upload URL",
"description": "URL used to upload your VM image.",
"type": "string",
"format": "uri"
},
"upload_key": {
"title": "Upload key",
"description": "Key used to upload your VM image.",
"type": "string",
"format": "uri"
}
}
}
...@@ -19,8 +19,11 @@ ...@@ -19,8 +19,11 @@
{% set whitelist_domains = slapparameter_dict.get('whitelist-domains', '') -%} {% set whitelist_domains = slapparameter_dict.get('whitelist-domains', '') -%}
{% set virtual_hard_drive_url_enabled = 'virtual-hard-drive-url' in slapparameter_dict %} {% set virtual_hard_drive_url_enabled = 'virtual-hard-drive-url' in slapparameter_dict %}
{% set virtual_hard_drive_url_gzipped = slapparameter_dict.get('virtual-hard-drive-gzipped', False) %} {% set virtual_hard_drive_url_gzipped = slapparameter_dict.get('virtual-hard-drive-gzipped', False) %}
{% set boot_image_url_list_enabled = 'boot-image-url-list' in slapparameter_dict %} {% if 'boot-image-url-select' not in slapparameter_dict and ('boot-image-url-list' in slapparameter_dict or 'nbd-host' in slapparameter_dict or 'nbd2-host' in slapparameter_dict or virtual_hard_drive_url_enabled)%}
{% set boot_image_url_select_enabled = 'boot-image-url-select' in slapparameter_dict %} {% set boot_image_url_select_default = '' %}
{% else %}
{% set boot_image_url_select_default = 'Debian Bookworm 12 netinst x86_64' %}
{% endif %}
{% set bootstrap_script_url = slapparameter_dict.get('bootstrap-script-url') -%} {% set bootstrap_script_url = slapparameter_dict.get('bootstrap-script-url') -%}
{% set cpu_max_count = dumps(slapparameter_dict.get('cpu-max-count', int(slapparameter_dict.get('cpu-count', 2)) + 1)) %} {% set cpu_max_count = dumps(slapparameter_dict.get('cpu-max-count', int(slapparameter_dict.get('cpu-count', 2)) + 1)) %}
{% set ram_max_size = dumps(slapparameter_dict.get('ram-max-size', int(slapparameter_dict.get('ram-size', 4096)) + 512)) %} {% set ram_max_size = dumps(slapparameter_dict.get('ram-max-size', int(slapparameter_dict.get('ram-size', 4096)) + 512)) %}
...@@ -63,16 +66,12 @@ virtual-hard-drive-url-repository = ${:srv}/virtual-hard-drive-url-repository ...@@ -63,16 +66,12 @@ virtual-hard-drive-url-repository = ${:srv}/virtual-hard-drive-url-repository
virtual-hard-drive-url-var = ${:var}/virtual-hard-drive-url virtual-hard-drive-url-var = ${:var}/virtual-hard-drive-url
virtual-hard-drive-url-expose = ${monitor-directory:private}/virtual-hard-drive-url virtual-hard-drive-url-expose = ${monitor-directory:private}/virtual-hard-drive-url
{%- endif %} {%- endif %}
{%- if boot_image_url_list_enabled %}
boot-image-url-list-repository = ${:srv}/boot-image-url-list-repository boot-image-url-list-repository = ${:srv}/boot-image-url-list-repository
boot-image-url-list-var = ${:var}/boot-image-url-list boot-image-url-list-var = ${:var}/boot-image-url-list
boot-image-url-list-expose = ${monitor-directory:private}/boot-image-url-list boot-image-url-list-expose = ${monitor-directory:private}/boot-image-url-list
{%- endif %}
{%- if boot_image_url_select_enabled %}
boot-image-url-select-repository = ${:srv}/boot-image-url-select-repository boot-image-url-select-repository = ${:srv}/boot-image-url-select-repository
boot-image-url-select-var = ${:var}/boot-image-url-select boot-image-url-select-var = ${:var}/boot-image-url-select
boot-image-url-select-expose = ${monitor-directory:private}/boot-image-url-select boot-image-url-select-expose = ${monitor-directory:private}/boot-image-url-select
{%- endif %}
[create-mac] [create-mac]
recipe = slapos.cookbook:generate.mac recipe = slapos.cookbook:generate.mac
...@@ -88,7 +87,6 @@ storage-path = ${directory:srv}/.passwd ...@@ -88,7 +87,6 @@ storage-path = ${directory:srv}/.passwd
# VNC protocol supports passwords of 8 characters max # VNC protocol supports passwords of 8 characters max
bytes = 8 bytes = 8
{% if boot_image_url_select_enabled %}
## boot-image-url-select support BEGIN ## boot-image-url-select support BEGIN
[empty-file-state-base-select-promise] [empty-file-state-base-select-promise]
<= monitor-promise-base <= monitor-promise-base
...@@ -99,13 +97,12 @@ config-url = ${monitor-base:base-url}/private/boot-image-url-select/${:filename} ...@@ -99,13 +97,12 @@ config-url = ${monitor-base:base-url}/private/boot-image-url-select/${:filename}
[boot-image-url-select-source-config] [boot-image-url-select-source-config]
recipe = slapos.recipe.template:jinja2 recipe = slapos.recipe.template:jinja2
inline = url = {{ boot_image_select_source_config }}
{%- raw %} boot-image-url-select = {{ dumps(slapparameter_dict.get('boot-image-url-select', '')) }}
{{ boot_image_url_select }} boot-image-url-select-default = {{ dumps(boot_image_url_select_default) }}
{% endraw -%}
boot-image-url-select = {{ dumps(slapparameter_dict['boot-image-url-select']) }}
context = context =
key boot_image_url_select :boot-image-url-select key boot_image_url_select :boot-image-url-select
key boot_image_url_select_default :boot-image-url-select-default
output = ${directory:etc}/boot-image-url-select.json output = ${directory:etc}/boot-image-url-select.json
[boot-image-url-select-processed-config] [boot-image-url-select-processed-config]
...@@ -183,9 +180,7 @@ config-filename = ${boot-image-url-select-download-wrapper:md5sum-state-file} ...@@ -183,9 +180,7 @@ config-filename = ${boot-image-url-select-download-wrapper:md5sum-state-file}
filename = ${boot-image-url-select-download-wrapper:error-state-filename} filename = ${boot-image-url-select-download-wrapper:error-state-filename}
config-filename = ${boot-image-url-select-download-wrapper:error-state-file} config-filename = ${boot-image-url-select-download-wrapper:error-state-file}
## boot-image-url-select support END ## boot-image-url-select support END
{% endif %} {# if boot_image_url_select_enabled #}
{% if boot_image_url_list_enabled %}
## boot-image-url-list support BEGIN ## boot-image-url-list support BEGIN
[empty-file-state-base-list-promise] [empty-file-state-base-list-promise]
<= monitor-promise-base <= monitor-promise-base
...@@ -200,7 +195,7 @@ inline = ...@@ -200,7 +195,7 @@ inline =
{%- raw %} {%- raw %}
{{ boot_image_url_list }} {{ boot_image_url_list }}
{% endraw -%} {% endraw -%}
boot-image-url-list = {{ dumps(slapparameter_dict['boot-image-url-list']) }} boot-image-url-list = {{ dumps(slapparameter_dict.get('boot-image-url-list', '')) }}
context = context =
key boot_image_url_list :boot-image-url-list key boot_image_url_list :boot-image-url-list
output = ${directory:etc}/boot-image-url-list.conf output = ${directory:etc}/boot-image-url-list.conf
...@@ -280,7 +275,6 @@ config-filename = ${boot-image-url-list-download-wrapper:md5sum-state-file} ...@@ -280,7 +275,6 @@ config-filename = ${boot-image-url-list-download-wrapper:md5sum-state-file}
filename = ${boot-image-url-list-download-wrapper:error-state-filename} filename = ${boot-image-url-list-download-wrapper:error-state-filename}
config-filename = ${boot-image-url-list-download-wrapper:error-state-file} config-filename = ${boot-image-url-list-download-wrapper:error-state-file}
## boot-image-url-list support END ## boot-image-url-list support END
{% endif %} {# if boot_image_url_list_enabled #}
{% if virtual_hard_drive_url_enabled %} {% if virtual_hard_drive_url_enabled %}
## virtual-hard-drive-url support BEGIN ## virtual-hard-drive-url support BEGIN
...@@ -399,26 +393,13 @@ ipv6 = ${slap-network-information:global-ipv6} ...@@ -399,26 +393,13 @@ ipv6 = ${slap-network-information:global-ipv6}
vnc-ip = ${:ipv4} vnc-ip = ${:ipv4}
vnc-websocket-port = 5701 vnc-websocket-port = 5701
default-cdrom-iso = {{ debian_amd64_netinst_location }}
{% if virtual_hard_drive_url_enabled %} {% if virtual_hard_drive_url_enabled %}
virtual-hard-drive-url-json-config = ${virtual-hard-drive-url-json-config:output} virtual-hard-drive-url-json-config = ${virtual-hard-drive-url-json-config:output}
{% else %} {% else %}
virtual-hard-drive-url-json-config = virtual-hard-drive-url-json-config =
{% endif %} {% endif %}
{% if boot_image_url_list_enabled %}
boot-image-url-list-json-config = ${boot-image-url-list-json-config:output} boot-image-url-list-json-config = ${boot-image-url-list-json-config:output}
{% else %}
boot-image-url-list-json-config =
{% endif %}
{% if boot_image_url_select_enabled %}
boot-image-url-select-json-config = ${boot-image-url-select-json-config:output} boot-image-url-select-json-config = ${boot-image-url-select-json-config:output}
{% else %}
boot-image-url-select-json-config =
{% endif %}
nbd-host = ${slap-parameter:nbd-host}
nbd-port = ${slap-parameter:nbd-port}
nbd2-host = ${slap-parameter:nbd2-host}
nbd2-port = ${slap-parameter:nbd2-port}
tap-interface = {{ slap_configuration.get('tap-name', '') }} tap-interface = {{ slap_configuration.get('tap-name', '') }}
tap-ipv6-addr = {{ slap_configuration.get('tap-ipv6-addr', '') }} tap-ipv6-addr = {{ slap_configuration.get('tap-ipv6-addr', '') }}
...@@ -549,12 +530,8 @@ update-command = ${:command} ...@@ -549,12 +530,8 @@ update-command = ${:command}
command = [ ! -f {{ '${' + key + '}' }} ] && touch {{ '${' + key + '}' }} command = [ ! -f {{ '${' + key + '}' }} ] && touch {{ '${' + key + '}' }}
{%- endmacro %} {%- endmacro %}
{#- Create depending sections; as state files appear late, it's better to have an empty file, which will impact the hash anyway #}
{%- if boot_image_url_list_enabled %}
{{ generate_depend_section('boot-image-url-list-depend', 'boot-image-url-list-download-wrapper:config') }} {{ generate_depend_section('boot-image-url-list-depend', 'boot-image-url-list-download-wrapper:config') }}
{%- endif %}
{%- if boot_image_url_select_enabled %}
{{ generate_depend_section('boot-image-url-select-depend', 'boot-image-url-select-download-wrapper:config') }} {{ generate_depend_section('boot-image-url-select-depend', 'boot-image-url-select-download-wrapper:config') }}
{%- endif %}
{%- if virtual_hard_drive_url_enabled %} {%- if virtual_hard_drive_url_enabled %}
{{ generate_depend_section('virtual-hard-drive-url-depend', 'virtual-hard-drive-url-download-wrapper:config') }} {{ generate_depend_section('virtual-hard-drive-url-depend', 'virtual-hard-drive-url-download-wrapper:config') }}
{%- endif %} {%- endif %}
...@@ -1085,10 +1062,6 @@ frontend-additional-software-type = default ...@@ -1085,10 +1062,6 @@ frontend-additional-software-type = default
frontend-additional-software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg frontend-additional-software-url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
frontend-additional-instance-guid = frontend-additional-instance-guid =
frontend-additional-instance-name = VNC Real Frontend Additional frontend-additional-instance-name = VNC Real Frontend Additional
nbd-port = 1024
nbd-host =
nbd2-port = 1024
nbd2-host =
boot-image-url-list = boot-image-url-list =
enable-device-hotplug = False enable-device-hotplug = False
...@@ -1285,20 +1258,16 @@ parts = ...@@ -1285,20 +1258,16 @@ parts =
virtual-hard-drive-url-download-state-promise virtual-hard-drive-url-download-state-promise
virtual-hard-drive-url-processed-config-promise virtual-hard-drive-url-processed-config-promise
{% endif %} {% endif %}
{% if boot_image_url_list_enabled %}
boot-image-url-list-download-wrapper boot-image-url-list-download-wrapper
boot-image-url-list-config-state-promise boot-image-url-list-config-state-promise
boot-image-url-list-download-md5sum-promise boot-image-url-list-download-md5sum-promise
boot-image-url-list-download-state-promise boot-image-url-list-download-state-promise
boot-image-url-list-processed-config-promise boot-image-url-list-processed-config-promise
{% endif %}
{% if boot_image_url_select_enabled %}
boot-image-url-select-download-wrapper boot-image-url-select-download-wrapper
boot-image-url-select-config-state-promise boot-image-url-select-config-state-promise
boot-image-url-select-download-md5sum-promise boot-image-url-select-download-md5sum-promise
boot-image-url-select-download-state-promise boot-image-url-select-download-state-promise
boot-image-url-select-processed-config-promise boot-image-url-select-processed-config-promise
{% endif %}
{% if additional_frontend %} {% if additional_frontend %}
frontend-additional-promise frontend-additional-promise
{% endif %} {% endif %}
......
#############################
#
# Instantiate nbdserver
#
#############################
[buildout]
parts =
nbd-promise
onetimeupload-promise
publish-connection-information
extends = {{ template_monitor }}
{% set ipv6 = slap_configuration['ipv6-random'] -%}
[rootdirectory]
recipe = slapos.cookbook:mkdirectory
bin = ${buildout:directory}/bin
etc = ${buildout:directory}/etc
srv = ${buildout:directory}/srv
log = ${buildout:directory}/log
[basedirectory]
recipe = slapos.cookbook:mkdirectory
services = ${rootdirectory:etc}/run
watched-services = ${rootdirectory:etc}/service
[nbd-instance]
recipe = slapos.cookbook:nbdserver
ip = {{ ipv6 }}
port = 1024
image-path = ${onetimeupload-instance:image-path}
qemu-path = {{ qemu_nbd_executable_location }}
shell-path = {{ dash_executable_location }}
# XXX TODO: Wait for the iso to be uploaded (execute_wait)
path = ${basedirectory:services}/nbdserver
[nbd-checker-bin]
recipe = slapos.recipe.template
inline =
#!/bin/sh
[ ! -f ${onetimeupload-instance:image-path} ] ||
${buildout:executable} -c 'import socket ; socket.create_connection(("${nbd-instance:ip}","${nbd-instance:port}")).close()'
output = ${rootdirectory:bin}/check-nbd-running.sh
[nbd-promise]
<= monitor-promise-base
promise = check_command_execute
name = nbd_promise.py
config-command = ${nbd-checker-bin:output}
[gen-passwd]
recipe = slapos.cookbook:generate.password
storage-path = ${rootdirectory:srv}/passwd
bytes = 24
[onetimeupload-instance]
recipe = slapos.cookbook:onetimeupload
ip = {{ ipv6 }}
port = {{ slapparameter_dict.get('otu-port', 8080) }}
image-path = ${rootdirectory:srv}/cdrom.iso
log-path = ${rootdirectory:log}/onetimeupload.log
shell-path = {{ dash_executable_location }}
onetimeupload-path = {{ onetimeupload_executable_location }}
path = ${basedirectory:watched-services}/onetimeupload
key = ${gen-passwd:passwd}
[onetimeupload-promise]
<= monitor-promise-base
promise = check_socket_listening
name = onetimeupload_promise.py
config-host = ${onetimeupload-instance:ip}
config-port = ${onetimeupload-instance:port}
[publish-connection-information]
recipe = slapos.cookbook:publish
nbd_hostname = ${nbd-instance:ip}
nbd_port = ${nbd-instance:port}
upload_url = http://[${onetimeupload-instance:ip}]:${onetimeupload-instance:port}
upload_key = ${onetimeupload-instance:key}
status_message = ${detect-if-cdrom-present:status}
[detect-if-cdrom-present]
recipe = slapos.recipe.build
init =
import os
options['status'] = (
"image already uploaded, you can't upload it again"
if os.path.isfile("${onetimeupload-instance:image-path}")
else "WARNING: no image yet, the NBD server doesn't work")
...@@ -12,7 +12,6 @@ recipe = slapos.cookbook:switch-softwaretype ...@@ -12,7 +12,6 @@ recipe = slapos.cookbook:switch-softwaretype
default = $${:kvm} default = $${:kvm}
kvm-cluster = dynamic-template-kvm-cluster:output kvm-cluster = dynamic-template-kvm-cluster:output
kvm = dynamic-template-kvm:output kvm = dynamic-template-kvm:output
nbd = dynamic-template-nbd:output
kvm-resilient = dynamic-template-kvm-resilient:output kvm-resilient = dynamic-template-kvm-resilient:output
kvm-import = dynamic-template-kvm-import:output kvm-import = dynamic-template-kvm-import:output
...@@ -78,7 +77,7 @@ extra-context = ...@@ -78,7 +77,7 @@ extra-context =
raw dash_executable_location ${dash:location}/bin/dash raw dash_executable_location ${dash:location}/bin/dash
raw dnsresolver_executable ${buildout:bin-directory}/dnsresolver raw dnsresolver_executable ${buildout:bin-directory}/dnsresolver
raw dcron_executable_location ${dcron:location}/sbin/crond raw dcron_executable_location ${dcron:location}/sbin/crond
raw debian_amd64_netinst_location ${debian-amd64-bullseye-netinst.iso:target} raw boot_image_select_source_config ${boot-image-select-source-config:target}
raw whitelist_domains_default ${whitelist-domains-default:target} raw whitelist_domains_default ${whitelist-domains-default:target}
raw whitelist_firewall_download_controller ${whitelist-firewall-download-controller:output} raw whitelist_firewall_download_controller ${whitelist-firewall-download-controller:output}
raw image_download_controller ${image-download-controller:output} raw image_download_controller ${image-download-controller:output}
...@@ -151,17 +150,3 @@ context = ...@@ -151,17 +150,3 @@ context =
key slapparameter_dict slap-configuration:configuration key slapparameter_dict slap-configuration:configuration
raw zcat_binary ${gzip:location}/bin/zcat raw zcat_binary ${gzip:location}/bin/zcat
raw gzip_binary ${gzip:location}/bin/gzip raw gzip_binary ${gzip:location}/bin/gzip
[dynamic-template-nbd]
<= jinja2-template-base
url = ${template-nbd:location}/instance-nbd.cfg.jinja2
filename = template-nbd.cfg
context =
section slap_configuration slap-configuration
key slapparameter_dict slap-configuration:configuration
key eggs_directory buildout:eggs-directory
key develop_eggs_directory buildout:develop-eggs-directory
raw qemu_nbd_executable_location ${qemu:location}/bin/qemu-nbd
raw dash_executable_location ${dash:location}/bin/dash
raw onetimeupload_executable_location ${buildout:bin-directory}/onetimeupload
raw template_monitor ${monitor2-template:output}
...@@ -85,9 +85,6 @@ output = ${buildout:directory}/template.cfg ...@@ -85,9 +85,6 @@ output = ${buildout:directory}/template.cfg
[template-kvm-export-script] [template-kvm-export-script]
<= download-base <= download-base
[template-nbd]
<= download-base
[template-nginx] [template-nginx]
<= download-base <= download-base
...@@ -129,3 +126,6 @@ context = ...@@ -129,3 +126,6 @@ context =
[whitelist-domains-default] [whitelist-domains-default]
<= download-base <= download-base
[boot-image-select-source-config]
<= download-base
...@@ -25,13 +25,6 @@ ...@@ -25,13 +25,6 @@
"request": "instance-kvm-cluster-input-schema.json", "request": "instance-kvm-cluster-input-schema.json",
"response": "instance-kvm-output-schema.json", "response": "instance-kvm-output-schema.json",
"index": 2 "index": 2
},
"nbd": {
"title": "NBD Server",
"description": "Simple NBD server where you can upload one image. This is a one-time server. Create another server if you want to change the image.",
"request": "instance-kvm-nbd-server-input-schema.json",
"response": "instance-kvm-nbd-server-output-schema.json",
"index": 4
} }
} }
} }
{%- set IMAGE_URL_MAPPING = {
"Debian Bookworm 12 netinst x86_64" : "https://shacache.nxdcdn.com/33c08e56c83d13007e4a5511b9bf2c4926c4aa12fd5dd56d493c0653aecbab380988c5bf1671dbaea75c582827797d98c4a611f7fb2b131fbde2c677d5258ec9#326b7737c4262e8eb09cd26773f3356a",
"Debian Bullseye 11 netinst x86_64" : "https://shacache.nxdcdn.com/02257c3ec27e45d9f022c181a69b59da67e5c72871cdb4f9a69db323a1fad58093f2e69702d29aa98f5f65e920e0b970d816475a5a936e1f3bf33832257b7e92#b710c178eb434d79ce40ce703d30a5f0",
"Centos 8.2004 Minimal x86_64" : "https://shacache.nxdcdn.com/ce5ddfdbdaccdf929b7fe321212356347d82a02f6b7733427282b416f113d91e587682b003e9d376ac189c3b731595c50c236962aadf2720c16d9f36913577c0#23bf2a2d60271e553e63525e794415f1",
"Ubuntu Noble 24.04 Live Server x86_64" : "https://shacache.nxdcdn.com/3d518612aabbdb77fd6b49cb55b824fed11e40540e4af52f5f26174257715c93740f83079ea618b4d933081f0b1bc69d32b7885b7c75bc90da5ad3fe1814cfd4#c53b2d7c3269c3b91a2d941ceaa8ab9b",
"Ubuntu Jammy 22.04 Live Server x86_64" : "https://shacache.nxdcdn.com/8017c532ed74586b718662d8b11cf8c34fa638b0affd0413ed38623989b8f98ffd0bcb475246e279ea2f3c194a3e33c55e0f376a9727de13e4bfd87e75e47b5d#e8d2a77c51b599c10651608a5d8c286f",
"Ubuntu Focal 20.04 Live Server x86_64": "https://shacache.nxdcdn.com/302c990c6d69575ff24c96566e5c7e26bf36908abb0cd546e22687c46fb07bf8dba595bf77a9d4fd9ab63e75c0437c133f35462fd41ea77f6f616140cd0e5e6a#f3a306f40e4a313fb5a584d73b3dee8f",
"openSUSE Leap 15 NET x86_64" : "https://shacache.nxdcdn.com/6635269a7eb6fbd6b85fda40cd94f14a27bf53cb1fc82ffcce9fe386a025a43e1ab681db7e8cec50416bfbfc90262f0d95273686a101c74b3f17646f0a34c85b#3708a59af6cf820a95cafe0ae73ac399",
"Arch Linux 2020.09.01 x86_64" : "https://shacache.nxdcdn.com/fc17e8c6ae0790162f4beb8fa6226d945cff638429588999b3a08493ff27b280dc2939fba825ae04be1d9082ea8d7c3c002c5e4c39fbbcf88b8ab5104619e28a#ebcdb2223a77f098af3923fe1fa180aa",
"Fedora Server 32 netinst x86_64" : "https://shacache.nxdcdn.com/c5a511f349a1146b615e6fab9c24f9be4362046adcf24f0ff82c470d361fac5f6628895e2110ebf8ff87db49d4c413a0a332699da6b1bec64275e0c17a15b999#ca7a1e555c04b4d9a549065fa2ddf713",
"FreeBSD 12.1 RELEASE bootonly x86_64" : "https://shacache.nxdcdn.com/6c355def68b3c0427f21598cb054ffc893568902f205601ac60f192854769b31bc9cff8eeb6ce99ef975a8fb887d8d3e56fc6cd5ea5cb4b3bba1175c520047cb#57088b77f795ca44b00971e44782ee23",
"SUSE Linux Enterprise Server 15 SP6 x86_64": "https://shacache.nxdcdn.com/e72e03bbcc4c54ce4b8d5f360b47dab9ee514d754e8d78c403626cf000d6ae98d808b3bcff2201e3cf49c1be1b0f308f1cb5ed81676adcb1837dfc811d2451ac",
} -%}
{%- if boot_image_url_select %}
{#- Fail in the promise if bad boot-image-url-select is set -#}
{%- set boot_image = IMAGE_URL_MAPPING.get(boot_image_url_select, boot_image_url_select) %}
{%- else %}
{#- Use default ONLY if no boot-image-url-select is set -#}
{%- set boot_image = IMAGE_URL_MAPPING.get(boot_image_url_select_default) %}
{%- endif %}
{%- if boot_image -%}
["{{ boot_image }}"]
{%- else -%}
[]
{%- endif -%}
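A minimal Python sketch of the selection logic this template implements (resolve_boot_image is an illustrative name, not part of the software release): a known label resolves through IMAGE_URL_MAPPING, an unknown non-empty value passes through verbatim so the download promise fails on it later, and the default label applies only when nothing was selected.
def resolve_boot_image(mapping, selected, default):
    if selected:
        # Fail later in the promise if a bad boot-image-url-select is set:
        # unknown labels pass through unchanged.
        boot_image = mapping.get(selected, selected)
    else:
        # Use the default ONLY if no boot-image-url-select is set.
        boot_image = mapping.get(default)
    return [boot_image] if boot_image else []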
...@@ -23,11 +23,6 @@ disk_type = {{ repr(parameter_dict["disk-type"]) }} ...@@ -23,11 +23,6 @@ disk_type = {{ repr(parameter_dict["disk-type"]) }}
network_adapter = {{ repr(parameter_dict["network-adapter"]) }} network_adapter = {{ repr(parameter_dict["network-adapter"]) }}
socket_path = '{{ parameter_dict.get("socket-path") }}' socket_path = '{{ parameter_dict.get("socket-path") }}'
nbd_list = (('{{ parameter_dict.get("nbd-host") }}',
{{ parameter_dict.get("nbd-port") }}),
('{{ parameter_dict.get("nbd2-host") }}',
{{ parameter_dict.get("nbd2-port") }}))
default_cdrom_iso = '{{ parameter_dict.get("default-cdrom-iso") }}'
nat_rules = '{{ parameter_dict.get("nat-rules") }}'.strip() nat_rules = '{{ parameter_dict.get("nat-rules") }}'.strip()
use_tap = '{{ parameter_dict.get("use-tap") }}'.lower() use_tap = '{{ parameter_dict.get("use-tap") }}'.lower()
...@@ -389,36 +384,15 @@ def handle_image(config, name): ...@@ -389,36 +384,15 @@ def handle_image(config, name):
else: else:
raise ValueError('%s not ready yet' % (name,)) raise ValueError('%s not ready yet' % (name,))
# Try to connect to the NBD server (and the second NBD if defined). # Note: Do not get tempted to use virtio-scsi-pci, as it does not work with
# If not available, don't even specify it in the qemu command line parameters. # Debian installation CDs, rendering them uninstallable
# Reason: if qemu starts with an unavailable NBD drive, it will just crash. # Note: boot-image-url-list has precedence over boot-image-url-select
for nbd_ip, nbd_port in nbd_list: if boot_image_url_list_json_config:
if nbd_ip and nbd_port: # Support boot-image-url-list
s = getSocketStatus(nbd_ip, nbd_port) handle_image(boot_image_url_list_json_config, 'boot-image-url-list')
if s is None: if boot_image_url_select_json_config:
# NBD is not available : launch kvm without it # Support boot-image-url-select
print('Warning : Nbd is not available.') handle_image(boot_image_url_select_json_config, 'boot-image-url-select')
else:
# NBD is available
# We close the NBD socket else qemu won't be able to use it apparently
s.close()
kvm_argument_list.extend([
'-drive',
'file=nbd:[%s]:%s,media=cdrom' % (nbd_ip, nbd_port)])
else:
# Note: Do not get tempted to use virtio-scsi-pci, as it does not work with
# Debian installation CDs, rendering them uninstallable
if boot_image_url_select_json_config:
# Support boot-image-url-select
handle_image(boot_image_url_select_json_config, 'boot-image-url-select')
if boot_image_url_list_json_config:
# Support boot-image-url-list
handle_image(boot_image_url_list_json_config, 'boot-image-url-list')
# Always add by default the default image
kvm_argument_list.extend([
'-drive', 'file=%s,media=cdrom' % default_cdrom_iso
])
print('Starting KVM: \n %s' % ' '.join(kvm_argument_list)) print('Starting KVM: \n %s' % ' '.join(kvm_argument_list))
os.execv(qemu_path, kvm_argument_list) os.execv(qemu_path, kvm_argument_list)
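Both the removed NBD branch and the surviving boot-image branch end by appending cdrom drives to the qemu invocation; a hedged sketch of that argument construction follows (the helper name is hypothetical; the '-drive file=...,media=cdrom' pattern is taken from the code above).
def cdrom_drive_arguments(image_path_list):
    # One '-drive file=<path>,media=cdrom' pair per configured boot image.
    argument_list = []
    for image_path in image_path_list:
        argument_list.extend(['-drive', 'file=%s,media=cdrom' % image_path])
    return argument_list

# e.g. cdrom_drive_arguments(['/srv/boot-image-url-select-repository/abc'])
# -> ['-drive', 'file=/srv/boot-image-url-select-repository/abc,media=cdrom']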
...@@ -130,16 +130,16 @@ class KVMTestCase(InstanceTestCase): ...@@ -130,16 +130,16 @@ class KVMTestCase(InstanceTestCase):
if q.startswith('location') and '/qemu/' in q] if q.startswith('location') and '/qemu/' in q]
assert (len(qemu_location) == 1) assert (len(qemu_location) == 1)
qemu_location = qemu_location[0].split('=')[1].strip() qemu_location = qemu_location[0].split('=')[1].strip()
cls.qemu_nbd = os.path.join(qemu_location, 'bin', 'qemu-nbd')
assert (os.path.exists(cls.qemu_nbd))
cls.qemu_img = os.path.join(qemu_location, 'bin', 'qemu-img') cls.qemu_img = os.path.join(qemu_location, 'bin', 'qemu-img')
assert (os.path.exists(cls.qemu_img)) assert (os.path.exists(cls.qemu_img))
def getRunningImageList( def getRunningImageList(
self, kvm_instance_partition, self,
_match_cdrom=re.compile('file=(.+),media=cdrom$').match, _match_cdrom=re.compile('file=(.+),media=cdrom$').match,
_sub_iso=re.compile(r'(/debian)(-[^-/]+)(-[^/]+-netinst\.iso)$').sub, _sub_iso=re.compile(r'(/debian)(-[^-/]+)(-[^/]+-netinst\.iso)$').sub,
): ):
kvm_instance_partition = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference)
with self.slap.instance_supervisor_rpc as instance_supervisor: with self.slap.instance_supervisor_rpc as instance_supervisor:
kvm_pid = next(q for q in instance_supervisor.getAllProcessInfo() kvm_pid = next(q for q in instance_supervisor.getAllProcessInfo()
if 'kvm-' in q['name'])['pid'] if 'kvm-' in q['name'])['pid']
...@@ -160,7 +160,7 @@ class KVMTestCase(InstanceTestCase): ...@@ -160,7 +160,7 @@ class KVMTestCase(InstanceTestCase):
return image_list return image_list
@classmethod @classmethod
def _findTopLevelPartitionPath(cls, path): def _findTopLevelPartitionPath(cls, path: str):
index = 0 index = 0
while True: while True:
index = path.find(os.path.sep, index) + len(os.path.sep) index = path.find(os.path.sep, index) + len(os.path.sep)
...@@ -199,27 +199,46 @@ class KVMTestCase(InstanceTestCase): ...@@ -199,27 +199,46 @@ class KVMTestCase(InstanceTestCase):
class KvmMixin: class KvmMixin:
def assertPromiseFails(self, promise):
partition_directory = os.path.join(
self.slap.instance_directory,
self.kvm_instance_partition_reference)
monitor_run_promise = os.path.join(
partition_directory, 'software_release', 'bin',
'monitor.runpromise'
)
monitor_configuration = os.path.join(
partition_directory, 'etc', 'monitor.conf')
self.assertNotEqual(
0,
subprocess.call([
monitor_run_promise, '-c', monitor_configuration, '-a', '-f',
'--run-only', promise])
)
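For illustration, the tests further down exercise this helper directly, e.g.:
self.assertPromiseFails(self.config_state_promise)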
def getConnectionParameterDictJson(self): def getConnectionParameterDictJson(self):
return json.loads( return json.loads(
self.computer_partition.getConnectionParameterDict()['_']) self.computer_partition.getConnectionParameterDict()['_'])
def getProcessInfo(self): def getProcessInfo(self, kvm_additional_hash_file_list=None):
if kvm_additional_hash_file_list is None:
kvm_additional_hash_file_list = []
hash_value = generateHashFromFiles([ hash_value = generateHashFromFiles([
os.path.join(self.computer_partition_root_path, hash_file) os.path.join(self.computer_partition_root_path, hash_file)
for hash_file in [ for hash_file in [
'software_release/buildout.cfg', 'software_release/buildout.cfg',
] ]
]) ])
# find bin/kvm_raw kvm_partition = os.path.join(
kvm_raw_list = glob.glob( self.slap.instance_directory, self.kvm_instance_partition_reference)
os.path.join(self.slap.instance_directory, '*', 'bin', 'kvm_raw'))
self.assertEqual(1, len(kvm_raw_list)) # allow to work only with one
hash_file_list = [ hash_file_list = [
kvm_raw_list[0], os.path.join(kvm_partition, 'bin', 'kvm_raw')
'software_release/buildout.cfg', ] + kvm_additional_hash_file_list + [
] 'software_release/buildout.cfg']
kvm_hash_value = generateHashFromFiles([ kvm_hash_value = generateHashFromFiles([
os.path.join(self.computer_partition_root_path, hash_file) os.path.join(kvm_partition, hash_file)
for hash_file in hash_file_list for hash_file in hash_file_list
]) ])
with self.slap.instance_supervisor_rpc as supervisor: with self.slap.instance_supervisor_rpc as supervisor:
...@@ -234,7 +253,9 @@ class KvmMixin: ...@@ -234,7 +253,9 @@ class KvmMixin:
with self.assertRaises(SlapOSNodeCommandError): with self.assertRaises(SlapOSNodeCommandError):
self.slap.waitForInstance(max_retry=max_retry) self.slap.waitForInstance(max_retry=max_retry)
def rerequestInstance(self, parameter_dict, state='started'): def rerequestInstance(self, parameter_dict=None, state='started'):
if parameter_dict is None:
parameter_dict = {}
software_url = self.getSoftwareURL() software_url = self.getSoftwareURL()
software_type = self.getInstanceSoftwareType() software_type = self.getInstanceSoftwareType()
return self.slap.request( return self.slap.request(
...@@ -251,7 +272,9 @@ class KvmMixinJson: ...@@ -251,7 +272,9 @@ class KvmMixinJson:
return { return {
'_': json.dumps(super().getInstanceParameterDict())} '_': json.dumps(super().getInstanceParameterDict())}
def rerequestInstance(self, parameter_dict, *args, **kwargs): def rerequestInstance(self, parameter_dict=None, *args, **kwargs):
if parameter_dict is None:
parameter_dict = {}
return super().rerequestInstance( return super().rerequestInstance(
parameter_dict={'_': json.dumps(parameter_dict)}, parameter_dict={'_': json.dumps(parameter_dict)},
*args, **kwargs *args, **kwargs
...@@ -261,6 +284,7 @@ class KvmMixinJson: ...@@ -261,6 +284,7 @@ class KvmMixinJson:
@skipUnlessKvm @skipUnlessKvm
class TestInstance(KVMTestCase, KvmMixin): class TestInstance(KVMTestCase, KvmMixin):
__partition_reference__ = 'i' __partition_reference__ = 'i'
kvm_instance_partition_reference = 'i0'
def test(self): def test(self):
connection_parameter_dict = self.getConnectionParameterDictJson() connection_parameter_dict = self.getConnectionParameterDictJson()
...@@ -292,6 +316,8 @@ class TestInstance(KVMTestCase, KvmMixin): ...@@ -292,6 +316,8 @@ class TestInstance(KVMTestCase, KvmMixin):
"""i0:6tunnel-10022-{hash}-on-watch RUNNING """i0:6tunnel-10022-{hash}-on-watch RUNNING
i0:6tunnel-10080-{hash}-on-watch RUNNING i0:6tunnel-10080-{hash}-on-watch RUNNING
i0:6tunnel-10443-{hash}-on-watch RUNNING i0:6tunnel-10443-{hash}-on-watch RUNNING
i0:boot-image-url-list-updater-{hash} EXITED
i0:boot-image-url-select-updater-{hash} EXITED
i0:bootstrap-monitor EXITED i0:bootstrap-monitor EXITED
i0:certificate_authority-{hash}-on-watch RUNNING i0:certificate_authority-{hash}-on-watch RUNNING
i0:crond-{hash}-on-watch RUNNING i0:crond-{hash}-on-watch RUNNING
...@@ -303,7 +329,19 @@ i0:nginx-graceful EXITED ...@@ -303,7 +329,19 @@ i0:nginx-graceful EXITED
i0:nginx-on-watch RUNNING i0:nginx-on-watch RUNNING
i0:whitelist-domains-download-{hash} RUNNING i0:whitelist-domains-download-{hash} RUNNING
i0:whitelist-firewall-{hash} RUNNING""", i0:whitelist-firewall-{hash} RUNNING""",
self.getProcessInfo() self.getProcessInfo([
'var/boot-image-url-list/boot-image-url-list.json',
'var/boot-image-url-select/boot-image-url-select.json'
])
)
# assert that the default image is used
self.assertEqual(
[
'${inst}/srv/boot-image-url-select-repository/'
'326b7737c4262e8eb09cd26773f3356a'
],
self.getRunningImageList()
) )
...@@ -316,6 +354,7 @@ class TestInstanceJson( ...@@ -316,6 +354,7 @@ class TestInstanceJson(
@skipUnlessKvm @skipUnlessKvm
class TestMemoryManagement(KVMTestCase, KvmMixin): class TestMemoryManagement(KVMTestCase, KvmMixin):
__partition_reference__ = 'i' __partition_reference__ = 'i'
kvm_instance_partition_reference = 'i0'
def getKvmProcessInfo(self, switch_list): def getKvmProcessInfo(self, switch_list):
return_list = [] return_list = []
...@@ -353,7 +392,7 @@ class TestMemoryManagement(KVMTestCase, KvmMixin): ...@@ -353,7 +392,7 @@ class TestMemoryManagement(KVMTestCase, KvmMixin):
self.assertNotEqual(kvm_pid_1, kvm_pid_2, "Unexpected: KVM not restarted") self.assertNotEqual(kvm_pid_1, kvm_pid_2, "Unexpected: KVM not restarted")
def tearDown(self): def tearDown(self):
self.rerequestInstance({}) self.rerequestInstance()
self.slap.waitForInstance(max_retry=10) self.slap.waitForInstance(max_retry=10)
def test_enable_device_hotplug(self): def test_enable_device_hotplug(self):
...@@ -484,6 +523,7 @@ class MonitorAccessMixin(KvmMixin): ...@@ -484,6 +523,7 @@ class MonitorAccessMixin(KvmMixin):
@skipUnlessKvm @skipUnlessKvm
class TestAccessDefault(MonitorAccessMixin, KVMTestCase): class TestAccessDefault(MonitorAccessMixin, KVMTestCase):
__partition_reference__ = 'ad' __partition_reference__ = 'ad'
kvm_instance_partition_reference = 'ad0'
expected_partition_with_monitor_base_url_count = 1 expected_partition_with_monitor_base_url_count = 1
def test(self): def test(self):
...@@ -505,6 +545,7 @@ class TestAccessDefaultJson(KvmMixinJson, TestAccessDefault): ...@@ -505,6 +545,7 @@ class TestAccessDefaultJson(KvmMixinJson, TestAccessDefault):
@skipUnlessKvm @skipUnlessKvm
class TestAccessDefaultAdditional(MonitorAccessMixin, KVMTestCase): class TestAccessDefaultAdditional(MonitorAccessMixin, KVMTestCase):
__partition_reference__ = 'ada' __partition_reference__ = 'ada'
kvm_instance_partition_reference = 'ada0'
expected_partition_with_monitor_base_url_count = 1 expected_partition_with_monitor_base_url_count = 1
@classmethod @classmethod
...@@ -541,6 +582,7 @@ class TestAccessDefaultAdditionalJson( ...@@ -541,6 +582,7 @@ class TestAccessDefaultAdditionalJson(
@skipUnlessKvm @skipUnlessKvm
class TestAccessDefaultBootstrap(MonitorAccessMixin, KVMTestCase): class TestAccessDefaultBootstrap(MonitorAccessMixin, KVMTestCase):
__partition_reference__ = 'adb' __partition_reference__ = 'adb'
kvm_instance_partition_reference = 'adb0'
expected_partition_with_monitor_base_url_count = 1 expected_partition_with_monitor_base_url_count = 1
@classmethod @classmethod
...@@ -551,7 +593,7 @@ class TestAccessDefaultBootstrap(MonitorAccessMixin, KVMTestCase): ...@@ -551,7 +593,7 @@ class TestAccessDefaultBootstrap(MonitorAccessMixin, KVMTestCase):
def test(self): def test(self):
# START: mock .slapos-resource with tap.ipv4_addr # START: mock .slapos-resource with tap.ipv4_addr
# needed for netconfig.sh # needed for netconfig.sh
partition_path = self.computer_partition_root_path partition_path = str(self.computer_partition_root_path)
top_partition_path = self._findTopLevelPartitionPath(partition_path) top_partition_path = self._findTopLevelPartitionPath(partition_path)
with open(os.path.join(top_partition_path, '.slapos-resource')) as f: with open(os.path.join(top_partition_path, '.slapos-resource')) as f:
...@@ -589,6 +631,7 @@ class TestAccessDefaultBootstrap(MonitorAccessMixin, KVMTestCase): ...@@ -589,6 +631,7 @@ class TestAccessDefaultBootstrap(MonitorAccessMixin, KVMTestCase):
@skipUnlessKvm @skipUnlessKvm
class TestAccessKvmCluster(MonitorAccessMixin, KVMTestCase): class TestAccessKvmCluster(MonitorAccessMixin, KVMTestCase):
__partition_reference__ = 'akc' __partition_reference__ = 'akc'
kvm_instance_partition_reference = 'akc0'
expected_partition_with_monitor_base_url_count = 2 expected_partition_with_monitor_base_url_count = 2
@classmethod @classmethod
...@@ -619,6 +662,7 @@ class TestAccessKvmCluster(MonitorAccessMixin, KVMTestCase): ...@@ -619,6 +662,7 @@ class TestAccessKvmCluster(MonitorAccessMixin, KVMTestCase):
@skipUnlessKvm @skipUnlessKvm
class TestAccessKvmClusterAdditional(MonitorAccessMixin, KVMTestCase): class TestAccessKvmClusterAdditional(MonitorAccessMixin, KVMTestCase):
__partition_reference__ = 'akca' __partition_reference__ = 'akca'
kvm_instance_partition_reference = 'akca0'
expected_partition_with_monitor_base_url_count = 2 expected_partition_with_monitor_base_url_count = 2
@classmethod @classmethod
...@@ -659,6 +703,7 @@ class TestAccessKvmClusterAdditional(MonitorAccessMixin, KVMTestCase): ...@@ -659,6 +703,7 @@ class TestAccessKvmClusterAdditional(MonitorAccessMixin, KVMTestCase):
@skipUnlessKvm @skipUnlessKvm
class TestAccessKvmClusterBootstrap(MonitorAccessMixin, KVMTestCase): class TestAccessKvmClusterBootstrap(MonitorAccessMixin, KVMTestCase):
__partition_reference__ = 'akcb' __partition_reference__ = 'akcb'
kvm_instance_partition_reference = 'akcb0'
expected_partition_with_monitor_base_url_count = 3 expected_partition_with_monitor_base_url_count = 3
@classmethod @classmethod
...@@ -702,6 +747,7 @@ class TestAccessKvmClusterBootstrap(MonitorAccessMixin, KVMTestCase): ...@@ -702,6 +747,7 @@ class TestAccessKvmClusterBootstrap(MonitorAccessMixin, KVMTestCase):
@skipUnlessKvm @skipUnlessKvm
class TestInstanceResilient(KVMTestCase, KvmMixin): class TestInstanceResilient(KVMTestCase, KvmMixin):
__partition_reference__ = 'ir' __partition_reference__ = 'ir'
kvm_instance_partition_reference = 'ir0'
instance_max_retry = 20 instance_max_retry = 20
@classmethod @classmethod
...@@ -712,7 +758,8 @@ class TestInstanceResilient(KVMTestCase, KvmMixin): ...@@ -712,7 +758,8 @@ class TestInstanceResilient(KVMTestCase, KvmMixin):
def setUpClass(cls): def setUpClass(cls):
super().setUpClass() super().setUpClass()
cls.pbs1_ipv6 = cls.getPartitionIPv6(cls.getPartitionId('PBS (kvm / 1)')) cls.pbs1_ipv6 = cls.getPartitionIPv6(cls.getPartitionId('PBS (kvm / 1)'))
cls.kvm0_ipv6 = cls.getPartitionIPv6(cls.getPartitionId('kvm0')) cls.kvm_instance_partition_reference = cls.getPartitionId('kvm0')
cls.kvm0_ipv6 = cls.getPartitionIPv6(cls.kvm_instance_partition_reference)
cls.kvm1_ipv6 = cls.getPartitionIPv6(cls.getPartitionId('kvm1')) cls.kvm1_ipv6 = cls.getPartitionIPv6(cls.getPartitionId('kvm1'))
def test_kvm_exporter(self): def test_kvm_exporter(self):
...@@ -785,6 +832,8 @@ ir1:pbs_sshkeys_authority-on-watch RUNNING ...@@ -785,6 +832,8 @@ ir1:pbs_sshkeys_authority-on-watch RUNNING
ir2:6tunnel-10022-{hash}-on-watch RUNNING ir2:6tunnel-10022-{hash}-on-watch RUNNING
ir2:6tunnel-10080-{hash}-on-watch RUNNING ir2:6tunnel-10080-{hash}-on-watch RUNNING
ir2:6tunnel-10443-{hash}-on-watch RUNNING ir2:6tunnel-10443-{hash}-on-watch RUNNING
ir2:boot-image-url-list-updater-{hash} EXITED
ir2:boot-image-url-select-updater-{hash} EXITED
ir2:bootstrap-monitor EXITED ir2:bootstrap-monitor EXITED
ir2:certificate_authority-{hash}-on-watch RUNNING ir2:certificate_authority-{hash}-on-watch RUNNING
ir2:crond-{hash}-on-watch RUNNING ir2:crond-{hash}-on-watch RUNNING
...@@ -812,7 +861,10 @@ ir3:resilient-web-takeover-httpd-on-watch RUNNING ...@@ -812,7 +861,10 @@ ir3:resilient-web-takeover-httpd-on-watch RUNNING
ir3:resilient_sshkeys_authority-on-watch RUNNING ir3:resilient_sshkeys_authority-on-watch RUNNING
ir3:sshd-graceful EXITED ir3:sshd-graceful EXITED
ir3:sshd-on-watch RUNNING""", ir3:sshd-on-watch RUNNING""",
self.getProcessInfo() self.getProcessInfo([
'var/boot-image-url-list/boot-image-url-list.json',
'var/boot-image-url-select/boot-image-url-select.json'
])
) )
...@@ -840,6 +892,7 @@ class TestInstanceResilientDiskTypeIdeJson( ...@@ -840,6 +892,7 @@ class TestInstanceResilientDiskTypeIdeJson(
@skipUnlessKvm @skipUnlessKvm
class TestAccessResilientAdditional(KVMTestCase): class TestAccessResilientAdditional(KVMTestCase):
__partition_reference__ = 'ara' __partition_reference__ = 'ara'
kvm_instance_partition_reference = 'ara0'
expected_partition_with_monitor_base_url_count = 1 expected_partition_with_monitor_base_url_count = 1
@classmethod @classmethod
...@@ -878,40 +931,6 @@ class TestAccessResilientAdditionalJson( ...@@ -878,40 +931,6 @@ class TestAccessResilientAdditionalJson(
pass pass
class TestInstanceNbdServer(KVMTestCase):
__partition_reference__ = 'ins'
instance_max_retry = 5
@classmethod
def getInstanceSoftwareType(cls):
return 'nbd'
@classmethod
def getInstanceParameterDict(cls):
# port 8080 is used by testnode, use another one
return {
'otu-port': '8090'
}
def test(self):
connection_parameter_dict = self.computer_partition\
.getConnectionParameterDict()
result = requests.get(
connection_parameter_dict['upload_url'].strip(), verify=False)
self.assertEqual(
httplib.OK,
result.status_code
)
self.assertIn('<title>Upload new File</title>', result.text)
self.assertIn("WARNING", connection_parameter_dict['status_message'])
@skipUnlessKvm
class TestInstanceNbdServerJson(
KvmMixinJson, TestInstanceNbdServer):
pass
class HttpHandler(http.server.SimpleHTTPRequestHandler): class HttpHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, *args): def log_message(self, *args):
if os.environ.get('SLAPOS_TEST_DEBUG'): if os.environ.get('SLAPOS_TEST_DEBUG'):
...@@ -1014,196 +1033,6 @@ class FakeImageServerMixin(KvmMixin): ...@@ -1014,196 +1033,6 @@ class FakeImageServerMixin(KvmMixin):
shutil.rmtree(cls.image_source_directory) shutil.rmtree(cls.image_source_directory)
@skipUnlessKvm
class TestInstanceNbd(KVMTestCase):
__partition_reference__ = 'in'
kvm_instance_partition_reference = 'in0'
@classmethod
def startNbdServer(cls):
cls.nbd_directory = tempfile.mkdtemp()
img_1 = os.path.join(cls.nbd_directory, 'one.qcow')
img_2 = os.path.join(cls.nbd_directory, 'two.qcow')
subprocess.check_call([cls.qemu_img, "create", "-f", "qcow", img_1, "1M"])
subprocess.check_call([cls.qemu_img, "create", "-f", "qcow", img_2, "1M"])
nbd_list = [cls.qemu_nbd, '-r', '-t', '-e', '32767']
cls.nbd_1_port = findFreeTCPPort(cls.ipv6_address_pure)
cls.nbd_1 = subprocess.Popen(
nbd_list + [
'-b', cls.ipv6_address_pure, '-p', str(cls.nbd_1_port), img_1])
cls.nbd_1_uri = '[%s]:%s' % (cls.ipv6_address_pure, cls.nbd_1_port)
cls.nbd_2_port = findFreeTCPPort(cls.ipv6_address_pure)
cls.nbd_2 = subprocess.Popen(
nbd_list + [
'-b', cls.ipv6_address_pure, '-p', str(cls.nbd_2_port), img_2])
cls.nbd_2_uri = '[%s]:%s' % (cls.ipv6_address_pure, cls.nbd_2_port)
@classmethod
def stopNbdServer(cls):
cls.nbd_1.terminate()
cls.nbd_2.terminate()
shutil.rmtree(cls.nbd_directory)
@classmethod
def setUpClass(cls):
# we need the qemu-nbd binary location
# it's too hard to put qemu in software/slapos-sr-testing
# so let's find it here
# let's find our software .installed.cfg
cls.ipv6_address_pure = cls._ipv6_address.split('/')[0]
cls.findQemuTools()
cls.startNbdServer()
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls.stopNbdServer()
@classmethod
def getInstanceParameterDict(cls):
return {
"nbd-host": cls.ipv6_address_pure,
"nbd-port": cls.nbd_1_port
}
def test(self):
kvm_partition = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference)
self.assertEqual(
[f'nbd:{self.nbd_1_uri}', '${shared}/debian-${ver}-amd64-netinst.iso'],
self.getRunningImageList(kvm_partition)
)
@skipUnlessKvm
class TestInstanceNbdWithVirtualHardDriveUrl(
FakeImageServerMixin, TestInstanceNbd):
__partition_reference__ = 'inbvhdu'
kvm_instance_partition_reference = 'inbvhdu0'
@classmethod
def getInstanceParameterDict(cls):
return {
"nbd-host": cls.ipv6_address_pure,
"nbd-port": cls.nbd_1_port,
"virtual-hard-drive-url": cls.real_image,
"virtual-hard-drive-md5sum": cls.real_image_md5sum
}
def test(self):
kvm_partition = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference)
self.assertEqual(
[f'nbd:{self.nbd_1_uri}', '${shared}/debian-${ver}-amd64-netinst.iso'],
self.getRunningImageList(kvm_partition)
)
image_repository = os.path.join(
kvm_partition,
'srv', 'virtual-hard-drive-url-repository')
self.assertEqual(
[self.getInstanceParameterDict()['virtual-hard-drive-md5sum']],
os.listdir(image_repository)
)
destination_image = os.path.join(kvm_partition, 'srv', 'virtual.qcow2')
# compare the qemu-img info of the repository image and the destination one
qemu_img_list = [self.qemu_img, 'info', '-U', '--output', 'json']
source_image_info_json = json.loads(subprocess.check_output(
qemu_img_list + [
os.path.join(self.image_source_directory, self.real_image_md5sum)]))
destination_image_info_json = json.loads(subprocess.check_output(
qemu_img_list + [destination_image]))
source_image_info_json.pop('filename')
destination_image_info_json.pop('filename')
# the best possible way to ensure that the provided image is used is by
# comparing the result of qemu-img info for both
self.assertEqual(
source_image_info_json,
destination_image_info_json
)
@skipUnlessKvm
class TestInstanceNbdWithBootImageUrlList(
FakeImageServerMixin, TestInstanceNbd):
__partition_reference__ = 'inbiul'
kvm_instance_partition_reference = 'inbiul0'
image_directory = 'boot-image-url-list-repository'
@classmethod
def getInstanceParameterDict(cls):
return {
"nbd-host": cls.ipv6_address_pure,
"nbd-port": cls.nbd_1_port,
"boot-image-url-list": f"{cls.fake_image}#{cls.fake_image_md5sum}"
}
def test(self):
kvm_partition = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference)
self.assertEqual(
[
f'nbd:{self.nbd_1_uri}',
f'${{inst}}/srv/{self.image_directory}/{self.fake_image_md5sum}',
'${shared}/debian-${ver}-amd64-netinst.iso',
],
self.getRunningImageList(kvm_partition)
)
@skipUnlessKvm
class TestInstanceNbdWithBootImageUrlSelect(
FakeImageServerMixin, TestInstanceNbd):
__partition_reference__ = 'inbius'
kvm_instance_partition_reference = 'inbius0'
image_directory = 'boot-image-url-select-repository'
@classmethod
def getInstanceParameterDict(cls):
return {
"nbd-host": cls.ipv6_address_pure,
"nbd-port": cls.nbd_1_port,
"boot-image-url-select": f'["{cls.fake_image}#{cls.fake_image_md5sum}"]'
}
def test(self):
kvm_partition = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference)
self.assertEqual(
[
f'nbd:{self.nbd_1_uri}',
f'${{inst}}/srv/{self.image_directory}/{self.fake_image_md5sum}',
'${shared}/debian-${ver}-amd64-netinst.iso',
],
self.getRunningImageList(kvm_partition)
)
@skipUnlessKvm
class TestInstanceNbdBoth(TestInstanceNbd):
__partition_reference__ = 'inb'
kvm_instance_partition_reference = 'inb0'
@classmethod
def getInstanceParameterDict(cls):
return {
"nbd-host": cls.ipv6_address_pure,
"nbd-port": cls.nbd_1_port,
"nbd2-host": cls.ipv6_address_pure,
"nbd2-port": cls.nbd_2_port
}
def test(self):
kvm_partition = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference)
self.assertEqual(
[f'nbd:{self.nbd_1_uri}', f'nbd:{self.nbd_2_uri}',
'${shared}/debian-${ver}-amd64-netinst.iso'],
self.getRunningImageList(kvm_partition)
)
@skipUnlessKvm @skipUnlessKvm
class TestVirtualHardDriveUrl(FakeImageServerMixin, KVMTestCase): class TestVirtualHardDriveUrl(FakeImageServerMixin, KVMTestCase):
__partition_reference__ = 'vhdu' __partition_reference__ = 'vhdu'
...@@ -1266,7 +1095,6 @@ class TestBootImageUrlList(FakeImageServerMixin, KVMTestCase): ...@@ -1266,7 +1095,6 @@ class TestBootImageUrlList(FakeImageServerMixin, KVMTestCase):
# variations # variations
key = 'boot-image-url-list' key = 'boot-image-url-list'
test_input = "%s#%s\n%s#%s" test_input = "%s#%s\n%s#%s"
empty_input = ""
image_directory = 'boot-image-url-list-repository' image_directory = 'boot-image-url-list-repository'
config_state_promise = 'boot-image-url-list-config-state-promise.py' config_state_promise = 'boot-image-url-list-config-state-promise.py'
download_md5sum_promise = 'boot-image-url-list-download-md5sum-promise.py' download_md5sum_promise = 'boot-image-url-list-download-md5sum-promise.py'
...@@ -1300,12 +1128,9 @@ class TestBootImageUrlList(FakeImageServerMixin, KVMTestCase): ...@@ -1300,12 +1128,9 @@ class TestBootImageUrlList(FakeImageServerMixin, KVMTestCase):
def tearDown(self): def tearDown(self):
# clean up the instance for other tests # clean up the instance for other tests
# 1st remove all images... # move instance to "default" state
self.rerequestInstance({self.key: ''}) self.rerequestInstance()
self.slap.waitForInstance(max_retry=10) self.slap.waitForInstance(max_retry=20)
# 2nd ...move instance to "default" state
self.rerequestInstance({})
self.slap.waitForInstance(max_retry=10)
super().tearDown() super().tearDown()
def test(self): def test(self):
...@@ -1330,9 +1155,8 @@ class TestBootImageUrlList(FakeImageServerMixin, KVMTestCase): ...@@ -1330,9 +1155,8 @@ class TestBootImageUrlList(FakeImageServerMixin, KVMTestCase):
[ [
f'${{inst}}/srv/{self.image_directory}/{self.fake_image_md5sum}', f'${{inst}}/srv/{self.image_directory}/{self.fake_image_md5sum}',
f'${{inst}}/srv/{self.image_directory}/{self.fake_image2_md5sum}', f'${{inst}}/srv/{self.image_directory}/{self.fake_image2_md5sum}',
'${shared}/debian-${ver}-amd64-netinst.iso',
], ],
self.getRunningImageList(kvm_instance_partition) self.getRunningImageList()
) )
# Switch image # Switch image
...@@ -1351,18 +1175,14 @@ class TestBootImageUrlList(FakeImageServerMixin, KVMTestCase): ...@@ -1351,18 +1175,14 @@ class TestBootImageUrlList(FakeImageServerMixin, KVMTestCase):
[ [
f'${{inst}}/srv/{self.image_directory}/{self.fake_image3_md5sum}', f'${{inst}}/srv/{self.image_directory}/{self.fake_image3_md5sum}',
f'${{inst}}/srv/{self.image_directory}/{self.fake_image2_md5sum}', f'${{inst}}/srv/{self.image_directory}/{self.fake_image2_md5sum}',
'${shared}/debian-${ver}-amd64-netinst.iso',
], ],
self.getRunningImageList(kvm_instance_partition) self.getRunningImageList()
) )
# cleanup of images works, also asserts that configuration changes are # cleanup of images works, also asserts that configuration changes are
# reflected # reflected
# Note: the key is left and empty_input is provided, as otherwise the part self.rerequestInstance()
# which generates images is simply removed, which can lead to self.slap.waitForInstance(max_retry=15)
# leftovers
self.rerequestInstance({self.key: self.empty_input})
self.slap.waitForInstance(max_retry=10)
self.assertEqual( self.assertEqual(
os.listdir(image_repository), os.listdir(image_repository),
[] []
...@@ -1370,26 +1190,11 @@ class TestBootImageUrlList(FakeImageServerMixin, KVMTestCase): ...@@ -1370,26 +1190,11 @@ class TestBootImageUrlList(FakeImageServerMixin, KVMTestCase):
# again only the default image is available in the running process # again only the default image is available in the running process
self.assertEqual( self.assertEqual(
['${shared}/debian-${ver}-amd64-netinst.iso'], [
self.getRunningImageList(kvm_instance_partition) '${inst}/srv/boot-image-url-select-repository/'
) '326b7737c4262e8eb09cd26773f3356a'
],
def assertPromiseFails(self, promise): self.getRunningImageList()
partition_directory = os.path.join(
self.slap.instance_directory,
self.kvm_instance_partition_reference)
monitor_run_promise = os.path.join(
partition_directory, 'software_release', 'bin',
'monitor.runpromise'
)
monitor_configuration = os.path.join(
partition_directory, 'etc', 'monitor.conf')
self.assertNotEqual(
0,
subprocess.call([
monitor_run_promise, '-c', monitor_configuration, '-a', '-f',
'--run-only', promise])
) )
def test_bad_parameter(self): def test_bad_parameter(self):
...@@ -1458,36 +1263,60 @@ class TestBootImageUrlListResilientJson( ...@@ -1458,36 +1263,60 @@ class TestBootImageUrlListResilientJson(
@skipUnlessKvm @skipUnlessKvm
class TestBootImageUrlSelect(TestBootImageUrlList): class TestBootImageUrlSelect(FakeImageServerMixin, KVMTestCase):
__partition_reference__ = 'bius' __partition_reference__ = 'bius'
kvm_instance_partition_reference = 'bius0' kvm_instance_partition_reference = 'bius0'
# variations
key = 'boot-image-url-select'
test_input = '["%s#%s", "%s#%s"]'
empty_input = '[]'
image_directory = 'boot-image-url-select-repository'
config_state_promise = 'boot-image-url-select-config-state-promise.py' config_state_promise = 'boot-image-url-select-config-state-promise.py'
download_md5sum_promise = 'boot-image-url-select-download-md5sum-promise.py'
download_state_promise = 'boot-image-url-select-download-state-promise.py' def test(self):
# check the default image
bad_value = '["jsutbad"]' image_repository = os.path.join(
incorrect_md5sum_value_image = '["%s#"]' self.slap.instance_directory, self.kvm_instance_partition_reference,
incorrect_md5sum_value = '["url#asdasd"]' 'srv', 'boot-image-url-select-repository')
single_image_value = '["%s#%s"]' self.assertEqual(
unreachable_host_value = '["evennotahost#%s"]' ['326b7737c4262e8eb09cd26773f3356a'],
too_many_image_value = """[ os.listdir(image_repository)
"image1#11111111111111111111111111111111", )
"image2#22222222222222222222222222222222", image = os.path.join(image_repository, '326b7737c4262e8eb09cd26773f3356a')
"image3#33333333333333333333333333333333", self.assertTrue(os.path.exists(image))
"image4#44444444444444444444444444444444", with open(image, 'rb') as fh:
"image5#55555555555555555555555555555555", image_md5sum = hashlib.md5(fh.read()).hexdigest()
"image6#66666666666666666666666666666666" self.assertEqual(image_md5sum, '326b7737c4262e8eb09cd26773f3356a')
]""" self.assertEqual(
[
def test_not_json(self): '${inst}/srv/boot-image-url-select-repository/'
'326b7737c4262e8eb09cd26773f3356a'
],
self.getRunningImageList()
)
# switch the image
self.rerequestInstance({ self.rerequestInstance({
self.key: 'notjson#notjson' 'boot-image-url-select': "Debian Bullseye 11 netinst x86_64"})
self.slap.waitForInstance(max_retry=10)
image_repository = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference,
'srv', 'boot-image-url-select-repository')
self.assertEqual(
['b710c178eb434d79ce40ce703d30a5f0'],
os.listdir(image_repository)
)
image = os.path.join(image_repository, 'b710c178eb434d79ce40ce703d30a5f0')
self.assertTrue(os.path.exists(image))
with open(image, 'rb') as fh:
image_md5sum = hashlib.md5(fh.read()).hexdigest()
self.assertEqual(image_md5sum, 'b710c178eb434d79ce40ce703d30a5f0')
self.assertEqual(
[
'${inst}/srv/boot-image-url-select-repository/'
'b710c178eb434d79ce40ce703d30a5f0'
],
self.getRunningImageList()
)
def test_bad_image(self):
self.rerequestInstance({
'boot-image-url-select': 'DOESNOTEXISTS'
}) })
self.raising_waitForInstance(3) self.raising_waitForInstance(3)
self.assertPromiseFails(self.config_state_promise) self.assertPromiseFails(self.config_state_promise)
...@@ -1496,66 +1325,113 @@ class TestBootImageUrlSelect(TestBootImageUrlList): ...@@ -1496,66 +1325,113 @@ class TestBootImageUrlSelect(TestBootImageUrlList):
partition_parameter_kw = { partition_parameter_kw = {
'boot-image-url-list': "{}#{}".format( 'boot-image-url-list': "{}#{}".format(
self.fake_image, self.fake_image_md5sum), self.fake_image, self.fake_image_md5sum),
'boot-image-url-select': '["{}#{}"]'.format( 'boot-image-url-select': "Debian Bullseye 11 netinst x86_64"
self.fake_image, self.fake_image_md5sum)
} }
self.rerequestInstance(partition_parameter_kw) self.rerequestInstance(partition_parameter_kw)
self.slap.waitForInstance(max_retry=10) self.slap.waitForInstance(max_retry=10)
# check that image is correctly downloaded # check that image is correctly downloaded
for image_directory in [ image_repository = os.path.join(
'boot-image-url-list-repository', 'boot-image-url-select-repository']: self.slap.instance_directory, self.kvm_instance_partition_reference,
image_repository = os.path.join( 'srv', 'boot-image-url-list-repository')
self.slap.instance_directory, self.kvm_instance_partition_reference, self.assertEqual(
'srv', image_directory) [self.fake_image_md5sum],
image = os.path.join(image_repository, self.fake_image_md5sum) os.listdir(image_repository)
self.assertTrue(os.path.exists(image)) )
with open(image, 'rb') as fh: image = os.path.join(image_repository, self.fake_image_md5sum)
image_md5sum = hashlib.md5(fh.read()).hexdigest() self.assertTrue(os.path.exists(image))
self.assertEqual(image_md5sum, self.fake_image_md5sum) with open(image, 'rb') as fh:
image_md5sum = hashlib.md5(fh.read()).hexdigest()
self.assertEqual(image_md5sum, self.fake_image_md5sum)
image_repository = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference,
'srv', 'boot-image-url-select-repository')
self.assertEqual(
['b710c178eb434d79ce40ce703d30a5f0'],
os.listdir(image_repository)
)
image = os.path.join(image_repository, 'b710c178eb434d79ce40ce703d30a5f0')
self.assertTrue(os.path.exists(image))
with open(image, 'rb') as fh:
image_md5sum = hashlib.md5(fh.read()).hexdigest()
self.assertEqual(image_md5sum, 'b710c178eb434d79ce40ce703d30a5f0')
kvm_instance_partition = os.path.join( kvm_instance_partition = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference) self.slap.instance_directory, self.kvm_instance_partition_reference)
self.assertEqual( self.assertEqual(
[ [
'${{inst}}/srv/boot-image-url-select-repository/{}'.format( '${{inst}}/srv/boot-image-url-list-repository/{}'.format(
self.fake_image_md5sum), self.fake_image_md5sum),
'${inst}/srv/boot-image-url-select-repository/'
'b710c178eb434d79ce40ce703d30a5f0'
],
self.getRunningImageList()
)
# check that using only boot-image-url-list results in not having any
# boot-image-url-select image if nothing is provided
partition_parameter_kw = {
'boot-image-url-list': "{}#{}".format(
self.fake_image, self.fake_image_md5sum),
}
self.rerequestInstance(partition_parameter_kw)
self.slap.waitForInstance(max_retry=10)
# check that image is correctly downloaded
image_repository = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference,
'srv', 'boot-image-url-list-repository')
self.assertEqual(
[self.fake_image_md5sum],
os.listdir(image_repository)
)
image = os.path.join(image_repository, self.fake_image_md5sum)
self.assertTrue(os.path.exists(image))
with open(image, 'rb') as fh:
image_md5sum = hashlib.md5(fh.read()).hexdigest()
self.assertEqual(image_md5sum, self.fake_image_md5sum)
self.assertEqual(
[],
os.listdir(os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference,
'srv', 'boot-image-url-select-repository'))
)
kvm_instance_partition = os.path.join(
self.slap.instance_directory, self.kvm_instance_partition_reference)
self.assertEqual(
[
'${{inst}}/srv/boot-image-url-list-repository/{}'.format( '${{inst}}/srv/boot-image-url-list-repository/{}'.format(
self.fake_image_md5sum), self.fake_image_md5sum),
'${shared}/debian-${ver}-amd64-netinst.iso',
], ],
self.getRunningImageList(kvm_instance_partition) self.getRunningImageList()
) )
# cleanup of images works, also asserts that configuration changes are # cleanup of images works, also asserts that configuration changes are
# reflected # reflected
self.rerequestInstance( self.rerequestInstance()
{'boot-image-url-list': '', 'boot-image-url-select': ''}) self.slap.waitForInstance(max_retry=15)
self.slap.waitForInstance(max_retry=2)
for image_directory in [
'boot-image-url-list-repository', 'boot-image-url-select-repository']:
image_repository = os.path.join(
kvm_instance_partition, 'srv', image_directory)
self.assertEqual(
os.listdir(image_repository),
[]
)
# cleanup of images works, also asserts that configuration changes are
# reflected
partition_parameter_kw[self.key] = ''
partition_parameter_kw['boot-image-url-list'] = ''
self.rerequestInstance(partition_parameter_kw)
self.slap.waitForInstance(max_retry=2)
self.assertEqual( self.assertEqual(
os.listdir(image_repository), os.listdir(os.path.join(
kvm_instance_partition, 'srv', 'boot-image-url-select-repository')),
['326b7737c4262e8eb09cd26773f3356a']
)
self.assertEqual(
os.listdir(os.path.join(
kvm_instance_partition, 'srv', 'boot-image-url-list-repository')),
[] []
) )
# again only the default image is available in the running process # again only the default image is available in the running process
self.assertEqual( self.assertEqual(
['${shared}/debian-${ver}-amd64-netinst.iso'], [
self.getRunningImageList(kvm_instance_partition) '${inst}/srv/boot-image-url-select-repository/'
'326b7737c4262e8eb09cd26773f3356a'
],
self.getRunningImageList()
) )
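The repository layout asserted throughout these tests (every image stored under its md5sum, with the content hashing back to the filename) can be captured in a small hypothetical checker:
import hashlib
import os

def verify_image_repository(repository):
    # Every file in a boot-image-url-*-repository directory is named after
    # the md5sum of its own content.
    for name in os.listdir(repository):
        with open(os.path.join(repository, name), 'rb') as fh:
            assert hashlib.md5(fh.read()).hexdigest() == name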
...@@ -1642,12 +1518,62 @@ class TestBootImageUrlListKvmCluster(FakeImageServerMixin, KVMTestCase): ...@@ -1642,12 +1518,62 @@ class TestBootImageUrlListKvmCluster(FakeImageServerMixin, KVMTestCase):
@skipUnlessKvm @skipUnlessKvm
class TestBootImageUrlSelectKvmCluster(TestBootImageUrlListKvmCluster): class TestBootImageUrlSelectKvmCluster(KvmMixin, KVMTestCase):
__partition_reference__ = 'biuskc' __partition_reference__ = 'biuskc'
input_value = "[\"%s#%s\"]" @classmethod
key = 'boot-image-url-select' def getInstanceSoftwareType(cls):
config_file_name = 'boot-image-url-select.json' return 'kvm-cluster'
@classmethod
def getInstanceParameterDict(cls):
return {'_': json.dumps({
"kvm-partition-dict": {
"KVM0": {
"disable-ansible-promise": True,
},
"KVM1": {
"disable-ansible-promise": True,
}
}
})}
def test(self):
# Note: As there is no way to nicely introspect where each partition landed,
# we assume the ordering of the cluster requests
self.rerequestInstance({'_': json.dumps({
"kvm-partition-dict": {
"KVM0": {
"disable-ansible-promise": True,
"boot-image-url-select": "Debian Bullseye 11 netinst x86_64"
},
"KVM1": {
"disable-ansible-promise": True,
"boot-image-url-select": "Debian Bookworm 12 netinst x86_64"
}
}
})})
self.slap.waitForInstance(max_retry=10)
KVM0_config = os.path.join(
self.slap.instance_directory, self.__partition_reference__ + '1', 'etc',
'boot-image-url-select.json')
KVM1_config = os.path.join(
self.slap.instance_directory, self.__partition_reference__ + '2', 'etc',
'boot-image-url-select.json')
with open(KVM0_config) as fh:
self.assertEqual(
'["https://shacache.nxdcdn.com/02257c3ec27e45d9f022c181a69b59da67e5c7'
'2871cdb4f9a69db323a1fad58093f2e69702d29aa98f5f65e920e0b970d816475a5a'
'936e1f3bf33832257b7e92#b710c178eb434d79ce40ce703d30a5f0"]',
fh.read().strip()
)
with open(KVM1_config) as fh:
self.assertEqual(
'["https://shacache.nxdcdn.com/33c08e56c83d13007e4a5511b9bf2c4926c4aa'
'12fd5dd56d493c0653aecbab380988c5bf1671dbaea75c582827797d98c4a611f7fb'
'2b131fbde2c677d5258ec9#326b7737c4262e8eb09cd26773f3356a"]',
fh.read().strip()
)
@skipUnlessKvm @skipUnlessKvm
...@@ -2320,9 +2246,9 @@ class TestExternalDisk(KVMTestCase, ExternalDiskMixin): ...@@ -2320,9 +2246,9 @@ class TestExternalDisk(KVMTestCase, ExternalDiskMixin):
restarted_drive_list = self.getRunningDriveList(kvm_instance_partition) restarted_drive_list = self.getRunningDriveList(kvm_instance_partition)
self.assertEqual(drive_list, restarted_drive_list) self.assertEqual(drive_list, restarted_drive_list)
# prove that even on resetting parameters, drives are still there # prove that even on resetting parameters, drives are still there
self.rerequestInstance({}, state='stopped') self.rerequestInstance(state='stopped')
self.waitForInstance() self.waitForInstance()
self.rerequestInstance({}) self.rerequestInstance()
self.waitForInstance() self.waitForInstance()
dropped_drive_list = self.getRunningDriveList(kvm_instance_partition) dropped_drive_list = self.getRunningDriveList(kvm_instance_partition)
self.assertEqual(drive_list, dropped_drive_list) self.assertEqual(drive_list, dropped_drive_list)
...@@ -2568,6 +2494,7 @@ class TestExternalDiskModernIndexRequired(KVMTestCase, ExternalDiskMixin): ...@@ -2568,6 +2494,7 @@ class TestExternalDiskModernIndexRequired(KVMTestCase, ExternalDiskMixin):
@skipUnlessKvm @skipUnlessKvm
class TestInstanceHttpServer(KVMTestCase, KvmMixin): class TestInstanceHttpServer(KVMTestCase, KvmMixin):
__partition_reference__ = 'ihs' __partition_reference__ = 'ihs'
kvm_instance_partition_reference = 'ihs0'
@classmethod @classmethod
def startHttpServer(cls): def startHttpServer(cls):
...@@ -2661,6 +2588,8 @@ vm""", ...@@ -2661,6 +2588,8 @@ vm""",
"""ihs0:6tunnel-10022-{hash}-on-watch RUNNING """ihs0:6tunnel-10022-{hash}-on-watch RUNNING
ihs0:6tunnel-10080-{hash}-on-watch RUNNING ihs0:6tunnel-10080-{hash}-on-watch RUNNING
ihs0:6tunnel-10443-{hash}-on-watch RUNNING ihs0:6tunnel-10443-{hash}-on-watch RUNNING
ihs0:boot-image-url-list-updater-{hash} EXITED
ihs0:boot-image-url-select-updater-{hash} EXITED
ihs0:bootstrap-monitor EXITED ihs0:bootstrap-monitor EXITED
ihs0:certificate_authority-{hash}-on-watch RUNNING ihs0:certificate_authority-{hash}-on-watch RUNNING
ihs0:crond-{hash}-on-watch RUNNING ihs0:crond-{hash}-on-watch RUNNING
...@@ -2673,7 +2602,10 @@ ihs0:nginx-graceful EXITED ...@@ -2673,7 +2602,10 @@ ihs0:nginx-graceful EXITED
ihs0:nginx-on-watch RUNNING ihs0:nginx-on-watch RUNNING
ihs0:whitelist-domains-download-{hash} RUNNING ihs0:whitelist-domains-download-{hash} RUNNING
ihs0:whitelist-firewall-{hash} RUNNING""", ihs0:whitelist-firewall-{hash} RUNNING""",
self.getProcessInfo() self.getProcessInfo([
'var/boot-image-url-list/boot-image-url-list.json',
'var/boot-image-url-select/boot-image-url-select.json'
])
) )
public_dir = os.path.join( public_dir = os.path.join(
self.computer_partition_root_path, 'srv', 'public') self.computer_partition_root_path, 'srv', 'public')
......
 [instance-profile]
 filename = instance.cfg.in
-md5sum = a6061e8bea111d96c10223f9b201ecc0
+md5sum = 136afc6a9b8ce14757d5931f5930de66
@@ -51,7 +51,7 @@ install =
 [mosquitto-password-file]
 recipe = plone.recipe.command
 location = ${directory:etc}/${:_buildout_section_name_}.txt
 command =
   touch ${:location}
   {{ mosquitto_location }}/bin/mosquitto_passwd -b ${:location} ${mosquitto-password:username} ${mosquitto-password:passwd}
 stop-on-error = true
...
-import os
-import time
+import pathlib

 import paho.mqtt.client as mqtt
 import paho.mqtt.publish as publish

 from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass

 setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
-  os.path.abspath(
-    os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
+  pathlib.Path(__file__).parent.parent / "software.cfg")


 class TestMosquitto(SlapOSInstanceTestCase):
   """
   Test if mosquitto service can publish and subscribe
   to specific topics with custom authentication ...
   """
-  def on_connect(self, client, userdata, flags, rc):
-    client.subscribe("test")
-    self.code = rc
-
-  def on_message(self, client, userdata, msg):
-    self.topic = msg.topic
-    self.payload = str(msg.payload.decode())
-
-  def test_topic_ipv4(self):
-    host = self.computer_partition.getConnectionParameterDict()["ipv4"]
-    username = self.computer_partition.getConnectionParameterDict()["username"]
-    password = self.computer_partition.getConnectionParameterDict()["password"]
-    topic = "test"
-    payload = "Hello, World!"
-    client = mqtt.Client()
-    client.on_connect = self.on_connect
-    client.on_message = self.on_message
-    client.username_pw_set(username=f"{username}", password=f"{password}")
-    client.connect(f"{host}", 1883, 10)
-    client.loop_start()
-    publish.single(
-      topic=topic,
-      payload=payload,
-      hostname=f"{host}",
-      auth={ "username": f"{username}", "password": f"{password}" }
-    )
-    time.sleep(10)
-    client.loop_stop()
-    self.assertEqual(self.code, 0)
-    self.assertEqual(self.topic, topic)
-
-  def test_payload_ipv4(self):
-    host = self.computer_partition.getConnectionParameterDict()["ipv4"]
-    username = self.computer_partition.getConnectionParameterDict()["username"]
-    password = self.computer_partition.getConnectionParameterDict()["password"]
-    topic = "test"
-    payload = "Hello, World!"
-    client = mqtt.Client()
-    client.on_connect = self.on_connect
-    client.on_message = self.on_message
-    client.username_pw_set(username=f"{username}", password=f"{password}")
-    client.connect(f"{host}", 1883, 10)
-    client.loop_start()
-    publish.single(
-      topic=topic,
-      payload=payload,
-      hostname=f"{host}",
-      auth={ "username": f"{username}", "password": f"{password}" }
-    )
-    time.sleep(10)
-    client.loop_stop()
-    self.assertEqual(self.code, 0)
-    self.assertEqual(self.payload, payload)
-
-  def test_topic_ipv6(self):
-    host = self.computer_partition.getConnectionParameterDict()["ipv6"]
-    username = self.computer_partition.getConnectionParameterDict()["username"]
-    password = self.computer_partition.getConnectionParameterDict()["password"]
-    topic = "test"
-    payload = "Hello, World!"
-    client = mqtt.Client()
-    client.on_connect = self.on_connect
-    client.on_message = self.on_message
-    client.username_pw_set(username=f"{username}", password=f"{password}")
-    client.connect(f"{host}", 1883, 10)
-    client.loop_start()
-    publish.single(
-      topic=topic,
-      payload=payload,
-      hostname=f"{host}",
-      auth={ "username": f"{username}", "password": f"{password}" }
-    )
-    time.sleep(10)
-    client.loop_stop()
-    self.assertEqual(self.code, 0)
-    self.assertEqual(self.topic, topic)
-
-  def test_payload_ipv6(self):
-    host = self.computer_partition.getConnectionParameterDict()["ipv6"]
-    username = self.computer_partition.getConnectionParameterDict()["username"]
-    password = self.computer_partition.getConnectionParameterDict()["password"]
-    topic = "test"
-    payload = "Hello, World!"
-    client = mqtt.Client()
-    client.on_connect = self.on_connect
-    client.on_message = self.on_message
-    client.username_pw_set(username=f"{username}", password=f"{password}")
-    client.connect(f"{host}", 1883, 10)
-    client.loop_start()
-    publish.single(
-      topic=topic,
-      payload=payload,
-      hostname=f"{host}",
-      auth={ "username": f"{username}", "password": f"{password}" }
-    )
-    time.sleep(10)
-    client.loop_stop()
-    self.assertEqual(self.code, 0)
-    self.assertEqual(self.payload, payload)
+  def test_ipv4(self):
+    self._test(self.computer_partition.getConnectionParameterDict()["ipv4"])
+
+  def test_ipv6(self):
+    self._test(self.computer_partition.getConnectionParameterDict()["ipv6"])
+
+  def _test(self, host):
+    username = self.computer_partition.getConnectionParameterDict()["username"]
+    password = self.computer_partition.getConnectionParameterDict()["password"]
+    port = int(self.computer_partition.getConnectionParameterDict()["port"])
+    topic = "test"
+    payload = "Hello, World!"
+
+    client = mqtt.Client()
+    client.enable_logger(self.logger)
+
+    def on_connect(client, userdata, flags, rc):
+      client.subscribe(topic)
+    client.on_connect = on_connect
+
+    def on_subscribe(client, userdata, mid, granted_qos, properties=None):
+      # once our client is subscribed, publish from another connection
+      publish.single(
+        topic=topic,
+        payload=payload,
+        hostname=host,
+        auth={"username": username, "password": password},
+      )
+    client.on_subscribe = on_subscribe
+
+    def on_message(client, userdata, msg):
+      self.topic = msg.topic
+      self.payload = str(msg.payload.decode())
+    client.on_message = on_message
+
+    client.username_pw_set(username=username, password=password)
+    client.connect(host, port)
+
+    self.topic = None  # will be set by on_message
+    max_retries = 100  # give up after this number of iterations
+    for _ in range(max_retries):
+      client.loop()
+      if self.topic is not None:
+        break
+
+    self.assertEqual(self.topic, topic)
+    self.assertEqual(self.payload, payload)
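
For reference, the same subscribe-then-publish round trip can be reproduced outside the test harness with a short standalone script. This is a minimal sketch using the paho-mqtt 1.x API pinned by this commit; the broker address and credentials are placeholders:

```python
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish

BROKER_HOST = "localhost"              # placeholder broker address
USERNAME, PASSWORD = "user", "secret"  # placeholder credentials
TOPIC, PAYLOAD = "test", "Hello, World!"

received = []

def on_connect(client, userdata, flags, rc):
  client.subscribe(TOPIC)

def on_subscribe(client, userdata, mid, granted_qos):
  # publish from a second, short-lived connection once subscribed
  publish.single(
    topic=TOPIC, payload=PAYLOAD, hostname=BROKER_HOST,
    auth={"username": USERNAME, "password": PASSWORD})

def on_message(client, userdata, msg):
  received.append((msg.topic, msg.payload.decode()))

client = mqtt.Client()
client.on_connect = on_connect
client.on_subscribe = on_subscribe
client.on_message = on_message
client.username_pw_set(USERNAME, PASSWORD)
client.connect(BROKER_HOST, 1883)
for _ in range(100):  # bounded polling instead of time.sleep()
  client.loop()
  if received:
    break
assert received == [(TOPIC, PAYLOAD)]
```

Driving `client.loop()` in a bounded loop, as the rewritten test does, avoids both the fixed ten-second sleep and the race between subscribing and publishing.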
@@ -8,6 +8,10 @@ extends =
   ../../stack/slapos.cfg
   ../../component/osie-coupler/buildout.cfg

+[osie-coupler]
+bin_dir = ${buildout:directory}/bin/
+post-install =
+  cp bin/server ${:bin_dir}

 [instance-profile]
 recipe = slapos.recipe.template:jinja2
...
@@ -236,7 +236,7 @@ class TestFilesAndSocketsInInstanceDir(ProFTPdTestCase):
       [],
       [
         f for f in self.proftpdProcess.open_files() if f.mode != 'r'
-        if not f.path.startswith(self.computer_partition_root_path)
+        if not f.path.startswith(str(self.computer_partition_root_path))
       ])

   def test_only_unix_socket_in_instance_dir(self):
@@ -244,7 +244,7 @@ class TestFilesAndSocketsInInstanceDir(ProFTPdTestCase):
       [],
       [
         s for s in self.proftpdProcess.connections('unix')
-        if not s.laddr.startswith(self.computer_partition_root_path)
+        if not s.laddr.startswith(str(self.computer_partition_root_path))
       ])
...
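The `str()` wrapping is needed because `computer_partition_root_path` is now a `pathlib.Path`, and `str.startswith()` only accepts `str` (or a tuple of `str`) prefixes. A minimal illustration, with a made-up partition path:

```python
import pathlib

root = pathlib.Path("/srv/slapgrid/slappart0")  # made-up partition path
try:
  "/srv/slapgrid/slappart0/etc/proftpd.conf".startswith(root)
except TypeError as e:
  print(e)  # str.startswith() rejects a PosixPath prefix
# converting back to str makes the prefix test work as before
print("/srv/slapgrid/slappart0/etc/proftpd.conf".startswith(str(root)))  # True
```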
@@ -14,6 +14,7 @@ extends =
   ../../component/python-pynacl/buildout.cfg
   ../../component/python-backports-lzma/buildout.cfg
   ../../component/selenium/buildout.cfg
+  ../../component/ZODB/buildout.cfg
   ../../stack/slapos.cfg
   ../../stack/nxdtest.cfg
@@ -67,6 +68,11 @@ setup = ${slapos-repository:location}/software/erp5/test/
 egg = slapos.test.upgrade_erp5
 setup = ${slapos-repository:location}/software/erp5/upgrade_test/

+[slapos.test.gdal-setup]
+<= setup-develop-egg
+egg = slapos.test.gdal
+setup = ${slapos-repository:location}/component/gdal/test/
+
 [slapos.test.htmlvalidatorserver-setup]
 <= setup-develop-egg
 egg = slapos.test.htmlvalidatorserver
@@ -351,6 +357,7 @@ setup = ${recurls-repository:location}
 [python-interpreter]
 eggs +=
+  ${BTrees:egg}
   ${lxml-python:egg}
   ${python-PyYAML:egg}
   ${slapos.core-setup:egg}
@@ -360,6 +367,7 @@ eggs +=
   beautifulsoup4
   caucase
   erp5.util
+  ${persistent:egg}
   ${python-pynacl:egg}
   ${python-cryptography:egg}
   ${python-mysqlclient:egg}
@@ -382,6 +390,7 @@ eggs +=
   ${slapos.test.erp5testnode-setup:egg}
   ${slapos.test.fluentd-setup:egg}
   ${slapos.test.galene-setup:egg}
+  ${slapos.test.gdal-setup:egg}
   ${slapos.test.headless-chromium-setup:egg}
   ${slapos.test.html5as-base-setup:egg}
   ${slapos.test.html5as-setup:egg}
@@ -473,6 +482,7 @@ tests =
   erp5testnode ${slapos.test.erp5testnode-setup:setup}
   fluentd ${slapos.test.fluentd-setup:setup}
   galene ${slapos.test.galene-setup:setup}
+  gdal ${slapos.test.gdal-setup:setup}
   gitlab ${slapos.test.gitlab-setup:setup}
   grafana ${slapos.test.grafana-setup:setup}
   headless-chromium ${slapos.test.headless-chromium-setup:setup}
@@ -519,18 +529,26 @@ recurls =
 slapos.core =
 # Various needed versions
-Pillow = 10.2.0+SlapOSPatched001
+BTrees = 6.1
 forcediphttpsadapter = 1.0.1
 image = 1.5.25
+mysqlclient = 2.1.1
+paho-mqtt = 1.5.0
+pcpp = 1.30
+persistent = 6.1
+Pillow = 10.2.0+SlapOSPatched001
 plantuml = 0.3.0:whl
 pypdf = 3.6.0:whl
 pysftp = 0.2.9
 requests-toolbelt = 0.8.0
 testfixtures = 6.11.0
-mysqlclient = 2.1.1
-paho-mqtt = 1.5.0
-pcpp = 1.30
+transaction = 5.0
 xmltodict = 0.13.0
+ZEO = 6.0.0
+ZODB = 6.0.0
+zodbpickle = 4.1.1
+zope.deferredimport = 5.0
+zope.proxy = 6.1

 # Test Suite: SlapOS.SoftwareReleases.IntegrationTest-Master.Python2 ran at 2022/09/08 02:05:35.783873 UTC
...
@@ -28,13 +28,11 @@
 import os
 import contextlib
 import paramiko
-import subprocess
 from urllib.parse import urlparse
-import socket

 from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
-from slapos.util import bytes2str

 setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
   os.path.abspath(
@@ -71,5 +69,5 @@ class TestSSH(SlapOSInstanceTestCase):
     self.assertTrue(client.get_transport().is_active())
     # simple commands can also be executed ( this would be like `ssh bash -c 'pwd'` )
     # exec_command means `ssh user@host command`
-    current_path = bytes2str(client.exec_command("pwd")[1].read(1000)).strip()
-    self.assertIn(current_path, self.computer_partition_root_path)
+    current_path = client.exec_command("pwd")[1].read(1000).decode().strip()
+    self.assertIn(current_path, str(self.computer_partition_root_path))
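
As the comments say, `exec_command` is the programmatic equivalent of `ssh user@host command`: it returns `(stdin, stdout, stderr)` file-like objects whose output arrives as bytes, hence the `.decode()` replacing `bytes2str`. A minimal standalone sketch, with placeholder host, user, and key path:

```python
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# host, port, username and key path are placeholders
client.connect("example.org", port=22, username="user",
               key_filename="/path/to/key")
# exec_command returns (stdin, stdout, stderr) file-like objects
stdin, stdout, stderr = client.exec_command("pwd")
print(stdout.read().decode().strip())  # bytes from the channel, decoded
client.close()
```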
{
"name": "Wendelin",
"description": "Wendelin",
"serialisation": "json-in-xml",
"software-type": {
"default": {
"title": "Default",
"software-type": "default",
"request": "../erp5/instance-erp5-input-schema.json",
"response": "../erp5/instance-erp5-output-schema.json",
"index": 0
}
}
}
@@ -78,7 +78,7 @@ md5sum = 1333d2fc21f64da4010a4eafea59d141

 [template-zeo]
 filename = instance-zeo.cfg.in
-md5sum = 3190fb6b2380ffbef40db62e1d4ba4d0
+md5sum = 702afb430227eebe4312a618da7ef7cb

 [template-zeo-conf]
 filename = zeo.conf.in
...
@@ -80,6 +80,45 @@ config-port = {{ "${" ~ zeo_section_name ~ ":port}" }}
 {% set tidstorage_repozo_path = '' -%}
 {% else -%}
+[repozo-backup-script]
+repozo-wrapper = ${buildout:bin-directory}/tidstorage-repozo
+
+# BBB on python3 we don't use Products.TIDStorage but repozo directly.
+[repozo-backup-script:python3]
+recipe = slapos.recipe.template
+inline =
+  #!/bin/sh
+  zodb_directory="${directory:zodb}"
+  zodb_backup_directory="{{ default_backup_path }}"
+  repozo="${tidstorage:repozo-binary}"
+  EXIT_CODE=0
+  {% for family, zodb in six.iteritems(zodb_dict) -%}
+  {% for name, zodb in zodb -%}
+  storage_name="{{ name }}"
+  zodb_path="$storage_name.fs"
+  [ ! -d "$zodb_backup_directory/$storage_name" ] && mkdir "$zodb_backup_directory/$storage_name"
+  echo "Backing up $storage_name ..."
+  $repozo \
+    --backup \
+    --kill-old-on-full \
+    --gzip \
+    --quick \
+    --repository="$zodb_backup_directory/$storage_name" \
+    --file="$zodb_directory/$zodb_path"
+  CURRENT_EXIT_CODE=$?
+  if [ "$CURRENT_EXIT_CODE" != "0" ]; then
+    EXIT_CODE="$CURRENT_EXIT_CODE"
+    echo "$storage_name backup failed."
+  fi
+  {% endfor -%}
+  {% endfor -%}
+  exit $EXIT_CODE
+repozo-wrapper = ${:output}
+mode = 755
+output = ${buildout:bin-directory}/repozo-backup
+
 [tidstorage]
 recipe = slapos.cookbook:tidstorage
 known-tid-storage-identifier-dict = {{ dumps(known_tid_storage_identifier_dict) }}
@@ -116,7 +155,7 @@ recipe = slapos.cookbook:cron.d
 cron-entries = ${cron:cron-entries}
 name = tidstorage
 time = {{ dumps(backup_periodicity) }}
-command = ${tidstorage:repozo-wrapper}
+command = ${repozo-backup-script:repozo-wrapper}

 # Used for ERP5 resiliency or (more probably)
 # webrunner resiliency with erp5 inside.
@@ -137,8 +176,9 @@ mode = 770
 [{{ section("resiliency-after-import-script") }}]
 # Generate after import script used by importer instance of webrunner
-recipe = collective.recipe.template
-input = inline: #!/bin/sh
+recipe = slapos.recipe.template
+inline =
+  #!/bin/sh
   # DO NOT RUN THIS SCRIPT ON PRODUCTION INSTANCE
   # OR ZODB DATA WILL BE ERASED.
@@ -146,8 +186,6 @@ input = inline: #!/bin/sh
   # zodb location. It is launched by the clone (importer) instance of webrunner
   # in the end of the import script.
-  # Depending on the output, it will create a file containing
-  # the status of the restoration (success or failure).

   zodb_directory="${directory:zodb}"
   zodb_backup_directory="{{ default_backup_path }}"
   repozo="${tidstorage:repozo-binary}"
...
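For completeness, the inverse of the backup loop added above is repozo's `--recover` mode, which rebuilds a Data.fs from the repository directory that `--backup` wrote. A hedged sketch driving the same binary from Python; all paths are placeholders for the instance's actual layout:

```python
import subprocess

# placeholders: adapt to the instance's actual layout
repozo = "/srv/instance/bin/repozo"
repository = "/srv/instance/srv/backup/zodb/root"  # where --backup wrote .fs/.deltafs files
output = "/tmp/recovered-root.fs"                  # file to reconstruct

# --recover is repozo's documented inverse of --backup
subprocess.check_call([
  repozo,
  "--recover",
  "--repository=" + repository,
  "--output=" + output,
])
```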
@@ -138,10 +138,10 @@ eggs =
 # The last version of setuptools compatible with Python 3.7
 setuptools = 67.8.0
 # Use SlapOS patched zc.buildout
-zc.buildout = 3.0.1+slapos004
+zc.buildout = 3.0.1+slapos005
 pip = 23.2.1
 # Use SlapOS patched zc.recipe.egg (zc.recipe.egg 2.x is for Buildout 2)
-zc.recipe.egg = 2.0.8.dev0+slapos004
+zc.recipe.egg = 2.0.8.dev0+slapos005
 aiofiles = 23.1.0:whl
 aiohttp = 3.8.5:whl
...