Commit 60b48637 authored by Cédric de Saint Martin

Merge branch 'master' into kvm

Conflicts:
	slapos/recipe/request.py
parents cfc2b8bb 611f80e0
[buildout]
parts = busybox
[busybox]
recipe = slapos.recipe.build
url = http://busybox.net/downloads/busybox-1.19.3.tar.bz2
md5sum = c3938e1ac59602387009bbf1dd1af7f6
script =
extract_dir = self.extract(self.download(%(url)r, %(md5sum)r))
workdir = guessworkdir(extract_dir)
self.logger.info("Creating default configuration")
call(['make', 'defconfig'], cwd=workdir, env=env)
self.logger.info("Building")
call(['make'], cwd=workdir, env=env)
self.logger.info("Installing")
call(['make', 'CONFIG_PREFIX=%(location)s', 'install'], cwd=workdir, env=env)
self.logger.info("Installation finished")
# CA certificates
[buildout]
parts =
ca-certificates
[ca-certificates-sbin-dir.patch]
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/${:filename}
md5sum = 0b4e7d82ce768823c01954ee41ef177b
filename = ${:_buildout_section_name_}
download-only = true
[ca-certificates]
recipe = hexagonit.recipe.cmmi
version = 20111211
url = ftp://ftp.free.fr/mirrors/ftp.debian.org/pool/main/c/ca-certificates/ca-certificates_${:version}.tar.gz
patches =
${ca-certificates-sbin-dir.patch:location}/${ca-certificates-sbin-dir.patch:filename}
patch-options = -p0
configure-command = true
make-targets = install DESTDIR=${buildout:parts-directory}/${:_buildout_section_name_} CERTSDIR=certs SBINDIR=sbin
--- Makefile.orig 2011-12-11 20:54:02.000000000 +0100
+++ Makefile 2012-01-09 17:36:55.059392824 +0100
@@ -17,7 +17,7 @@
install:
for dir in $(SUBDIRS); do \
- mkdir $(DESTDIR)/$(CERTSDIR)/$$dir; \
+ mkdir -p $(DESTDIR)/$(CERTSDIR)/$$dir; \
$(MAKE) -C $$dir install CERTSDIR=$(DESTDIR)/$(CERTSDIR)/$$dir; \
done
for dir in sbin; do \
--- sbin/Makefile.orig 2011-12-11 20:54:02.000000000 +0100
+++ sbin/Makefile 2012-01-09 17:31:45.207387898 +0100
@@ -3,9 +3,12 @@
#
#
+SBINDIR=/usr/sbin
+
all:
clean:
install:
- install -m755 update-ca-certificates $(DESTDIR)/usr/sbin/
+ mkdir -p $(DESTDIR)/$(SBINDIR)
+ install -m755 update-ca-certificates $(DESTDIR)/$(SBINDIR)
[buildout]
extends =
nss.cfg
parts =
corosync
[corosync]
recipe = hexagonit.recipe.cmmi
url = ftp://ftp:downloads@corosync.org/downloads/corosync-1.3.1/corosync-1.3.1.tar.gz
md5sum = c58459a009a3a9d0b9c00e276a190d90
environment =
CPPFLAGS=-I${nspr:location}/include/nspr -I${nss:location}/include/nss
PKG_CONFIG_PATH=${nss:location}/lib/pkgconfig:${nspr:location}/lib/pkgconfig
LDFLAGS =-L${nspr:location}/lib -Wl,-rpath=${nspr:location}/lib -L${nss:location}/lib -Wl,-rpath=${nss:location}/lib -Wl,-rpath=${buildout:parts-directory}/${:_buildout_section_name_}/lib
......@@ -11,8 +11,8 @@ parts =
[curl]
recipe = hexagonit.recipe.cmmi
url = http://curl.haxx.se/download/curl-7.21.7.tar.bz2
md5sum = 5f6d50c4d4ee38c57fe37e3cff75adbd
url = http://curl.haxx.se/download/curl-7.24.0.tar.bz2
#md5sum = 5f6d50c4d4ee38c57fe37e3cff75adbd
configure-options =
--disable-static
--disable-ldap
......
--- cyrus-sasl-2.1.23/include/sasl.h 2010-11-25 18:15:05.000000000 +0100
+++ cyrus-sasl-2.1.23/include/sasl.h 2010-11-25 18:15:34.000000000 +0100
@@ -346,7 +346,7 @@
* Mechanisms must ignore callbacks with id's they don't recognize.
*/
unsigned long id;
- int (*proc)(); /* Callback function. Types of arguments vary by 'id' */
+ int (*proc); /* Callback function. Types of arguments vary by 'id' */
void *context;
} sasl_callback_t;
[buildout]
parts =
erlang
[erlang]
recipe = hexagonit.recipe.cmmi
url = http://www.erlang.org/download/otp_src_R14B03.tar.gz
md5sum = 7979e662d11476b97c462feb7c132fb7
# File - Determines file type using "magic" numbers
# http://www.darwinsys.com/file/
[buildout]
parts = file
extends =
../zlib/buildout.cfg
[file]
recipe = hexagonit.recipe.cmmi
url = ftp://ftp.astron.com/pub/file/file-5.07.tar.gz
md5sum = b8d1f9a8a644067bd0a703cebf3f4858
url = ftp://ftp.astron.com/pub/file/file-5.10.tar.gz
md5sum = 4cea34b087b060772511e066e2038196
configure-options =
--disable-static
environment =
......
......@@ -63,6 +63,7 @@ md5sum = d7cd6a27c8801e66cbaa964a039ecfdb
filename = ecj.jar
[gcc-download]
hack-revision = ${gcc-interconnection-workaround:hack-revision}
recipe = hexagonit.recipe.download
url = http://www.mirrorservice.org/sites/sourceware.org/pub/gcc/releases/gcc-4.5.3/gcc-4.5.3.tar.bz2
md5sum = 8e0b5c12212e185f3e4383106bfa9cc6
......@@ -70,6 +71,7 @@ strip-top-level-dir = True
destination = ${gcc-source:location}
[gcc-java-download]
hack-revision = ${gcc-interconnection-workaround:hack-revision}
recipe = hexagonit.recipe.download
url = http://www.mirrorservice.org/sites/sourceware.org/pub/gcc/releases/gcc-4.5.3/gcc-java-4.5.3.tar.bz2
md5sum = 08e045fdbdc22ac9af3aec3b8d16dbab
......@@ -78,6 +80,7 @@ destination = ${gcc-source:location}
ignore-existing = true
[gcc-source]
hack-revision = ${gcc-interconnection-workaround:hack-revision}
location = ${buildout:parts-directory}/${:_buildout_section_name_}
[gcc-multiarch.patch]
......@@ -87,7 +90,15 @@ url = ${:_profile_base_location_}/${:filename}
filename = ${:_buildout_section_name_}
download-only = true
[gcc-java-pre-4.4.patch]
recipe = hexagonit.recipe.download
md5sum = 9a563576126d9fcf234ef29c2fc7df76
url = ${:_profile_base_location_}/${:filename}
filename = ${:_buildout_section_name_}
download-only = true
[gcc-java-minimal]
hack-revision = ${gcc-interconnection-workaround:hack-revision}
depends =
${gcc-download:location}
${gcc-java-download:location}
......@@ -96,6 +107,7 @@ path = ${gcc-source:location}
md5sum = bb3265edf0fa7543e50cedb93e04e427
patches =
${gcc-multiarch.patch:location}/${gcc-multiarch.patch:filename}
${gcc-java-pre-4.4.patch:location}/${gcc-java-pre-4.4.patch:filename}
patch-options = -p2
configure-command = make clean \\; make distclean \\; ./configure
# GMP does not correctly detect the architecture, so it has to be given
......@@ -121,6 +133,7 @@ environment =
make-targets = install -j1
[gcc]
hack-revision = ${gcc-interconnection-workaround:hack-revision}
depends =
${gcc-download:location}
${gcc-java-download:location}
......@@ -151,3 +164,10 @@ environment =
PATH=${zip:location}/bin:%(PATH)s
# make install does not work when several cores are used
make-targets = install -j1
[gcc-interconnection-workaround]
# gcc parts are interconnected, so buildout is not capable of cleaning them up
# until gcc is simplified by using a more robust build recipe (like
# slapos.recipe.build). Each time any of the parts which reuse this one gets
# updated, the hack-revision has to be increased.
hack-revision = 1
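The comment above describes the workaround: buildout re-installs a part whenever its resolved options change, so bumping hack-revision here changes the options of every gcc-* section that references it and forces them to be rebuilt together. A minimal Python sketch of that idea, illustrative only — the function name and hashing scheme are assumptions, not buildout's actual implementation:

import hashlib

def options_signature(options):
    # Illustrative stand-in for buildout's installed-part signature:
    # a part is re-installed when the digest of its resolved options changes.
    data = '\n'.join('%s = %s' % item for item in sorted(options.items()))
    return hashlib.md5(data.encode()).hexdigest()

before = options_signature({'url': 'gcc-4.5.3.tar.bz2', 'hack-revision': '1'})
after = options_signature({'url': 'gcc-4.5.3.tar.bz2', 'hack-revision': '2'})
assert before != after  # bumping hack-revision forces the part to be rebuilt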
Patch for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=50888
--- a/src/libjava/libjava/prims.cc.orig 2012-01-20 11:30:18.586157610 +0100
+++ b/src/libjava/libjava/prims.cc 2012-01-20 11:30:58.192770947 +0100
@@ -38,6 +38,14 @@
#endif
#ifndef DISABLE_GETENV_PROPERTIES
+#ifdef __GLIBC__
+/* glibc 2.15+ provides even for C++ inline optimized ::isspace etc.
+ Unfortunately those inlines are throw (), and call a function pointer
+ (which is throw () too, but with -fnon-call-exceptions this results
+ in a __cxa_call_unexpected call. This macro disables the optimized
+ version. */
+#define __NO_CTYPE 1
+#endif
#include <ctype.h>
#include <java-props.h>
#define PROCESS_GCJ_PROPERTIES process_gcj_properties()
--- a/src/libjava/prims.cc.orig 2012-01-20 11:30:23.042818341 +0100
+++ b/src/libjava/prims.cc 2012-01-20 11:31:01.389433254 +0100
@@ -38,6 +38,14 @@
#endif
#ifndef DISABLE_GETENV_PROPERTIES
+#ifdef __GLIBC__
+/* glibc 2.15+ provides even for C++ inline optimized ::isspace etc.
+ Unfortunately those inlines are throw (), and call a function pointer
+ (which is throw () too, but with -fnon-call-exceptions this results
+ in a __cxa_call_unexpected call. This macro disables the optimized
+ version. */
+#define __NO_CTYPE 1
+#endif
#include <ctype.h>
#include <java-props.h>
#define PROCESS_GCJ_PROPERTIES process_gcj_properties()
--- Makefile.in 2002-10-08 16:09:12.000000000 +0000
+++ Makefile.in.nochange 2010-11-03 21:17:38.579435530 +0000
@@ -14,10 +14,6 @@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_DATA = @INSTALL_DATA@
-# File ownership and group
-BINOWN = bin
-BINGRP = bin
-
MAKEINFO = makeinfo
TEXI2DVI = texi2dvi
@@ -131,11 +127,11 @@
$(INSTALL_ROOT)$(includedir) $(INSTALL_ROOT)$(man3dir) \
$(INSTALL_ROOT)$(infodir)
$(LIBTOOL) $(INSTALL) -c libgdbm.la $(INSTALL_ROOT)$(libdir)/libgdbm.la
- $(INSTALL_DATA) -o $(BINOWN) -g $(BINGRP) gdbm.h \
+ $(INSTALL_DATA) gdbm.h \
$(INSTALL_ROOT)$(includedir)/gdbm.h
- $(INSTALL_DATA) -o $(BINOWN) -g $(BINGRP) $(srcdir)/gdbm.3 \
+ $(INSTALL_DATA) $(srcdir)/gdbm.3 \
$(INSTALL_ROOT)$(man3dir)/gdbm.3
- $(INSTALL_DATA) -o $(BINOWN) -g $(BINGRP) $(srcdir)/gdbm.info \
+ $(INSTALL_DATA) $(srcdir)/gdbm.info \
$(INSTALL_ROOT)$(infodir)/gdbm.info
install-compat:
@@ -143,9 +139,9 @@
$(INSTALL_ROOT)$(includedir)
$(LIBTOOL) $(INSTALL) -c libgdbm_compat.la \
$(INSTALL_ROOT)$(libdir)/libgdbm_compat.la
- $(INSTALL_DATA) -o $(BINOWN) -g $(BINGRP) $(srcdir)/dbm.h \
+ $(INSTALL_DATA) $(srcdir)/dbm.h \
$(INSTALL_ROOT)$(includedir)/dbm.h
- $(INSTALL_DATA) -o $(BINOWN) -g $(BINGRP) $(srcdir)/ndbm.h \
+ $(INSTALL_DATA) $(srcdir)/ndbm.h \
$(INSTALL_ROOT)$(includedir)/ndbm.h
#libgdbm.a: $(OBJS) gdbm.h
......@@ -13,10 +13,8 @@ parts =
[git]
recipe = hexagonit.recipe.cmmi
# url = http://kernel.org/pub/software/scm/git/git-1.7.4.5.tar.bz2
# Circumvent kernel.org downtime
url = http://git-core.googlecode.com/files/git-1.7.8.tar.gz
md5sum = 4a3c03a04dbb857ecc875dae1278b76e
url = http://git-core.googlecode.com/files/git-1.7.8.3.tar.gz
md5sum = 7a4bc5160166537d4da5eb48a7670dff
configure-options =
--with-curl=${curl:location}
--with-openssl=${openssl:location}
......
......@@ -8,17 +8,15 @@ extends =
../fontconfig/buildout.cfg
../freetype/buildout.cfg
../libpng/buildout.cfg
../libtool/buildout.cfg
../pkgconfig/buildout.cfg
../zlib/buildout.cfg
[graphviz]
recipe = hexagonit.recipe.cmmi
url = http://www.graphviz.org/pub/graphviz/stable/SOURCES/graphviz-2.26.3.tar.gz
md5sum = 6f45946fa622770c45609778c0a982ee
url = http://www.graphviz.org/pub/graphviz/stable/SOURCES/graphviz-2.28.0.tar.gz
md5sum = 8d26c1171f30ca3b1dc1b429f7937e58
configure-options =
--with-ltdl-include=${libtool:location}/include
--with-ltdl-lib=${libtool:location}/lib
--with-included-ltdl
--with-pngincludedir=${libpng:location}/include
--with-pnglibdir=${libpng:location}/lib
--with-zincludedir=${zlib:location}/include
......
# mroonga - a MySQL storage engine using full-text search engine groonga
# http://mroonga.github.com/
# groonga - an open-source fulltext search engine and column store
# http://groonga.org/
[buildout]
extends =
../autoconf/buildout.cfg
../automake/buildout.cfg
../glib/buildout.cfg
../libtool/buildout.cfg
../pkgconfig/buildout.cfg
parts =
groonga
[groonga-1.2.8-configure-Wno-cflags-patch]
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/${:filename}
md5sum = b48ac46c7de0ed4c0e632e06118b8a58
filename = groonga-1.2.8-configure-Wno-cflags.patch
download-only = true
[groonga]
recipe = hexagonit.recipe.cmmi
url = http://packages.groonga.org/source/groonga/groonga-1.2.8.tar.gz
md5sum = a319b1f3a55cbf250ef5255f5c51ff46
patch-options = -p0
patches =
${groonga-1.2.8-configure-Wno-cflags-patch:location}/${groonga-1.2.8-configure-Wno-cflags-patch:filename}
environment =
PATH =${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:%(PATH)s
configure-command =
aclocal -I ${glib:location}/share/aclocal -I ${libtool:location}/share/aclocal -I ${pkgconfig:location}/share/aclocal -I .
libtoolize -c -f
autoheader
automake -c -a -f
autoconf
./configure
url = http://packages.groonga.org/source/groonga/groonga-1.2.9.tar.gz
md5sum = 47117baa401a3db08362e00f94fced12
configure-options =
--prefix=${buildout:parts-directory}/${:_buildout_section_name_}
--disable-static
--disable-glibtest
--disable-benchmark
......
--- configure.ac.orig 2011-12-06 12:54:26.812408976 +0900
+++ configure.ac 2011-12-06 13:17:39.060465045 +0900
@@ -70,9 +70,10 @@
m4_pattern_allow(PKG_CONFIG_LIBDIR)])
AC_DEFUN([CHECK_CFLAG], [
+ cflag=$(echo "$1" | sed -e 's,^-Wno-,-W,')
AC_MSG_CHECKING([if gcc supports $1])
old_CFLAGS=$CFLAGS
- CFLAGS="$CFLAGS $1 -Werror"
+ CFLAGS="$CFLAGS $cflag -Werror"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([])],
[check_cflag=yes],
[check_cflag=no])
@@ -84,9 +85,10 @@
])
AC_DEFUN([CHECK_CXXFLAG], [
+ cxxflag=$(echo "$1" | sed -e 's,^-Wno-,-W,')
AC_MSG_CHECKING([if g++ supports $1])
old_CXXFLAGS=$CXXFLAGS
- CXXFLAGS="$CXXFLAGS $1 -Werror"
+ CXXFLAGS="$CXXFLAGS $cxxflag -Werror"
AC_LANG_PUSH([C++])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([])],
[check_cxxflag=yes],
diff -ur groonga-storage-engine-0.4.orig/configure groonga-storage-engine-0.4/configure
--- groonga-storage-engine-0.4.orig/configure 2010-11-24 06:23:50.000000000 +0100
+++ groonga-storage-engine-0.4/configure 2011-01-01 16:01:07.000000000 +0100
@@ -13925,8 +13925,8 @@
as_fn_error "failed to run \"$ac_mysql_config\": $plugindir" "$LINENO" 5
fi
MYSQL_INC="$MYSQL_INC $($ac_mysql_config --include)"
- ac_mysql_major_version="`$ac_mysql_config --version | cut -b 1-3`"
- if test "$ac_mysql_major_version" = "5.1"; then
+ ac_mysql_major_version="`$ac_mysql_config --version | cut -b 1,3`"
+ if test $ac_mysql_major_version -lt 55; then
MYSQL51="-DMYSQL51"
fi
......@@ -3,8 +3,8 @@ parts = haproxy
[haproxy]
recipe = hexagonit.recipe.cmmi
url = http://haproxy.1wt.eu/download/1.4/src/haproxy-1.4.18.tar.gz
md5sum = 4ac88bb1a76c4b84ed4f6131183bedbe
url = http://haproxy.1wt.eu/download/1.4/src/haproxy-1.4.19.tar.gz
md5sum = 41392d738460dbf99295fd928031c6a4
configure-command = true
# If the system is running on Linux 2.6, we use "linux26" as the TARGET,
# otherwise use "generic".
......
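The comment above explains the TARGET choice for haproxy; the make-options line itself is elided in this excerpt. As a hedged illustration only, a kernel check along these lines could pick the value (platform.release() and this exact mapping are assumptions, not the profile's mechanism):

import platform

# e.g. '2.6.32-5-amd64' -> 'linux26', anything else -> 'generic'
kernel_release = platform.release()
target = 'linux26' if kernel_release.startswith('2.6') else 'generic'
print('make TARGET=%s' % target)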
[buildout]
parts =
libmemcached
[libmemcached]
<= libmemcached-0.50
[libmemcached-0.50]
<= libmemcached-common
url = http://launchpad.net/libmemcached/1.0/0.50/+download/libmemcached-0.50.tar.gz
md5sum = c8627014a37cd821cf93317b8de6f9f8
[libmemcached-0.44]
<= libmemcached-common
url = http://launchpad.net/libmemcached/1.0/0.44/+download/libmemcached-0.44.tar.gz
md5sum = e6bd825c46fa080b550f90f9001cba8c
[libmemcached-common]
recipe = hexagonit.recipe.cmmi
configure-options =
--without-docs
--without-memcached
--without-libgtest-prefix
--without-libevent-prefix
--without-libinnodb-prefix
--without-libsasl-prefix
--without-libsasl2-prefix
......@@ -12,11 +12,12 @@ find-links =
[libreoffice-bin]
recipe = slapos.recipe.build
# Here, two %s are used: the first one is for the directory name (e.g. x86_64),
# and the second one is for the filename (e.g. x86-64). A hypothetical expansion
# sketch follows this excerpt.
url = http://download.documentfoundation.org/libreoffice/stable/3.4.4/rpm/%s/LibO_3.4.4_Linux_%s_install-rpm_en-US.tar.gz
version = 3.4.5
url = http://download.documentfoundation.org/libreoffice/stable/${:version}/rpm/%s/LibO_${:version}_Linux_%s_install-rpm_en-US.tar.gz
# supported architectures md5sums
md5sum_x86 = 529c60e161d0c23405723f4a3cd1e046
md5sum_x86-64 = fc6cb85312d6e11a7ab6ddb1bc4e79cc
md5sum_x86 = 34786e6aa570782abac551ab092f3fb3
md5sum_x86-64 = 2159a50daab707c02b669a83f635ff0c
# where the office code can be found
officedir = libreoffice3.4
......
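As noted in the libreoffice-bin comment above, the url option keeps two %s placeholders that the recipe fills with an architecture directory name and the matching filename token. A hypothetical expansion sketch — the arch_map and the use of platform.machine() are assumptions for illustration, and the ${:version} substitution is shown already resolved to 3.4.5:

import platform

url_template = ('http://download.documentfoundation.org/libreoffice/stable/'
                '3.4.5/rpm/%s/LibO_3.4.5_Linux_%s_install-rpm_en-US.tar.gz')

# Hypothetical mapping from machine type to (directory name, filename token).
arch_map = {'x86_64': ('x86_64', 'x86-64'), 'i686': ('x86', 'x86')}
directory, filename_token = arch_map.get(platform.machine(), ('x86', 'x86'))
print(url_template % (directory, filename_token))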
......@@ -61,8 +61,8 @@ environment =
[mroonga-mariadb]
recipe = hexagonit.recipe.cmmi
url = https://github.com/downloads/mroonga/mroonga/mroonga-1.10.tar.gz
md5sum = 6a712b2b20eddc65d918dabd8fba590f
url = https://github.com/downloads/mroonga/mroonga/mroonga-1.11.tar.gz
md5sum = 69e56246226e0b9969ee7f99e08aa7da
configure-options =
--with-mysql-source=${mariadb:location}__compile__/mariadb-${mariadb:version}
--with-mysql-config=${mariadb:location}/bin/mysql_config
......
[buildout]
extends =
../curl/buildout.cfg
../libevent/buildout.cfg
../erlang/buildout.cfg
parts =
membase-source
membase-memcached
membase-libmemcached
membase-libconflate
membase-libvbucket
membase-moxi
membase-bucket_engine
membase-ep-engine
membase-membase-cli
membase-memcachetest
membase-ns_server
membase-vbucketmigrator
[membase]
location = ${buildout:parts-directory}/${:_buildout_section_name_}
[membase-source]
<= membase-1.7.0-source
[membase-1.7.0-source]
<= membase-source-common
url = http://files.couchbase.com/source/membase-server_src-1.7.0.tar.gz
md5sum = c933fffea299d00e43b002cb65738663
[membase-source-common]
recipe = hexagonit.recipe.download
strip-top-level-dir = true
[membase-component-common]
recipe = hexagonit.recipe.cmmi
path = ${membase-source:location}/${:component}
prefix = ${membase:location}
configure-options =
--prefix=${:prefix}
${:component-configure-options}
[membase-libmemcached]
<= membase-component-common
component = libmemcached
component-configure-options =
--disable-static
--enable-shared
--disable-dtrace
--without-docs
--disable-sasl
--with-memcached=${membase:location}/bin/memcached
[membase-memcached]
<= membase-component-common
component = memcached
patches =
configure-command =
./configure
component-configure-options =
--enable-isasl
[membase-libconflate]
<= membase-component-common
component = libconflate
component-configure-options =
--disable-static
--enable-shared
--without-check
--with-libcurl-prefix=${curl:location}
[membase-libvbucket]
<= membase-component-common
component = libvbucket
component-configure-options =
--disable-static
--enable-shared
--without-docs
--with-libhashkit-prefix=${membase:location}
[membase-moxi]
<= membase-component-common
component = moxi
component-configure-options =
--enable-moxi-libvbucket
--enable-moxi-libmemcached
--without-check
--with-libevent-prefix=${libevent:location}
--with-libmemcached-prefix=${membase:location}
--with-memcached=${membase:location}/bin/memcached
--with-libhashkit-prefix=${membase:location}
--with-libconflate-prefix=${membase:location}
--with-libvbucket-prefix=${membase:location}
[membase-bucket_engine]
<= membase-component-common
component = bucket_engine
component-configure-options =
--with-memcached=${membase-source:location}/memcached
[membase-ep-engine]
<= membase-component-common
component = ep-engine
component-configure-options =
--with-memcached=${membase-source:location}/memcached
[membase-membase-cli]
<= membase-component-common
component = membase-cli
patches =
configure-command =
./configure
component-configure-options =
[membase-memcachetest]
<= membase-component-common
component = memcachetest
component-configure-options =
--with-memcached=${membase:location}/bin/memcached
[membase-ns_server]
<= membase-component-common
component = ns_server
patches =
configure-command =
./configure
component-configure-options =
environment =
PATH=${erlang:location}/bin:%(PATH)s
[membase-vbucketmigrator]
<= membase-component-common
component = vbucketmigrator
component-configure-options =
--without-sasl
--with-isasl
[buildout]
extends =
../libmemcached/buildout.cfg
../git/buildout.cfg
../autoconf/buildout.cfg
../automake/buildout.cfg
../libtool/buildout.cfg
parts =
memstrike
[memstrike]
recipe = hexagonit.recipe.cmmi
path = ${memstrikesource:location}
configure-command =
./bootstrap
./configure
configure-options =
--prefix=${buildout:parts-directory}/${:_buildout_section_name_}
--with-libmemcached=${libmemcached:location}
environment =
PATH =${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:%(PATH)s
LDFLAGS =-Wl,-rpath=${libmemcached:location}/lib
[memstrikesource]
recipe=plone.recipe.command
location = ${buildout:parts-directory}/${:_buildout_section_name_}
stop-on-error = true
revision = 39d7a99e8bb7eea6df8b
command =
rm -rf ${:location} &&
${git:location}/bin/git clone --quiet git://github.com/frsyuki/memstrike.git ${:location} &&
cd ${:location} &&
${git:location}/bin/git checkout --quiet ${:revision}
......@@ -24,6 +24,8 @@ pkg-config-path = ${glib:location}/lib/pkgconfig/:${pcre:location}/lib/pkgconfig
libraries = ${zlib:location}/lib/:${glib:location}/lib/:${pcre:location}/lib/:${mariadb:location}/lib/mysql/
includes = ${zlib:location}/include/:${glib:location}/include/:${pcre:location}/include/:${mariadb:location}/include/mysql/
cflags = -I${zlib:location}/include/ -I${glib:location}/include/ -I${pcre:location}/include/ -I${mariadb:location}/include/mysql/
mydumper-patches =
${:_profile_base_location_}/mydumper-remove-warnings-errors.patch 917fea16b5ddea195cfa33fbd9827f57 -p1
slapos_promise =
directory:bin
file:bin/mydumper
......@@ -33,13 +35,14 @@ script =
url = self.download(self.options['url'], self.options.get('md5sum'))
extract_dir = self.extract(url)
workdir = guessworkdir(extract_dir)
self.applyPatchList(self.options['mydumper-patches'], cwd=workdir)
env['PATH'] = self.options['buildout-bin-dir'] + ':' + env.get('PATH', '')
env['PKG_CONFIG_PATH'] = self.options['pkg-config-path'] + ':' + \
env.get('PKG_CONFIG_PATH', '')
env['CMAKE_INCLUDE_PATH'] = self.options['includes']
env['CMAKE_LIBRARY_PATH'] = self.options['libraries']
env['CFLAGS'] = self.options['cflags']
command_line = [self.options['cmake-command'],
'-DWITH_ZLIB=system',
'-DCMAKE_INSTALL_PREFIX=%%s' %% self.options['location'],
'-DMYSQL_CONFIG=%%s' %% self.options['mysql-config'],
'-DCMAKE_C_FLAGS=%%s' %% self.options['cflags'],
......
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -12,7 +12,7 @@
add_subdirectory(docs)
-set(CMAKE_C_FLAGS "-Wall -Wunused -Wwrite-strings -Wno-strict-aliasing -Wextra -Wshadow -Werror -O3 -g ${MYSQL_CFLAGS}")
+set(CMAKE_C_FLAGS "-Wall -Wunused -Wwrite-strings -Wno-strict-aliasing -Wextra -Wshadow -O3 -g ${MYSQL_CFLAGS}")
include_directories(${MYDUMPER_SOURCE_DIR} ${MYSQL_INCLUDE_DIR} ${GLIB2_INCLUDE_DIR} ${PCRE_INCLUDE_DIR} ${ZLIB_INCLUDE_DIRS})
......@@ -76,7 +76,6 @@ configure-options =
--enable-assembler
--without-readline
--with-sphinx-storage-engine
--with-named-curses-libs=${ncurses:location}/lib/libncurses.so
--with-zlib-dir=${zlib:location}
make-options =
......@@ -89,5 +88,5 @@ patches =
${mysql-5.0-sphinx-patch:location}/${mysql-5.0-sphinx-patch:filename}
environment =
PATH=${senna:location}/bin:${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:${bison:location}/bin:${flex:location}/bin:%(PATH)s
CPPFLAGS=-I${senna:location}/include/senna -I${ncurses:location}/include -I${readline:location}/include
LDFLAGS=-L${senna:location}/lib -L${readline:location}/lib -Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${openssl:location}/lib -Wl,-rpath=${ncurses:location}/lib -Wl,-rpath=${readline:location}/lib
CPPFLAGS=-I${senna:location}/include/senna -I${ncurses:location}/include -I${readline5:location}/include
LDFLAGS=-L${senna:location}/lib -L${readline5:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${openssl:location}/lib -Wl,-rpath=${ncurses:location}/lib -Wl,-rpath=${readline5:location}/lib
[buildout]
parts = nspr
[nspr-pkgconfig-patch-download]
recipe = hexagonit.recipe.download
filename = nspr-4.8.6-pkgconfig-1.patch
url = http://www.linuxfromscratch.org/patches/blfs/svn/${:filename}
md5sum = 7c00beff0475314f59214842509e407f
download-only = true
[nspr]
recipe = hexagonit.recipe.cmmi
url = https://ftp.mozilla.org/pub/mozilla.org/nspr/releases/v4.8.7/src/nspr-4.8.7.tar.gz
md5sum = 97e30989a56ab813453b71261849c200
patches = ${nspr-pkgconfig-patch-download:location}/${nspr-pkgconfig-patch-download:filename}
patch-options = -p1
configure-command =
cd mozilla/nsprpub
./configure
configure-options =
--prefix=${buildout:parts-directory}/${:_buildout_section_name_}
--with-mozilla
--enable-64bit
make-options =
-C mozilla/nsprpub
[buildout]
extends =
https://svn.erp5.org/repos/public/erp5/trunk/buildout/software-profiles/zlib.cfg
nspr.cfg
sed.cfg
parts =
nss
[nss-patch]
recipe = hexagonit.recipe.download
filename = nss-3.12.9-with-nspr-4.8.7-1.patch
url = http://www.linuxfromscratch.org/patches/blfs/svn/nss-3.12.8-standalone-1.patch
md5sum = ee7b5966961bef16ca896435e78652d3
download-only = true
[nss-download]
recipe = hexagonit.recipe.download
url = ftp://ftp.mozilla.org/pub/mozilla.org/security/nss/releases/NSS_3_12_9_RTM/src/nss-3.12.9.tar.gz
md5sum = bd32f183ca28440c1744650be31a9ccc
strip-top-level-dir = true
[nss]
recipe = plone.recipe.command
source = ${nss-download:location}
destination = ${buildout:parts-directory}/${:_buildout_section_name_}
location = ${buildout:parts-directory}/${:_buildout_section_name_}
compile-location = ${buildout:parts-directory}/${:_buildout_section_name_}_compile_
stop-on-error = true
command =
rm -rf ${:destination} &&
mkdir -p ${:destination} &&
rm -rf ${:compile-location} &&
cp -R ${:source} ${:compile-location} &&
cd ${:compile-location} &&
patch -Np1 -i ${nss-patch:location}/${nss-patch:filename} &&
${sed:location}/bin/sed -i "s/ZLIB_LIBS = -lz//g" ${:compile-location}/mozilla/security/coreconf/Linux.mk &&
${sed:location}/bin/sed -i "s/# INCLUDES += -I\/usr\/include -Y\/usr\/include\/linux/INCLUDES += \$(ZLIB_INCLUDE_DIR)/g" ${:compile-location}/mozilla/security/coreconf/Linux.mk &&
gmake -C mozilla/security/nss \
USE_64=1 \
BUILD_OPT=1 \
ZLIB_INCLUDE_DIR=-I${zlib:location}/include \
ZLIB_LIBS="-lz -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib" \
NSPR_INCLUDE_DIR=${nspr:location}/include/nspr \
NSPR_LIB_DIR="${nspr:location}/lib -Wl,-rpath=${nspr:location}/lib" \
NSSUTIL_LIB_DIR="${:location}/lib -Wl,-rpath=${:location}/lib" \
nss_build_all &&
mkdir -p ${:destination}/bin &&
mkdir -p ${:destination}/include/nss3 &&
mkdir -p ${:destination}/lib/pkgconfig &&
install -v -m755 ${:compile-location}/mozilla/dist/*.OBJ/lib/*.so ${:destination}/lib &&
install -v -m644 ${:compile-location}/mozilla/dist/*.OBJ/lib/{*.chk,libcrmf.a} ${:destination}/lib &&
install -v -m755 -d ${:destination}/include/nss &&
install -v -m755 ${:compile-location}/mozilla/dist/*.OBJ/bin/{certutil,nss-config,pk12util} ${:destination}/bin &&
install -v -m644 ${:compile-location}/mozilla/dist/*.OBJ/lib/pkgconfig/nss.pc ${:destination}/lib/pkgconfig &&
cp -v -RL ${:compile-location}/mozilla/dist/{public,private}/nss/* ${:destination}/include/nss &&
chmod 644 ${:destination}/include/nss/* &&
cd ${buildout:parts-directory} &&
rm -rf ${:compile-location}
[nss-cmmi]
recipe = hexagonit.recipe.cmmi
path = ${nss-download:location}/mozilla/security/nss
configure-command = echo "No need to configure"
make-binary = gmake
make-options = USE_64=1
make-targets = nss_build_all
......@@ -5,15 +5,27 @@
[buildout]
extends =
../ca-certificates/buildout.cfg
../zlib/buildout.cfg
parts =
openssl
[openssl-nodoc.patch]
# Disable doc generation part in Makefile
recipe = hexagonit.recipe.download
md5sum = b4887a7b4e18402447bc6227d2493b92
url = ${:_profile_base_location_}/${:filename}
filename = ${:_buildout_section_name_}
download-only = true
[openssl]
recipe = hexagonit.recipe.cmmi
url = https://www.openssl.org/source/openssl-1.0.0e.tar.gz
md5sum = 7040b89c4c58c7a1016c0dfa6e821c86
url = https://www.openssl.org/source/openssl-1.0.0g.tar.gz
md5sum = 07ecbe4324f140d157478637d6beccf1
patches =
${openssl-nodoc.patch:location}/${openssl-nodoc.patch:filename}
patch-options = -p0
configure-command = ./config
configure-options =
-I${zlib:location}/include
......@@ -27,5 +39,7 @@ configure-options =
# it seems that parallel build sometimes fails for openssl.
make-options =
-j1
make-targets =
install && rm -f ${buildout:parts-directory}/${:_buildout_section_name_}/etc/ssl/certs/* && for i in ${ca-certificates:location}/certs/*/*.crt; do ln -sv $i ${buildout:parts-directory}/${:_buildout_section_name_}/etc/ssl/certs/`${buildout:parts-directory}/${:_buildout_section_name_}/bin/openssl x509 -hash -noout -in $i`.0; done; true
LDFLAGS="-Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${buildout:parts-directory}/${:_buildout_section_name_}/lib"
SHARED_LDFLAGS="-Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${buildout:parts-directory}/${:_buildout_section_name_}/lib"
--- Makefile 2012-01-18 14:42:28.000000000 +0100
+++ Makefile 2012-01-24 17:43:40.000000000 +0100
@@ -494,7 +494,7 @@
dist_pem_h:
(cd crypto/pem; $(MAKE) -e $(BUILDENV) pem.h; $(MAKE) clean)
-install: all install_docs install_sw
+install: all install_sw
install_sw:
@$(PERL) $(TOP)/util/mkdir-p.pl $(INSTALL_PREFIX)$(INSTALLTOP)/bin \
......@@ -4,8 +4,8 @@ parts =
[pcre]
recipe = hexagonit.recipe.cmmi
url = ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.12.tar.bz2
md5sum = f14a9fef3c92f3fc6c5ac92d7a2c7eb3
url = ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.21.tar.bz2
md5sum = 0a7b592bea64b7aa7f4011fc7171a730
configure-options =
--disable-static
--enable-utf8
......
......@@ -10,7 +10,7 @@ parts =
recipe = hexagonit.recipe.cmmi
depends =
${perl:version}
url = http://www.percona.com/redir/downloads/percona-toolkit/percona-toolkit-1.0.1.tar.gz
md5sum = 1d843b1b3ebd2eacfa3bf95ef2a00557
url = http://www.percona.com/redir/downloads/percona-toolkit/percona-toolkit-2.0.1.tar.gz
md5sum = 3a78c78672cb7c634bda35dfb2f817bf
configure-command =
${perl:location}/bin/perl Makefile.PL
......@@ -11,8 +11,8 @@ extends =
[pkgconfig]
recipe = hexagonit.recipe.cmmi
url = http://pkgconfig.freedesktop.org/releases/pkg-config-0.25.tar.gz
md5sum = a3270bab3f4b69b7dc6dbdacbcae9745
url = http://pkgconfig.freedesktop.org/releases/pkg-config-0.26.tar.gz
md5sum = 47525c26a9ba7ba14bf85e01509a7234
location = ${buildout:parts-directory}/${:_buildout_section_name_}
# build pkg-config twice so that the second configure can use pkg-config
# to compute GLIB_CFLAGS and GLIB_LIBS.
......@@ -27,3 +27,5 @@ environment =
PKG_CONFIG_PATH=${glib:location}/lib/pkgconfig
CPPFLAGS=-I${glib:location}/include -I${popt:location}/include
LDFLAGS=-L${gettext:location}/lib -Wl,-rpath=${gettext:location}/lib -L${glib:location}/lib -Wl,-rpath=${glib:location}/lib -L${popt:location}/lib -Wl,-rpath=${popt:location}/lib
GLIB_CFLAGS=-I${glib:location}/include/glib-2.0 -I${glib:location}/lib/glib-2.0/include
GLIB_LIBS=-L${glib:location}/lib -lglib-2.0 -lintl
[buildout]
parts =
pwgen
[pwgen]
recipe = hexagonit.recipe.cmmi
url = http://downloads.sourceforge.net/project/pwgen/pwgen/2.06/pwgen-2.06.tar.gz
md5sum = 935aebcbe610fbc9de8125e7b7d71297
......@@ -2,8 +2,15 @@
parts =
readline5
readline
extends =
../ncurses/buildout.cfg
# readline-5.x is still used for GPL2-only software.
[readline5]
recipe = hexagonit.recipe.cmmi
url = http://ftp.gnu.org/gnu/readline/readline-5.2.tar.gz
md5sum = e39331f32ad14009b9ff49cc10c5e751
configure-options =
--enable-multibyte
--disable-static
# readline-5.x is still used for GPL2-only software.
[readline5]
......@@ -22,7 +29,5 @@ recipe = hexagonit.recipe.cmmi
url = http://ftp.gnu.org/gnu/readline/readline-6.2.tar.gz
md5sum = 67948acb2ca081f23359d0256e9a271c
configure-options =
--enable-multibyte
--disable-static
--with-ncurses=${ncurses:location}
environment =
LDFLAGS =-Wl,-rpath=${ncurses:location}/lib
......@@ -6,7 +6,5 @@ parts =
recipe = hexagonit.recipe.cmmi
url = ftp://ftp.gnu.org/gnu/sed/sed-4.2.1.tar.gz
md5sum = f0fd4d7da574d4707e442285fd2d3b86
configure-options =
--prefix=${buildout:parts-directory}/${:_buildout_section_name_}
environment =
LDFLAGS =-Wl,--as-needed
[buildout]
parts = sheepstrike
[sheepstrike]
recipe = hexagonit.recipe.cmmi
url = https://gitorious.org/sheepstrike/sheepstrike/archive-tarball/0.1
md5sum = 2c5009eb7c32d7ba5d270d0b88d7e5ab
prefix = ${buildout:parts-directory}/${:_buildout_section_name_}
configure-options = --prefix=${:prefix}
configure-command =
./bootstrap
./configure
From 50ec7439e80bd6a77346dc6482895e481d8cd43a Mon Sep 17 00:00:00 2001
From: Antoine Catton <acatton@tiolive.com>
Date: Tue, 10 Jan 2012 18:30:20 +0100
Subject: [PATCH] Switch to IPv6
---
libhttp/http.h | 4 ++--
libhttp/httpconnection.c | 11 ++++++++++-
libhttp/server.c | 33 +++++++++++++++++++--------------
libhttp/server.h | 6 +++---
shellinabox/shellinaboxd.c | 14 +++++++-------
5 files changed, 41 insertions(+), 27 deletions(-)
diff --git a/libhttp/http.h b/libhttp/http.h
index e7840fa..5cd61e3 100644
--- a/libhttp/http.h
+++ b/libhttp/http.h
@@ -66,8 +66,8 @@ typedef struct ServerConnection ServerConnection;
typedef struct Server Server;
typedef struct URL URL;
-Server *newCGIServer(int localhostOnly, int portMin, int portMax, int timeout);
-Server *newServer(int localhostOnly, int port);
+Server *newCGIServer(char *ipv6, int portMin, int portMax, int timeout);
+Server *newServer(char *ipv6, int port);
void deleteServer(Server *server);
int serverGetListeningPort(Server *server);
int serverGetFd(Server *server);
diff --git a/libhttp/httpconnection.c b/libhttp/httpconnection.c
index c8e69f6..cae467f 100644
--- a/libhttp/httpconnection.c
+++ b/libhttp/httpconnection.c
@@ -823,8 +823,17 @@ static int httpHandleCommand(struct HttpConnection *http,
const char *host = getFromHashMap(&http->header,
"host");
if (host) {
+ int brackets = 0; // For IPv6 hosts
for (char ch; (ch = *host) != '\000'; host++) {
- if (ch == ':') {
+ if (ch == '[') {
+ brackets = 1;
+ break;
+ }
+ if (ch == ']') {
+ brackets = 0;
+ break;
+ }
+ if (!brackets && ch == ':') {
*(char *)host = '\000';
break;
}
diff --git a/libhttp/server.c b/libhttp/server.c
index f52a269..2c30bd8 100644
--- a/libhttp/server.c
+++ b/libhttp/server.c
@@ -170,19 +170,19 @@ static int serverQuitHandler(struct HttpConnection *http, void *arg) {
return HTTP_DONE;
}
-struct Server *newCGIServer(int localhostOnly, int portMin, int portMax,
+struct Server *newCGIServer(char *ipv6, int portMin, int portMax,
int timeout) {
struct Server *server;
check(server = malloc(sizeof(struct Server)));
- initServer(server, localhostOnly, portMin, portMax, timeout);
+ initServer(server, ipv6, portMin, portMax, timeout);
return server;
}
-struct Server *newServer(int localhostOnly, int port) {
- return newCGIServer(localhostOnly, port, port, -1);
+struct Server *newServer(char *ipv6, int port) {
+ return newCGIServer(ipv6, port, port, -1);
}
-void initServer(struct Server *server, int localhostOnly, int portMin,
+void initServer(struct Server *server, char *ipv6, int portMin,
int portMax, int timeout) {
server->looping = 0;
server->exitAll = 0;
@@ -192,14 +192,19 @@ void initServer(struct Server *server, int localhostOnly, int portMin,
server->numConnections = 0;
int true = 1;
- server->serverFd = socket(PF_INET, SOCK_STREAM, 0);
+ server->serverFd = socket(PF_INET6, SOCK_STREAM, 0);
check(server->serverFd >= 0);
check(!setsockopt(server->serverFd, SOL_SOCKET, SO_REUSEADDR,
&true, sizeof(true)));
- struct sockaddr_in serverAddr = { 0 };
- serverAddr.sin_family = AF_INET;
- serverAddr.sin_addr.s_addr = htonl(localhostOnly
- ? INADDR_LOOPBACK : INADDR_ANY);
+ struct sockaddr_in6 serverAddr = { 0 };
+ serverAddr.sin6_family = AF_INET6;
+ if (ipv6 != NULL) {
+ if (!inet_pton(AF_INET6, ipv6, serverAddr.sin6_addr.s6_addr)) {
+ fatal("Bad ipv6 address");
+ }
+ } else {
+ serverAddr.sin6_addr = in6addr_any;
+ }
// Linux unlike BSD does not have support for picking a local port range.
// So, we have to randomly pick a port from our allowed port range, and then
@@ -214,14 +219,14 @@ void initServer(struct Server *server, int localhostOnly, int portMin,
int portStart = rand() % (portMax - portMin + 1) + portMin;
for (int p = 0; p <= portMax-portMin; p++) {
int port = (p+portStart)%(portMax-portMin+1)+ portMin;
- serverAddr.sin_port = htons(port);
+ serverAddr.sin6_port = htons(port);
if (!bind(server->serverFd, (struct sockaddr *)&serverAddr,
sizeof(serverAddr))) {
break;
}
- serverAddr.sin_port = 0;
+ serverAddr.sin6_port = 0;
}
- if (!serverAddr.sin_port) {
+ if (!serverAddr.sin6_port) {
fatal("Failed to find any available port");
}
}
@@ -231,7 +236,7 @@ void initServer(struct Server *server, int localhostOnly, int portMin,
check(!getsockname(server->serverFd, (struct sockaddr *)&serverAddr,
&socklen));
check(socklen == sizeof(serverAddr));
- server->port = ntohs(serverAddr.sin_port);
+ server->port = ntohs(serverAddr.sin6_port);
info("Listening on port %d", server->port);
check(server->pollFds = malloc(sizeof(struct pollfd)));
diff --git a/libhttp/server.h b/libhttp/server.h
index bb879fb..5ffb698 100644
--- a/libhttp/server.h
+++ b/libhttp/server.h
@@ -78,10 +78,10 @@ struct Server {
struct SSLSupport ssl;
};
-struct Server *newCGIServer(int localhostOnly, int portMin, int portMax,
+struct Server *newCGIServer(char *ipv6, int portMin, int portMax,
int timeout);
-struct Server *newServer(int localhostOnly, int port);
-void initServer(struct Server *server, int localhostOnly, int portMin,
+struct Server *newServer(char *ipv6, int port);
+void initServer(struct Server *server, char *ipv6, int portMin,
int portMax, int timeout);
void destroyServer(struct Server *server);
void deleteServer(struct Server *server);
diff --git a/shellinabox/shellinaboxd.c b/shellinabox/shellinaboxd.c
index dcf05ff..2d1d758 100644
--- a/shellinabox/shellinaboxd.c
+++ b/shellinabox/shellinaboxd.c
@@ -80,7 +80,7 @@
static int port;
static int portMin;
static int portMax;
-static int localhostOnly = 0;
+static char *ipv6 = NULL;
static int noBeep = 0;
static int numericHosts = 0;
static int enableSSL = 1;
@@ -747,7 +747,7 @@ static void usage(void) {
" -g, --group=GID switch to this group (default: %s)\n"
" -h, --help print this message\n"
" --linkify=[none|normal|agressive] default is \"normal\"\n"
- " --localhost-only only listen on 127.0.0.1\n"
+ " --ipv6 listen on a specific ipv6\n"
" --no-beep suppress all audio output\n"
" -n, --numeric do not resolve hostnames\n"
" -p, --port=PORT select a port (default: %d)\n"
@@ -839,7 +839,7 @@ static void parseArgs(int argc, char * const argv[]) {
{ "static-file", 1, 0, 'f' },
{ "group", 1, 0, 'g' },
{ "linkify", 1, 0, 0 },
- { "localhost-only", 0, 0, 0 },
+ { "ipv6", 1, 0, 0 },
{ "no-beep", 0, 0, 0 },
{ "numeric", 0, 0, 'n' },
{ "port", 1, 0, 'p' },
@@ -1001,8 +1001,8 @@ static void parseArgs(int argc, char * const argv[]) {
"\"none\", \"normal\", or \"aggressive\".");
}
} else if (!idx--) {
- // Localhost Only
- localhostOnly = 1;
+ // IPv6
+ ipv6 = optarg;
} else if (!idx--) {
// No Beep
noBeep = 1;
@@ -1197,7 +1197,7 @@ int main(int argc, char * const argv[]) {
// Create a new web server
Server *server;
if (port) {
- check(server = newServer(localhostOnly, port));
+ check(server = newServer(ipv6, port));
dropPrivileges();
setUpSSL(server);
} else {
@@ -1217,7 +1217,7 @@ int main(int argc, char * const argv[]) {
_exit(0);
}
check(!NOINTR(close(fds[0])));
- check(server = newCGIServer(localhostOnly, portMin, portMax,
+ check(server = newCGIServer(ipv6, portMin, portMax,
AJAX_TIMEOUT));
cgiServer = server;
setUpSSL(server);
--
1.7.6.5
From eee6f7180dc5dd4523264e7ce0721945ab2b78a1 Mon Sep 17 00:00:00 2001
From: Antoine Catton <acatton@tiolive.com>
Date: Wed, 11 Jan 2012 17:32:15 +0100
Subject: [PATCH 2/2] Allow to run entire command path.
---
shellinabox/launcher.c | 3 +--
1 files changed, 1 insertions(+), 2 deletions(-)
diff --git a/shellinabox/launcher.c b/shellinabox/launcher.c
index fb8a133..e116a75 100644
--- a/shellinabox/launcher.c
+++ b/shellinabox/launcher.c
@@ -1226,8 +1226,7 @@ static void execService(int width, int height, struct Service *service,
extern char **environ;
environ = environment;
- char *cmd = strrchr(argv[0], '/');
- execvp(cmd ? cmd + 1: argv[0], argv);
+ execvp(argv[0], argv);
}
void setWindowSize(int pty, int width, int height) {
--
1.7.6.5
[buildout]
extends =
../zlib/buildout.cfg
../openssl/buildout.cfg
../patch/buildout.cfg
parts = shellinabox
[shellinabox-full-path-patch]
recipe = hexagonit.recipe.download
filename = 0002-Allow-to-run-entire-command-path.patch
url = ${:_profile_base_location_}/${:filename}
download-only = true
[shellinabox-ipv6-patch]
recipe = hexagonit.recipe.download
filename = 0001-Switch-to-IPv6.patch
url = ${:_profile_base_location_}/${:filename}
download-only = true
[shellinabox]
recipe = hexagonit.recipe.cmmi
url = http://shellinabox.googlecode.com/files/shellinabox-2.10.tar.gz
md5sum = 0e144910d85d92edc54702ab9c46f032
environment =
CFLAGS = -I${zlib:location}/include -I${openssl:location}/include
LDFLAGS = -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib -L${openssl:location}/lib -Wl,-rpath=${openssl:location}/lib
PKG_CONFIG_PATH = ${openssl:location}/lib/pkgconfig/
patch-binary = ${patch:location}/bin/patch
patch-options = -p1
patches =
${shellinabox-ipv6-patch:location}/${shellinabox-ipv6-patch:filename}
${shellinabox-full-path-patch:location}/${shellinabox-full-path-patch:filename}
......@@ -112,23 +112,25 @@ scripts =
slapproxy = slapos.proxy:main
[versions]
zc.buildout = 1.6.0-dev-SlapOS-003
zc.buildout = 1.6.0-dev-SlapOS-004
# Generated by buildout-versions
Jinja2 = 2.6
Werkzeug = 0.8.1
Werkzeug = 0.8.3
buildout-versions = 1.7
collective.recipe.template = 1.9
hexagonit.recipe.cmmi = 1.5.0
lxml = 2.3.2
meld3 = 0.6.7
lxml = 2.3.3
meld3 = 0.6.8
netaddr = 0.7.6
setuptools = 0.6c12dev-r88846
slapos.core = 0.20
slapos.libnetworkcache = 0.10
slapos.core = 0.22
slapos.libnetworkcache = 0.12
xml-marshaller = 0.9.7
z3c.recipe.scripts = 1.0.1
zc.recipe.egg = 1.3.2
# Required by:
# slapos.core==0.20
# slapos.core==0.22
Flask = 0.8
# Required by:
......@@ -136,13 +138,21 @@ Flask = 0.8
hexagonit.recipe.download = 1.5.0
# Required by:
# slapos.core==0.20
netifaces = 0.6
# slapos.core==0.22
netifaces = 0.8
# Required by:
# slapos.core==0.22
# slapos.libnetworkcache==0.12
# supervisor==3.0a12
# zc.buildout==1.6.0-dev-SlapOS-004
# zope.interface==3.8.0
setuptools = 0.6c12dev-r88846
# Required by:
# slapos.core==0.20
supervisor = 3.0a10
# slapos.core==0.22
supervisor = 3.0a12
# Required by:
# slapos.core==0.20
# slapos.core==0.22
zope.interface = 3.8.0
# http://www.sphinxsearch.com/bugs/view.php?id=550
# 0000550: Can not make libsphinxclient
--- sphinx-1.10-beta/api/libsphinxclient/sphinxclient.c 2010-07-15 13:05:40.000000000 +0200
+++ sphinx-1.10-beta/api/libsphinxclient/sphinxclient.c 2010-07-21 20:43:26.760024489 +0200
@@ -1355,11 +1355,13 @@
optval = 1;
#ifndef _WIN32
+ #ifdef SO_NOSIGPIPE
if ( setsockopt ( sock, SOL_SOCKET, SO_NOSIGPIPE, (void *)&optval, (socklen_t)sizeof(optval) ) < 0 )
{
set_error ( client, "setsockopt() failed: %s", sock_error() );
return -1;
}
+ #endif
#endif
res = connect ( sock, (struct sockaddr*)&sa, sizeof(sa) );
......@@ -5,8 +5,8 @@ parts =
[sqlite3]
recipe = hexagonit.recipe.cmmi
url = http://www.sqlite.org/sqlite-autoconf-3070900.tar.gz
md5sum = dce303524736fe89a76b8ed29d566352
url = http://www.sqlite.org/sqlite-autoconf-3071000.tar.gz
md5sum = 9ed2ca93577b58cfa0d01f64b9312ab9
configure-options =
--disable-static
--enable-readline
......
......@@ -17,8 +17,8 @@ filename = stunnel-4-hooks.py
[stunnel-4]
recipe = hexagonit.recipe.cmmi
url = http://mirror.bit.nl/stunnel/stunnel-4.50.tar.gz
md5sum = d68b4565294496a8bdf23c728a679f53
url = http://mirror.bit.nl/stunnel/stunnel-4.52.tar.gz
md5sum = f5e713dda0e8efa659f372832ecd0c2c
pre-configure-hook = ${stunnel-4-hook-download:location}/${stunnel-4-hook-download:filename}:pre_configure_hook
configure-options =
--enable-ipv6
......
......@@ -4,8 +4,8 @@ parts =
[zabbix-agent]
recipe = hexagonit.recipe.cmmi
url = http://prdownloads.sourceforge.net/zabbix/zabbix-1.8.9.tar.gz?download
md5sum = fc8c7fad2943dea73d4f2338cd216715
url = http://prdownloads.sourceforge.net/zabbix/zabbix-1.8.10.tar.gz?download
md5sum = 7e89f80c1822787c0831f7c0dbefcd7b
configure-options =
--enable-agent
--enable-ipv6
......@@ -53,8 +53,8 @@ setup(name=name,
'dropbear.add_authorized_key = slapos.recipe.dropbear:AddAuthorizedKey',
'dropbear.client = slapos.recipe.dropbear:Client',
'duplicity = slapos.recipe.duplicity:Recipe',
'erp5scalabilitytestbed = slapos.recipe.erp5scalabilitytestbed:Recipe',
'equeue = slapos.recipe.equeue:Recipe',
'erp5 = slapos.recipe.erp5:Recipe',
'erp5testnode = slapos.recipe.erp5testnode:Recipe',
'generate.mac = slapos.recipe.generatemac:Recipe',
'generic.kvm = slapos.recipe.generic_kvm:Recipe',
......@@ -91,9 +91,12 @@ setup(name=name,
'pbs = slapos.recipe.pbs:Recipe',
'publish = slapos.recipe.publish:Recipe',
'publishurl = slapos.recipe.publishurl:Recipe',
'pwgen = slapos.recipe.pwgen:Recipe',
'proactive = slapos.recipe.proactive:Recipe',
'request = slapos.recipe.request:Recipe',
'sheepdogtestbed = slapos.recipe.sheepdogtestbed:SheepDogTestBed',
'shell = slapos.recipe.shell:Recipe',
'shellinabox = slapos.recipe.shellinabox:Recipe',
'symbolic.link = slapos.recipe.symbolic_link:Recipe',
'softwaretype = slapos.recipe.softwaretype:Recipe',
'siptester = slapos.recipe.siptester:SipTesterRecipe',
......@@ -117,5 +120,9 @@ setup(name=name,
'tidstorage = slapos.recipe.tidstorage:Recipe',
'erp5.update = slapos.recipe.erp5_update:Recipe',
'erp5.test = slapos.recipe.erp5_test:Recipe',
]},
],
'slapos.recipe.nosqltestbed.plugin': [
'kumo = slapos.recipe.nosqltestbed.kumo:KumoTestBed',
],
},
)
......@@ -144,6 +144,9 @@ class Client(GenericBaseRecipe):
self.createDirectory(self.options['home'], '.ssh')
dropbear_cmd = [self.options['dbclient-binary'], '-T']
if self.optionIsTrue('force-host-key', default=False):
dropbear_cmd.extend(['-y'])
if 'identity-file' in self.options:
dropbear_cmd.extend(['-i', self.options['identity-file']])
......
##############################################################################
#
# Copyright (c) 2011 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs. End users who
# are looking for a ready-to-use solution with commercial guarantees and
# support are strongly advised to contract a Free Software Service Company.
#
# This program is Free Software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import pkg_resources
from slapos.recipe.librecipe import BaseSlapRecipe
class Recipe(BaseSlapRecipe):
def _install(self):
self.parameter_dict = self.computer_partition.getInstanceParameterDict()
software_type = self.parameter_dict.get('slap_software_type', 'default')
if software_type is None or software_type == 'RootSoftwareInstance':
software_type = 'erp5_scalability_cloud'
if "run_%s" % software_type in dir(self) and \
callable(getattr(self, "run_%s" % software_type)):
return getattr(self, "run_%s" % software_type)()
else:
raise NotImplementedError("Do not support %s" % software_type)
def run_erp5_scalability_cloud(self):
config = {}
config.update(self.options)
config.update(self.parameter_dict)
config['address'] = self.getGlobalIPv6Address()
config['report_path'] = self.log_directory
config.setdefault('user_range_increment', 1)
config['software_release_url'] = self.software_release_url
config['server_url'] = self.server_url
config['key_file'] = self.key_file
config['cert_file'] = self.cert_file
config['computer_id'] = self.computer_id
config['computer_partition_id'] = self.computer_partition_id
config['plugin_name'] = 'erp5'
if ',' in config['nb_users']:
config['nb_tester_init'] = config['nb_users'].split(',')[0]
config['nb_tester_max'] = config['nb_users'].split(',')[1]
else:
config['nb_tester_init'] = config['nb_users']
config['nb_tester_max'] = config['nb_users']
connection = {}
connection['url'] = 'http://['+config['address']+']:5000/'
connection['erp5_url'] = config['erp5_url']
connection['repeat'] = config['repeat']
connection['nb_users'] = config['nb_users']
connection['benchmark_suites'] = config['benchmark_suites']
connection['erp5_publish_url'] = config.get('erp5_publish_url', '')
connection['erp5_publish_project'] = config.get('erp5_publish_project', '')
self.computer_partition.setConnectionDict(connection)
nosqltester_manager_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join('template', 'erp5tester_manager_run.in'))
nosqltester_manager_runner_path = self.createRunningWrapper("erp5tester_manager",
self.substituteTemplate(nosqltester_manager_wrapper_template_location, config))
return [nosqltester_manager_runner_path]
def run_erp5_tester(self):
tester_config = {}
tester_config.update(self.options)
tester_config.update(self.parameter_dict)
tester_config['tester_address'] = self.getGlobalIPv6Address()
tester_config['report_path'] = self.log_directory
tester_config['filename_prefix'] = '%s-%s' % (self.computer_id,
self.computer_partition_id)
tester_connection = {'url': 'http://[%s]:5000/' % \
tester_config['tester_address']}
self.computer_partition.setConnectionDict(tester_connection)
tester_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join('template', 'nosqltester_run.in'))
tester_runner_path = self.createRunningWrapper("nosqltester",
self.substituteTemplate(tester_wrapper_template_location, tester_config))
return [tester_runner_path]
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(scalability_tester_manager_binary)s %(plugin_name)s -a %(address)s \
-r %(report_path)s -m %(nb_tester_init)s -t %(nb_tester_max)s \
-i %(user_range_increment)s --erp5-publish-url "%(erp5_publish_url)s" \
--erp5-publish-project "%(erp5_publish_project)s" %(software_release_url)s \
%(server_url)s "%(key_file)s" "%(cert_file)s" %(computer_id)s \
%(computer_partition_id)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
cd %(benchmark_suite_path)s && \
exec %(scalability_tester_binary)s -m %(host_address)s -a %(tester_address)s \
-r %(report_path)s -l %(report_path)s --filename-prefix %(filename_prefix)s \
--repeat %(repeat)s %(erp5_url)s 1 %(benchmark_suites)s
......@@ -24,86 +24,80 @@
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from slapos.recipe.librecipe import BaseSlapRecipe
import ConfigParser
import json
import os
import pkg_resources
import zc.buildout
import zc.recipe.egg
import sys
import StringIO
class Recipe(BaseSlapRecipe):
def __init__(self, buildout, name, options):
self.egg = zc.recipe.egg.Egg(buildout, options['recipe'], options)
BaseSlapRecipe.__init__(self, buildout, name, options)
from slapos.recipe.librecipe import GenericBaseRecipe
def _install(self):
self.requirements, self.ws = self.egg.working_set()
class Recipe(GenericBaseRecipe):
def install(self):
path_list = []
CONFIG = dict()
CONFIG['slapos_directory'] = self.createDataDirectory('slapos')
CONFIG['working_directory'] = self.createDataDirectory('testnode')
CONFIG['test_suite_directory'] = self.createDataDirectory('test_suite')
CONFIG['proxy_host'] = self.getLocalIPv4Address()
CONFIG['proxy_port'] = '5000'
CONFIG['log_directory'] = self.createDataDirectory('testnodelog')
CONFIG['run_directory'] = self.createDataDirectory('testnoderun')
CONFIG['test_suite_title'] = self.parameter_dict.get('test_suite_title')
CONFIG['test_node_title'] = self.parameter_dict.get('test_node_title')
CONFIG['test_suite'] = self.parameter_dict.get('test_suite')
CONFIG['node_quantity'] = self.parameter_dict.get('node_quantity', '1')
CONFIG['project_title'] = self.parameter_dict.get('project_title')
CONFIG['ipv4_address'] = self.getLocalIPv4Address()
CONFIG['ipv6_address'] = self.getGlobalIPv6Address()
CONFIG['test_suite_master_url'] = self.parameter_dict.get(
'test_suite_master_url', None)
CONFIG['git_binary'] = self.options['git_binary']
CONFIG['slapgrid_partition_binary'] = self.options[
'slapgrid_partition_binary']
CONFIG['slapgrid_software_binary'] = self.options[
'slapgrid_software_binary']
CONFIG['slapproxy_binary'] = self.options['slapproxy_binary']
CONFIG['zip_binary'] = self.options['zip_binary']
options = self.options.copy()
del options['recipe']
CONFIG = {k.replace('-', '_'): v for k, v in options.iteritems()}
CONFIG['PATH'] = os.environ['PATH']
additional_bt5_repository_id = \
self.parameter_dict.get('additional_bt5_repository_id')
CONFIG['bt5_path'] = None
if additional_bt5_repository_id is not None:
CONFIG['bt5_path'] = ""
additional_bt5_repository_id_list = additional_bt5_repository_id.split(",")
for id in additional_bt5_repository_id_list:
id_path = os.path.join(CONFIG['slapos_directory'], id)
if CONFIG['bt5_path']:
additional_bt5_repository_id_list = CONFIG['bt5_path'].split(",")
CONFIG['bt5_path'] = ''
for bt5_repository_id in additional_bt5_repository_id_list:
id_path = os.path.join(CONFIG['slapos_directory'], bt5_repository_id)
bt_path = os.path.join(id_path, "bt5")
CONFIG['bt5_path'] += "%s,%s," % (id_path, bt_path)
CONFIG['instance_dict'] = ''
if 'instance_dict' in self.parameter_dict:
CONFIG['instance_dict'] = '[instance_dict]\n'
for k,v in eval(self.parameter_dict['instance_dict']).iteritems():
CONFIG['instance_dict'] += '%s = %s\n' % (k,v)
CONFIG['repository_list'] = ''
if self.options['instance-dict']:
config_instance_dict = ConfigParser.ConfigParser()
config_instance_dict.add_section('instance_dict')
instance_dict = json.loads(self.options['instance-dict'])
for k ,v in instance_dict.iteritems():
config_instance_dict.set('instance_dict', k, v)
value = StringIO.StringIO()
config_instance_dict.write(value)
CONFIG['instance_dict'] = value.getvalue()
vcs_repository_list = json.loads(self.options['repository-list'])
config_repository_list = ConfigParser.ConfigParser()
i = 0
for repository in eval(self.parameter_dict['vcs_repository_list']):
CONFIG['repository_list'] += '[vcs_repository_%s]\n' % i
CONFIG['repository_list'] += 'url = %s\n' % repository['url']
for repository in vcs_repository_list:
section_name = 'vcs_repository_%d' % i
config_repository_list.add_section(section_name)
config_repository_list.set(section_name, 'url', repository['url'])
if 'branch' in repository:
CONFIG['repository_list'] += 'branch = %s\n' % repository['branch']
config_repository_list.set(section_name, 'branch', repository['branch'])
if 'profile_path' in repository:
CONFIG['repository_list'] += 'profile_path = %s\n' % repository[
'profile_path']
config_repository_list.set(section_name, 'profile_path',
repository['profile_path'])
if 'buildout_section_id' in repository:
CONFIG['repository_list'] += 'buildout_section_id = %s\n' % repository[
'buildout_section_id']
CONFIG['repository_list'] += '\n'
config_repository_list.set(section_name, 'buildout_section_id',
repository['buildout_section_id'])
i += 1
testnode_config = self.createConfigurationFile('erp5testnode.cfg',
self.substituteTemplate(pkg_resources.resource_filename(__name__,
'template/erp5testnode.cfg.in'), CONFIG))
testnode_log = os.path.join(self.log_directory, 'erp5testnode.log')
wrapper = zc.buildout.easy_install.scripts([('erp5testnode',
'slapos.recipe.librecipe.execute', 'executee')], self.ws, sys.executable,
self.wrapper_directory, arguments=[[self.options['testnode'], '-l',
testnode_log, testnode_config], {'GIT_SSL_NO_VERIFY': '1'}])[0]
path_list.append(testnode_config)
path_list.append(wrapper)
value = StringIO.StringIO()
config_repository_list.write(value)
CONFIG['repository_list'] = value.getvalue()
configuration_file = self.createFile(
self.options['configuration-file'],
self.substituteTemplate(
self.getTemplateFilename('erp5testnode.cfg.in'),
CONFIG
),
)
path_list.append(configuration_file)
path_list.append(
self.createPythonScript(
self.options['wrapper'],
'slapos.recipe.librecipe.execute.executee',
[ # Executable
[ self.options['testnode'], '-l', self.options['log-file'],
configuration_file],
# Environment
{
'GIT_SSL_NO_VERIFY': '1',
}
],
)
)
return path_list
......@@ -101,14 +101,18 @@ class Recipe(GenericSlapRecipe):
part_list.append(zope_id)
part_list.append('logrotate-entry-%s' % zope_id)
output += snippet_zope % dict(zope_thread_amount=1, zope_id=zope_id,
zope_port=current_zope_port, zope_timeserver=True, **zope_dict)
zope_port=current_zope_port, zope_timeserver=True,
longrequest_logger_file='', longrequest_logger_timeout='',
longrequest_logger_interval='', **zope_dict)
# always one admin node
current_zope_port += 1
zope_id = 'zope-admin'
part_list.append(zope_id)
part_list.append('logrotate-entry-%s' % zope_id)
output += snippet_zope % dict(zope_thread_amount=1, zope_id=zope_id,
zope_port=current_zope_port, zope_timeserver=False, **zope_dict)
zope_port=current_zope_port, zope_timeserver=False,
longrequest_logger_file='', longrequest_logger_timeout='',
longrequest_logger_interval='', **zope_dict)
# handle activity key
for q in range(1, json_data['activity']['zopecount'] + 1):
current_zope_port += 1
......@@ -116,7 +120,9 @@ class Recipe(GenericSlapRecipe):
part_list.append(part_name)
part_list.append('logrotate-entry-%s' % part_name)
output += snippet_zope % dict(zope_thread_amount=1, zope_id=part_name,
zope_port=current_zope_port, zope_timeserver=True, **zope_dict)
zope_port=current_zope_port, zope_timeserver=True,
longrequest_logger_file='', longrequest_logger_timeout='',
longrequest_logger_interval='', **zope_dict)
# handle backend key
snippet_backend = open(self.options['snippet-backend']).read()
publish_url_list = []
......@@ -127,9 +133,22 @@ class Recipe(GenericSlapRecipe):
part_name = 'zope-%s-%s' % (backend_name, q)
part_list.append(part_name)
part_list.append('logrotate-entry-%s' % part_name)
longrequest_logger = backend_configuration.get("longrequest-logger", None)
if longrequest_logger is not None:
longrequest_part_name = '%s-longrequest' % part_name
longrequest_logger_file = '${basedirectory:log}/%s.log' \
% longrequest_part_name
longrequest_logger_timeout = longrequest_logger.get('timeout', '4')
longrequest_logger_interval = longrequest_logger.get('interval', '2')
else:
longrequest_logger_file = longrequest_logger_timeout = \
longrequest_logger_interval = ''
output += snippet_zope % dict(
zope_thread_amount=backend_configuration['thread-amount'],
zope_id=part_name, zope_port=current_zope_port, zope_timeserver=False,
longrequest_logger_file=longrequest_logger_file,
longrequest_logger_timeout=longrequest_logger_timeout,
longrequest_logger_interval=longrequest_logger_interval,
**zope_dict)
haproxy_backend_list.append('${%(part_name)s:ip}:${%(part_name)s:port}' % dict(part_name=part_name))
# now generate backend access
......
......@@ -117,6 +117,20 @@ class Recipe(GenericBaseRecipe):
PATH=self.options['bin-path'],
TIMEZONE=self.options['timezone'],
)
# longrequestlogger product which requires environment settings
longrequest_logger_file = self.options.get('longrequest-logger-file', None)
longrequest_logger_timeout = \
self.options.get('longrequest-logger-timeout', None)
longrequest_logger_interval = \
self.options.get('longrequest-logger-interval', None)
if longrequest_logger_file:
# add needed zope configuration
zope_environment.update(
**dict(longrequestlogger_file = longrequest_logger_file,
longrequestlogger_timeout = longrequest_logger_timeout,
longrequestlogger_interval = longrequest_logger_interval))
# configure default Zope2 zcml
open(self.options['site-zcml'], 'w').write(open(self.getTemplateFilename(
'site.zcml')).read())
......
......@@ -34,7 +34,6 @@ class GenericSlapRecipe(GenericBaseRecipe):
def __init__(self, buildout, name, options):
"""Default initialisation"""
options['eggs'] = 'slapos.cookbook'
GenericBaseRecipe.__init__(self, buildout, name, options)
self.slap = slap.slap()
......
......@@ -25,202 +25,31 @@
#
##############################################################################
import os
import urllib
import urllib2
import sys
import pkg_resources
from logging import Formatter
from slapos.recipe.librecipe import BaseSlapRecipe
class NoSQLTestBed(BaseSlapRecipe):
def _install(self):
self.parameter_dict = self.computer_partition.getInstanceParameterDict()
try:
entry_point = pkg_resources.iter_entry_points(group='slapos.recipe.nosqltestbed.plugin',
name=self.parameter_dict.get('plugin', 'kumo')).next()
plugin_class = entry_point.load()
testbed = plugin_class()
except:
print Formatter().formatException(sys.exc_info())
return None
software_type = self.parameter_dict.get('slap_software_type', 'default')
if software_type is None or software_type == 'RootSoftwareInstance':
software_type = 'kumo_cloud'
if "run_%s" % software_type in dir(self) and \
callable(getattr(self, "run_%s" % software_type)):
return getattr(self, "run_%s" % software_type)()
software_type = 'default'
if "run_%s" % software_type in dir(testbed) and \
callable(getattr(testbed, "run_%s" % software_type)):
return getattr(testbed, "run_%s" % software_type)(self)
else:
raise NotImplementedError("Do not support %s" % software_type)
def run_kumo_cloud(self):
""" Deploy kumofs systeom on a cloud. """
kumo_cloud_config = {}
kumo_cloud_config.update(self.options)
kumo_cloud_config.update(self.parameter_dict)
kumo_cloud_config['address'] = self.getGlobalIPv6Address()
kumo_cloud_config['report_path'] = self.log_directory
if 'nb_server_max' not in kumo_cloud_config:
kumo_cloud_config['nb_server_max'] = 3
if 'nb_tester_max' not in kumo_cloud_config:
kumo_cloud_config['nb_tester_max'] = 3
if 'nb_thread' not in kumo_cloud_config:
kumo_cloud_config['nb_thread'] = 1
if 'nb_request' not in kumo_cloud_config:
kumo_cloud_config['nb_request'] = 1000
kumo_cloud_config['software_release_url'] = self.software_release_url
kumo_cloud_config['server_url'] = self.server_url
kumo_cloud_config['key_file'] = self.key_file
kumo_cloud_config['cert_file'] = self.cert_file
kumo_cloud_config['computer_id'] = self.computer_id
kumo_cloud_config['computer_partition_id'] = self.computer_partition_id
kumo_cloud_config['plugin_name'] = 'kumo'
kumo_cloud_connection = {}
kumo_cloud_connection['url'] = "http://["+kumo_cloud_config['address']+"]:5000/"
self.computer_partition.setConnectionDict(kumo_cloud_connection)
nosqltester_manager_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumotester_manager_run.in'))
nosqltester_manager_runner_path = self.createRunningWrapper("kumotester_manager",
self.substituteTemplate(nosqltester_manager_wrapper_template_location, kumo_cloud_config))
return [nosqltester_manager_runner_path]
def run_all(self):
""" Runs all services on one machine. """
all_config = {}
all_config.update(self.options)
ipaddress = "[%s]" % self.getGlobalIPv6Address()
all_config['manager_address'] = ipaddress
all_config['manager_port'] = 19700
all_config['server_address'] = ipaddress
all_config['server_port'] = 19800
all_config['server_listen_port'] = 19900
all_config['server_storage'] = os.path.join(self.data_root_directory, "kumodb.tch")
all_config['gateway_address'] = ipaddress
all_config['gateway_port'] = 11411
all_config['manager_log'] = os.path.join(self.log_directory, "kumo-manager.log")
all_config['server_log'] = os.path.join(self.log_directory, "kumo-server.log")
all_config['gateway_log'] = os.path.join(self.log_directory, "kumo-gateway.log")
manager_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_manager_run.in'))
manager_runner_path = self.createRunningWrapper("kumo-manager",
self.substituteTemplate(manager_wrapper_template_location, all_config))
server_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_server_run.in'))
server_runner_path = self.createRunningWrapper("kumo-server",
self.substituteTemplate(server_wrapper_template_location, all_config))
gateway_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_gateway_run.in'))
gateway_runner_path = self.createRunningWrapper("kumo-gateway",
self.substituteTemplate(gateway_wrapper_template_location, all_config))
return [manager_runner_path, server_runner_path, gateway_runner_path]
def run_kumo_manager(self, ipaddress=None):
""" Runs the kumofs manager. """
manager_config = {}
manager_config.update(self.options)
if ipaddress is None:
manager_config['manager_address'] = "[%s]" % self.getGlobalIPv6Address()
else:
manager_config['manager_address'] = ipaddress
manager_config['manager_port'] = 19700
manager_config['manager_log'] = os.path.join(self.log_directory, "kumo-manager.log")
manager_connection = {}
manager_connection['address'] = manager_config['manager_address']
manager_connection['port'] = manager_config['manager_port']
self.computer_partition.setConnectionDict(manager_connection)
manager_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_manager_run.in'))
manager_runner_path = self.createRunningWrapper("kumo-manager",
self.substituteTemplate(manager_wrapper_template_location, manager_config))
return [manager_runner_path]
def run_kumo_server(self):
""" Runs the kumofs server. """
server_config = {}
server_config.update(self.options)
server_config.update(self.parameter_dict)
server_config['server_address'] = "[%s]" % self.getGlobalIPv6Address()
server_config['server_port'] = 19800
server_config['server_listen_port'] = 19900
server_config['server_storage'] = os.path.join(self.var_directory,"kumodb.tch")
server_config['server_log'] = os.path.join(self.log_directory, "kumo-server.log")
server_connection = {}
server_connection['address'] = server_config['server_address']
self.computer_partition.setConnectionDict(server_connection)
server_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_server_run.in'))
server_runner_path = self.createRunningWrapper("kumo-server",
self.substituteTemplate(server_wrapper_template_location, server_config))
return [server_runner_path]
def run_kumo_gateway(self):
""" Runs the kumofs gateway. """
gateway_config = {}
gateway_config.update(self.options)
gateway_config.update(self.parameter_dict)
gateway_config['gateway_address'] = "[%s]" % self.getGlobalIPv6Address()
gateway_config['gateway_port'] = 11411
gateway_config['gateway_log'] = os.path.join(self.log_directory, "kumo-gateway.log")
gateway_connection = {}
gateway_connection['address'] = gateway_config['gateway_address']
gateway_connection['port'] = gateway_config['gateway_port']
self.computer_partition.setConnectionDict(gateway_connection)
gateway_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_gateway_run.in'))
gateway_runner_path = self.createRunningWrapper("kumo-gateway",
self.substituteTemplate(gateway_wrapper_template_location, gateway_config))
return [gateway_runner_path]
def run_kumo_tester(self):
""" Runs the kumofs tester. """
tester_config = {}
tester_config.update(self.options)
tester_config.update(self.parameter_dict)
tester_config['tester_address'] = self.getGlobalIPv6Address()
# tester_config['url'] = "http://%s:5000/" % tester_config['tester_address']
# tester_config['start_url'] = "http://%s:5000/start" % tester_config['tester_address']
tester_config['report_path'] = self.log_directory
tester_config['binary'] = tester_config['memstrike_binary'] + " -l " + \
tester_config['gateway_address'].strip("[]") + " -p " + \
tester_config['gateway_port'] + " -t " + \
tester_config['nb_thread'] + " " + \
tester_config['nb_request'] #" 1000" " -t 32 1024000"
tester_connection = {}
tester_connection['start_url'] = "http://%s:5000/start" % tester_config['tester_address']
self.computer_partition.setConnectionDict(tester_connection)
tester_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'nosqltester_run.in'))
tester_runner_path = self.createRunningWrapper("nosqltester",
self.substituteTemplate(tester_wrapper_template_location, tester_config))
return [tester_runner_path]
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(gateway_binary)s -F -E -m %(manager_address)s:%(manager_port)s -t %(gateway_address)s:%(gateway_port)s --verbose -o %(gateway_log)s
exec %(gateway_binary)s -F -E -m %(manager_address)s:%(manager_port)s \
-t %(gateway_address)s:%(gateway_port)s --verbose -o %(gateway_log)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(manager_binary)s -a -l %(manager_address)s:%(manager_port)s --verbose -o %(manager_log)s
exec %(manager_binary)s -a -l %(manager_address)s:%(manager_port)s \
--verbose -o %(manager_log)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(server_binary)s -l %(server_address)s:%(server_port)s \
-L %(server_listen_port)s -m %(manager_address)s:%(manager_port)s \
-s %(server_storage)s --verbose -o %(server_log)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(nosqltester_manager_binary)s %(plugin_name)s -a %(address)s \
-r %(report_path)s -s %(max_server)s -t %(max_tester)s \
--erp5-publish-url "%(erp5_publish_url)s" --erp5-publish-project "%(erp5_publish_project)s" \
%(software_release_url)s %(server_url)s "%(key_file)s" "%(cert_file)s" %(computer_id)s %(computer_partition_id)s \
%(nb_thread)s %(nb_request)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(server_binary)s -l %(server_address)s:%(server_port)s -L %(server_listen_port)s -m %(manager_address)s:%(manager_port)s -s %(server_storage)s --verbose -o %(server_log)s
exec %(memstrike_binary)s -s -l %(gateway_address)s -p %(gateway_port)s -t %(nb_thread)s %(nb_request)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(nosqltester_manager_binary)s -a %(address)s -r %(report_path)s -s %(nb_server_max)s -t %(nb_tester_max)s %(software_release_url)s %(server_url)s "%(key_file)s" "%(cert_file)s" %(computer_id)s %(computer_partition_id)s %(plugin_name)s %(nb_thread)s %(nb_request)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(nosqltester_manager_binary)s -a %(address)s -r %(report_path)s -s %(nb_server_max)s -t %(nb_tester_max)s %(software_release_url)s %(server_url)s "%(key_file)s" "%(cert_file)s" %(computer_id)s %(computer_partition_id)s %(plugin_name)s
exec %(nosqltester_manager_binary)s %(plugin_name)s -a %(address)s \
-r %(report_path)s -s %(max_server)s -t %(max_tester)s \
--erp5-publish-url "%(erp5_publish_url)s" --erp5-publish-project "%(erp5_publish_project)s" \
%(software_release_url)s %(server_url)s "%(key_file)s" "%(cert_file)s" %(computer_id)s %(computer_partition_id)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(nosqltester_binary)s -h %(host_address)s -a %(tester_address)s -r %(report_path)s -b "%(binary)s"
exec %(nosqltester_binary)s -m %(host_address)s -a %(tester_address)s \
-r %(report_path)s -b "%(binary)s" -l %(log_directory)s \
-c "%(compress_method)s"
import os
import sys
import time
def runApache(args):
sleep = 60
conf = args[0]
while True:
ready = True
for f in conf.get('required_path_list', []):
if not os.path.exists(f):
print 'File %r does not exist, sleeping for %s' % (f, sleep)
ready = False
if ready:
break
time.sleep(sleep)
apache_wrapper_list = [conf['binary'], '-f', conf['config'], '-DFOREGROUND']
apache_wrapper_list.extend(sys.argv[1:])
sys.stdout.flush()
sys.stderr.flush()
os.execl(apache_wrapper_list[0], *apache_wrapper_list)
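As a hedged illustration of the interface above, runApache only reads the keys 'binary', 'config' and (optionally) 'required_path_list' from its configuration mapping; every path below is a placeholder.
# Illustrative only: in the recipe this mapping is generated, not hand-written.
example_conf = {
    'binary': '/opt/parts/apache/bin/httpd',            # hypothetical path
    'config': '/srv/instance/etc/apache.conf',          # hypothetical path
    'required_path_list': ['/srv/instance/etc/php.ini'],
}
# runApache([example_conf]) would wait until php.ini exists, then exec httpd
# with '-f /srv/instance/etc/apache.conf -DFOREGROUND'.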
import os
import subprocess
import time
import ConfigParser
def popenCommunicate(command_list, input=None):
subprocess_kw = dict(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if input is not None:
subprocess_kw.update(stdin=subprocess.PIPE)
popen = subprocess.Popen(command_list, **subprocess_kw)
result = popen.communicate(input)[0]
if popen.returncode is None:
popen.kill()
if popen.returncode != 0:
raise ValueError('Issue during calling %r, result was:\n%s' % (
command_list, result))
return result
class CertificateAuthority:
def __init__(self, key, certificate, openssl_binary,
openssl_configuration, request_dir):
self.key = key
self.certificate = certificate
self.openssl_binary = openssl_binary
self.openssl_configuration = openssl_configuration
self.request_dir = request_dir
def checkAuthority(self):
file_list = [ self.key, self.certificate ]
ca_ready = True
for f in file_list:
if not os.path.exists(f):
ca_ready = False
break
if ca_ready:
return
for f in file_list:
if os.path.exists(f):
os.unlink(f)
try:
# no CA, let us create new one
popenCommunicate([self.openssl_binary, 'req', '-nodes', '-config',
self.openssl_configuration, '-new', '-x509', '-extensions',
'v3_ca', '-keyout', self.key, '-out', self.certificate,
'-days', '10950'], 'Automatic Certificate Authority\n')
except:
try:
for f in file_list:
if os.path.exists(f):
os.unlink(f)
except:
# do not raise during cleanup
pass
raise
def _checkCertificate(self, common_name, key, certificate):
file_list = [key, certificate]
ready = True
for f in file_list:
if not os.path.exists(f):
ready = False
break
if ready:
return False
for f in file_list:
if os.path.exists(f):
os.unlink(f)
csr = certificate + '.csr'
try:
popenCommunicate([self.openssl_binary, 'req', '-config',
self.openssl_configuration, '-nodes', '-new', '-keyout',
key, '-out', csr, '-days', '3650'],
common_name + '\n')
try:
popenCommunicate([self.openssl_binary, 'ca', '-batch', '-config',
self.openssl_configuration, '-out', certificate,
'-infiles', csr])
finally:
if os.path.exists(csr):
os.unlink(csr)
except:
try:
for f in file_list:
if os.path.exists(f):
os.unlink(f)
except:
# do not raise during cleanup
pass
raise
else:
return True
def checkRequestDir(self):
for request_file in os.listdir(self.request_dir):
parser = ConfigParser.RawConfigParser()
parser.readfp(open(os.path.join(self.request_dir, request_file), 'r'))
if self._checkCertificate(parser.get('certificate', 'name'),
parser.get('certificate', 'key_file'), parser.get('certificate',
'certificate_file')):
print 'Created certificate %r' % parser.get('certificate', 'name')
def runCertificateAuthority(args):
ca_conf = args[0]
ca = CertificateAuthority(ca_conf['key'], ca_conf['certificate'],
ca_conf['openssl_binary'], ca_conf['openssl_configuration'],
ca_conf['request_dir'])
while True:
ca.checkAuthority()
ca.checkRequestDir()
time.sleep(60)
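checkRequestDir above reads every file in request_dir with RawConfigParser and expects a [certificate] section holding name, key_file and certificate_file; a minimal sketch of how such a request file could be produced (file name and paths are assumptions):
import ConfigParser

request = ConfigParser.RawConfigParser()
request.add_section('certificate')
request.set('certificate', 'name', 'example.service')                 # common name, illustrative
request.set('certificate', 'key_file', '/srv/ca/private/example.key')
request.set('certificate', 'certificate_file', '/srv/ca/certs/example.crt')
with open('/srv/ca/requests/example.cfg', 'w') as request_file:       # hypothetical request_dir entry
    request.write(request_file)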
import os
import glob
def controller(args):
"""Creates full or incremental backup
If no full backup is done, it is created
If full backup exists incremental backup is done starting with base
base is the newest (according to date) full or incremental backup
"""
innobackupex_incremental, innobackupex_full, full_backup, incremental_backup \
= args
if len(os.listdir(full_backup)) == 0:
print 'Doing full backup in %r' % full_backup
os.execv(innobackupex_full, [innobackupex_full, full_backup])
else:
backup_list = filter(os.path.isdir, glob.glob(full_backup + "/*") +
glob.glob(incremental_backup + "/*"))
backup_list.sort(key=lambda x: os.path.getmtime(x), reverse=True)
base = backup_list[0]
print 'Doing incremental backup in %r using %r as a base' % (
incremental_backup, base)
os.execv(innobackupex_incremental, [innobackupex_incremental,
'--incremental-basedir=%s'%base, incremental_backup])
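A hedged note on the calling convention of controller above: args must contain, in order, the incremental wrapper, the full wrapper, the full-backup directory and the incremental-backup directory (the paths below are placeholders).
# Illustrative invocation; in practice the recipe builds this argument list.
# Beware: controller() ends in os.execv, so it replaces the current process.
controller([
    '/srv/instance/bin/innobackupex-incremental',  # hypothetical wrapper script
    '/srv/instance/bin/innobackupex-full',         # hypothetical wrapper script
    '/srv/instance/srv/backup/full',               # directory holding full backups
    '/srv/instance/srv/backup/incremental',        # directory holding incremental backups
])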
import os
import subprocess
import time
import sys
def runMysql(args):
sleep = 60
conf = args[0]
mysqld_wrapper_list = [conf['mysqld_binary'], '--defaults-file=%s' %
conf['configuration_file']]
# we trust mysql_install: if the mysql directory is available, mysql was
# correctly initialised
if not os.path.isdir(os.path.join(conf['data_directory'], 'mysql')):
while True:
# XXX: Protect with proper root password
# XXX: Follow http://dev.mysql.com/doc/refman/5.0/en/default-privileges.html
popen = subprocess.Popen([conf['mysql_install_binary'],
'--skip-name-resolve', '--no-defaults', '--datadir=%s' %
conf['data_directory']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = popen.communicate()[0]
if popen.returncode is None or popen.returncode != 0:
print "Failed to initialise server.\nThe error was: %s" % result
print "Waiting for %ss and retrying" % sleep
time.sleep(sleep)
else:
print "Mysql properly initialised"
break
else:
print "MySQL already initialised"
print "Starting %r" % mysqld_wrapper_list[0]
sys.stdout.flush()
sys.stderr.flush()
os.execl(mysqld_wrapper_list[0], *mysqld_wrapper_list)
def updateMysql(args):
conf = args[0]
sleep = 30
is_succeed = False
while True:
if not is_succeed:
mysql_upgrade_list = [conf['mysql_upgrade_binary'], '--no-defaults', '--user=root', '--socket=%s' % conf['socket']]
mysql_upgrade = subprocess.Popen(mysql_upgrade_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = mysql_upgrade.communicate()[0]
if mysql_upgrade.returncode is None:
mysql_upgrade.kill()
if mysql_upgrade.returncode != 0 and not 'is already upgraded' in result:
print "Command %r failed with result:\n%s" % (mysql_upgrade_list, result)
print 'Sleeping for %ss and retrying' % sleep
else:
if mysql_upgrade.returncode == 0:
print "MySQL database upgraded with result:\n%s" % result
else:
print "No need to upgrade MySQL database"
mysql_list = [conf['mysql_binary'].strip(), '--no-defaults', '-B', '--user=root', '--socket=%s' % conf['socket']]
mysql = subprocess.Popen(mysql_list, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = mysql.communicate(conf['mysql_script'])[0]
if mysql.returncode is None:
mysql.kill()
if mysql.returncode != 0:
print 'Command %r failed with:\n%s' % (mysql_list, result)
print 'Sleeping for %ss and retrying' % sleep
else:
is_succeed = True
print 'SlapOS initialisation script successfully applied on database.'
sys.stdout.flush()
sys.stderr.flush()
time.sleep(sleep)
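Likewise, a hedged sketch of the configuration mapping updateMysql above expects; only the keys the function reads are shown and all values are placeholders.
# Illustrative only; the recipe normally assembles this mapping.
example_conf = {
    'mysql_upgrade_binary': '/opt/parts/mariadb/bin/mysql_upgrade',  # hypothetical
    'mysql_binary': '/opt/parts/mariadb/bin/mysql',                  # hypothetical
    'socket': '/srv/instance/var/run/mariadb.sock',                  # hypothetical
    'mysql_script': 'CREATE DATABASE IF NOT EXISTS example;\nEXIT\n' # illustrative SQL
}
# updateMysql([example_conf]) retries every 30 seconds until the upgrade and
# the initialisation script have both been applied, then keeps sleeping.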
# Apache static configuration
# Automatically generated
# Basic server configuration
PidFile "%(pid_file)s"
LockFile "%(lock_file)s"
Listen %(ip)s:%(port)s
PHPINIDir %(php_ini_dir)s
ServerAdmin someone@email
DefaultType text/plain
TypesConfig conf/mime.types
AddType application/x-compress .Z
AddType application/x-gzip .gz .tgz
AddType application/x-httpd-php .php .phtml .php5 .php4
AddType application/x-httpd-php-source .phps
# Log configuration
ErrorLog "%(error_log)s"
LogLevel warn
LogFormat "%%h %%{REMOTE_USER}i %%l %%u %%t \"%%r\" %%>s %%b \"%%{Referer}i\" \"%%{User-Agent}i\"" combined
LogFormat "%%h %%{REMOTE_USER}i %%l %%u %%t \"%%r\" %%>s %%b" common
CustomLog "%(access_log)s" common
# Directory protection
<Directory />
Options FollowSymLinks
AllowOverride None
Order deny,allow
Deny from all
</Directory>
Alias /mmc %(document_root)s
### Allow access to lmc web directory to everyone
<Directory %(document_root)s>
AllowOverride None
Order allow,deny
allow from all
php_flag short_open_tag on
php_flag magic_quotes_gpc on
</Directory>
DocumentRoot %(document_root)s
DirectoryIndex index.html index.php
# List of modules
LoadModule authz_host_module modules/mod_authz_host.so
LoadModule log_config_module modules/mod_log_config.so
LoadModule setenvif_module modules/mod_setenvif.so
LoadModule version_module modules/mod_version.so
LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_http_module modules/mod_proxy_http.so
LoadModule mime_module modules/mod_mime.so
LoadModule dav_module modules/mod_dav.so
LoadModule dav_fs_module modules/mod_dav_fs.so
LoadModule negotiation_module modules/mod_negotiation.so
LoadModule rewrite_module modules/mod_rewrite.so
LoadModule headers_module modules/mod_headers.so
LoadModule dir_module modules/mod_dir.so
LoadModule php5_module modules/libphp5.so
%(file_list)s {
daily
dateext
rotate 30
compress
notifempty
sharedscripts
create
postrotate
%(postrotate)s
endscript
olddir %(olddir)s
}
#!/bin/sh
exec %(memcached_binary)s -p %(memcached_port)s -U %(memcached_port)s -l %(memcached_ip)s
#!/bin/sh
exec %(mmc_core_binary)s -f %(mmc_core_config_file)s
[main]
host = %(mmc_host)s
port = %(mmc_port)s
# Credentials for HTTP basic authentication
login = mmc
password = s3cr3t
# RPC Session timeout in seconds.
# If unset, defaults to Twisted's hardcoded 900 seconds.
#sessiontimeout = 900
# Multi-threading support (enabled by default)
#multithreading = 1
#maxthreads = 20
# SSL support
enablessl = 1
localcert = %(ssl_localcert)s
cacert = %(ssl_cacert)s
# Certificate check
# verifypeer = 0
# Path to the file containing the Certificate Authority (PEM format)
# cacert =
# Path to the file containing the local key and certificate (PEM format)
# localcert =
[daemon]
user = %(daemon_user)s
group = %(daemon_user)s
umask = 0077
pidfile= %(daemon_pidfile)s
# user = mmc
# group = mmc
# umask = 0007
# pidfile= /var/run/mmc-agent.pid
[loggers]
keys=root
[handlers]
keys=hand01,hand02
[formatters]
keys=form01
[logger_root]
level=NOTSET
handlers=hand01
[handler_hand01]
class=FileHandler
level=INFO
formatter=form01
args=("%s" % %(mmc_log)s,)
[handler_hand02]
class=StreamHandler
level=DEBUG
args=(sys.stderr,)
[formatter_form01]
format=%(asctime)s #%(thread)d %(levelname)s %(message)s
[main]
host = %(mmc_host)s
port = %(mmc_port)s
# Credentials for HTTP basic authentication
login = mmc
password = s3cr3t
# RPC Session timeout in seconds.
# If unset, defaults to Twisted's hardcoded 900 seconds.
#sessiontimeout = 900
# Multi-threading support (enabled by default)
#multithreading = 1
#maxthreads = 20
# SSL support
enablessl = 1
localcert = %(ssl_localcert)s
cacert = %(ssl_cacert)s
# Certificate check
# verifypeer = 0
# Path to the file containing the Certificate Authority (PEM format)
# cacert =
# Path to the file containing the local key and certificate (PEM format)
# localcert =
[daemon]
user = %(daemon_user)s
group = %(daemon_user)s
umask = 0077
pidfile= %(daemon_pidfile)s
# user = mmc
# group = mmc
# umask = 0007
# pidfile= /var/run/mmc-agent.pid
[loggers]
keys=root
[handlers]
keys=hand01,hand02
[formatters]
keys=form01
[logger_root]
level=NOTSET
handlers=hand01
[handler_hand01]
class=FileHandler
level=INFO
formatter=form01
args=("%s" % %(mmc_log)s,)
[handler_hand02]
class=StreamHandler
level=DEBUG
args=(sys.stderr,)
[formatter_form01]
format=%(asctime)s #%(thread)d %(levelname)s %(message)s
[global]
; RPC backend to use
backend = xmlrpc
; HTTP basic authentication credentials to use for XMLRPC communication
login = mmc
password = s3cr3t
; Root URL where the MMC web pages are installed
; root= /mmc/ for http://127.0.0.1/mmc/
root = /mmc/
; Filesystem path where the MMC web modules are installed
rootfsmodules = %(mmc_location)s/modules/
; Available pagination steps
pagination = 10 20 50 100
; Number of items to display in lists
maxperpage = 10
; Declare if this installation is a community version or not
; community can be yes or no
community = yes
; Debug section
[debug]
; 0 : no information
; 1 : XMLRPC calls and results are displayed by the web interface
level = 0
; Login page title
; UTF-8 strings must be used
[logintitle]
; Default page title for English and non-translated languages
C =
; French title
fr_FR =
; Spanish title
es_ES =
[server_01]
description = localhost
url = https://127.0.0.1:7080
; Timeout in seconds for all socket I/O operations
; Beware that timeout on a SSL socket only works with PHP >= 5.2.1
; timeout = 300
; SSL certificate check support
; verifypeer = 0
; Path to the file containing the Certificate Authority (PEM format)
; cacert =
; Path to the file containing the local key and certificate (PEM format)
; localcert =
[ldap]
# LDAP we are connected to
# If ldapurl starts with "ldaps://", use LDAP over SSL on the LDAPS port.
# LDAPS is deprecated, and you should use StartTLS.
# If ldapverifypeer = demand, always use the server hostname instead of its IP
# address in the LDAP URL. This hostname must match the CN field of the server
# certificate.
ldapurl = ldap://%(ldap_host)s:%(ldap_port)s
# Network timeout in seconds for LDAP operations. No default timeout set.
#network_timeout =
# TLS connection parameters when LDAPS is not used:
# off - never use TLS (default value)
# start_tls - Use the LDAPv3 StartTLS extended operation (better)
#start_tls = off
# If start_tls != off or LDAPS, specify check to perform on server certificate:
# never - don't ask certificate
# demand - request certificate. If none or bad certificate provided, stop the
# connection (recommended)
#ldapverifypeer = demand
# Client certificates to use (defaults are empty) for LDAPS or TLS connections:
# For example: /etc/ssl/certs
#cacertdir =
# For example: /etc/mmc/certs/demoCA/cacert.pem
#cacert =
# For example: /etc/mmc/certs/client.cert
#localcert =
# For example: /etc/mmc/certs/client.key
#localkey =
# Accepted ciphers
# Use this for more security: TLSv1+SHA1+AES+DH:CAMELLIA:!NULL:!ADH
#ciphersuites = TLSv1:!NULL
# LDAP debug level - set this to 255 to debug LDAP connection problems
#ldapdebuglevel = 0
# LDAP base DN
baseDN = dc=mandriva, dc=com
# Users location in the LDAP
baseUsersDN = ou=Users, %%(basedn)s
# Groups location in the LDAP
baseGroupsDN = ou=Groups, %%(basedn)s
# LDAP manager
rootName = cn=admin, %%(basedn)s
password = secret
# If enabled, the MMC will create/move/delete the home of the users
# Else it will do nothing, but only write user information into the LDAP
userHomeAction = 1
# Skeleton directory to populate a new home directory
skelDir = /etc/skel
# If set, all new users will belong to this group when created
defaultUserGroup = Domain Users
# Default home directory for users
defaultHomeDir = /home
# user uid number start
uidStart = 10000
# group gid number start
gidStart = 10000
# LDAP log file path
logfile = %(ldap_logfile_path)s
# FDS log file path
# logfile = /opt/fedora-ds/slapd-hostname/logs/access
# you can specify here where you authorize creation of your homedir
# default is your defaultHomeDir
# example:
# authorizedHomeDir = /home, /home2, /mnt/depot/newhome
# LDAP user password scheme to use
# Possible values are "ssha", "crypt" and "passmod"
# "passmod" uses the LDAP Password Modify Extended Operations to change
# password. The password encryption is done by the LDAP server.
passwordscheme = passmod
#[backup-tools]
## Path of the backup tools
#path = /usr/lib/mmc/backup-tools
## Where are put the archives
#destpath = /home/archives
# Computer inventory plugin to use (Pulse 2 related option)
# [computers]
# method = glpi
# method = inventory
# Audit system configuration
# If commented, the audit module will be disabled
# [audit]
# method = database
# dbhost = localhost
# MySQL and PostgreSQL backends are available
# dbdriver = postgres
# dbport = 5432
# dbdriver = mysql
# dbport = 3306
# dbuser = audit
# dbpassword = audit
# dbname = audit
# User authentication configuration
#[authentication]
# Authenticators chain
#method = baseldap externalldap
# baseldap authenticator configuration
#[authentication_baseldap]
# Set a list of logins that will only be authenticated using this authenticator
#authonly = root
# Externalldap authenticator configuration
#[authentication_externalldap]
# Login list that won't be authenticated with this authenticator.
#exclude =
# If set, only the specified logins will be authenticated with this
# authenticator.
#authonly =
# Set whether this authenticator is mandatory. If it is mandatory and can't be
# validated during the mmc-agent activation phase, the mmc-agent exits with an
# error.
#mandatory = True
# LDAP server URLs. The LDAP servers are selected in the given order when
# authenticating a user.
#ldapurl = ldap://192.168.0.1:389 ldap://192.168.0.2:389
# LDAP connection timeout in seconds. If the LDAP connection fails after this
# timeout, we try the next LDAP server in the list or give up if it is the last.
#network_timeout =
# LDAP suffix where to search for user
#suffix = cn=Users,dc=mandriva,dc=com
# How to bind to the LDAP. Empty if anonymous
#bindname = cn=account, cn=Users, dc=linboxad, dc=com
#bindpasswd = s3cr3t
#bindname =
#bindpasswd =
# User filter
#filter = objectClass=*
# User attribute containing her/his login
#attr = cn
# User provisioning configuration
#[provisioning]
#method = externalldap
# externalldap provisioner configuration
#[provisioning_externalldap]
# Login list that won't be provisioned with this provisioner
#exclude = root
# These attributes are mandatory to create a user
#ldap_uid = cn
#ldap_givenName = sn
#ldap_sn = sn
# Other attributes to fill in
#ldap_mail = mail
#...
# We are able to fill the ACL fields when the user logs in, according to the
# value of an attribute from the external LDAP.
# What is the field name ?
#profile_attr =
# Here we define two profiles: profile1 and profile2
# profile1 allows the user to log in and change her/his password in the web
# interface
#profile_acl_profile1= :base#users#passwd/
# profile2 disallows the user to do anything (no ACL defined)
#profile_acl_profile2 =
# ... You can define as many profile_acl_* options as you need
# For each profile, we can create a group of users, and put users with a given
# profile in the corresponding group automatically when they log in.
# Set the next line to True to activate profile to group mapping
#profile_group_mapping = False
# A prefix for the created group can be set
#profile_group_prefix =
# Example userdefault settings to support Kerberos
# [userdefault]
# objectClass = +krb5KDCEntry,krb5Principal
# krb5KeyVersionNumber = 1
# krb5KDCFlags = 126
# krb5PrincipalName = %uid%@DOMAIN
# Subscription information
# [subscription]
# product_name = MDS
# vendor_name = Mandriva
# vendor_mail = sales@mandriva.com
# customer_name =
# customer_mail =
# comment =
# users = 0
# computers = 0
# # Support information
# support_mail = customer@customercare.mandriva.com
# support_phone = 0810 LINBOX
# support_comment =
[ldap]
# LDAP we are connected to
# If ldapurl starts with "ldaps://", use LDAP over SSL on the LDAPS port.
# LDAPS is deprecated, and you should use StartTLS.
# If ldapverifypeer = demand, always use the server hostname instead of its IP
# address in the LDAP URL. This hostname must match the CN field of the server
# certificate.
ldapurl = ldap://%(ldap_host)s:%(ldap_port)s
# Network timeout in seconds for LDAP operations. No default timeout set.
#network_timeout =
# TLS connection parameters when LDAPS is not used:
# off - never use TLS (default value)
# start_tls - Use the LDAPv3 StartTLS extended operation (better)
#start_tls = off
# If start_tls != off or LDAPS, specify check to perform on server certificate:
# never - don't ask certificate
# demand - request certificate. If none or bad certificate provided, stop the
# connection (recommended)
#ldapverifypeer = demand
# Client certificates to use (defaults are empty) for LDAPS or TLS connections:
# For example: /etc/ssl/certs
#cacertdir =
# For example: /etc/mmc/certs/demoCA/cacert.pem
#cacert =
# For example: /etc/mmc/certs/client.cert
#localcert =
# For example: /etc/mmc/certs/client.key
#localkey =
# Accepted ciphers
# Use this for more security: TLSv1+SHA1+AES+DH:CAMELLIA:!NULL:!ADH
#ciphersuites = TLSv1:!NULL
# LDAP debug level - set this to 255 to debug LDAP connection problems
#ldapdebuglevel = 0
# LDAP base DN
baseDN = dc=mandriva, dc=com
# Users location in the LDAP
baseUsersDN = ou=Users, %%(basedn)s
# Groups location in the LDAP
baseGroupsDN = ou=Groups, %%(basedn)s
# LDAP manager
rootName = cn=admin, %%(basedn)s
password = secret
# If enabled, the MMC will create/move/delete the home of the users
# Else it will do nothing, but only write user information into the LDAP
userHomeAction = 1
# Skeleton directory to populate a new home directory
skelDir = /etc/skel
# If set, all new users will belong to this group when created
defaultUserGroup = Domain Users
# Default home directory for users
defaultHomeDir = /home
# user uid number start
uidStart = 10000
# group gid number start
gidStart = 10000
# LDAP log file path
logfile = %(ldap_logfile_path)s
# FDS log file path
# logfile = /opt/fedora-ds/slapd-hostname/logs/access
# you can specify here where you authorize creation of your homedir
# default is your defaultHomeDir
# example:
# authorizedHomeDir = /home, /home2, /mnt/depot/newhome
# LDAP user password scheme to use
# Possible values are "ssha", "crypt" and "passmod"
# "passmod" uses the LDAP Password Modify Extended Operations to change
# password. The password encryption is done by the LDAP server.
passwordscheme = passmod
#[backup-tools]
## Path of the backup tools
#path = /usr/lib/mmc/backup-tools
## Where are put the archives
#destpath = /home/archives
# Computer inventory plugin to use (Pulse 2 related option)
# [computers]
# method = glpi
# method = inventory
# Audit system configuration
# If commented, the audit module will be disabled
# [audit]
# method = database
# dbhost = localhost
# MySQL and PostgreSQL backends are available
# dbdriver = postgres
# dbport = 5432
# dbdriver = mysql
# dbport = 3306
# dbuser = audit
# dbpassword = audit
# dbname = audit
# User authentication configuration
#[authentication]
# Authenticators chain
#method = baseldap externalldap
# baseldap authenticator configuration
#[authentication_baseldap]
# Set a list of logins that will only be authenticated using this authenticator
#authonly = root
# Externalldap authenticator configuration
#[authentication_externalldap]
# Login list that won't be authenticated with this authenticator.
#exclude =
# If set, only the specified logins will be authenticated with this
# authenticator.
#authonly =
# Set whether this authenticator is mandatory. If it is mandatory and can't be
# validated during the mmc-agent activation phase, the mmc-agent exits with an
# error.
#mandatory = True
# LDAP server URLs. The LDAP servers are selected in the given order when
# authenticating a user.
#ldapurl = ldap://192.168.0.1:389 ldap://192.168.0.2:389
# LDAP connection timeout in seconds. If the LDAP connection fails after this
# timeout, we try the next LDAP server in the list or give up if it is the last.
#network_timeout =
# LDAP suffix where to search for user
#suffix = cn=Users,dc=mandriva,dc=com
# How to bind to the LDAP. Empty if anonymous
#bindname = cn=account, cn=Users, dc=linboxad, dc=com
#bindpasswd = s3cr3t
#bindname =
#bindpasswd =
# User filter
#filter = objectClass=*
# User attribute containing her/his login
#attr = cn
# User provisioning configuration
#[provisioning]
#method = externalldap
# externalldap provisioner configuration
#[provisioning_externalldap]
# Login list that won't be provisioned with this provisioner
#exclude = root
# These attributes are mandatory to create a user
#ldap_uid = cn
#ldap_givenName = sn
#ldap_sn = sn
# Other attributes to fill in
#ldap_mail = mail
#...
# We are able to fill the ACL fields when the user logs in, according to the
# value of an attribute from the external LDAP.
# What is the field name ?
#profile_attr =
# Here we define two profiles: profile1 and profile2
# profile1 allows the user to log in and change her/his password in the web
# interface
#profile_acl_profile1= :base#users#passwd/
# profile2 disallows the user to do anything (no ACL defined)
#profile_acl_profile2 =
# ... You can define as many profile_acl_* options as you need
# For each profile, we can create a group of users, and put users with a given
# profile in the corresponding group automatically when they log in.
# Set the next line to True to activate profile to group mapping
#profile_group_mapping = False
# A prefix for the created group can be set
#profile_group_prefix =
# Example userdefault settings to support Kerberos
# [userdefault]
# objectClass = +krb5KDCEntry,krb5Principal
# krb5KeyVersionNumber = 1
# krb5KDCFlags = 126
# krb5PrincipalName = %uid%@DOMAIN
# Subscription information
# [subscription]
# product_name = MDS
# vendor_name = Mandriva
# vendor_mail = sales@mandriva.com
# customer_name =
# customer_mail =
# comment =
# users = 0
# computers = 0
# # Support information
# support_mail = customer@customercare.mandriva.com
# support_phone = 0810 LINBOX
# support_comment =
[main]
disable = 1
[ppolicy]
# Branch where the password policies are stored
ppolicyDN = ou=Password Policies, %(baseDN)s
# Name of the default password policy
ppolicyDefault = default
# This options are used only once to create the default password policy entry
# into the LDAP
[ppolicyattributes]
pwdAttribute = userPassword
pwdLockout = True
pwdMaxFailure = 5
pwdLockoutDuration = 900
# Password can't be changed if it is not 7 days old
pwdMinAge = 25200
# Password expiration is 42 days
pwdMaxAge = 3628800
pwdMinLength = 8
pwdInHistory = 5
pwdMustChange = True
# To check password quality
pwdCheckModule = mmc-check-password.so
pwdCheckQuality = 2
# ERP5 buildout my.cnf template based on my-huge.cnf shipped with mysql
# The MySQL server
[mysqld]
# ERP5 by default requires InnoDB storage. MySQL by default falls back to
# using a different engine, like MyISAM. Such behaviour only generates
# problems when tables requested as InnoDB are silently created with the
# MyISAM engine.
#
# A loud failure is really required in such a case.
sql-mode="NO_ENGINE_SUBSTITUTION"
skip-show-database
port = %(tcp_port)s
bind-address = %(ip)s
socket = %(socket)s
datadir = %(data_directory)s
pid-file = %(pid_file)s
log-error = %(error_log)s
log-slow-file = %(slow_query_log)s
long_query_time = 5
max_allowed_packet = 128M
query_cache_size = 32M
plugin-load = ha_innodb_plugin.so
# The following are important to configure and depend a lot on to the size of
# your database and the available resources.
#innodb_buffer_pool_size = 4G
#innodb_log_file_size = 256M
#innodb_log_buffer_size = 8M
# Some dangerous settings you may want to uncomment if you only want
# performance or less disk access. Useful for unit tests.
#innodb_flush_log_at_trx_commit = 0
#innodb_flush_method = nosync
#innodb_doublewrite = 0
#sync_frm = 0
# Uncomment the following if you need binary logging, which is recommended
# on production instances (either for replication or incremental backups).
#log-bin=mysql-bin
# Force utf8 usage
collation_server = utf8_unicode_ci
character_set_server = utf8
skip-character-set-client-handshake
[mysql]
no-auto-rehash
socket = %(socket)s
[mysqlhotcopy]
interactive-timeout
CREATE DATABASE IF NOT EXISTS %(database)s;
GRANT ALL PRIVILEGES ON %(database)s.* TO %(user)s@localhost IDENTIFIED BY %(password)r;
GRANT ALL PRIVILEGES ON %(database)s.* TO %(user)s@'%%' IDENTIFIED BY %(password)r;
GRANT SHOW DATABASES ON *.* TO %(user)s@localhost IDENTIFIED BY %(password)r;
GRANT SHOW DATABASES ON *.* TO %(user)s@'%%' IDENTIFIED BY %(password)r;
FLUSH PRIVILEGES;
EXIT
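The %(...)s and %(...)r markers in the statements above are ordinary Python string-formatting placeholders filled in by the recipe; a minimal rendering sketch with invented values:
# Illustrative rendering of the first GRANT statement above (values invented).
sql_template = (
    "CREATE DATABASE IF NOT EXISTS %(database)s;\n"
    "GRANT ALL PRIVILEGES ON %(database)s.* TO %(user)s@localhost "
    "IDENTIFIED BY %(password)r;\n"
)
# %(password)r inserts the repr of the value, i.e. a quoted string.
print sql_template % {'database': 'erp5', 'user': 'erp5_user', 'password': 's3cr3t'}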
[PHP]
engine = On
safe_mode = Off
expose_php = On
error_reporting = E_ALL | E_STRICT
display_errors = On
display_startup_errors = On
log_errors = On
log_errors_max_len = 1024
ignore_repeated_errors = Off
ignore_repeated_source = Off
[main]
# Is the plugin disabled ?
disable = 0
# Are dynamic groups enabled ?
dynamic_enable = 1
# Are profiles enabled ?
# profiles_enable = 0
# Preselected module in the dynamic group creation page
# default_module =
# Maximum number of elements in the static group creation list
# max_elements_for_static_list = 2000
[database]
dbdriver = mysql
dbhost = %(mysql_host)s
dbport = %(mysql_port)s
dbuser = %(mysql_user)s
dbpasswd = %(mysql_password)s
dbname = dyngroup
# dbsslenable = 0
# dbsslca =
# dbsslcert =
# dbsslkey =
# Database connection lifetime
# dbpoolrecycle = 60
# Database connection pool size
# dbpoolsize = 5
[querymanager]
# can we query on group names ?
activate = 0
[main]
disable = 0
dbdriver = mysql
dbhost = %(mysql_host)s
dbport = %(mysql_port)s
dbuser = %(mysql_user)s
dbpasswd = %(mysql_password)s
dbname = glpi
# dbsslenable = 0
# dbsslca =
# dbsslcert =
# dbsslkey =
# Database connection lifetime
# dbpoolrecycle = 60
# Database connection pool size
# dbpoolsize = 5
# Allow users to filter computers list using an entity selector
localisation = True
# Accepted GLPI profiles. A user must have her/his GLPI profile in this list
# else she/he can't display any computers from the GLPI inventory
# active_profiles = profile1 profile2 profile3
# Only display computers with the specified state
# filter_on = state=3
# Give the uri to link to for a computer inventory
# glpi_computer_uri =
# should be something like that :
# glpi_computer_uri = http://localhost/glpi/front/computer.form.php?ID=
# Tell whether the query manager of the dyngroup plugin can use this module
[querymanager]
activate = True
# GLPI authentication configuration
#[authentication_glpi]
# URL to connect to the GLPI HTTP interface ?
#baseurl = http://glpi-server/glpi/
# GLPI provisioning configuration
#[provisioning_glpi]
# Users that will never be provisioned
#exclude = root
# Before provisioning, should we perform a GLPI authentication to create or
# update the user information in the GLPI database ?
#doauth = 1
# MMC web interface ACLs definition according to the user GLPI profile
#profile_acl_profile1 = :##:base#main#default
#profile_acl_profile2 =
#profile_acl_profile3 =
# If the user belong to more than one profile, the first profile of this list
# will be used
#profiles_order = profile1 profile2 profile3
[main]
disable = 0
[database]
# dbdriver = mysql
# dbhost = localhost
# dbport = 3306
# dbname = imaging
# dbuser = mmc
# dbpasswd = mmc
# dbsslenable = 0
# dbsslca = /etc/mmc/pulse2/imaging/cacert.pem
# dbsslcert = /etc/mmc/pulse2/imaging/cert.pem
# dbsslkey = /etc/mmc/pulse2/imaging/key.pem
# Database connection lifetime
# dbpoolrecycle = 60
# Database connection pool size
# dbpoolsize = 5
# [web]
# ##### Interface customization #####
# user may want to override the way dates are displayed (see http://www.php.net/date for more information)
# web_def_date_fmt = "%Y-%m-%d %H:%M:%S"
# web_def_default_protocol = nfs
# Menu settings
# web_def_default_menu_name = Menu
# web_def_default_timeout = 60
# web_def_default_background_uri =
# web_def_default_message = Warning ! Your PC is being backed up or restored. Do not reboot !
# start options
# web_def_kernel_parameters = quiet
# backup/restore options
# web_def_image_parameters =
[main]
disable = 0
displayLocalisationBar = 0
# Example of software filter when querying a computer inventory.
# All softwares containing the KB string will be excluded
# software_filter = %KB%
[inventory]
dbdriver = mysql
dbhost = %(mysql_host)s
dbport = %(mysql_port)s
dbuser = %(mysql_user)s
dbpasswd = %(mysql_password)s
dbname = inventory
dbsslenable = 0
dbsslca = %(inventory_ssl_cacert)s
dbsslcert = %(inventory_ssl_cert)s
dbsslkey = %(inventory_ssl_key)s
# Database connection lifetime
# dbpoolrecycle = 60
# Database connection pool size
# dbpoolsize = 5
[computers]
# display = cn::Computer Name||displayName::Description
# content =
[expert_mode]
Bios = BiosVersion|ChipSerial|BiosVendor|SmbManufacturer|SmbProduct|SmbVersion|SmbSerial|SmbUUID|SmbType|DateFirstSwitchOn
Network = CardType|MIB|Bandwidth|NetworkType|SubnetMask|State
Hardware = Build|Version|ProcessorCount|SwapSpace|User|Date|Workgroup|RegisteredName|RegisteredCompany|OSSerialNumber|Type|OsSerialKey|ProcessorFrequency|Host
Software = ProductPath|Type|Icon|UninstallPath|ExecutableSize|Application
Controller = ExpandedType|HardwareVersion|StandardType
Drive = DriveType|FileCount|FileSystem
Input = StandardDescription|ExpandedDescription|Connector
Memory = ExtendedDescription|SlotCount
Monitor = Stamp|Type|Serial|Manuf
Pci =
Port = Stamp
Printer =
Slot =
Sound = Description
Storage = ExtendedType|VolumeName|Media
VideoCard =
[graph]
Network = Gateway
Hardware = OperatingSystem|ProcessorType
Memory = Size
# [querymanager]
# list = Entity/Label||Software/ProductName||Hardware/ProcessorType||Hardware/OperatingSystem||Drive/TotalSpace||Inventory/Date
# double = Software/Products::Software/ProductName##Software/ProductVersion
# halfstatic = Registry/Value/display name::Path##DisplayName
# extended = Inventory/Date||Drive/TotalSpace
# [provisioning_inventory]
# Users that will never be provisioned
# exclude = root
# A user can be automatically linked to a list of entities according to his
# profile.
# What is the LDAP field name that defines its profile name ?
# profile_attr =
# Here are the possible notations for profile to entities mapping:
# A simple list of the entities names
# profile_entity_profile1 = entityA entityB
# The dot char is the root entity
# profile_entity_profile2 = .
# In this example the content of the multi-valued 'pulse2entity' LDAP attribute
# will be used
# profile_entity_profile3 = %pulse2entity%
# Here the provisioning plugin 'network_to_entity' will be used
# profile_entity_profile4 = plugin:network_to_entity
[main]
disable = 0
[msc]
# repopath = /var/lib/pulse2/packages
# qactionspath = /var/lib/pulse2/qactions
# download_directory_path = /var/lib/pulse2/downloads
# dbdriver = mysql
# dbhost = localhost
# dbport = 3306
# dbname = msc
# dbuser = mmc
# dbpasswd = mmc
# dbdebug = ERROR
# Database connection lifetime
# dbpoolrecycle = 60
# Database connection pool size
# dbpoolsize = 5
# SSL support
# dbsslenable = 0
# dbsslca =
# dbsslcert =
# dbsslkey =
# Computer's IP addresses filtering
# ignore_non_rfc2780 = 1
# ignore_non_rfc1918 = 0
# Comma separated values of excluded or included IP addresses or ranges
# For example: exclude_ipaddr = 192.168.0.1,10.0.0.0/10.255.255.255
# Included addresses are never filtered.
# exclude_ipaddr =
# include_ipaddr =
# Computer's host name filtering
# ignore_non_fqdn = 0
# ignore_invalid_hostname = 0
# Space-separated list of regexps for rejected or accepted host names
# Host names matching the regexps in include_hostname are always accepted
# For example: exclude_hostname = ^computer[0-9]*$ ^server[0-9]*$
# exclude_hostname =
# include_hostname =
# Computer's MAC addresses filtering
# wol_macaddr_blacklist =
# default scheduler used by the msc
default_scheduler = scheduler_01
[scheduler_api]
host = %(ipv4)s
port = 9990
username =
password =
enablessl = 1
# verifypeer = 0
# cacert =
# localcert =
[scheduler_01]
host = %(ipv4)s
port = 8000
username = username
password = password
enablessl = 1
# verifypeer = 0
# cacert =
# localcert =
[web]
# ##### Interface customization #####
# user may want to override the way dates are displayed (see http://www.php.net/date for more information)
# web_def_date_fmt = "%Y-%m-%d %H:%M:%S"
#
# ##### Deployment default settings #####
# pre-check the "awake" checkbox in 'standard' deploy mode
# web_def_awake = 1
# pre-check the "do inventory" checkbox in 'standard' deploy mode
# web_def_inventory = 1
# hidden option
# web_show_reboot = 0
# pre-select the following mode in 'advanced' deploy mode
# the two available modes are push and push_pull
# web_def_mode = push
# if set to False, the following setting allows the user to toggle between push and push/pull mode in advanced mode
# web_force_mode = True
# web_def_maxbw = 0
# web_def_delay = 60
# web_def_attempts = 3
# web_def_deployment_intervals =
# web_dlpath =
# Max bandwidth for file download in Kbit/s
# web_def_dlmaxbw = 0
#
# ##### Proxy default settings ####
# May the local proxy system be used ?
# web_allow_local_proxy = False
# default proxy mode, default "multiple", other possible value "single"
# web_def_local_proxy_mode = multiple
# Max number of clients per proxy in proxy mode
# web_def_max_clients_per_proxy = 10
# Number of auto-selected proxy in semi-auto mode
# web_def_proxy_number = 2
# default mode (semi_auto / manual)
# web_def_proxy_selection_mode = semi_auto
#
# ##### VNC Applet default settings ####
# Here we may define the VNC applet behavior (in some way)
# may the VNC applet be used ? (this setting simply (en/dis)ables the display of the VNC action button)
# vnc_show_icon = True
# allow user to interact with remote desktop ?
# vnc_view_only = True
# use the following VNC client pre-defined rules,
# currently available profiles:
# fiber: for high speed local networks (low latency, 10 Mb/s per connection)
# lan: for 100 Mb local networks (low latency, 3 Mb/s per connection)
# cable: for high-end broadband links (high latency, 400 kb/s per connection)
# dsl: for low-end broadband links (high latency, 120 kb/s per connection)
# isdn: (high latency, 75 kb/s)
# vnc_network_connectivity = lan
# display applet control to user
# vnc_allow_user_control = False
# the port to use to connect to a VNC
# vnc_port = 5900
#
# ##### Client probing behavior ####
# the LED which represents the client status can take four colors:
# black => no probe done
# red => all probes failed
# orange => minimal probe succeeded (ping), maximal probe failed (ssh)
# green => all probes succeeded
# available probes are: none (field is empty), ping, ssh, ping_ssh (ie. both)
# for networks where icmp is not allowed, ping may be disabled: probe_order=ssh
# to speed-up display, ssh may be disabled: probe_order=ping
# to fully disable probe: probe_order=
# default conf: ping_ssh, in other terms: ping = orange, ssh = green
# probe_order=ping_ssh
[package_api]
# mserver = 127.0.0.1
# mport = 9990
# mmountpoint = /rpc
# enablessl = 1
# verifypeer = 0
# localcert =
# cacert =
[main]
disable = 0
[user_package_api]
server = localhost
port = 9990
mountpoint = /upaa
username =
password =
enablessl = 1
# verifypeer = 0
# cacert =
# localcert =
[main]
disable = 0
# [database]
# dbdriver = mysql
# dbhost = localhost
# dbport = 3306
# dbname = pulse2
# dbuser = mmc
# dbpasswd = mmc
# dbsslenable = 0
# dbsslca =
# dbsslcert =
# dbsslkey =
^/tftpboot/revoboot/bin/revoboot.pxe$ bootloader/pxe_boot
^/bootloader/pxe_boot$ bootloader/pxe_boot
^/bootloader/bootsplash.xpm$ bootloader/bootsplash.xpm
^/bootmenus/default bootmenus/default
^/bootmenus/([0-9A-F]{12}) bootmenus/$1
^/bootmenus/([0-9A-F]{8}) bootmenus/$1
^/custom/(.+) custom/$1
^/diskless/kernel$ diskless/kernel
^/diskless/initrd$ diskless/initrd
^/diskless/memtest$ diskless/memtest
^/tools/memtest$ diskless/memtest
^/masters/(.+) masters/$1
^/(.*)$ ?
.* ?
[main]
# We will bind on this address ...
# host = 0.0.0.0
# ... and this UDP port
# port = 1001
# required password to record a client, not checked if empty
# adminpass =
[daemon]
# as we are a service, we need some info to be run, such as:
# - the identity we will run as
# user = root
# group = root
# - our umask
# umask = 0077
# and where to record our PID
# pidfile= /var/run/pulse2-imaging-server.pid
[package-server]
# here are described how to talk to our referent package server
#
# host = 127.0.0.1
# port = 9990
# mount_point = /imaging_api
# enablessl = True
# username = username
# password = password
# cacert = /etc/mmc/pulse2/imaging-server/keys/cacert.pem
# localcert = /etc/mmc/pulse2/imaging-server/keys/privkey.pem
# verifypeer = False
[hooks]
# hooks_dir = /usr/lib/pulse2/imaging-server/hooks
#
# 0xAD => "I'm new" action : menu creation request, triggered by the bootloader
# arg[1] = source MAC (short)
# arg[2] = given ID (short)
# arg[3] = given PASSWORD (optional)
# exit 0 on success
# create_client_path = create_client
#
# 0xAA => "I just booted" action : menu update request + inventory processing request, triggered by the bootloader
# arg[1] = source MAC (short)
# boot_client_path = boot_client
# arg[1] = source MAC (short)
# arg[2] = where the inventory is temporarily stored
# process_inventory_path = process_inventory
#
# 0xEC => "I just started a backup" : start a backup request, triggered by revoinc
# arg[1] = source MAC (short)
# arg[2] = kind of backup (L = image, B = Master)
# exit 0 on success
# start_image_path = start_image
#
# 0xED => "I just finished a backup" : end a backup request, triggered by revodoneimage
# end_image_path = end_image
#
# 0xCD => "Change my default menu" => change default menu request, triggered by revodefault
# arg[1] = source MAC (short)
# arg[2] = item to use
# exit 0 on success
# change_default_path = change_default
#
# 0x4C => log stuff
# arg[1] = source MAC (short)
# arg[2] = action :
# 0 => booted
# 1 => took item arg[3]
# 2 => starting restoration (more info in arg[3])
# 3 => finished restoration (more info in arg[3])
# 4 => starting backup (more info in arg[3])
# 5 => finished backup (more info in arg[3])
# 6 => started postinst
# 7 => finished postinst
# 8 => critical error
# arg[3] = optional, see upper
# exit 0 on success
# log_action_path = log_action
#
# 0x1A => asks for its UUID
# arg[1] = source MAC (short)
# exit 0 on success, UUID is the last line on stdout
# get_uuid_path = get_uuid
#
# 0x1B => asks for its Hostname
# arg[1] = source MAC (short)
# exit 0 on success, hostname is the last line on stdout
# get_hostname_path = get_hostname
#
# 0x54 => ask for time sync (for mtftp)
# arg[1] = source MAC (short)
# arg[2] = computed sync
# exit 0 on success
# mtftp_sync_path = mtftp_sync
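The hook contract described above boils down to an executable receiving positional arguments that exits 0 on success; a minimal, hypothetical create_client hook written in Python could look like this (the real hooks shipped with Pulse 2 may differ):
#!/usr/bin/env python
# Hypothetical create_client hook: arg[1] = source MAC (short),
# arg[2] = given ID (short), arg[3] = given PASSWORD (optional).
import sys

def main():
    mac = sys.argv[1]
    client_id = sys.argv[2]
    password = sys.argv[3] if len(sys.argv) > 3 else None
    # A real hook would create the boot menu entry for this client here.
    print 'create_client: %s -> %s (password supplied: %s)' % (
        mac, client_id, password is not None)
    return 0

if __name__ == '__main__':
    sys.exit(main())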
[loggers]
keys = root
[handlers]
keys = hand01
[formatters]
keys = form01
[logger_root]
level = NOTSET
handlers = hand01
[handler_hand01]
class = FileHandler
level = INFO
formatter = form01
args = ("/var/log/mmc/pulse2-imaging-server.log",)
[formatter_form01]
format = %(asctime)s %(levelname)s %(message)s
[main]
# host =
# port = 9999
# ocsmapping = /etc/mmc/pulse2/inventory-server/OcsNGMap.xml
# enablessl = False
# verifypeer = False
# cacert = /etc/mmc/pulse2/inventory-server/keys/cacert.pem
# localcert = /etc/mmc/pulse2/inventory-server/keys/privkey.pem
# to put the data from the registry as hostname
# hostname = Hardware/Host # by default
# default_entity = .
# entities_rules_file =
[database]
# dbdriver = mysql
# dbhost = localhost
# dbport = 3306
# dbname = inventory
# dbuser = mmc
# dbpasswd = mmc
# dbpoolrecycle = 60
# dbsslenable = 0
# dbsslca = /etc/mmc/pulse2/inventory/cacert.pem
# dbsslcert = /etc/mmc/pulse2/inventory/cert.pem
# dbsslkey = /etc/mmc/pulse2/inventory/key.pem
[daemon]
# pidfile = /var/run/pulse2-inventory-server.pid
# user = root
# group = root
# umask = 0077
# Example of non-root execution settings:
# user = mmc
# group = mmc
# umask = 0007
# [option_01]
# NAME = REGISTRY
# PARAM_01 = NAME::srvcomment||REGKEY::SYSTEM\CurrentControlSet\Services\lanmanserver\parameters||REGTREE::2##srvcomment
# PARAM_02 = NAME::DisplayName||REGKEY::SYSTEM\CurrentControlSet\Services\lanmanserver||REGTREE::2##DisplayName
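Each PARAM_xx value packs several fields separated by || (and :: within a field), with a ## suffix; the sketch below shows one plausible way to split such a value apart, assuming the ## part names the inventory column (that reading of ## is an assumption, not documented here).

# Sketch: splitting a PARAM_xx registry-query definition into its fields.
# The meaning of the "##" suffix is assumed, not taken from the documentation.
def parse_param(value):
    value, _, column = value.partition("##")
    fields = dict(item.split("::", 1) for item in value.split("||"))
    fields["COLUMN"] = column
    return fields

print(parse_param(
    "NAME::srvcomment"
    "||REGKEY::SYSTEM\\CurrentControlSet\\Services\\lanmanserver\\parameters"
    "||REGTREE::2##srvcomment"))
# -> {'NAME': 'srvcomment', 'REGKEY': '...', 'REGTREE': '2', 'COLUMN': 'srvcomment'}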
[loggers]
keys=root
[handlers]
keys=hand01,hand02
[formatters]
keys=form01
[logger_root]
level=NOTSET
handlers=hand01
[handler_hand01]
class=FileHandler
level=INFO
formatter=form01
args=("/var/log/mmc/pulse2-inventory-server.log",)
[handler_hand02]
class=StreamHandler
level=DEBUG
args=(sys.stderr,)
[formatter_form01]
format=%(asctime)s %(levelname)s %(message)s
[loggers]
keys = root
[handlers]
keys = hand01
[formatters]
keys = form01
[logger_root]
level = NOTSET
handlers = hand01
[handler_hand01]
class = FileHandler
level = INFO
formatter = form01
args = ("/var/log/mmc/pulse2-launcher-01.log",)
[formatter_form01]
format = %(asctime)s %(levelname)s %(message)s
[main]
server =
port = 9999
command_name = C:\Program Files\OCS Inventory Agent\OCSInventory.exe
command_attr = /SERVER:127.0.0.1 /PNUM:9999
enablessl = True
verifypeer = False
cacert = cacert.pem
localcert = privkey.pem
# [polling]
# activate = 0
# type = reg
# time = 600
# path = HKEY_LOCAL_MACHINE\Software\Mandriva\Inventory\Client\do_inventory
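command_name and command_attr describe the inventory binary and its arguments as two separate strings; the following is a hedged sketch (not the actual agent code) of how such a pair might be launched.

# Sketch: launching the configured inventory command from command_name / command_attr.
import subprocess

command_name = r"C:\Program Files\OCS Inventory Agent\OCSInventory.exe"
command_attr = "/SERVER:127.0.0.1 /PNUM:9999"

returncode = subprocess.call([command_name] + command_attr.split())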
[loggers]
keys = root
[handlers]
keys = hand01
[formatters]
keys = form01
[logger_root]
level = NOTSET
handlers = hand01
[handler_hand01]
class = handlers.NTEventLogHandler
level = INFO
formatter = form01
args = ("Pulse 2 Proxy SSL",)
# [handler_hand01]
# class = handlers.RotatingFileHandler
# level = DEBUG
# formatter = form01
# args = ("C:\\Program Files\\Mandriva\\Pulse2 Inventory SSL Proxy\\log.txt", "a", 100*1024, 5)
[formatter_form01]
format = %(asctime)s %(levelname)s %(message)s
# XML update adds extra information to the OCS XML output
[xmlupdate]
# execute the update
enable = False
# keep a local copy of the updated XML
keepxmlupdate = False
# report detected software updates
updatedetection = False
# add software icons to XML output
addicon = False
# add OCS execution debug to OCS XML output
[ocsdebug]
enable = False
[main]
# server =
# port = 9999
# path = /
# tmpdirname = /tmp/Pulse2InventoryProxy
# command_name = /usr/local/bin/ocsinventory-agent
# command_attr = -l /tmp/Pulse2InventoryProxy
# enablessl = True
# key_file = conf/key/privkey.pem
# cert_file = conf/key/cacert.pem