Commit 700475b3 authored by Alain Takoudjou's avatar Alain Takoudjou

Merge branch 'master' into 1.0

parents 9418c896 fcfabeb4
Changes
=======
1.0.32 (unreleased)
1.0.35 (2016-09-19)
-------------------
* pbs: fix and accelerate deployment of resilient instances
* recipe: new recipe to get a free network port
* Remove url-list parameter to download fonts from fontconfig instance
1.0.31 (2016-05-30)
......
......@@ -12,8 +12,8 @@ parts =
[curl]
recipe = slapos.recipe.cmmi
url = http://curl.haxx.se/download/curl-7.50.1.tar.bz2
md5sum = 015f6a0217ca6f2c5442ca406476920b
url = http://curl.haxx.se/download/curl-7.50.3.tar.bz2
md5sum = bd177fd6deecce00cfa7b5916d831c5e
configure-options =
--disable-static
--disable-ldap
......
# https://mail.zope.org/pipermail/zodb-dev/2014-February/015182.html
diff --git a/src/ZODB/FileStorage/FileStorage.py b/src/ZODB/FileStorage/FileStorage.py
index d45cbbf..d662bf4 100644
--- a/src/ZODB/FileStorage/FileStorage.py
+++ b/src/ZODB/FileStorage/FileStorage.py
@@ -683,6 +683,7 @@ def tpc_vote(self, transaction):
# Hm, an error occurred writing out the data. Maybe the
# disk is full. We don't want any turd at the end.
self._file.truncate(self._pos)
+ self._files.flush()
raise
self._nextpos = self._pos + (tl + 8)
@@ -737,6 +738,7 @@ def _finish_finish(self, tid):
def _abort(self):
if self._nextpos:
self._file.truncate(self._pos)
+ self._files.flush()
self._nextpos=0
self._blob_tpc_abort()
@@ -1996,6 +1998,15 @@ def __init__(self, file_name):
self._out = []
self._cond = threading.Condition()
+ def flush(self):
+ """Empty read buffers.
+
+ This is required if they may contain data of rolled back transactions.
+ """
+ with self.write_lock():
+ for f in self._files:
+ f.flush()
+
@contextlib.contextmanager
def write_lock(self):
with self._cond:
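For context, a minimal sketch (hypothetical file names, plain Python files standing in for the pooled handles) of why the read buffers must be emptied: a buffered reader that has already prefetched bytes can keep serving data that a later truncate() removed, i.e. exactly the rolled-back data the flush() added above is meant to drop.
---- 8< ----
import os, tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
with open(path, 'wb') as w:
    w.write(b'A' * 10 + b'B' * 10)   # the B's stand for a rolled-back write

reader = open(path, 'rb')            # stands in for a pooled read handle
reader.read(10)                      # returns the A's, but buffers the whole file

with open(path, 'r+b') as w:
    w.truncate(10)                   # "abort": the B's are gone on disk

print(reader.read())                 # may still print b'BBBBBBBBBB' from its buffer
reader.close()
os.remove(path)
---- 8< ----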
# https://github.com/zopefoundation/ZODB/pull/15/files
diff -ur a/src/ZODB/FileStorage/FileStorage.py b/src/ZODB/FileStorage/FileStorage.py
--- a/src/ZODB/FileStorage/FileStorage.py
+++ b/src/ZODB/FileStorage/FileStorage.py
@@ -430,7 +430,7 @@
if h.tid == serial:
break
pos = h.prev
- if not pos:
+ if h.tid < serial or not pos:
raise POSKeyError(oid)
if h.plen:
return self._file.read(h.plen)
From d387a425941b37b99355077657edf7a2f117cf47 Mon Sep 17 00:00:00 2001
From: Kirill Smelkov <kirr@nexedi.com>
Date: Thu, 21 Jul 2016 22:34:55 +0300
Subject: [PATCH] persistent: On deactivate release in-slots objects too
( This is backport of https://github.com/zopefoundation/persistent/pull/44
to ZODB-3.10 )
On ._p_deactivate() and ._p_invalidate(), when an object goes to ghost
state, objects referenced by all its attributes, except related to
persistence machinery, are released, this way freeing memory (if they
were referenced only from going-to-ghost object).
That's the idea - an object in ghost state is simply a stub, which loads
its content on first access (via hooking into get/set attr) while
occupying minimal memory in not-yet-loaded state.
However the above is not completely true right now, as currently on
ghostification only object's .__dict__ is released, while in-slots objects
are retained attached to ghost object staying in RAM:
---- 8< ----
from ZODB import DB
from persistent import Persistent
import gc
db = DB(None)
jar = db.open()
class C:
def __init__(self, v):
self.v = v
def __del__(self):
print 'released (%s)' % self.v
class P1(Persistent):
pass
class P2(Persistent):
__slots__ = ('aaa')
p1 = P1()
jar.add(p1)
p1.aaa = C(1)
p2 = P2()
jar.add(p2)
p2.aaa = C(2)
p1._p_invalidate()
# "released (1)" is printed
p2._p_invalidate()
gc.collect()
# "released (2)" is NOT printed <--
---- 8< ----
So teach ghostify() & friends to release objects in slots to free-up
memory when an object goes to ghost state.
NOTE PyErr_Occurred() added after ghostify() calls because
pickle_slotnames() can raise an error, but we do not want to change
ghostify() prototype for backward compatibility reason - as it is used
in cPersistenceCAPIstruct.
( I hit this bug with wendelin.core which uses proxies to load
data from DB to virtual memory manager and then deactivate proxy right
after load has been completed:
https://lab.nexedi.com/nexedi/wendelin.core/blob/f7803634/bigfile/file_zodb.py#L239
https://lab.nexedi.com/nexedi/wendelin.core/blob/f7803634/bigfile/file_zodb.py#L295 )
---
src/persistent/cPersistence.c | 41 +++++++++++++++++++++++++++++++++-
src/persistent/tests/testPersistent.py | 24 ++++++++++++++++++++
2 files changed, 64 insertions(+), 1 deletion(-)
diff --git a/src/persistent/cPersistence.c b/src/persistent/cPersistence.c
index b4a185c..28d1f9a 100644
--- a/src/persistent/cPersistence.c
+++ b/src/persistent/cPersistence.c
@@ -75,6 +75,7 @@ fatal_1350(cPersistentObject *self, const char *caller, const char *detail)
#endif
static void ghostify(cPersistentObject*);
+static PyObject * pickle_slotnames(PyTypeObject *cls);
/* Load the state of the object, unghostifying it. Upon success, return 1.
* If an error occurred, re-ghostify the object and return -1.
@@ -141,7 +142,7 @@ accessed(cPersistentObject *self)
static void
ghostify(cPersistentObject *self)
{
- PyObject **dictptr;
+ PyObject **dictptr, *slotnames;
/* are we already a ghost? */
if (self->state == cPersistent_GHOST_STATE)
@@ -171,6 +172,8 @@ ghostify(cPersistentObject *self)
_estimated_size_in_bytes(self->estimated_size);
ring_del(&self->ring);
self->state = cPersistent_GHOST_STATE;
+
+ /* clear __dict__ */
dictptr = _PyObject_GetDictPtr((PyObject *)self);
if (dictptr && *dictptr)
{
@@ -178,6 +181,38 @@ ghostify(cPersistentObject *self)
*dictptr = NULL;
}
+ /* clear all slots besides _p_* */
+ slotnames = pickle_slotnames(Py_TYPE(self));
+ if (slotnames && slotnames != Py_None)
+ {
+ int i;
+
+ for (i = 0; i < PyList_GET_SIZE(slotnames); i++)
+ {
+ PyObject *name;
+ char *cname;
+ int is_special;
+
+ name = PyList_GET_ITEM(slotnames, i);
+ if (PyBytes_Check(name))
+ {
+ cname = PyBytes_AS_STRING(name);
+ is_special = !strncmp(cname, "_p_", 3);
+ if (is_special) /* skip persistent */
+ {
+ continue;
+ }
+ }
+
+ /* NOTE: this skips our delattr hook */
+ if (PyObject_GenericSetAttr((PyObject *)self, name, NULL) < 0)
+ /* delattr of non-set slot will raise AttributeError - we
+ * simply ignore. */
+ PyErr_Clear();
+ }
+ }
+ Py_XDECREF(slotnames);
+
/* We remove the reference to the just ghosted object that the ring
* holds. Note that the dictionary of oids->objects has an uncounted
* reference, so if the ring's reference was the only one, this frees
@@ -261,6 +296,8 @@ Per__p_deactivate(cPersistentObject *self)
called directly. Methods that override this need to
do the same! */
ghostify(self);
+ if (PyErr_Occurred())
+ return NULL;
}
Py_INCREF(Py_None);
@@ -289,6 +326,8 @@ Per__p_invalidate(cPersistentObject *self)
if (Per_set_changed(self, NULL) < 0)
return NULL;
ghostify(self);
+ if (PyErr_Occurred())
+ return NULL;
}
Py_INCREF(Py_None);
return Py_None;
diff --git a/src/persistent/tests/testPersistent.py b/src/persistent/tests/testPersistent.py
index 51e0382..fdb8b67 100644
--- a/src/persistent/tests/testPersistent.py
+++ b/src/persistent/tests/testPersistent.py
@@ -180,6 +180,30 @@ class PersistenceTest(unittest.TestCase):
self.assertEqual(obj._p_changed, None)
self.assertEqual(obj._p_state, GHOST)
+ def test__p_invalidate_from_changed_w_slots(self):
+ from persistent import Persistent
+ class Derived(Persistent):
+ __slots__ = ('myattr1', 'myattr2')
+ def __init__(self):
+ self.myattr1 = 'value1'
+ self.myattr2 = 'value2'
+ obj = Derived()
+ jar = self._makeJar()
+ jar.add(obj)
+ obj._p_activate()
+ obj._p_changed = True
+ jar._loaded = []
+ jar._registered = []
+ self.assertEqual(Derived.myattr1.__get__(obj), 'value1')
+ self.assertEqual(Derived.myattr2.__get__(obj), 'value2')
+ obj._p_invalidate()
+ self.assertIs(obj._p_changed, None)
+ self.assertEqual(list(jar._loaded), [])
+ self.assertRaises(AttributeError, lambda: Derived.myattr1.__get__(obj))
+ self.assertRaises(AttributeError, lambda: Derived.myattr2.__get__(obj))
+ self.assertEqual(list(jar._loaded), [])
+ self.assertEqual(list(jar._registered), [])
+
def test_initial_serial(self):
NOSERIAL = "\000" * 8
obj = self._makeOne()
--
2.9.2.701.gf965a18.dirty
[buildout]
extends =
../bison/buildout.cfg
../m4/buildout.cfg
../xz-utils/buildout.cfg
parts =
......@@ -11,4 +12,4 @@ url = http://downloads.sourceforge.net/project/flex/flex-2.6.0.tar.xz
md5sum = 3cbbfa1554d0b75fad9f8100732454de
environment =
M4=${m4:location}/bin/m4
PATH=${xz-utils:location}/bin:%(PATH)s
PATH=${bison:location}/bin:${xz-utils:location}/bin:%(PATH)s
From 29a11774d8ebbafe8418b4a5ffb4cc1160b194a1 Mon Sep 17 00:00:00 2001
From: Pascal Cuoq <cuoq@trust-in-soft.com>
Date: Sun, 15 May 2016 09:05:46 +0200
Subject: [PATCH] Avoid relying on undefined behavior in CVE-2015-1283 fix. It
does not really work: https://godbolt.org/g/Zl8gdF
---
expat/lib/xmlparse.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/lib/xmlparse.c b/lib/xmlparse.c
index 13e080d..cdb12ef 100644
--- a/lib/xmlparse.c
+++ b/lib/xmlparse.c
@@ -1695,7 +1695,8 @@ XML_GetBuffer(XML_Parser parser, int len
}
if (len > bufferLim - bufferEnd) {
- int neededSize = len + (int)(bufferEnd - bufferPtr);
+ /* Do not invoke signed arithmetic overflow: */
+ int neededSize = (int) ((unsigned)len + (unsigned)(bufferEnd - bufferPtr));
/* BEGIN MOZILLA CHANGE (sanity check neededSize) */
if (neededSize < 0) {
errorCode = XML_ERROR_NO_MEMORY;
@@ -1729,7 +1730,8 @@ XML_GetBuffer(XML_Parser parser, int len
if (bufferSize == 0)
bufferSize = INIT_BUFFER_SIZE;
do {
- bufferSize *= 2;
+ /* Do not invoke signed arithmetic overflow: */
+ bufferSize = (int) (2U * (unsigned) bufferSize);
/* BEGIN MOZILLA CHANGE (prevent infinite loop on overflow) */
} while (bufferSize < neededSize && bufferSize > 0);
/* END MOZILLA CHANGE */
--
2.8.2
Description: fix multiple integer overflows in the XML_GetBuffer function
Multiple integer overflows in the XML_GetBuffer function in Expat through
2.1.0, as used in Google Chrome before 44.0.2403.89 and other products,
allow remote attackers to cause a denial of service (heap-based buffer
overflow) or possibly have unspecified other impact via crafted XML data,
a related issue to CVE-2015-2716.
Origin: Mozilla, https://hg.mozilla.org/releases/mozilla-esr31/rev/2f3e78643f5c
Author: Eric Rahm <erahm@mozilla.com>
Forwarded: not-needed
Last-Update: 2015-07-24
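The Mozilla change stops the doubling loop once bufferSize wraps non-positive; below is a rough Python stand-in for the 32-bit int arithmetic (illustrative only; in real C the signed overflow itself is undefined behavior, which is what the separate refix patch above addresses).
---- 8< ----
def to_int32(x):
    """Emulate 32-bit two's-complement wraparound."""
    x &= 0xFFFFFFFF
    return x - 0x100000000 if x & 0x80000000 else x

buffer_size = 1024            # INIT_BUFFER_SIZE-like starting point
needed_size = 2**31 - 10      # attacker-controlled, close to INT_MAX

while buffer_size < needed_size and buffer_size > 0:   # the added "> 0" guard
    buffer_size = to_int32(buffer_size * 2)

# Without the guard the loop would spin forever once buffer_size wraps to 0;
# with it, the wrapped value (-2147483648 here) falls through to the
# "bufferSize <= 0" check that returns XML_ERROR_NO_MEMORY.
print(buffer_size)
---- 8< ----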
diff --git a/lib/xmlparse.c b/lib/xmlparse.c
--- a/lib/xmlparse.c
+++ b/lib/xmlparse.c
@@ -1673,29 +1673,40 @@ XML_ParseBuffer(XML_Parser parser, int l
XmlUpdatePosition(encoding, positionPtr, bufferPtr, &position);
positionPtr = bufferPtr;
return result;
}
void * XMLCALL
XML_GetBuffer(XML_Parser parser, int len)
{
+/* BEGIN MOZILLA CHANGE (sanity check len) */
+ if (len < 0) {
+ errorCode = XML_ERROR_NO_MEMORY;
+ return NULL;
+ }
+/* END MOZILLA CHANGE */
switch (ps_parsing) {
case XML_SUSPENDED:
errorCode = XML_ERROR_SUSPENDED;
return NULL;
case XML_FINISHED:
errorCode = XML_ERROR_FINISHED;
return NULL;
default: ;
}
if (len > bufferLim - bufferEnd) {
- /* FIXME avoid integer overflow */
int neededSize = len + (int)(bufferEnd - bufferPtr);
+/* BEGIN MOZILLA CHANGE (sanity check neededSize) */
+ if (neededSize < 0) {
+ errorCode = XML_ERROR_NO_MEMORY;
+ return NULL;
+ }
+/* END MOZILLA CHANGE */
#ifdef XML_CONTEXT_BYTES
int keep = (int)(bufferPtr - buffer);
if (keep > XML_CONTEXT_BYTES)
keep = XML_CONTEXT_BYTES;
neededSize += keep;
#endif /* defined XML_CONTEXT_BYTES */
if (neededSize <= bufferLim - buffer) {
@@ -1714,17 +1725,25 @@ XML_GetBuffer(XML_Parser parser, int len
}
else {
char *newBuf;
int bufferSize = (int)(bufferLim - bufferPtr);
if (bufferSize == 0)
bufferSize = INIT_BUFFER_SIZE;
do {
bufferSize *= 2;
- } while (bufferSize < neededSize);
+/* BEGIN MOZILLA CHANGE (prevent infinite loop on overflow) */
+ } while (bufferSize < neededSize && bufferSize > 0);
+/* END MOZILLA CHANGE */
+/* BEGIN MOZILLA CHANGE (sanity check bufferSize) */
+ if (bufferSize <= 0) {
+ errorCode = XML_ERROR_NO_MEMORY;
+ return NULL;
+ }
+/* END MOZILLA CHANGE */
newBuf = (char *)MALLOC(bufferSize);
if (newBuf == 0) {
errorCode = XML_ERROR_NO_MEMORY;
return NULL;
}
bufferLim = newBuf + bufferSize;
#ifdef XML_CONTEXT_BYTES
if (bufferPtr) {
From cdfcb1b5c95e93b00ae9e9d25708b4a3bee72c15 Mon Sep 17 00:00:00 2001
From: Sebastian Pipping <sebastian@pipping.org>
Date: Mon, 2 May 2016 00:02:44 +0200
Subject: [PATCH] Address CVE-2016-0718 (/patch/ version 2.2.1)
* Out of bounds memory access when doing text conversion on malformed input
* Integer overflow related to memory allocation
Reported by Gustavo Grieco
Patch credits go to
* Christian Heimes
* Karl Waclawek
* Gustavo Grieco
* Sebastian Pipping
* Pascal Cuoq
---
expat/lib/xmlparse.c | 34 +++++++++-----
expat/lib/xmltok.c | 115 +++++++++++++++++++++++++++++++++++-------------
expat/lib/xmltok.h | 10 ++++-
expat/lib/xmltok_impl.c | 62 +++++++++++++-------------
4 files changed, 146 insertions(+), 75 deletions(-)
diff --git a/lib/xmlparse.c b/lib/xmlparse.c
index e308c79..13e080d 100644
--- a/lib/xmlparse.c
+++ b/lib/xmlparse.c
@@ -2436,11 +2436,11 @@ doContent(XML_Parser parser,
for (;;) {
int bufSize;
int convLen;
- XmlConvert(enc,
+ const enum XML_Convert_Result convert_res = XmlConvert(enc,
&fromPtr, rawNameEnd,
(ICHAR **)&toPtr, (ICHAR *)tag->bufEnd - 1);
convLen = (int)(toPtr - (XML_Char *)tag->buf);
- if (fromPtr == rawNameEnd) {
+ if ((convert_res == XML_CONVERT_COMPLETED) || (convert_res == XML_CONVERT_INPUT_INCOMPLETE)) {
tag->name.strLen = convLen;
break;
}
@@ -2661,11 +2661,11 @@ doContent(XML_Parser parser,
if (MUST_CONVERT(enc, s)) {
for (;;) {
ICHAR *dataPtr = (ICHAR *)dataBuf;
- XmlConvert(enc, &s, next, &dataPtr, (ICHAR *)dataBufEnd);
+ const enum XML_Convert_Result convert_res = XmlConvert(enc, &s, next, &dataPtr, (ICHAR *)dataBufEnd);
*eventEndPP = s;
charDataHandler(handlerArg, dataBuf,
(int)(dataPtr - (ICHAR *)dataBuf));
- if (s == next)
+ if ((convert_res == XML_CONVERT_COMPLETED) || (convert_res == XML_CONVERT_INPUT_INCOMPLETE))
break;
*eventPP = s;
}
@@ -3269,11 +3269,11 @@ doCdataSection(XML_Parser parser,
if (MUST_CONVERT(enc, s)) {
for (;;) {
ICHAR *dataPtr = (ICHAR *)dataBuf;
- XmlConvert(enc, &s, next, &dataPtr, (ICHAR *)dataBufEnd);
+ const enum XML_Convert_Result convert_res = XmlConvert(enc, &s, next, &dataPtr, (ICHAR *)dataBufEnd);
*eventEndPP = next;
charDataHandler(handlerArg, dataBuf,
(int)(dataPtr - (ICHAR *)dataBuf));
- if (s == next)
+ if ((convert_res == XML_CONVERT_COMPLETED) || (convert_res == XML_CONVERT_INPUT_INCOMPLETE))
break;
*eventPP = s;
}
@@ -5350,6 +5350,7 @@ reportDefault(XML_Parser parser, const ENCODING *enc,
const char *s, const char *end)
{
if (MUST_CONVERT(enc, s)) {
+ enum XML_Convert_Result convert_res;
const char **eventPP;
const char **eventEndPP;
if (enc == encoding) {
@@ -5362,11 +5363,11 @@ reportDefault(XML_Parser parser, const ENCODING *enc,
}
do {
ICHAR *dataPtr = (ICHAR *)dataBuf;
- XmlConvert(enc, &s, end, &dataPtr, (ICHAR *)dataBufEnd);
+ convert_res = XmlConvert(enc, &s, end, &dataPtr, (ICHAR *)dataBufEnd);
*eventEndPP = s;
defaultHandler(handlerArg, dataBuf, (int)(dataPtr - (ICHAR *)dataBuf));
*eventPP = s;
- } while (s != end);
+ } while ((convert_res != XML_CONVERT_COMPLETED) && (convert_res != XML_CONVERT_INPUT_INCOMPLETE));
}
else
defaultHandler(handlerArg, (XML_Char *)s, (int)((XML_Char *)end - (XML_Char *)s));
@@ -6169,8 +6170,8 @@ poolAppend(STRING_POOL *pool, const ENCODING *enc,
if (!pool->ptr && !poolGrow(pool))
return NULL;
for (;;) {
- XmlConvert(enc, &ptr, end, (ICHAR **)&(pool->ptr), (ICHAR *)pool->end);
- if (ptr == end)
+ const enum XML_Convert_Result convert_res = XmlConvert(enc, &ptr, end, (ICHAR **)&(pool->ptr), (ICHAR *)pool->end);
+ if ((convert_res == XML_CONVERT_COMPLETED) || (convert_res == XML_CONVERT_INPUT_INCOMPLETE))
break;
if (!poolGrow(pool))
return NULL;
@@ -6254,8 +6255,13 @@ poolGrow(STRING_POOL *pool)
}
}
if (pool->blocks && pool->start == pool->blocks->s) {
- int blockSize = (int)(pool->end - pool->start)*2;
- BLOCK *temp = (BLOCK *)
+ BLOCK *temp;
+ int blockSize = (int)((unsigned)(pool->end - pool->start)*2U);
+
+ if (blockSize < 0)
+ return XML_FALSE;
+
+ temp = (BLOCK *)
pool->mem->realloc_fcn(pool->blocks,
(offsetof(BLOCK, s)
+ blockSize * sizeof(XML_Char)));
@@ -6270,6 +6276,10 @@ poolGrow(STRING_POOL *pool)
else {
BLOCK *tem;
int blockSize = (int)(pool->end - pool->start);
+
+ if (blockSize < 0)
+ return XML_FALSE;
+
if (blockSize < INIT_BLOCK_SIZE)
blockSize = INIT_BLOCK_SIZE;
else
diff --git a/lib/xmltok.c b/lib/xmltok.c
index bf09dfc..cb98ce1 100644
--- a/lib/xmltok.c
+++ b/lib/xmltok.c
@@ -318,39 +318,55 @@ enum { /* UTF8_cvalN is value of masked first byte of N byte sequence */
UTF8_cval4 = 0xf0
};
-static void PTRCALL
+static enum XML_Convert_Result PTRCALL
utf8_toUtf8(const ENCODING *enc,
const char **fromP, const char *fromLim,
char **toP, const char *toLim)
{
+ enum XML_Convert_Result res = XML_CONVERT_COMPLETED;
char *to;
const char *from;
if (fromLim - *fromP > toLim - *toP) {
/* Avoid copying partial characters. */
+ res = XML_CONVERT_OUTPUT_EXHAUSTED;
for (fromLim = *fromP + (toLim - *toP); fromLim > *fromP; fromLim--)
if (((unsigned char)fromLim[-1] & 0xc0) != 0x80)
break;
}
- for (to = *toP, from = *fromP; from != fromLim; from++, to++)
+ for (to = *toP, from = *fromP; (from < fromLim) && (to < toLim); from++, to++)
*to = *from;
*fromP = from;
*toP = to;
+
+ if ((to == toLim) && (from < fromLim))
+ return XML_CONVERT_OUTPUT_EXHAUSTED;
+ else
+ return res;
}
-static void PTRCALL
+static enum XML_Convert_Result PTRCALL
utf8_toUtf16(const ENCODING *enc,
const char **fromP, const char *fromLim,
unsigned short **toP, const unsigned short *toLim)
{
+ enum XML_Convert_Result res = XML_CONVERT_COMPLETED;
unsigned short *to = *toP;
const char *from = *fromP;
- while (from != fromLim && to != toLim) {
+ while (from < fromLim && to < toLim) {
switch (((struct normal_encoding *)enc)->type[(unsigned char)*from]) {
case BT_LEAD2:
+ if (fromLim - from < 2) {
+ res = XML_CONVERT_INPUT_INCOMPLETE;
+ break;
+ }
*to++ = (unsigned short)(((from[0] & 0x1f) << 6) | (from[1] & 0x3f));
from += 2;
break;
case BT_LEAD3:
+ if (fromLim - from < 3) {
+ res = XML_CONVERT_INPUT_INCOMPLETE;
+ break;
+ }
*to++ = (unsigned short)(((from[0] & 0xf) << 12)
| ((from[1] & 0x3f) << 6) | (from[2] & 0x3f));
from += 3;
@@ -358,8 +374,14 @@ utf8_toUtf16(const ENCODING *enc,
case BT_LEAD4:
{
unsigned long n;
- if (to + 1 == toLim)
+ if (toLim - to < 2) {
+ res = XML_CONVERT_OUTPUT_EXHAUSTED;
goto after;
+ }
+ if (fromLim - from < 4) {
+ res = XML_CONVERT_INPUT_INCOMPLETE;
+ goto after;
+ }
n = ((from[0] & 0x7) << 18) | ((from[1] & 0x3f) << 12)
| ((from[2] & 0x3f) << 6) | (from[3] & 0x3f);
n -= 0x10000;
@@ -377,6 +399,7 @@ utf8_toUtf16(const ENCODING *enc,
after:
*fromP = from;
*toP = to;
+ return res;
}
#ifdef XML_NS
@@ -425,7 +448,7 @@ static const struct normal_encoding internal_utf8_encoding = {
STANDARD_VTABLE(sb_) NORMAL_VTABLE(utf8_)
};
-static void PTRCALL
+static enum XML_Convert_Result PTRCALL
latin1_toUtf8(const ENCODING *enc,
const char **fromP, const char *fromLim,
char **toP, const char *toLim)
@@ -433,30 +456,35 @@ latin1_toUtf8(const ENCODING *enc,
for (;;) {
unsigned char c;
if (*fromP == fromLim)
- break;
+ return XML_CONVERT_COMPLETED;
c = (unsigned char)**fromP;
if (c & 0x80) {
if (toLim - *toP < 2)
- break;
+ return XML_CONVERT_OUTPUT_EXHAUSTED;
*(*toP)++ = (char)((c >> 6) | UTF8_cval2);
*(*toP)++ = (char)((c & 0x3f) | 0x80);
(*fromP)++;
}
else {
if (*toP == toLim)
- break;
+ return XML_CONVERT_OUTPUT_EXHAUSTED;
*(*toP)++ = *(*fromP)++;
}
}
}
-static void PTRCALL
+static enum XML_Convert_Result PTRCALL
latin1_toUtf16(const ENCODING *enc,
const char **fromP, const char *fromLim,
unsigned short **toP, const unsigned short *toLim)
{
- while (*fromP != fromLim && *toP != toLim)
+ while (*fromP < fromLim && *toP < toLim)
*(*toP)++ = (unsigned char)*(*fromP)++;
+
+ if ((*toP == toLim) && (*fromP < fromLim))
+ return XML_CONVERT_OUTPUT_EXHAUSTED;
+ else
+ return XML_CONVERT_COMPLETED;
}
#ifdef XML_NS
@@ -483,13 +511,18 @@ static const struct normal_encoding latin1_encoding = {
STANDARD_VTABLE(sb_)
};
-static void PTRCALL
+static enum XML_Convert_Result PTRCALL
ascii_toUtf8(const ENCODING *enc,
const char **fromP, const char *fromLim,
char **toP, const char *toLim)
{
- while (*fromP != fromLim && *toP != toLim)
+ while (*fromP < fromLim && *toP < toLim)
*(*toP)++ = *(*fromP)++;
+
+ if ((*toP == toLim) && (*fromP < fromLim))
+ return XML_CONVERT_OUTPUT_EXHAUSTED;
+ else
+ return XML_CONVERT_COMPLETED;
}
#ifdef XML_NS
@@ -536,13 +569,14 @@ unicode_byte_type(char hi, char lo)
}
#define DEFINE_UTF16_TO_UTF8(E) \
-static void PTRCALL \
+static enum XML_Convert_Result PTRCALL \
E ## toUtf8(const ENCODING *enc, \
const char **fromP, const char *fromLim, \
char **toP, const char *toLim) \
{ \
- const char *from; \
- for (from = *fromP; from != fromLim; from += 2) { \
+ const char *from = *fromP; \
+ fromLim = from + (((fromLim - from) >> 1) << 1); /* shrink to even */ \
+ for (; from < fromLim; from += 2) { \
int plane; \
unsigned char lo2; \
unsigned char lo = GET_LO(from); \
@@ -552,7 +586,7 @@ E ## toUtf8(const ENCODING *enc, \
if (lo < 0x80) { \
if (*toP == toLim) { \
*fromP = from; \
- return; \
+ return XML_CONVERT_OUTPUT_EXHAUSTED; \
} \
*(*toP)++ = lo; \
break; \
@@ -562,7 +596,7 @@ E ## toUtf8(const ENCODING *enc, \
case 0x4: case 0x5: case 0x6: case 0x7: \
if (toLim - *toP < 2) { \
*fromP = from; \
- return; \
+ return XML_CONVERT_OUTPUT_EXHAUSTED; \
} \
*(*toP)++ = ((lo >> 6) | (hi << 2) | UTF8_cval2); \
*(*toP)++ = ((lo & 0x3f) | 0x80); \
@@ -570,7 +604,7 @@ E ## toUtf8(const ENCODING *enc, \
default: \
if (toLim - *toP < 3) { \
*fromP = from; \
- return; \
+ return XML_CONVERT_OUTPUT_EXHAUSTED; \
} \
/* 16 bits divided 4, 6, 6 amongst 3 bytes */ \
*(*toP)++ = ((hi >> 4) | UTF8_cval3); \
@@ -580,7 +614,11 @@ E ## toUtf8(const ENCODING *enc, \
case 0xD8: case 0xD9: case 0xDA: case 0xDB: \
if (toLim - *toP < 4) { \
*fromP = from; \
- return; \
+ return XML_CONVERT_OUTPUT_EXHAUSTED; \
+ } \
+ if (fromLim - from < 4) { \
+ *fromP = from; \
+ return XML_CONVERT_INPUT_INCOMPLETE; \
} \
plane = (((hi & 0x3) << 2) | ((lo >> 6) & 0x3)) + 1; \
*(*toP)++ = ((plane >> 2) | UTF8_cval4); \
@@ -596,20 +634,32 @@ E ## toUtf8(const ENCODING *enc, \
} \
} \
*fromP = from; \
+ if (from < fromLim) \
+ return XML_CONVERT_INPUT_INCOMPLETE; \
+ else \
+ return XML_CONVERT_COMPLETED; \
}
#define DEFINE_UTF16_TO_UTF16(E) \
-static void PTRCALL \
+static enum XML_Convert_Result PTRCALL \
E ## toUtf16(const ENCODING *enc, \
const char **fromP, const char *fromLim, \
unsigned short **toP, const unsigned short *toLim) \
{ \
+ enum XML_Convert_Result res = XML_CONVERT_COMPLETED; \
+ fromLim = *fromP + (((fromLim - *fromP) >> 1) << 1); /* shrink to even */ \
/* Avoid copying first half only of surrogate */ \
if (fromLim - *fromP > ((toLim - *toP) << 1) \
- && (GET_HI(fromLim - 2) & 0xF8) == 0xD8) \
+ && (GET_HI(fromLim - 2) & 0xF8) == 0xD8) { \
fromLim -= 2; \
- for (; *fromP != fromLim && *toP != toLim; *fromP += 2) \
+ res = XML_CONVERT_INPUT_INCOMPLETE; \
+ } \
+ for (; *fromP < fromLim && *toP < toLim; *fromP += 2) \
*(*toP)++ = (GET_HI(*fromP) << 8) | GET_LO(*fromP); \
+ if ((*toP == toLim) && (*fromP < fromLim)) \
+ return XML_CONVERT_OUTPUT_EXHAUSTED; \
+ else \
+ return res; \
}
#define SET2(ptr, ch) \
@@ -1288,7 +1338,7 @@ unknown_isInvalid(const ENCODING *enc, const char *p)
return (c & ~0xFFFF) || checkCharRefNumber(c) < 0;
}
-static void PTRCALL
+static enum XML_Convert_Result PTRCALL
unknown_toUtf8(const ENCODING *enc,
const char **fromP, const char *fromLim,
char **toP, const char *toLim)
@@ -1299,21 +1349,21 @@ unknown_toUtf8(const ENCODING *enc,
const char *utf8;
int n;
if (*fromP == fromLim)
- break;
+ return XML_CONVERT_COMPLETED;
utf8 = uenc->utf8[(unsigned char)**fromP];
n = *utf8++;
if (n == 0) {
int c = uenc->convert(uenc->userData, *fromP);
n = XmlUtf8Encode(c, buf);
if (n > toLim - *toP)
- break;
+ return XML_CONVERT_OUTPUT_EXHAUSTED;
utf8 = buf;
*fromP += (AS_NORMAL_ENCODING(enc)->type[(unsigned char)**fromP]
- (BT_LEAD2 - 2));
}
else {
if (n > toLim - *toP)
- break;
+ return XML_CONVERT_OUTPUT_EXHAUSTED;
(*fromP)++;
}
do {
@@ -1322,13 +1372,13 @@ unknown_toUtf8(const ENCODING *enc,
}
}
-static void PTRCALL
+static enum XML_Convert_Result PTRCALL
unknown_toUtf16(const ENCODING *enc,
const char **fromP, const char *fromLim,
unsigned short **toP, const unsigned short *toLim)
{
const struct unknown_encoding *uenc = AS_UNKNOWN_ENCODING(enc);
- while (*fromP != fromLim && *toP != toLim) {
+ while (*fromP < fromLim && *toP < toLim) {
unsigned short c = uenc->utf16[(unsigned char)**fromP];
if (c == 0) {
c = (unsigned short)
@@ -1340,6 +1390,11 @@ unknown_toUtf16(const ENCODING *enc,
(*fromP)++;
*(*toP)++ = c;
}
+
+ if ((*toP == toLim) && (*fromP < fromLim))
+ return XML_CONVERT_OUTPUT_EXHAUSTED;
+ else
+ return XML_CONVERT_COMPLETED;
}
ENCODING *
@@ -1503,7 +1558,7 @@ initScan(const ENCODING * const *encodingTable,
{
const ENCODING **encPtr;
- if (ptr == end)
+ if (ptr >= end)
return XML_TOK_NONE;
encPtr = enc->encPtr;
if (ptr + 1 == end) {
diff --git a/lib/xmltok.h b/lib/xmltok.h
index ca867aa..752007e 100644
--- a/lib/xmltok.h
+++ b/lib/xmltok.h
@@ -130,6 +130,12 @@ typedef int (PTRCALL *SCANNER)(const ENCODING *,
const char *,
const char **);
+enum XML_Convert_Result {
+ XML_CONVERT_COMPLETED = 0,
+ XML_CONVERT_INPUT_INCOMPLETE = 1,
+ XML_CONVERT_OUTPUT_EXHAUSTED = 2 /* and therefore potentially input remaining as well */
+};
+
struct encoding {
SCANNER scanners[XML_N_STATES];
SCANNER literalScanners[XML_N_LITERAL_TYPES];
@@ -158,12 +164,12 @@ struct encoding {
const char *ptr,
const char *end,
const char **badPtr);
- void (PTRCALL *utf8Convert)(const ENCODING *enc,
+ enum XML_Convert_Result (PTRCALL *utf8Convert)(const ENCODING *enc,
const char **fromP,
const char *fromLim,
char **toP,
const char *toLim);
- void (PTRCALL *utf16Convert)(const ENCODING *enc,
+ enum XML_Convert_Result (PTRCALL *utf16Convert)(const ENCODING *enc,
const char **fromP,
const char *fromLim,
unsigned short **toP,
diff --git a/lib/xmltok_impl.c b/lib/xmltok_impl.c
index 9c2895b..6c5a3ba 100644
--- a/lib/xmltok_impl.c
+++ b/lib/xmltok_impl.c
@@ -93,13 +93,13 @@ static int PTRCALL
PREFIX(scanComment)(const ENCODING *enc, const char *ptr,
const char *end, const char **nextTokPtr)
{
- if (ptr != end) {
+ if (ptr < end) {
if (!CHAR_MATCHES(enc, ptr, ASCII_MINUS)) {
*nextTokPtr = ptr;
return XML_TOK_INVALID;
}
ptr += MINBPC(enc);
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
INVALID_CASES(ptr, nextTokPtr)
case BT_MINUS:
@@ -147,7 +147,7 @@ PREFIX(scanDecl)(const ENCODING *enc, const char *ptr,
*nextTokPtr = ptr;
return XML_TOK_INVALID;
}
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
case BT_PERCNT:
if (ptr + MINBPC(enc) == end)
@@ -233,7 +233,7 @@ PREFIX(scanPi)(const ENCODING *enc, const char *ptr,
*nextTokPtr = ptr;
return XML_TOK_INVALID;
}
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
CHECK_NAME_CASES(enc, ptr, end, nextTokPtr)
case BT_S: case BT_CR: case BT_LF:
@@ -242,7 +242,7 @@ PREFIX(scanPi)(const ENCODING *enc, const char *ptr,
return XML_TOK_INVALID;
}
ptr += MINBPC(enc);
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
INVALID_CASES(ptr, nextTokPtr)
case BT_QUEST:
@@ -305,7 +305,7 @@ static int PTRCALL
PREFIX(cdataSectionTok)(const ENCODING *enc, const char *ptr,
const char *end, const char **nextTokPtr)
{
- if (ptr == end)
+ if (ptr >= end)
return XML_TOK_NONE;
if (MINBPC(enc) > 1) {
size_t n = end - ptr;
@@ -348,7 +348,7 @@ PREFIX(cdataSectionTok)(const ENCODING *enc, const char *ptr,
ptr += MINBPC(enc);
break;
}
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
#define LEAD_CASE(n) \
case BT_LEAD ## n: \
@@ -391,11 +391,11 @@ PREFIX(scanEndTag)(const ENCODING *enc, const char *ptr,
*nextTokPtr = ptr;
return XML_TOK_INVALID;
}
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
CHECK_NAME_CASES(enc, ptr, end, nextTokPtr)
case BT_S: case BT_CR: case BT_LF:
- for (ptr += MINBPC(enc); ptr != end; ptr += MINBPC(enc)) {
+ for (ptr += MINBPC(enc); ptr < end; ptr += MINBPC(enc)) {
switch (BYTE_TYPE(enc, ptr)) {
case BT_S: case BT_CR: case BT_LF:
break;
@@ -432,7 +432,7 @@ static int PTRCALL
PREFIX(scanHexCharRef)(const ENCODING *enc, const char *ptr,
const char *end, const char **nextTokPtr)
{
- if (ptr != end) {
+ if (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
case BT_DIGIT:
case BT_HEX:
@@ -441,7 +441,7 @@ PREFIX(scanHexCharRef)(const ENCODING *enc, const char *ptr,
*nextTokPtr = ptr;
return XML_TOK_INVALID;
}
- for (ptr += MINBPC(enc); ptr != end; ptr += MINBPC(enc)) {
+ for (ptr += MINBPC(enc); ptr < end; ptr += MINBPC(enc)) {
switch (BYTE_TYPE(enc, ptr)) {
case BT_DIGIT:
case BT_HEX:
@@ -464,7 +464,7 @@ static int PTRCALL
PREFIX(scanCharRef)(const ENCODING *enc, const char *ptr,
const char *end, const char **nextTokPtr)
{
- if (ptr != end) {
+ if (ptr < end) {
if (CHAR_MATCHES(enc, ptr, ASCII_x))
return PREFIX(scanHexCharRef)(enc, ptr + MINBPC(enc), end, nextTokPtr);
switch (BYTE_TYPE(enc, ptr)) {
@@ -474,7 +474,7 @@ PREFIX(scanCharRef)(const ENCODING *enc, const char *ptr,
*nextTokPtr = ptr;
return XML_TOK_INVALID;
}
- for (ptr += MINBPC(enc); ptr != end; ptr += MINBPC(enc)) {
+ for (ptr += MINBPC(enc); ptr < end; ptr += MINBPC(enc)) {
switch (BYTE_TYPE(enc, ptr)) {
case BT_DIGIT:
break;
@@ -506,7 +506,7 @@ PREFIX(scanRef)(const ENCODING *enc, const char *ptr, const char *end,
*nextTokPtr = ptr;
return XML_TOK_INVALID;
}
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
CHECK_NAME_CASES(enc, ptr, end, nextTokPtr)
case BT_SEMI:
@@ -529,7 +529,7 @@ PREFIX(scanAtts)(const ENCODING *enc, const char *ptr, const char *end,
#ifdef XML_NS
int hadColon = 0;
#endif
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
CHECK_NAME_CASES(enc, ptr, end, nextTokPtr)
#ifdef XML_NS
@@ -716,7 +716,7 @@ PREFIX(scanLt)(const ENCODING *enc, const char *ptr, const char *end,
hadColon = 0;
#endif
/* we have a start-tag */
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
CHECK_NAME_CASES(enc, ptr, end, nextTokPtr)
#ifdef XML_NS
@@ -740,7 +740,7 @@ PREFIX(scanLt)(const ENCODING *enc, const char *ptr, const char *end,
case BT_S: case BT_CR: case BT_LF:
{
ptr += MINBPC(enc);
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
CHECK_NMSTRT_CASES(enc, ptr, end, nextTokPtr)
case BT_GT:
@@ -785,7 +785,7 @@ static int PTRCALL
PREFIX(contentTok)(const ENCODING *enc, const char *ptr, const char *end,
const char **nextTokPtr)
{
- if (ptr == end)
+ if (ptr >= end)
return XML_TOK_NONE;
if (MINBPC(enc) > 1) {
size_t n = end - ptr;
@@ -832,7 +832,7 @@ PREFIX(contentTok)(const ENCODING *enc, const char *ptr, const char *end,
ptr += MINBPC(enc);
break;
}
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
#define LEAD_CASE(n) \
case BT_LEAD ## n: \
@@ -895,7 +895,7 @@ PREFIX(scanPercent)(const ENCODING *enc, const char *ptr, const char *end,
*nextTokPtr = ptr;
return XML_TOK_INVALID;
}
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
CHECK_NAME_CASES(enc, ptr, end, nextTokPtr)
case BT_SEMI:
@@ -921,7 +921,7 @@ PREFIX(scanPoundName)(const ENCODING *enc, const char *ptr, const char *end,
*nextTokPtr = ptr;
return XML_TOK_INVALID;
}
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
CHECK_NAME_CASES(enc, ptr, end, nextTokPtr)
case BT_CR: case BT_LF: case BT_S:
@@ -941,7 +941,7 @@ PREFIX(scanLit)(int open, const ENCODING *enc,
const char *ptr, const char *end,
const char **nextTokPtr)
{
- while (ptr != end) {
+ while (ptr < end) {
int t = BYTE_TYPE(enc, ptr);
switch (t) {
INVALID_CASES(ptr, nextTokPtr)
@@ -973,7 +973,7 @@ PREFIX(prologTok)(const ENCODING *enc, const char *ptr, const char *end,
const char **nextTokPtr)
{
int tok;
- if (ptr == end)
+ if (ptr >= end)
return XML_TOK_NONE;
if (MINBPC(enc) > 1) {
size_t n = end - ptr;
@@ -1141,7 +1141,7 @@ PREFIX(prologTok)(const ENCODING *enc, const char *ptr, const char *end,
*nextTokPtr = ptr;
return XML_TOK_INVALID;
}
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
CHECK_NAME_CASES(enc, ptr, end, nextTokPtr)
case BT_GT: case BT_RPAR: case BT_COMMA:
@@ -1204,10 +1204,10 @@ PREFIX(attributeValueTok)(const ENCODING *enc, const char *ptr,
const char *end, const char **nextTokPtr)
{
const char *start;
- if (ptr == end)
+ if (ptr >= end)
return XML_TOK_NONE;
start = ptr;
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
#define LEAD_CASE(n) \
case BT_LEAD ## n: ptr += n; break;
@@ -1262,10 +1262,10 @@ PREFIX(entityValueTok)(const ENCODING *enc, const char *ptr,
const char *end, const char **nextTokPtr)
{
const char *start;
- if (ptr == end)
+ if (ptr >= end)
return XML_TOK_NONE;
start = ptr;
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
#define LEAD_CASE(n) \
case BT_LEAD ## n: ptr += n; break;
@@ -1326,7 +1326,7 @@ PREFIX(ignoreSectionTok)(const ENCODING *enc, const char *ptr,
end = ptr + n;
}
}
- while (ptr != end) {
+ while (ptr < end) {
switch (BYTE_TYPE(enc, ptr)) {
INVALID_CASES(ptr, nextTokPtr)
case BT_LT:
@@ -1373,7 +1373,7 @@ PREFIX(isPublicId)(const ENCODING *enc, const char *ptr, const char *end,
{
ptr += MINBPC(enc);
end -= MINBPC(enc);
- for (; ptr != end; ptr += MINBPC(enc)) {
+ for (; ptr < end; ptr += MINBPC(enc)) {
switch (BYTE_TYPE(enc, ptr)) {
case BT_DIGIT:
case BT_HEX:
@@ -1760,7 +1760,7 @@ PREFIX(updatePosition)(const ENCODING *enc,
case BT_CR:
pos->lineNumber++;
ptr += MINBPC(enc);
- if (ptr != end && BYTE_TYPE(enc, ptr) == BT_LF)
+ if (ptr < end && BYTE_TYPE(enc, ptr) == BT_LF)
ptr += MINBPC(enc);
pos->columnNumber = (XML_Size)-1;
break;
--
2.8.2
......@@ -2,19 +2,9 @@
parts =
libexpat
extends =
../patch/buildout.cfg
[libexpat]
recipe = slapos.recipe.cmmi
url = http://downloads.sourceforge.net/project/expat/expat/2.1.0/expat-2.1.0.tar.gz
md5sum = dd7dab7a5fea97d2a6a43f511449b7cd
url = http://downloads.sourceforge.net/project/expat/expat/2.2.0/expat-2.2.0.tar.bz2
md5sum = 2f47841c829facb346eb6e3fab5212e2
configure-options =
--disable-static
patch-options = -p1
patches =
${:_profile_base_location_}/CVE-2015-1283.patch#44b31d7377035591f37ad94a31a8042b
${:_profile_base_location_}/CVE-2015-1283-refix.patch#47382fe30c9a49724c626cef793ef382
${:_profile_base_location_}/CVE-2016-0718-v2-2-1.patch#4ee57e7a052ada99f2e11fa21a229727
environment =
PATH=${patch:location}/bin:%(PATH)s
......@@ -5,7 +5,7 @@ parts =
[noVNC]
recipe = hexagonit.recipe.download
ignore-existing = true
# version-0.5.1 release from 29 Nov 2014
url = https://github.com/kanaka/noVNC/archive/v0.5.1.tar.gz
md5sum = ac55b2316b2164b6e09ae3bd89c37cb6
# version-0.6.1 release from 04 Jul 2016
url = https://github.com/kanaka/noVNC/archive/v0.6.1.tar.gz
md5sum = d153c6aa69a9178081768fecbace1932
strip-top-level-dir = true
......@@ -7,20 +7,24 @@
[buildout]
extends =
../openssl/buildout.cfg
../patch/buildout.cfg
../zlib/buildout.cfg
parts =
openssh
[openssh]
recipe = slapos.recipe.cmmi
md5sum = 0541579adf9d55abb15ef927048d372e
url = http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-5.8p2.tar.gz
configure-option =
-I${zlib:location}/include
-L${zlib:location}/lib
-I${openssl:location}/include
-L${openssl:location}/lib
--with-ssl-dir=${openssl:location}/lib
md5sum = dfadd9f035d38ce5d58a3bf130b86d08
url = http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-7.3p1.tar.gz
patch-binary = ${patch:location}/bin/patch
#patch-options = -p1
patches =
${:_profile_base_location_}/no_create_privsep_path.patch#d5b61a2442fffa457cebe4ad1dc68f4e
environment =
CPPFLAGS=-I${zlib:location}/include -I${openssl:location}/include
LDFLAGS=-L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib -L${openssl:location}/lib -Wl,-rpath=${openssl:location}/lib
configure-options =
--prefix=${buildout:parts-directory}/${:_buildout_section_name_}
--libdir=lib
--exec-prefix=${buildout:parts-directory}/${:_buildout_section_name_}
--with-privsep-path=${buildout:parts-directory}/${:_buildout_section_name_}/var/empty
\ No newline at end of file
--- Makefile.in 2016-07-28 00:54:27.000000000 +0200
+++ Makefile.in 2016-08-19 13:02:30.227177750 +0200
@@ -304,7 +304,6 @@
$(srcdir)/mkinstalldirs $(DESTDIR)$(mandir)/$(mansubdir)5
$(srcdir)/mkinstalldirs $(DESTDIR)$(mandir)/$(mansubdir)8
$(srcdir)/mkinstalldirs $(DESTDIR)$(libexecdir)
- (umask 022 ; $(srcdir)/mkinstalldirs $(DESTDIR)$(PRIVSEP_PATH))
$(INSTALL) -m 0755 $(STRIP_OPT) ssh$(EXEEXT) $(DESTDIR)$(bindir)/ssh$(EXEEXT)
$(INSTALL) -m 0755 $(STRIP_OPT) scp$(EXEEXT) $(DESTDIR)$(bindir)/scp$(EXEEXT)
$(INSTALL) -m 0755 $(STRIP_OPT) ssh-add$(EXEEXT) $(DESTDIR)$(bindir)/ssh-add$(EXEEXT)
......@@ -4,8 +4,8 @@ parts =
[pcre]
recipe = slapos.recipe.cmmi
url = http://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.37.tar.bz2
md5sum = ed91be292cb01d21bc7e526816c26981
url = http://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.39.tar.bz2
md5sum = e3fca7650a0556a2647821679d81f585
configure-options =
--disable-static
--enable-unicode-properties
......@@ -6,6 +6,7 @@ extends =
../../component/pkgconfig/buildout.cfg
../../component/xorg/buildout.cfg
../../component/zlib/buildout.cfg
../../component/libaio/buildout.cfg
# XXX Change all reference to kvm section to qemu section, then
# use qemu as main name section.
......@@ -15,25 +16,25 @@ extends =
[kvm]
recipe = slapos.recipe.cmmi
# qemu-kvm and qemu are now the same since 1.3.
url = http://wiki.qemu-project.org/download/qemu-2.4.0.tar.bz2
md5sum = 186ee8194140a484a455f8e3c74589f4
url = http://wiki.qemu-project.org/download/qemu-2.7.0.tar.bz2
md5sum = 08d4d06d1cb598efecd796137f4844ab
configure-options =
--target-list="$(uname -m 2>/dev/null|sed 's,^i[456]86$,i386,')-softmmu"
--enable-system
--with-system-pixman
--disable-sdl
--disable-xen
--enable-vnc-tls
--disable-vnc-sasl
--disable-curses
--disable-curl
--enable-kvm
--enable-linux-aio
--disable-docs
--enable-vnc
--enable-vnc-png
--disable-vnc-jpeg
--extra-cflags="-I${gnutls:location}/include -I${libuuid:location}/include -I${zlib:location}/include -I${libpng:location}/include"
--extra-ldflags="-Wl,-rpath -Wl,${glib:location}/lib -L${glib:location}/lib -Wl,-rpath -Wl,${gnutls:location}/lib -L${gnutls:location}/lib -Wl,-rpath -Wl,${gpg-error:location}/lib -L${gpg-error:location}/lib -L${gettext:location}/lib -Wl,-rpath -Wl,${gettext:location}/lib -Wl,-rpath -Wl,${libpng:location}/lib -L${libpng:location}/lib -L${libuuid:location}/lib -Wl,-rpath -Wl,${libuuid:location}/lib -L${zlib:location}/lib -Wl,-rpath -Wl,${zlib:location}/lib -lpng -lz -lgnutls"
--extra-cflags="-I${gnutls:location}/include -I${libuuid:location}/include -I${zlib:location}/include -I${libpng:location}/include -I${libaio:location}/include"
--extra-ldflags="-Wl,-rpath -Wl,${glib:location}/lib -L${glib:location}/lib -Wl,-rpath -Wl,${gnutls:location}/lib -L${gnutls:location}/lib -Wl,-rpath -Wl,${gpg-error:location}/lib -L${gpg-error:location}/lib -L${gettext:location}/lib -Wl,-rpath -Wl,${gettext:location}/lib -Wl,-rpath -Wl,${libpng:location}/lib -L${libpng:location}/lib -L${libuuid:location}/lib -Wl,-rpath -Wl,${libuuid:location}/lib -L${libaio:location}/lib -Wl,-rpath=${libaio:location}/lib -L${zlib:location}/lib -Wl,-rpath -Wl,${zlib:location}/lib -lpng -lz -lgnutls"
--disable-werror
environment =
PATH=${pkgconfig:location}/bin:%(PATH)s
......@@ -44,9 +45,10 @@ environment =
# Download the installer of Debian 8 (Jessie)
recipe = hexagonit.recipe.download
ignore-existing = true
url = http://cdimage.debian.org/debian-cd/8.4.0/amd64/iso-cd/debian-8.4.0-amd64-netinst.iso
filename = ${:_buildout_section_name_}
md5sum = 8d52d1b7789cd5a464aae719f05299ec
url = http://cdimage.debian.org/debian-cd/${:version}/amd64/iso-cd/debian-${:version}-amd64-netinst.iso
version = 8.6.0
md5sum = e9f61bf327db6d8f7cee05a99f2353cc
download-only = true
mode = 0644
location = ${buildout:parts-directory}/${:_buildout_section_name_}
......@@ -127,6 +127,6 @@ scripts = py
[versions]
setuptools = 19.6.2
slapos.rebootstrap = 3.5
slapos.rebootstrap = 3.7
zc.buildout = 2.5.2+slapos002
zc.recipe.egg = 2.0.3+slapos001
......@@ -28,7 +28,7 @@ from setuptools import setup, find_packages
import glob
import os
version = '1.0.31'
version = '1.0.35'
name = 'slapos.cookbook'
long_description = open("README.rst").read() + "\n" + \
open("CHANGES.rst").read() + "\n"
......@@ -111,6 +111,7 @@ setup(name=name,
'erp5testnode = slapos.recipe.erp5testnode:Recipe',
'firefox = slapos.recipe.firefox:Recipe',
'fontconfig = slapos.recipe.fontconfig:Recipe',
'free_port = slapos.recipe.free_port:Recipe',
'generate.mac = slapos.recipe.generatemac:Recipe',
'generate.password = slapos.recipe.generatepassword:Recipe',
'generic.cloudooo = slapos.recipe.generic_cloudooo:Recipe',
......
......@@ -37,7 +37,7 @@ class KnownHostsFile(dict):
if os.path.exists(self._filename):
with open(self._filename, 'r') as keyfile:
for line in keyfile:
host, key = [column.strip() for column in line.split(' ', 1)]
host, key = [column.strip() for column in line.split(' ', 1) if line != ""]
self[host] = key
def _dump(self):
......
##############################################################################
#
# Copyright (c) 2016 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import ConfigParser
import os
import netaddr
import socket
class Recipe(object):
"""
Uses the socket module from the Python standard library to get an unused port.
Notice: this recipe may still fail because of a race condition: if another
process grabs the picked port before the service it was generated for starts,
then the service won't start, which is the same outcome as handing it an
already-in-use port.
"""
def __init__(self, buildout, name, options):
self.options = options
# If section has already been installed, port is already taken by the
# requested service itself.
# If this check isn't done, a new port would be picked for every upgrade
# of the software release
try:
parser = ConfigParser.RawConfigParser()
if os.path.exists(buildout['buildout']['installed']):
with open(buildout['buildout']['installed']) as config_file:
parser.readfp(config_file)
port = parser.get(name, 'port')
# Port can be 0 in case of upgrade: some old service still runs on port,
# so 0 is returned by default. Then, on next run, this recipe is processed
# again until a correct value is returned
if port != '0':
self.options['port'] = port
return
except (IOError, ConfigParser.NoSectionError, ConfigParser.NoOptionError):
pass
# Otherwise, let's find one
self.minimum = int(options.get('minimum', 1024))
self.maximum = int(options.get('maximum', 49151))
self.ip = options.get('ip')
if self.minimum == self.maximum:
self.options['port'] = str(self.minimum)
return
if netaddr.valid_ipv4(self.ip):
self.inet_family = socket.AF_INET
elif netaddr.valid_ipv6(self.ip):
self.inet_family = socket.AF_INET6
else:
# address family is unknown, so let's return a general purpose port
self.options['port'] = str(0)
return
self.options['port'] = str(self._getFreePort())
def _getFreePort(self):
"""
The port number is picked from the given range, starting with the smallest,
and incremented until a free one is found.
The algorithm therefore always returns the same value for the same parameters
in a standard environment.
"""
for port in xrange(self.minimum, self.maximum):
sock = socket.socket(self.inet_family, socket.SOCK_STREAM)
try:
sock.bind((self.ip, port))
break
except socket.error:
continue
finally:
sock.close()
else:
port = 0
return port
install = update = lambda self: []
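Outside buildout, the probing loop above boils down to the following standalone sketch (find_free_port is a hypothetical helper, not part of the recipe); the same race-condition caveat from the docstring applies, since another process can still grab the port between probing and actual use.
---- 8< ----
import socket

def find_free_port(ip='127.0.0.1', minimum=1024, maximum=49151,
                   family=socket.AF_INET):
    """Return the first TCP port in [minimum, maximum) that can be bound, else 0."""
    for port in range(minimum, maximum):
        sock = socket.socket(family, socket.SOCK_STREAM)
        try:
            sock.bind((ip, port))
            return port
        except socket.error:
            continue
        finally:
            sock.close()
    return 0

print(find_free_port())   # typically prints 1024 on an otherwise idle machine
---- 8< ----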
......@@ -66,8 +66,15 @@ class Callback(GenericBaseRecipe):
class Notify(GenericBaseRecipe):
def __init__(self, buildout, name, options):
super(Notify, self).__init__(buildout, name, options)
log = os.path.join(options['feeds'], options['name'])
options['log-file'] = log
self.options = options
def createNotifier(self, notifier_binary, wrapper, executable,
log, title, notification_url, feed_url, pidfile=None):
log, title, notification_url, feed_url, pidfile=None,
instance_root_name=None, log_url=None, status_item_directory=None):
if not os.path.exists(log):
# Just a touch
......@@ -81,6 +88,13 @@ class Notify(GenericBaseRecipe):
]
parameters.extend(notification_url.split(' '))
parameters.extend(['--executable', executable])
# For a more verbose mode, writing feed items for any action
if instance_root_name and log_url and status_item_directory:
parameters.extend([
'--instance-root-name', instance_root_name,
'--log-url', log_url,
'--status-item-directory', status_item_directory,
])
return self.createWrapper(name=wrapper,
command=notifier_binary,
......@@ -98,13 +112,11 @@ class Notify(GenericBaseRecipe):
port=self.options['port'],
path='/get/%s' % self.options['name'])
log = os.path.join(self.options['feeds'], self.options['name'])
options = self.options
script = self.createNotifier(notifier_binary=options['notifier-binary'],
wrapper=options['wrapper'],
executable=options['executable'],
log=log,
log=options['log-file'],
title=options['title'],
pidfile=options['pidfile'],
notification_url=options['notify'],
......
......@@ -40,9 +40,11 @@ from slapos.recipe.librecipe import shlex
def promise(args):
# Redirect output to /dev/null
with open("/dev/null") as _dev_null:
ssh = subprocess.Popen(
[args['ssh_client'], '%(user)s@%(host)s/%(port)s' % args],
stdin=subprocess.PIPE, stdout=None, stderr=None
[args['ssh_client'], '%(user)s@%(host)s' % args, '-p', '%(port)s' % args],
stdin=subprocess.PIPE, stdout=_dev_null, stderr=_dev_null
)
# Rdiff Backup protocol quit command
......@@ -62,6 +64,11 @@ def promise(args):
class Recipe(GenericSlapRecipe, Notify, Callback):
def _options(self, options):
if 'slave-instance-list' in options:
for slave in json.loads(options['slave-instance-list']):
if slave['type'] == 'pull':
options['rdiff-backup-data-folder'] = str(os.path.join(options['directory'], slave['name'], 'rdiff-backup-data'))
def wrapper_push(self, remote_schema, local_dir, remote_dir, rdiff_wrapper_path):
# Create a simple rdiff-backup wrapper that will push
......@@ -225,7 +232,11 @@ class Recipe(GenericSlapRecipe, Notify, Callback):
url = entry.get('url')
if not url:
raise ValueError('Missing URL parameter for PBS recipe')
return path_list
# It used to raise an error if url was not defined.
# This behavior has been removed to accelerate deployment of the
# Software Release. The buildout, instead of failing, can process
# other sections, which will return parameters to the main instance faster
parsed_url = urlparse.urlparse(url)
slave_type = entry['type']
......@@ -249,7 +260,12 @@ class Recipe(GenericSlapRecipe, Notify, Callback):
# Create known_hosts file by default.
# In some cases, we don't want to create it (e.g. when we share an IP among partitions)
if not self.isTrueValue(self.options.get('ignore-known-hosts-file')):
known_hosts_file[parsed_url.hostname] = entry['server-key']
# Migration code: if the known_hosts file contains an entry keyed by the bare IP,
# it is updated to use [IP]:port. This allows sharing the same IP among partitions
if parsed_url.hostname in known_hosts_file:
del known_hosts_file[parsed_url.hostname]
known_hostname = "[%s]:%s" % (parsed_url.hostname, parsed_url.port)
known_hosts_file[known_hostname] = entry['server-key'].strip()
notifier_wrapper_path = os.path.join(self.options['wrappers-directory'], slave_id)
rdiff_wrapper_path = notifier_wrapper_path + '_raw'
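A tiny sketch of the key migration above, with hypothetical values (OpenSSH stores non-standard-port entries as [host]:port in known_hosts):
---- 8< ----
known_hosts = {'10.0.0.5': 'ssh-rsa AAAA...old'}   # legacy entry, keyed by bare IP

hostname, port = '10.0.0.5', 22222
server_key = 'ssh-rsa AAAA...new\n'

known_hosts.pop(hostname, None)                    # drop the bare-IP entry
known_hosts['[%s]:%s' % (hostname, port)] = server_key.strip()

print(known_hosts)   # {'[10.0.0.5]:22222': 'ssh-rsa AAAA...new'}
---- 8< ----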
......@@ -257,9 +273,7 @@ class Recipe(GenericSlapRecipe, Notify, Callback):
# Create the rdiff-backup wrapper
# It is useful to separate it from the notifier so that we can run it manually.
# XXX use -y because the host might not yet be in the
# trusted hosts file until the next time slapgrid is run.
remote_schema = '{ssh} -y -K 300 -p %s {username}@{hostname}'.format(
remote_schema = '{ssh} -o "ConnectTimeout 300" -p %s {username}@{hostname}'.format(
ssh=self.options['sshclient-binary'],
username=parsed_url.username,
hostname=parsed_url.hostname
......@@ -291,7 +305,10 @@ class Recipe(GenericSlapRecipe, Notify, Callback):
title=entry.get('title', slave_id),
notification_url=entry['notify'],
feed_url='%s/get/%s' % (self.options['notifier-url'], entry['notification-id']),
pidfile=os.path.join(self.options['run-directory'], '%s.pid' % slave_id)
pidfile=os.path.join(self.options['run-directory'], '%s.pid' % slave_id),
instance_root_name=self.options.get('instance-root-name', None),
log_url=self.options.get('log-url', None),
status_item_directory=self.options.get('status-item-directory', None)
)
path_list.append(notifier_wrapper)
......
......@@ -33,18 +33,24 @@ import re
from slapos.recipe.librecipe import GenericBaseRecipe
from slapos.recipe.librecipe.inotify import subfiles
# This authority only works with dropbear sshkey generator
# This authority only works with dropbear or openssh sshkey generators
def sshkeys_authority(args):
requests_directory = args['requests']
keygen_binary = args['sshkeygen']
if 'openssh' in keygen_binary:
authority_type = 'openssh'
else:
# Keep dropbear for compatibility
authority_type = 'dropbear'
for request_filename in subfiles(requests_directory):
with open(request_filename) as request_file:
request = json.load(request_file)
key_type = request.get('type', 'rsa')
size = str(request.get('size', 2048))
size = str(request.get('size', 4096))
try:
private_key = request['private_key']
public_key = request['public_key']
......@@ -54,6 +60,10 @@ def sshkeys_authority(args):
if not os.path.exists(private_key):
if os.path.exists(public_key):
os.unlink(public_key)
if authority_type == 'openssh':
keygen_cmd = [keygen_binary, '-N', "", '-C', "", '-t', key_type,
'-f', private_key, '-b', size]
else:
keygen_cmd = [keygen_binary, '-t', key_type, '-f', private_key,
'-s', size]
# If the keygeneration return an non-zero status, it means there's a
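For reference, the two command lines built above come out roughly like this (hypothetical binary names and paths):
---- 8< ----
key_type, size, private_key = 'rsa', '4096', '/srv/sshkeys/request-0.key'

# OpenSSH ssh-keygen: empty passphrase (-N) and comment (-C), key size via -b
openssh_cmd = ['ssh-keygen', '-N', '', '-C', '', '-t', key_type,
               '-f', private_key, '-b', size]

# dropbearkey: key size is passed via -s instead
dropbear_cmd = ['dropbearkey', '-t', key_type, '-f', private_key, '-s', size]

print(openssh_cmd)
print(dropbear_cmd)
---- 8< ----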
......
import socket
import unittest
from slapos.recipe import free_port
class SocketMock():
def __init__(self, *args, **kw):
self.args = args
self.kw = kw
pass
def nothing_happen(self, *args, **kw):
pass
bind = close = nothing_happen
import sys
sys.modules['socket'].socket = SocketMock
class FreePortTest(unittest.TestCase):
def afterSetup(self):
SocketMock.bind = SocketMock.close = SocketMock.nothing_happen
def new_recipe(self, **kw):
buildout = {
'buildout': {
'bin-directory': '',
'find-links': '',
'allow-hosts': '',
'develop-eggs-directory': '',
'eggs-directory': '',
'python': 'testpython',
'installed': '.installed.cfg',
},
'testpython': {
'executable': sys.executable,
},
'slap-connection': {
'computer-id': '',
'partition-id': '',
'server-url': '',
'software-release-url': '',
}
}
options = {
'ip': '127.0.0.1',
}
options.update(kw)
return free_port.Recipe(buildout=buildout, name='free_port', options=options)
def test_ifNoBusyPortThenMinPortIsAlwaysReturned(self):
recipe = self.new_recipe(minimum=2000)
self.assertEqual(recipe.options['port'], '2000')
def test_iterateUntilFreePortIsFound(self):
def bindFailExceptOnPort2020(socket_instance, binding):
ip, port = binding
if port != 2020:
raise socket.error()
SocketMock.bind = bindFailExceptOnPort2020
recipe = self.new_recipe(minimum=2000)
self.assertEqual(recipe.options['port'], '2020')
def test_returnsPort0IfNoPortIsFreeInRange(self):
def bindAlwaysFail(socket_instance, binding):
raise socket.error()
SocketMock.bind = bindAlwaysFail
recipe = self.new_recipe(minimum=2000, maximum=2100)
self.assertEqual(recipe.options['port'], '0')
if __name__ == '__main__':
unittest.main()
......@@ -48,29 +48,29 @@ gitdb = 0.6.4
pycrypto = 2.6.1
slapos.recipe.download = 1.0
slapos.recipe.template = 2.8
slapos.toolbox = 0.58
slapos.toolbox = 0.59
smmap = 0.9.0
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
GitPython = 2.0.8
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
atomize = 0.2.0
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
feedparser = 5.2.1
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
lockfile = 0.12.2
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
paramiko = 2.0.1
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
rpdb = 0.1.5
......@@ -11,29 +11,29 @@ plone.recipe.command = 1.1
pycrypto = 2.6.1
rdiff-backup = 1.0.5
slapos.recipe.template = 2.8
slapos.toolbox = 0.58
slapos.toolbox = 0.59
smmap = 0.9.0
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
GitPython = 2.0.8
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
atomize = 0.2.0
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
feedparser = 5.2.1
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
lockfile = 0.12.2
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
paramiko = 2.0.1
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
rpdb = 0.1.5
......@@ -27,7 +27,7 @@ parts =
# XXX: we have to manually add this for resilience
rdiff-backup
collective.recipe.template-egg
pbs-recipe-egg
#XXX-Cedric : Currently, one can only access to KVM using noVNC.
# Ideally one should be able to access KVM by using either NoVNC or VNC.
......@@ -101,7 +101,7 @@ recipe = hexagonit.recipe.download
ignore-existing = true
url = ${:_profile_base_location_}/instance-kvm.cfg.jinja2
mode = 644
md5sum = 8b8d6410ca93314d5ee15d3db9f13aa5
md5sum = 7abec10f8e24e7a75935a0637a006329
download-only = true
on-update = true
......@@ -110,7 +110,7 @@ recipe = hexagonit.recipe.download
ignore-existing = true
url = ${:_profile_base_location_}/instance-kvm-cluster.cfg.jinja2.in
mode = 644
md5sum = 51b5664e103e5c663895cb4d324719fd
md5sum = d29f02443f48096f176c8ae78cc5596c
download-only = true
on-update = true
......@@ -197,7 +197,7 @@ ignore-existing = true
url = ${:_profile_base_location_}/template/template-kvm-run.in
mode = 644
filename = template-kvm-run.in
md5sum = 24b09d68f7cd0e81630c685cc679676b
md5sum = 178a24cdad77cb6c2e519ac629dd0e74
download-only = true
on-update = true
......
......@@ -196,6 +196,12 @@
"minimum": 128,
"multipleOf": 128
},
"auto-ballooning": {
"title": "Enable qemu auto ballooning.",
"description": "Enable virtio balloon device to allows KVM guests to reduce/re-increase their memory size.",
"type": "boolean",
"default": true
},
"disk-size": {
"title": "Disk size",
"description": "Disk size, in GB.",
......@@ -219,6 +225,20 @@
"virtio"
]
},
"disk-cache": {
"title": "Cache option to use with Disk.",
"description": "Disk cache controls how the host cache is used to access block data.",
"type": "string",
"default": "writeback",
"enum": ["none", "writeback", "unsafe", "directsync", "writethrough"]
},
"disk-aio": {
"title": "Disk aio to use.",
"description": "Selects between pthread based disk I/O and native Linux AIO.",
"type": "string",
"default": "threads",
"enum": ["threads", "native"]
},
"cpu-count": {
"title": "CPU count",
"description": "Number of CPU cores.",
......@@ -235,6 +255,16 @@
"description": "Simulate a multi node NUMA system. If mem and cpus are omitted, resources are split equally. Each numa option are separated by space: node,nodeid=4,cpus=40-49,mem=64g node,nodeid=1,cpus=10-19,mem=128g. Set this option if you know what you're doing.",
"type": "string"
},
"machine-options": {
"title": "Machine options.",
"description": "Select the emulated machine by name. Ex: pc-i440fx-2.4,accel=kvm",
"type": "string"
},
"cpu-model": {
"title": "CPU model.",
"description": "Select the emulated CPU model. Ex: SandyBridge,+erms,+smep,+smx,+vmx",
"type": "string"
},
"keyboard-layout-language": {
"title": "Use keyboard layout language",
"description": "Use keyboard layout language (for example fr for French). Can be usefull with VNC display",
......
......@@ -51,6 +51,11 @@ config-disk-type = {{ dumps(kvm_parameter_dict.get('disk-type', 'virtio')) }}
config-cpu-count = {{ dumps(kvm_parameter_dict.get('cpu-count', 1)) }}
config-cpu-options = {{ dumps(kvm_parameter_dict.get('cpu-options', '')) }}
config-numa = {{ dumps(kvm_parameter_dict.get('numa', '')) }}
config-disk-cache = {{ dumps(kvm_parameter_dict.get('disk-cache', '')) }}
config-disk-aio = {{ dumps(kvm_parameter_dict.get('disk-aio', '')) }}
config-auto-ballooning = {{ dumps(kvm_parameter_dict.get('auto-ballooning', True)) }}
config-machine-options = {{ dumps(kvm_parameter_dict.get('machine-options', '')) }}
config-cpu-model = {{ dumps(kvm_parameter_dict.get('cpu-model', '')) }}
{% set nat_rules_list = kvm_parameter_dict.get('nat-rules', []) -%}
config-nat-rules = {{ nat_rules_list | join(' ') }}
......
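For reference, here is a hypothetical per-VM parameter entry exercising the new properties declared in the schema above; the values are illustrative and every key is optional (the template falls back to an empty string, or to True for auto-ballooning):

# Hypothetical kvm_parameter_dict for one VM of the cluster (illustrative values only)
kvm_parameter_dict = {
    "disk-cache": "writeback",       # one of: none, writeback, unsafe, directsync, writethrough
    "disk-aio": "native",            # one of: threads, native
    "auto-ballooning": True,         # adds the virtio-balloon-pci device to the VM
    "machine-options": "pc-i440fx-2.4,accel=kvm",
    "cpu-model": "SandyBridge,+erms,+smep,+smx,+vmx",
}
# The template above forwards each value unchanged as a config-* key of the
# requested instance, e.g. config-disk-cache = writeback, config-cpu-model = SandyBridge,...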
......@@ -12,6 +12,12 @@
"minimum": 128,
"multipleOf": 128
},
"auto-ballooning": {
"title": "Enable qemu auto ballooning.",
"description": "Enable virtio balloon device to allows KVM guests to reduce/re-increase their memory size.",
"type": "boolean",
"default": true
},
"disk-size": {
"title": "Disk size",
"description": "Disk size, in GB.",
......@@ -27,6 +33,20 @@
"default": "virtio",
"enum": ["ide", "scsi", "sd", "mtd", "floppy", "pflash", "virtio"]
},
"disk-cache": {
"title": "Cache option to use with Disk.",
"description": "Disk cache controls how the host cache is used to access block data.",
"type": "string",
"default": "writeback",
"enum": ["none", "writeback", "unsafe", "directsync", "writethrough"]
},
"disk-aio": {
"title": "Disk aio to use.",
"description": "Selects between pthread based disk I/O and native Linux AIO.",
"type": "string",
"default": "threads",
"enum": ["threads", "native"]
},
"cpu-count": {
"title": "CPU count",
......@@ -44,6 +64,17 @@
"description": "Simulate a multi node NUMA system. If mem and cpus are omitted, resources are split equally. Each numa option are separated by space: node,nodeid=4,cpus=40-49,mem=64g node,nodeid=1,cpus=10-19,mem=128g. Set this option if you know what you're doing.",
"type": "string"
},
"machine-options": {
"title": "Machine options.",
"description": "Select the emulated machine by name. Ex: pc-i440fx-2.4,accel=kvm",
"type": "string"
},
"cpu-model": {
"title": "CPU model.",
"description": "Select the emulated CPU model. Ex: SandyBridge,+erms,+smep,+smx,+vmx",
"type": "string"
},
"keyboard-layout-language": {
"title": "Use keyboard layout language",
"description": "Use keyboard layout language (for example fr for French). Can be usefull with VNC display",
......
......@@ -142,6 +142,15 @@ cluster-doc-port = 0
netcat-binary = {{ netcat_bin }}
language = ${slap-parameter:keyboard-layout-language}
name = {{ slapparameter_dict.get('name', 'Single KVM') }}
disk-cache = ${slap-parameter:disk-cache}
disk-aio = ${slap-parameter:disk-aio}
auto-ballooning = ${slap-parameter:auto-ballooning}
machine-options = ${slap-parameter:machine-options}
cpu-options = ${slap-parameter:cpu-model}
log-file = ${directory:log}/qemu.log
[kvm-run]
recipe = slapos.recipe.template:jinja2
template = {{ template_kvm_run }}
......@@ -428,8 +437,10 @@ mode = {{ mode }}
# write vm-data into file public/data
{{ writefile('vm-data-content', '${directory:public}/data', slapparameter_dict.get('data-to-vm', ''), '700') }}
{% if slapparameter_dict.get('authorized-key', '') -%}
# write public key for vms to public/authorized_keys
{{ writefile('get-authorized-key', '${directory:public}/authorized_keys', slapparameter_dict.get('authorized-key', ''), '700') }}
{% endif -%}
{% if use_tap == 'true' and nat_restrict == 'true' -%}
# Ask to set default to tap interface in the vm
......@@ -546,6 +557,11 @@ cpu-count = 1
cpu-options =
# list of numa options separate by space: node,nodeid=1,cpus=9-15 node,nodeid=2,cpus=1,3,7
numa =
disk-cache = writeback
disk-aio = native
auto-ballooning = True
machine-options =
cpu-model =
nat-rules = 22 80 443
use-nat = True
......
......@@ -5,47 +5,44 @@ extends = common.cfg
# XXX - use websockify = 0.5.1 for compatibility with kvm frontend
websockify = 0.5.1
slapos.toolbox = 0.48
PyRSS2Gen = 1.1
apache-libcloud = 0.17.0
cns.recipe.symlink = 0.2.3
ecdsa = 0.13
slapos.toolbox = 0.59
erp5.util = 0.4.45
apache-libcloud = 1.1.0
collective.recipe.environment = 0.2.0
gitdb = 0.6.4
plone.recipe.command = 1.1
pycrypto = 2.6.1
slapos.recipe.template = 2.7
pycurl = 7.43.0
slapos.recipe.template = 2.9
smmap = 0.9.0
erp5.util = 0.4.45
pycurl = 7.19.5.1
# websockify = 0.8.0
# Required by:
# slapos.toolbox==0.48
# slapos.toolbox==0.59
GitPython = 2.0.8
# Required by:
# slapos.toolbox==0.48
# slapos.toolbox==0.59
atomize = 0.2.0
# Required by:
# apache-libcloud==0.17.0
backports.ssl-match-hostname = 3.4.0.2
# slapos.toolbox==0.59
dnspython = 1.14.0
# Required by:
# slapos.toolbox==0.48
feedparser = 5.2.0.post1
# slapos.toolbox==0.59
feedparser = 5.2.1
# Required by:
# slapos.toolbox==0.48
# slapos.toolbox==0.59
lockfile = 0.12.2
# Required by:
# websockify==0.5.1
numpy = 1.9.2
# websockify==0.8.0
numpy = 1.11.2rc1
# Required by:
# slapos.toolbox==0.48
paramiko = 2.0.1
# slapos.toolbox==0.59
paramiko = 2.0.2
# Required by:
# slapos.toolbox==0.48
rpdb = 0.1.5
# slapos.toolbox==0.59
passlib = 1.6.5
\ No newline at end of file
......@@ -61,6 +61,18 @@ language_list = ['ar', 'da', 'de', 'de-ch', 'en-gb', 'en-us', 'es', 'et', 'fi',
'lv', 'mk', 'nl', 'nl-be', 'no', 'pl', 'pt', 'pt-br', 'ru', 'sl', 'sv',
'th', 'tr']
url_check_certificate = '{{ parameter_dict.get("hard-drive-url-check-certificate", "true") }}'.lower()
auto_ballooning = '{{ parameter_dict.get("auto-ballooning") }}' in ('true', 'True', '1')
vm_name = '{{ parameter_dict.get("name") }}'
disk_cache = '{{ parameter_dict.get("disk-cache") }}'.strip()
disk_cache = disk_cache if disk_cache in ["none", "writeback", "unsafe",
"directsync", "writethrough"] else "writeback"
disk_aio = '{{ parameter_dict.get("disk-aio") }}'.strip()
disk_aio = disk_aio if disk_aio in ["threads", "native"] and \
disk_cache == "directsync" else "threads"
machine_options = '{{ parameter_dict.get("machine-options", "") }}'.strip()
cpu_model = '{{ parameter_dict.get("cpu-model", "") }}'.strip()
logfile = '{{ parameter_dict.get("log-file") }}'
if hasattr(ssl, '_create_unverified_context') and url_check_certificate == 'false':
opener = urllib.FancyURLopener(context=ssl._create_unverified_context())
......@@ -250,13 +262,14 @@ if smp_options:
if key in ('cores', 'threads', 'sockets', 'maxcpus') and val.isdigit():
smp += ',%s=%s' % (key, val)
kvm_argument_list = [qemu_path,
'-enable-kvm', '-smp', smp,
'-enable-kvm', '-smp', smp, '-name', vm_name,
'-m', ram_size, '-vga', 'std',
'-drive', 'file=%s,if=%s' % (disk_path, disk_type),
'-drive', 'file=%s,if=%s,cache=%s,aio=%s' % (disk_path, disk_type, disk_cache, disk_aio),
'-vnc', '%s:1,ipv4,password' % listen_ip,
'-boot', 'order=cd,menu=on',
'-qmp', 'unix:%s,server' % socket_path,
'-pidfile', pid_file_path,
'-pidfile', pid_file_path, '-msg', 'timestamp=on',
'-D', logfile,
]
rgx = re.compile('^[\w*\,][\=\d+\-\,\w]*$')
......@@ -275,6 +288,31 @@ if language in language_list:
for disk in additional_disk_list:
kvm_argument_list.extend([
'-drive', 'file=%s,if=%s' % (disk, disk_type)])
if auto_ballooning:
kvm_argument_list.extend(['-device', 'virtio-balloon-pci,id=balloon0'])
machine_option_list = machine_options.split(',')
if machine_options and len(machine_option_list) > 0:
name = 'type'
if '=' in machine_option_list[0]:
name, val = machine_option_list[0].split('=')
else:
val = machine_option_list[0]
machine_option_list[0] = 'type=%s' % val
if name == 'type':
machine = ''
for option in machine_option_list:
key, val = option.split('=')
machine += ',%s=%s' % (key, val)
kvm_argument_list.extend(['-machine', machine])
if cpu_model:
rgx = re.compile('^[\w*\,-_][\=\d+\-\,\w]*$')
if rgx.match(cpu_model):
kvm_argument_list.extend(['-cpu', cpu_model])
# Try to connect to NBD server (and second nbd if defined).
# If not available, don't even specify it in qemu command line parameters.
# Reason: if qemu starts with unavailable NBD drive, it will just crash.
......
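Put together, the changes to template-kvm-run.in above roughly add the following arguments to the qemu command line (a sketch with placeholder paths and values, not the verbatim output of the template):

# Illustrative extra qemu arguments produced by the new options (placeholder values):
kvm_argument_list += [
    '-name', 'Single KVM',                                   # vm_name
    '-drive', 'file=/srv/disk.qcow2,if=virtio,cache=writeback,aio=threads',
    '-msg', 'timestamp=on',
    '-D', '/var/log/qemu.log',                                # log-file parameter
    '-device', 'virtio-balloon-pci,id=balloon0',              # only if auto-ballooning
    '-machine', 'type=pc-i440fx-2.4,accel=kvm',               # only if machine-options is set
    '-cpu', 'SandyBridge,+erms,+smep,+smx,+vmx',              # only if cpu-model matches the regex
]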
......@@ -42,11 +42,6 @@ recipe = zc.recipe.egg
eggs = neoppod[admin, ctl, master, storage-importer, storage-mysqldb, tests]
${python-mysqlclient:egg}
ZODB3
patch-binary = ${patch:location}/bin/patch
ZODB3-patches =
${:_profile_base_location_}/../../component/egg-patch/ZODB3-3.10.5.patch#c5fe331b1e3a930446f93ab4f6e97c6e
${:_profile_base_location_}/../../component/egg-patch/ZODB3-persistent-ghostify-slots.patch#3a66e9c018d7269bd522d5b0a746f510
ZODB3-patch-options = -p1
[slapos-deps-eggs]
recipe = zc.recipe.egg
......@@ -105,12 +100,13 @@ md5sum = 81ab5e842ecf8385b12d735585497cc8
[versions]
slapos.recipe.template = 2.9
# patched egg
ZODB3 = 3.10.5+SlapOSPatched002
# Required by slapos.toolbox = 0.58
slapos.toolbox = 0.58
apache-libcloud = 0.20.1
ZODB3 = 3.10.7
# Required by slapos.toolbox = 0.59
slapos.toolbox = 0.59
PyRSS2Gen = 1.1
apache-libcloud = 1.1.0
atomize = 0.2.0
dnspython = 1.14.0
ecdsa = 0.13
feedparser = 5.2.1
GitPython = 2.0.8
......
......@@ -7,11 +7,8 @@ eggs = neoppod
psutil
ZODB
zope.testing
ZODB-patches =
${neoppod-repository:location}/ZODB.patch
ZODB-patch-options = -p1
[versions]
ZODB = 4.3.1+SlapOSPatched001
ZODB = 4.4.3
transaction =
zdaemon =
......@@ -34,11 +34,13 @@ eggs = erp5.util
interpreter = ${:_buildout_section_name_}
[neoppod]
patch-binary = ${patch:location}/bin/patch
ZODB3-patch-options = -p1
ZODB3-patches +=
${neoppod-repository:location}/ZODB3.patch
[versions]
ZODB3 = 3.10.5+SlapOSPatched003
ZODB3 = 3.10.7+SlapOSPatched001
erp5.util = 0.4.45
# To match ERP5
transaction = 1.1.1
......
......@@ -112,15 +112,15 @@ gitdb = 0.6.4
plone.recipe.command = 1.1
pycrypto = 2.6.1
slapos.recipe.template = 2.7
slapos.toolbox = 0.58
slapos.toolbox = 0.59
smmap = 0.9.0
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
GitPython = 2.0.8
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
atomize = 0.2.0
# Required by:
......@@ -128,11 +128,11 @@ atomize = 0.2.0
backports.ssl-match-hostname = 3.4.0.2
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
feedparser = 5.1.3
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
lockfile = 0.12.2
# Required by:
......@@ -140,10 +140,10 @@ lockfile = 0.12.2
miniupnpc = 1.9
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
paramiko = 2.0.1
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
rpdb = 0.1.5
[buildout]
extends =
../../software/erp5/sofware.cfg
../../software/erp5/software.cfg
parts +=
vifib-fix-products-paths
......
......@@ -10,6 +10,18 @@ Slaprunner is an all-in-one IDE used to develop and test profiles and recipes fo
You can learn how to use it here :
http://community.slapos.org/wiki/slapos-Wiki.Home/developer-Lecture.Web.Runner.Extended
Development
-----------
Here is an example of parameter XML to develop the slaprunner inside another slaprunner:
<?xml version="1.0" encoding="utf-8"?>
<instance>
<parameter id="runner-importer-sshd-port">22224</parameter>
<parameter id="runner-sshd-port">22223</parameter>
<parameter id="no-ipv4-frontend">true</parameter>
</instance>
Parameters
----------
......
......@@ -5,13 +5,13 @@ extends =
../../component/curl/buildout.cfg
../../component/dash/buildout.cfg
../../component/dcron/buildout.cfg
../../component/dropbear/buildout.cfg
../../component/git/buildout.cfg
../../component/tig/buildout.cfg
../../component/logrotate/buildout.cfg
../../component/lxml-python/buildout.cfg
../../component/nano/buildout.cfg
../../component/nginx/buildout.cfg
../../component/openssh/buildout.cfg
../../component/rsync/buildout.cfg
../../component/python-2.7/buildout.cfg
../../component/screen/buildout.cfg
......@@ -34,7 +34,7 @@ common-parts =
template-slapos-cfg
# XXX: we have to manually add this for resilience
rdiff-backup
collective.recipe.template-egg
pbs-recipe-egg
parts =
${:common-parts}
......@@ -54,7 +54,7 @@ mode = 0644
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance-runner.cfg
output = ${buildout:directory}/template-runner.cfg.in
md5sum = 5fbdf6f9996d6cb948ba042e9dd6e43e
md5sum = 26a58e69c4c59d30c9e2f50075cdc679
mode = 0644
[template-runner-import-script]
......@@ -62,7 +62,7 @@ recipe = hexagonit.recipe.download
ignore-existing = true
url = ${:_profile_base_location_}/template/runner-import.sh.jinja2
download-only = true
md5sum = d7f88b58b2508ce5af42ea7d7241626e
md5sum = d3ce78b35cb47dcb647772891a1bf814
filename = runner-import.sh.jinja2
mode = 0644
......@@ -70,7 +70,7 @@ mode = 0644
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance-runner-import.cfg.in
output = ${buildout:directory}/instance-runner-import.cfg
md5sum = a41ff9e12a2304224704f6f31529879b
md5sum = 5234b94f24d57d43e3cd29ccabb2b119
mode = 0644
[template-runner-export-script]
......@@ -78,7 +78,7 @@ recipe = hexagonit.recipe.download
ignore-existing = true
url = ${:_profile_base_location_}/template/runner-export.sh.jinja2
download-only = true
md5sum = 9dc934fe5015ff53869830a833266192
md5sum = d89161fba1dce0de6f4ebbc7eb396ccb
filename = runner-export.sh.jinja2
mode = 0644
......@@ -86,7 +86,7 @@ mode = 0644
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance-runner-export.cfg.in
output = ${buildout:directory}/instance-runner-export.cfg
md5sum = cab358589975d6f250b6363ecc95aab2
md5sum = a898117fe5b001747ef6a273fd03b42c
mode = 0644
[template-resilient]
......@@ -191,6 +191,15 @@ destination = ${buildout:parts-directory}/${:filename}
filename = monitor-check-webrunner-internal-instances.py
mode = 0644
[template-resilient-software-release-information]
recipe = hexagonit.recipe.download
ignore-existing = true
download-only = true
md5sum = 922498a301ab3defe412602f626e02ec
url = ${:_profile_base_location_}/template/${:filename}
filename = resilient_software_release_information.py.in
mode = 0644
[eggs]
recipe = zc.recipe.egg
eggs =
......
......@@ -9,16 +9,15 @@ parts +=
ca-nginx
gunicorn-launcher
gunicorn-graceful
sshkeys-dropbear-runner
dropbear-server-add-authorized-key
sshkeys-authority
publish-connection-information
slaprunner-promise
slaprunner-frontend-promise
apache-httpd-promise
httpd-frontend-promise
slaprunner-supervisord-wrapper
dropbear-promise
runner-sshd-add-authorized-key
runner-sshd-graceful
runner-sshd-promise
runner-sshkeys-authority
runner-sshkeys-sshd
runtestsuite
symlinks
shellinabox
......@@ -38,6 +37,30 @@ parts +=
monitor-check-resilient-feed-file
monitor-check-webrunner-internal-instance
[proxy-free-port]
recipe = slapos.cookbook:free_port
minimum = 49980
maximum = 49989
ip = $${slap-network-information:global-ipv6}
[runner-free-port]
recipe = slapos.cookbook:free_port
minimum = 50005
maximum = 50014
ip = $${slap-network-information:global-ipv6}
[slaprunner]
proxy_port = $${proxy-free-port:port}
runner_port = $${runner-free-port:port}
[supervisord-free-port]
recipe = slapos.cookbook:free_port
minimum = 39986
maximum = 39995
ip = $${slaprunner:ipv4}
[supervisord]
port = $${supervisord-free-port:port}
[exporter]
recipe = slapos.recipe.template:jinja2
......@@ -48,15 +71,21 @@ wrapper = $${:rendered}
mode = 700
context =
section directory directory
raw shell_binary ${dash:location}/bin/dash
raw output_log_file $${directory:log}/resilient.log
raw shell_binary ${bash:location}/bin/bash
raw rsync_binary ${rsync:location}/bin/rsync
[monitor-httpd-free-port]
recipe = slapos.cookbook:free_port
minimum = 8437
maximum = 8446
ip = $${slap-network-information:global-ipv6}
[monitor-instance-parameter]
monitor-httpd-port = 8437
monitor-httpd-port = $${monitor-httpd-free-port:port}
# Pass some parameters to display in the monitoring interface
instance-configuration =
httpdcors cors-domain $${slaprunner-httpd-cors:location} $${httpd-graceful-wrapper:output}
raw webrunner-url https://$${request-frontend:connection-domain}
# Extends publish section with resilient parameters
[publish-connection-information]
......
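The [proxy-free-port], [runner-free-port], [supervisord-free-port] and [monitor-httpd-free-port] sections all use the slapos.cookbook:free_port recipe to pick an unused port inside a small range. Conceptually, the recipe behaves like the probe sketched below (a simplified illustration, not the cookbook's actual implementation):

import socket

def find_free_port(ip, minimum, maximum):
    """Return the first port in [minimum, maximum] that can still be bound on ip."""
    family = socket.AF_INET6 if ':' in ip else socket.AF_INET
    for port in range(minimum, maximum + 1):
        sock = socket.socket(family, socket.SOCK_STREAM)
        try:
            sock.bind((ip, port))
            return port          # nothing is listening here, use it
        except socket.error:
            continue             # busy, try the next one
        finally:
            sock.close()
    return maximum               # fallback when the whole range is busy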
......@@ -9,12 +9,13 @@ parts +=
ca-nginx
gunicorn-launcher
gunicorn-graceful
sshkeys-dropbear-runner
dropbear-server-add-authorized-key
sshkeys-authority
slaprunner-promise
slaprunner-supervisord-wrapper
dropbear-promise
runner-sshd-add-authorized-key
runner-sshd-graceful
runner-sshd-promise
runner-sshkeys-authority
runner-sshkeys-sshd
runtestsuite
shellinabox
symlinks
......@@ -25,6 +26,8 @@ parts +=
bash-profile
supervisord-wrapper
importer-consistency-promise
resilient-software-release-information
# have to repeat the next one, as it's not inherited from pbsready-import
import-on-notification
......@@ -34,9 +37,35 @@ parts +=
# In case both exporter and importer (aka main instance and clone instance)
# run with the same IP (usually for testing purposes),
# run slaprunner using different ports.
[proxy-free-port]
recipe = slapos.cookbook:free_port
minimum = 49990
maximum = 49999
ip = $${slap-network-information:global-ipv6}
[runner-free-port]
recipe = slapos.cookbook:free_port
minimum = 50015
maximum = 50024
ip = $${slap-network-information:global-ipv6}
[slaprunner]
proxy_port = 50000
runner_port = 50005
proxy_port = $${proxy-free-port:port}
runner_port = $${runner-free-port:port}
[supervisord-free-port]
recipe = slapos.cookbook:free_port
minimum = 39996
maximum = 40005
ip = $${slaprunner:ipv4}
[supervisord]
port = $${supervisord-free-port:port}
# Idem for some other services
[runner-sshd-port]
minimum = 22232
maximum = 22241
[importer]
recipe = slapos.recipe.template:jinja2
......@@ -50,26 +79,39 @@ context =
key backend_url slaprunner:access-url
key proxy_host slaprunner:ipv4
section directory directory
raw shell_binary ${dash:location}/bin/dash
raw output_log_file $${directory:log}/resilient.log
raw shell_binary ${bash:location}/bin/bash
raw rsync_binary ${rsync:location}/bin/rsync
raw restore_exit_code_file $${:restore-exit-code-file}
[importer-consistency-promise]
# Test that the importer script and "after-import" subscripts:
# 1/ Have been run in the last 24 hours
# 2/ Have succeeded
# Test that the importer script and "after-import" subscripts
# are not older than 1 day (24h), and have succeeded
recipe = collective.recipe.template
input = inline: #!/bin/sh
EXIT_CODE_FILE=$(find "$${importer:restore-exit-code-file}" -mtime -1)
EXIT_CODE_FILE=$(find "$${importer:restore-exit-code-file}")
RECENT_EXIT_CODE_FILE=$(find "$${importer:restore-exit-code-file}" -mtime -1)
if [ -z "$EXIT_CODE_FILE" ]; then
echo "Consistency check is too old."
exit 1
fi
exit 0;
else
if [ -z "$RECENT_EXIT_CODE_FILE" ]; then
echo "Consistency check is too old.";
exit 1;
else
EXIT_CODE=$(cat $EXIT_CODE_FILE)
exit $EXIT_CODE
fi
fi
exit 1; # Something else went wrong
output = $${directory:promises}/importer-consistency-promise
mode = 755
[resilient-software-release-information]
recipe = slapos.recipe.template
url = ${template-resilient-software-release-information:destination}/${template-resilient-software-release-information:filename}
output = $${directory:cgi-bin}/resilient_software_release_information.py
mode = 0600
[slap-parameter]
auto-deploy-instance = false
auto-deploy = true
......@@ -84,7 +126,6 @@ monitor-url = $${publish:monitor-url}
monitor-user = $${publish:monitor-user}
monitor-password = $${publish:monitor-password}
[monitor-instance-parameter]
monitor-httpd-port = 8360
#monitor-title = $${slap-parameter:name}
......
......@@ -62,6 +62,12 @@
"description": "Name of the instance, to show in the window title",
"type": "string"
},
"no-ipv4-frontend": {
"title": "No IPv4 frontend",
"description": "Prevent the slaprunner to order an IPv4 frontend for itself",
"enum": ["true", "false"],
"default": "false"
},
"custom-frontend-backend-url": {
"title": "Custom Frontend Backend URL",
"description": "return an ipv4 frontend of the given ipv6(+optional port)",
......
......@@ -6,16 +6,15 @@ parts =
ca-nginx
gunicorn-launcher
gunicorn-graceful
sshkeys-dropbear-runner
dropbear-server-add-authorized-key
sshkeys-authority
publish-connection-information
slaprunner-promise
slaprunner-frontend-promise
apache-httpd-promise
httpd-frontend-promise
slaprunner-supervisord-wrapper
dropbear-promise
runner-sshd-add-authorized-key
runner-sshd-graceful
runner-sshd-promise
runner-sshkeys-authority
runner-sshkeys-sshd
runtestsuite
symlinks
shellinabox
......@@ -29,6 +28,10 @@ parts =
supervisord-wrapper
supervisord-promise
httpd-graceful-wrapper
{% if slapparameter_dict.get('no-ipv4-frontend', 'false') == 'false' %}
slaprunner-frontend-promise
httpd-frontend-promise
{% endif %}
{% if slapparameter_dict.get('custom-frontend-backend-url') and slapparameter_dict.get('check-custom-frontend-promise', 'false') == 'true' %}
custom-frontend-promise
{% endif %}
......@@ -103,6 +106,7 @@ test = $${:etc}/test/
nginx-data = $${:srv}/nginx
ca-dir = $${:srv}/ssl
project = $${:srv}/runner/project
cgi-bin = $${:srv}/cgi-bin
[runnerdirectory]
recipe = slapos.cookbook:mkdirectory
......@@ -131,13 +135,13 @@ working-directory = $${runnerdirectory:home}
project-directory = $${runnerdirectory:project}
instance_root = $${runnerdirectory:instance-root}
software_root = $${runnerdirectory:software-root}
ssh_client = ${openssh:location}/bin/ssh
public_key = $${runner-sshd-raw-server:rsa-keyfile}.pub
private_key = $${runner-sshd-raw-server:rsa-keyfile}
instance-monitor-url = https://[$${:ipv6}]:$${monitor-parameters:port}
etc_dir = $${directory:etc}
log_dir = $${directory:log}
run_dir = $${directory:run}
ssh_client = $${sshkeys-dropbear-runner:wrapper}
public_key = $${sshkeys-dropbear-runner:public-key}
private_key = $${sshkeys-dropbear-runner:private-key}
ipv4 = $${slap-network-information:local-ipv4}
ipv6 = $${slap-network-information:global-ipv6}
instance_root = $${runnerdirectory:instance-root}
......@@ -196,43 +200,82 @@ command-line = ${buildout:directory}/bin/slaprunnertest
wrapper-path = $${directory:bin}/runTestSuite
environment = RUNNER_CONFIG=$${slapos-cfg:rendered}
# Deploy dropbear (minimalist SSH server)
# Deploy openssh-server
[runner-sshd-port]
recipe = slapos.cookbook:free_port
minimum = 22222
maximum = 22231
ip = $${slap-network-information:global-ipv6}
[runner-sshd-config]
recipe = slapos.recipe.template:jinja2
rendered = $${directory:etc}/runner-sshd.conf
path_pid = $${directory:run}/runner-sshd.pid
host_key = $${directory:ssh}/runner_server_key.rsa
template = inline:
PidFile $${:path_pid}
Port $${runner-sshd-port:port}
ListenAddress $${slap-network-information:global-ipv6}
Protocol 2
UsePrivilegeSeparation no
HostKey $${:host_key}
PasswordAuthentication no
PubkeyAuthentication yes
AuthorizedKeysFile $${buildout:directory}/.ssh/authorized_keys
ForceCommand if [ -z "$SSH_ORIGINAL_COMMAND" ]; then ${bash:location}/bin/bash -l; else $SSH_ORIGINAL_COMMAND; fi
[runner-sshd-raw-server]
recipe = slapos.cookbook:wrapper
host = $${slap-network-information:global-ipv6}
rsa-keyfile = $${runner-sshd-config:host_key}
home = $${directory:ssh}
command-line = ${openssh:location}/sbin/sshd -D -e -f $${runner-sshd-config:rendered}
wrapper-path = $${directory:bin}/runner_raw_sshd
[runner-sshd-authorized-key]
<= runner-sshd-raw-server
recipe = slapos.cookbook:dropbear.add_authorized_key
key = $${slap-parameter:user-authorized-key}
[runner-sshd-server]
recipe = collective.recipe.template
log = $${basedirectory:log}/runner-sshd.log
input = inline:#!/bin/sh
exec $${runner-sshd-raw-server:wrapper-path} >> $${:log} 2>&1
output = $${rootdirectory:bin}/runner_raw_sshd_log
mode = 700
[runner-sshd-graceful]
recipe = slapos.cookbook:wrapper
command-line = $${directory:bin}/killpidfromfile $${runner-sshd-config:path_pid} SIGHUP
wrapper-path = $${directory:scripts}/runner-sshd-graceful
[sshkeys-directory]
recipe = slapos.cookbook:mkdirectory
requests = $${directory:sshkeys}/requests/
keys = $${directory:sshkeys}/keys/
[sshkeys-authority]
[runner-sshkeys-authority]
recipe = slapos.cookbook:sshkeys_authority
request-directory = $${sshkeys-directory:requests}
keys-directory = $${sshkeys-directory:keys}
wrapper = $${directory:services}/sshkeys_authority
keygen-binary = ${dropbear:location}/bin/dropbearkey
keygen-binary = ${openssh:location}/bin/ssh-keygen
[dropbear-runner-server]
recipe = slapos.cookbook:dropbear
host = $${slap-network-information:global-ipv6}
port = 22222
home = $${buildout:directory}
wrapper = $${directory:bin}/runner_sshd
shell = ${bash:location}/bin/bash
rsa-keyfile = $${directory:ssh}/server_key.rsa
allow-port-forwarding = true
dropbear-binary = ${dropbear:location}/sbin/dropbear
[sshkeys-dropbear-runner]
<= sshkeys-authority
[runner-sshkeys-sshd]
<= runner-sshkeys-authority
recipe = slapos.cookbook:sshkeys_authority.request
name = dropbear
name = sshd
type = rsa
executable = $${dropbear-runner-server:wrapper}
public-key = $${dropbear-runner-server:rsa-keyfile}.pub
private-key = $${dropbear-runner-server:rsa-keyfile}
wrapper = $${directory:services}/runner_sshd
executable = $${runner-sshd-server:output}
public-key = $${runner-sshd-raw-server:rsa-keyfile}.pub
private-key = $${runner-sshd-raw-server:rsa-keyfile}
wrapper = $${directory:services}/runner-sshd
[dropbear-server-add-authorized-key]
<= dropbear-runner-server
[runner-sshd-add-authorized-key]
recipe = slapos.cookbook:dropbear.add_authorized_key
home = $${buildout:directory}
key = $${slap-parameter:user-authorized-key}
#---------------------------
......@@ -422,6 +465,7 @@ name = example.com
#--
#-- Request frontend
{% if slapparameter_dict.get('no-ipv4-frontend', 'false') == 'false' -%}
[request-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
......@@ -433,6 +477,14 @@ config-url = $${slaprunner:access-url}
config-domain = $${slap-parameter:frontend-domain}
return = site_url domain
[slaprunner-frontend-promise]
recipe = slapos.cookbook:check_url_available
path = $${directory:promises}/slaprunner_frontend
url = https://$${request-frontend:connection-domain}/login
dash_path = ${dash:location}/bin/dash
curl_path = ${curl:location}/bin/curl
check-secure = 1
[request-httpd-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
......@@ -454,6 +506,7 @@ dash_path = {{ dash_executable_location }}
curl_path = {{ curl_executable_location }}
check-secure = 1
{% endif %}
[htpasswd]
recipe = slapos.cookbook:generate.password
......@@ -477,18 +530,28 @@ password = $${htpasswd:passwd}
#--
#-- Send information to SlapOS Master
[user-info]
recipe = slapos.cookbook:userinfo
[publish-connection-information]
recipe = slapos.cookbook:publish
backend-url = $${slaprunner:access-url}
url = https://$${request-frontend:connection-domain}
init-user = $${runner-htpasswd:user}
init-password = $${runner-htpasswd:password}
ssh-command = ssh $${dropbear-runner-server:host} -p $${dropbear-runner-server:port}
webdav-url = $${request-httpd-frontend:connection-secure_access}/share/
public-url = $${request-httpd-frontend:connection-secure_access}/public/
ssh-command = ssh $${user-info:pw-name}@$${slap-network-information:global-ipv6} -p $${runner-sshd-port:port}
git-public-url = https://[$${httpd-parameters:global_ip}]:$${httpd-parameters:global_port}/git-public/
git-private-url = https://[$${httpd-parameters:global_ip}]:$${httpd-parameters:global_port}/git/
monitor-base-url = $${publish:monitor-base-url}
{% if slapparameter_dict.get('no-ipv4-frontend', 'false') == 'false' -%}
url = https://$${request-frontend:connection-domain}
webdav-url = $${request-httpd-frontend:connection-secure_access}/share/
public-url = $${request-httpd-frontend:connection-secure_access}/public/
{% else %}
url = $${slaprunner:access-url}
webdav-url = $${apache-httpd:access-url}/share/
public-url = $${apache-httpd:access-url}/public/
{% endif %}
{% if slapparameter_dict.get('instance-type', '') != 'resilient' -%}
{% set monitor_interface_url = slapparameter_dict.get('monitor-interface-url', 'https://monitor.app.officejs.com') -%}
monitor-setup-url = {{ monitor_interface_url }}/#page=settings_configurator&url=$${publish:monitor-url}&username=$${publish:monitor-user}&password=$${publish:monitor-password}
......@@ -507,19 +570,11 @@ path = $${directory:promises}/slaprunner
hostname = $${slaprunner:ipv6}
port = $${slaprunner:runner_port}
[slaprunner-frontend-promise]
recipe = slapos.cookbook:check_url_available
path = $${directory:promises}/slaprunner_frontend
url = https://$${request-frontend:connection-domain}/login
dash_path = ${dash:location}/bin/dash
curl_path = ${curl:location}/bin/curl
check-secure = 1
[dropbear-promise]
[runner-sshd-promise]
recipe = slapos.cookbook:check_port_listening
path = $${directory:promises}/dropbear
hostname = $${dropbear-runner-server:host}
port = $${dropbear-runner-server:port}
path = $${directory:promises}/runner-sshd
hostname = $${slap-network-information:global-ipv6}
port = $${runner-sshd-port:port}
[symlinks]
recipe = cns.recipe.symlink
......@@ -670,7 +725,8 @@ no_logfile = NONE
numprocs = 1
path = $${shell:path}
pidfile = $${directory:run}/supervisord.pid
server = $${slaprunner:ipv4}:$${:port}
ip = $${slaprunner:ipv4}
server = $${:ip}:$${:port}
port = 39986
slapgrid-cp = slapgrid-cp
slapgrid-cp-command = $${slaprunner:slapos} node instance --all --cfg $${:slapos-cfg} --pidfile $${:slapgrid-cp-pid} --verbose --logfile $${:slapgrid-cp-log}
......@@ -747,7 +803,6 @@ monitor-url-list = {{ slapparameter_dict['monitor-url-list'] }}
# Pass some parameters to display in the monitoring interface
instance-configuration =
httpdcors cors-domain $${slaprunner-httpd-cors:location} $${httpd-graceful-wrapper:output}
raw webrunner-url https://$${request-frontend:connection-domain}
{% endif -%}
configuration-file-path = $${buildout:directory}/knowledge0.cfg
......
......@@ -20,29 +20,29 @@ gunicorn = 19.5.0
prettytable = 0.7.2
pycrypto = 2.6.1
slapos.recipe.template = 2.9
slapos.toolbox = 0.58
slapos.toolbox = 0.59
smmap = 0.9.0
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
GitPython = 2.0.8
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
atomize = 0.2.0
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
feedparser = 5.2.1
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
lockfile = 0.12.2
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
paramiko = 2.0.1
# Required by:
# slapos.toolbox = 0.58
# slapos.toolbox = 0.59
passlib = 1.6.5
\ No newline at end of file
# Provides information related to the Webrunner Software Release to the
# takeover interface of the Resilient stack
def main():
return {}
\ No newline at end of file
......@@ -3,6 +3,15 @@ LC_ALL=C
export LC_ALL
umask 077
# Exit on any error, to prevent inconsistent backup
set -e
# Redirect output to log
exec > >(tee -ai {{ output_log_file }})
exec 2>&1
echo -e "\n\n$0 run at : $(date)"
srv_directory={{ directory['srv'] }}
sync_element () {
......@@ -35,16 +44,18 @@ sync_element () {
echo "Changing current directory to $path."
cd $path;
if [ -f $element ] || [ -d $element ]; then
echo "Running {{ rsync_binary }} -rlptgov --safe-links --exclude-from=$srv_directory/exporter.exclude --delete --delete-excluded $element $backup_path"
{{ rsync_binary }} -rlptgov --safe-links --exclude-from=$srv_directory/exporter.exclude --delete --delete-excluded $element $backup_path;
command="{{ rsync_binary }} -rlptgov --stats --safe-links --exclude-from=$srv_directory/exporter.exclude --delete --delete-excluded $element $backup_path"
echo "Running : $command"
$command
fi
done
}
sync_element $srv_directory/runner {{ directory['backup'] }}/runner/ instance project proxy.db
# We sync .* apart
date +%s -u > {{ directory['etc'] }}/.resilient-timestamp
sync_element {{ directory['etc'] }} {{ directory['backup'] }}/etc/ config.json
# Hidden files are related to the webrunner's internals
cp -r {{ directory['etc'] }}/.??* {{ directory['backup'] }}/etc/
sync_element {{ directory['etc'] }} {{ directory['backup'] }}/etc/ config.json ssh
if [ -d {{ directory['backup'] }}/runner/software ]; then
rm {{ directory['backup'] }}/runner/software/*
fi
......
......@@ -3,6 +3,13 @@ set -e
LC_ALL=C
export LC_ALL
umask 077
# Redirect output to log
exec > >(tee -ai {{ output_log_file }})
exec 2>&1
echo -e "\n\nrunner-import run at : $(date)"
srv_directory={{ directory['srv'] }}
restore_element () {
backup_path=$1
......@@ -13,22 +20,15 @@ restore_element () {
do
cd $backup_path;
if [ -f $element ] || [ -d $element ]; then
{{ rsync_binary }} -av --delete --exclude *.sock --exclude *.pid --exclude .installed.cfg --exclude .installed-switch-softwaretype.cfg $backup_path/$element $restore_path;
command="{{ rsync_binary }} --stats -av --delete --exclude *.sock --exclude *.pid --exclude .installed.cfg --exclude .installed-switch-softwaretype.cfg $backup_path/$element $restore_path"
echo "Running: \"$command\""
$command
fi
done
}
write_backup_proof () {
cd {{ directory['backup'] }}
find -type f ! -name backup.signature ! -wholename "./rdiff-backup-data/*" -print0 | xargs -P4 -0 sha256sum | LC_ALL=C sort -k 66 > $srv_directory/proof.signature
diff -ruw {{ directory['backup'] }} $srv_directory/proof.signature > $srv_directory/backup.diff || true # diff exits with code 1 when files are different
}
# For now we just make the diff before
write_backup_proof
restore_element {{ directory['backup'] }}/runner/ $srv_directory/runner instance project proxy.db
restore_element {{ directory['backup'] }}/etc/ {{ directory['etc'] }} config.json ssh
restore_element {{ directory['backup'] }}/etc/ {{ directory['etc'] }} config.json
cp -r {{ directory['backup'] }}/etc/.??* {{ directory['etc'] }};
......@@ -62,7 +62,6 @@ $SQLITE3 $DATABASE "update software11 set url='$SOFTWARE_RELEASE' where url='$OL
# Change slapproxy database to have all instances stopped
$SQLITE3 $DATABASE "update partition11 set slap_state='stopped';"
set -x
# Run slapproxy on different port (in case of running inside of erp5testnode
# with only one IP and port 50000 already taken by slapproxy of main instance)
HOST="{{ proxy_host }}"
......
......@@ -552,6 +552,7 @@ eggs =
Products.ExternalEditor
Products.TIDStorage
Products.LongRequestLogger
Products.PloneHotfix20160830
# BBB: Temporarily keep zope.app.testing awaiting we use newer version of CMF
# (for tests like testCookieCrumbler).
......@@ -649,7 +650,7 @@ SOAPpy = 0.12.0nxd001
# CMF 2.3 is not yet supported.
Products.CMFCalendar = 2.2.3
Products.CMFCore = 2.2.9
Products.CMFCore = 2.2.10
Products.CMFDefault = 2.2.4
Products.CMFTopic = 2.2.1
Products.CMFUid = 2.2.1
......@@ -672,6 +673,7 @@ Products.ExternalEditor = 1.1.1
Products.GenericSetup = 1.8.3
Products.LongRequestLogger = 2.0.0
Products.MimetypesRegistry = 2.0.10
Products.PloneHotfix20160830 = 1.3
Products.PluginRegistry = 1.4
Products.TIDStorage = 5.4.9
PyPDF2 = 1.26.0
......@@ -681,6 +683,7 @@ Pympler = 0.4.3
StructuredText = 2.11.1
WSGIUtils = 0.7
Zope2 = 2.13.24
astor = 0.5
# astroid 1.4.1 breaks testDynamicClassGeneration
astroid = 1.3.8
backports-abc = 0.4
......@@ -696,23 +699,23 @@ httplib2 = 0.9.2
huBarcode = 1.0.0
interval = 1.0.0
ipdb = 0.10.1
ipykernel = 4.4.1
ipykernel = 4.5.0
ipython = 5.1.0
ipywidgets = 5.1.5
ipywidgets = 5.2.2
logilab-common = 1.2.2
matplotlib = 1.5.2
matplotlib = 1.5.3
mistune = 0.7.3
notebook = 4.2.2
numpy = 1.11.1
objgraph = 3.0.0
pandas = 0.18.1
ply = 3.8
ply = 3.9
polib = 1.0.7
pprofile = 1.9.1
prompt-toolkit = 1.0.3
pprofile = 1.9.2
prompt-toolkit = 1.0.7
ptyprocess = 0.5.1
pycountry = 1.20
pyflakes = 1.2.3
pyflakes = 1.3.0
# pylint 1.5.1 breaks testDynamicClassGeneration
pylint = 1.4.4
python-memcached = 1.58
......@@ -725,22 +728,22 @@ scikit-learn = 0.17.1
scipy = 0.18.0
simplegeneric = 0.8.1
socketpool = 0.5.3
spyne = 2.12.11
spyne = 2.12.13
suds = 0.4
terminado = 0.6
threadframe = 0.2
timerserver = 2.0.2
tornado = 4.4.1
traitlets = 4.2.2
traitlets = 4.3.0
urlnorm = 1.1.4
uuid = 1.30
validictory = 1.0.2
widgetsnbextension = 1.2.3
widgetsnbextension = 1.2.6
xfw = 0.10
xupdate-processor = 0.4
# Required by:
# Products.CMFCore==2.2.9
# Products.CMFCore==2.2.10
Products.ZSQLMethods = 2.13.4
# Required by:
......@@ -753,15 +756,15 @@ backports.ssl-match-hostname = 3.5.0.1
# Required by:
# tornado==4.4.1
certifi = 2016.8.8
certifi = 2016.8.31
# Required by:
# matplotlib==1.5.1
# matplotlib==1.5.3
cycler = 0.10.0
# Required by:
# ipython==5.1.0
# traitlets==4.2.2
# traitlets==4.3.0
decorator = 4.0.10
# Required by:
......@@ -771,7 +774,7 @@ fpconst = 0.7.2
# Required by:
# nbformat==4.1.0
# notebook==4.2.2
# traitlets==4.2.2
# traitlets==4.3.0
ipython-genutils = 0.1.0
# Required by:
......@@ -797,7 +800,7 @@ pexpect = 4.2.1
pickleshare = 0.7.4
# Required by:
# matplotlib==1.5.1
# matplotlib==1.5.3
# pandas==0.18.1
python-dateutil = 2.5.3
......
......@@ -8,6 +8,8 @@ extends =
../../component/openssl/buildout.cfg
../../component/logrotate/buildout.cfg
../../component/gzip/buildout.cfg
../../component/lxml-python/buildout.cfg
../../component/python-cryptography/buildout.cfg
parts =
slapos-cookbook
......@@ -36,6 +38,8 @@ on-update = true
[monitor-eggs]
recipe = zc.recipe.egg
eggs =
${lxml-python:egg}
${python-cryptography:egg}
plone.recipe.command
collective.recipe.template
cns.recipe.symlink
......@@ -115,5 +119,5 @@ depends =
[versions]
PyRSS2Gen = 1.1
cns.recipe.symlink = 0.2.3
slapos.toolbox = 0.58
slapos.toolbox = 0.59
......@@ -3,13 +3,14 @@ extends =
../../component/apache/buildout.cfg
../../component/bash/buildout.cfg
../../component/dropbear/buildout.cfg
../../component/openssh/buildout.cfg
../../component/gzip/buildout.cfg
../../component/rdiff-backup/buildout.cfg
../../component/rsync/buildout.cfg
../monitor/buildout.cfg
parts =
collective.recipe.template-egg
pbs-recipe-egg
pbsready
pbsready-import
pbsready-export
......@@ -22,9 +23,11 @@ parts =
rdiff-backup
dash
[collective.recipe.template-egg]
[pbs-recipe-egg]
recipe = zc.recipe.egg
eggs = collective.recipe.template
eggs =
collective.recipe.template
collective.recipe.environment
#----------------
#--
......@@ -38,7 +41,7 @@ eggs = collective.recipe.template
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/pbsready.cfg.in
output = ${buildout:directory}/pbsready.cfg
md5sum = d2b06a13354127e9cbbf1c5d21791cb4
md5sum = fee06ab268015b394ec49cb4600de1ed
mode = 0644
[pbsready-import]
......@@ -47,7 +50,7 @@ mode = 0644
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/pbsready-import.cfg.in
output = ${buildout:directory}/pbsready-import.cfg
md5sum = dd13497575d13b92c3abb0a633777e2c
md5sum = 89619b0d8626c76402a11bb08e7ba8a1
mode = 0644
[pbsready-export]
......@@ -56,14 +59,14 @@ mode = 0644
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/pbsready-export.cfg.in
output = ${buildout:directory}/pbsready-export.cfg
md5sum = bfd71e454140cf13179d408e10f95bf8
md5sum = 48cf26431ab052eae8227efd457db8c3
mode = 0644
[template-pull-backup]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance-pull-backup.cfg.in
output = ${buildout:directory}/instance-pull-backup.cfg
md5sum = cb7acac7ab41bf44c20d6d03bfad8217
md5sum = 098a9c266fe7256455f079af112f9149
mode = 0644
[template-replicated]
......@@ -92,7 +95,7 @@ output = ${buildout:directory}/instance-frozen.cfg
[resilient-web-takeover-cgi-script-download]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/resilient-web-takeover-cgi-script.py.in
md5sum = 3aa7624af1196062d7d01946d4de9f0e
md5sum = c46c8e3e4ce4376c98ad2fc0e2ff0fe4
mode = 0644
destination = ${buildout:directory}/resilient-web-takeover-cgi-script.py.in
......
......@@ -7,8 +7,13 @@ parts =
cron
cron-entry-logrotate
sshkeys-authority
sshkeys-dropbear
sshkeys-openssh
backup-transfer-integrity-promise
resilient-genstatrss-wrapper
pbs-push-history-log
backup-signature-link
cron-pbs-status-feed
pull-push-stalled-promise
## Monitor for pbs
monitor-base
......@@ -57,7 +62,8 @@ pbs-wrappers = $${rootdirectory:bin}/pbs
dot-ssh = $${basedirectory:ssh-home}/.ssh
notifier-feeds = $${basedirectory:notifier}/feeds
notifier-callbacks = $${basedirectory:notifier}/callbacks
notifier-status-items = $${basedirectory:notifier}/status-items
monitor-resilient = $${monitor-directory:private}/resilient
#----------------
......@@ -73,6 +79,12 @@ database = $${rootdirectory:srv}/equeue.db
wrapper = $${basedirectory:services}/equeue
equeue-binary = ${buildout:bin-directory}/equeue
[notifier-port]
recipe = slapos.cookbook:free_port
minimum = 8088
maximum = 8097
ip = $${notifier:host}
# notifier.notify adds the [exporter, notifier] to the execution queue
# notifier.notify.callback sets up a callback
[notifier]
......@@ -83,11 +95,10 @@ wrapper = $${basedirectory:services}/notifier
mode = 0700
command = ${buildout:bin-directory}/pubsubserver --callbacks $${directory:notifier-callbacks} --feeds $${directory:notifier-feeds} --equeue-socket $${equeue:socket} --logfile $${basedirectory:log}/notifier.log $${:host} $${:port}
host = $${slap-network-information:global-ipv6}
port = 8088
port = $${notifier-port:port}
context =
key content notifier:command
[logrotate-entry-equeue]
<= logrotate
recipe = slapos.cookbook:logrotate.d
......@@ -111,7 +122,7 @@ callbacks = $${directory:notifier-callbacks}
equeue-socket = $${equeue:socket}
notifier-binary = ${buildout:bin-directory}/pubsubnotifier
rdiffbackup-binary = ${buildout:bin-directory}/rdiff-backup
sshclient-binary = $${dropbear-client:wrapper}
sshclient-binary = $${openssh-client:wrapper-path}
known-hosts = $${directory:dot-ssh}/known_hosts
promises-directory = $${basedirectory:promises}
directory = $${directory:pbs-backup}
......@@ -122,7 +133,23 @@ run-directory = $${basedirectory:run}
notifier-url = http://[$${notifier:host}]:$${notifier:port}
slave-instance-list = $${slap-parameter:slave_instance_list}
ignore-known-hosts-file = $${slap-parameter:ignore-known-hosts-file}
# To get a verbose feed about PBS state
instance-root-name = $${instance-info-parameters:root-name}
log-url = $${publish:monitor-base-url}/private/notifier/
status-item-directory = $${directory:notifier-status-items}
[pbs-resilient-status-feed]
recipe = slapos.cookbook:wrapper
command-line = ${buildout:directory}/bin/generatefeed --output $${:feed-path} --status-item-path $${pbs:status-item-directory} --title "Status feed for $${instance-info-parameters:root-name}-PBS" --link $${pbs:log-url}
feed-path = $${directory:monitor-resilient}/pbs-status-rss
wrapper-path = $${rootdirectory:bin}/resilient-genstatusrss.py
[cron-pbs-status-feed]
<= cron
recipe = slapos.cookbook:cron.d
name = resilient-pbs-status-feed
frequency = 5 * * * *
command = $${pbs-resilient-status-feed:wrapper-path}
#----------------
#--
......@@ -190,29 +217,30 @@ recipe = slapos.cookbook:sshkeys_authority
request-directory = $${sshkeys-directory:requests}
keys-directory = $${sshkeys-directory:keys}
wrapper = $${basedirectory:services}/sshkeys_authority
keygen-binary = ${dropbear:location}/bin/dropbearkey
keygen-binary = ${openssh:location}/bin/ssh-keygen
[sshkeys-dropbear]
[sshkeys-openssh]
<= sshkeys-authority
recipe = slapos.cookbook:sshkeys_authority.request
name = pbs
name = pbs-client
type = rsa
executable = $${dropbear-client:wrapper}
public-key = $${dropbear-client:identity-file}.pub
private-key = $${dropbear-client:identity-file}
executable = $${openssh-client:wrapper-path}
public-key = $${openssh-client:identity-file}.pub
private-key = $${openssh-client:identity-file}
wrapper = $${rootdirectory:bin}/do_backup
#----------------
#--
#-- Dropbear.
#-- OpenSSH.
[dropbear-client]
recipe = slapos.cookbook:dropbear.client
dbclient-binary = ${dropbear:location}/bin/dbclient
wrapper = $${rootdirectory:bin}/ssh
[openssh-client]
recipe = slapos.cookbook:wrapper
home = $${basedirectory:ssh-home}
identity-file = $${basedirectory:ssh-home}/id_rsa
identity-file = $${:home}/id_rsa
command-line = ${openssh:location}/bin/ssh -T -o "UserKnownHostsFile $${pbs:known-hosts}" -i $${:identity-file}
wrapper-path = $${rootdirectory:bin}/ssh
parameters-extra = true
#----------------
......@@ -233,6 +261,46 @@ monitor-title = PBS Instance
monitor-password = $${htpasswd:passwd}
monitor-username = $${htpasswd:username}
#----------------
#--
#-- Resiliency promises.
[backup-transfer-integrity-promise]
recipe = slapos.recipe.template:jinja2
template = inline:
#!${dash:location}/bin/dash
# Raise an error if signatures are different
# Error cannot be deduced if files do not exist
cd $${directory:pbs-backup}
if [ ! -f "proof.signature" ]; then exit 0; fi
backup_signature=$(find . -maxdepth 2 -name backup.signature)
if [ -z "$backup_signature" ]; then
exit 0;
else
diff "proof.signature" "$backup_signature";
fi
rendered = $${basedirectory:promises}/backup-transfer-integrity-promise
mode = 700
[resilient-genstatrss-wrapper]
recipe = slapos.cookbook:wrapper
# XXX - hard-coded Urls
command-line = ${buildout:directory}/bin/rdiffbackup.genstatrss --output '$${monitor-directory:public}/resilient-feed' --rdiff_backup_data_folder '$${pbs:rdiff-backup-data-folder}' --feed_url '$${monitor-conf-parameters:base-url}/public/resilient-feed'
wrapper-path = $${directory:bin}/resilient-genstatrss.py
[pbs-push-history-log]
recipe = cns.recipe.symlink
symlink = $${pbs:rdiff-backup-data-folder}/restore.log = $${basedirectory:log}/pbs-push-history-log
[backup-signature-link]
recipe = cns.recipe.symlink
symlink = $${directory:pbs-backup}/proof.signature = $${directory:monitor-resilient}/backup.signature
[pull-push-stalled-promise]
recipe = slapos.cookbook:wrapper
# time-buffer is 18h: the backup cron runs once a day (24h), minus up to 6h of random sleep
command-line = ${buildout:bin-directory}/check-feed-as-promise --feed-path $${pbs-resilient-status-feed:feed-path} --title --ok-pattern 'OK' --time-buffer 64800
wrapper-path = $${basedirectory:promises}/stalled-pull-push
#----------------
#--
......@@ -240,7 +308,7 @@ monitor-username = $${htpasswd:username}
[publish-connection-information]
recipe = slapos.cookbook:publish
ssh-key = $${sshkeys-dropbear:public-key-value}
ssh-key = $${sshkeys-openssh:public-key-value}
notification-url = http://[$${notifier:host}]:$${notifier:port}/notify
feeds-url = http://[$${notifier:host}]:$${notifier:port}/get/
monitor-base-url = $${publish:monitor-base-url}
......
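The 64800 seconds passed as --time-buffer above is just the 18h mentioned in the comment: one expected backup per day, minus the up-to-6h random sleep now applied before each export (see the cron-entry-backup change below).

# 24h between two backups, minus up to 6h of random pre-backup sleep:
time_buffer = 24 * 3600 - 6 * 3600   # 64800 seconds, i.e. 18h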
......@@ -11,11 +11,14 @@ parts =
cron
cron-entry-logrotate
sshkeys-authority
dropbear-server
sshkeys-dropbear
resilient-sshkeys-dropbear-promise
dropbear-server-pbs-authorized-key
sshd-raw-server
sshd-graceful
sshkeys-sshd
sshd-promise
resilient-sshkeys-sshd-promise
sshd-pbs-authorized-key
notifier
notifier-exporter-promise
cron-entry-backup
......@@ -28,8 +31,17 @@ pid = $${:var}/pid
# Define port of ssh server. It has to be different from import so that it
# supports export/import using same IP (slaprunner, slapos-in-partition,
# ipv4...)
[dropbear-server]
port = 22221
[sshd-port]
recipe = slapos.cookbook:free_port
minimum = 22200
maximum = 22209
ip = $${slap-network-information:global-ipv6}
[notifier-port]
recipe = slapos.cookbook:free_port
minimum = 65526
maximum = 65535
ip = $${notifier:host}
[resilient-publish-connection-parameter]
notification-id = http://[$${notifier:host}]:$${notifier:port}/get/$${notifier-exporter:name}
......@@ -46,6 +58,18 @@ wrapper = $${rootdirectory:bin}/exporter
notify = $${slap-parameter:notify}
pidfile = $${resilient-directory:pid}/$${:name}.pid
[notifier-exporter-promise]
recipe = slapos.recipe.template:jinja2
mode = 700
template = inline:
#!${bash:location}/bin/bash
EXPORTER_FEED="$${notifier-exporter:log-file}"
FAILURE_PATTERN="FAILURE"
if [ -s "$EXPORTER_FEED" ]; then
tail -n 1 $EXPORTER_FEED | grep -vq FAILURE_PATTERN
fi
rendered = $${basedirectory:promises}/exporter-status
[cron-entry-backup]
# Schedule the periodic database dump.
# Through notifications, this triggers (one or more) incremental backups on PBS instances.
......@@ -53,7 +77,9 @@ pidfile = $${resilient-directory:pid}/$${:name}.pid
recipe = slapos.cookbook:cron.d
name = backup
frequency = $${slap-parameter:resiliency-backup-periodicity}
command = $${notifier-exporter:wrapper} --transaction-id `date +%s`
# Sleep for up to 6 hours before backing up (disk/network IO optimization)
sleep-command = ${bash:location}/bin/bash -c "sleep $((RANDOM%(60*60*6)))"
command = ($${:sleep-command}; $${notifier-exporter:wrapper} --transaction-id `date +%s`)
[slap-parameter]
# In cron.d format (i.e things like */15 * * * * are accepted).
......
......@@ -11,10 +11,12 @@ parts =
cron
cron-entry-logrotate
sshkeys-authority
dropbear-server
sshkeys-dropbear
resilient-sshkeys-dropbear-promise
dropbear-server-pbs-authorized-key
sshd-raw-server
sshd-graceful
sshkeys-sshd
sshd-promise
resilient-sshkeys-sshd-promise
sshd-pbs-authorized-key
notifier
resiliency-takeover-script
......@@ -22,9 +24,13 @@ parts =
resilient-web-takeover-httpd-wrapper
resilient-web-takeover-httpd-promise
check-backup-integrity-on-notification
import-on-notification
backup-transfer-integrity-promise
resilient-publish-connection-parameter
backup-signature-link
[resilient-publish-connection-parameter]
notification-url = http://[$${notifier:host}]:$${notifier:port}/notify
takeover-url = http://[$${resilient-web-takeover-httpd-configuration-file:listening-ip}]:$${resilient-web-takeover-httpd-configuration-file:listening-port}/
......@@ -33,12 +39,18 @@ takeover-password = $${resilient-web-takeover-password:passwd}
# Define port of ssh server. It has to be different from import so that it
# supports export/import using same IP (slaprunner, slapos-in-partition,
# ipv4...)
[dropbear-server]
port = 22220
[sshd-port]
recipe = slapos.cookbook:free_port
minimum = 22210
maximum = 22219
ip = $${slap-network-information:global-ipv6}
# Define port of notifier (same reason)
[notifier]
port = 65533
[notifier-port]
recipe = slapos.cookbook:free_port
minimum = 65516
maximum = 65525
ip = $${notifier:host}
[import-on-notification]
# notifier.callback runs a script when a notification (sent by a parent PBS)
......@@ -48,6 +60,42 @@ recipe = slapos.cookbook:notifier.callback
on-notification-id = $${slap-parameter:on-notification}
callback = $${importer:wrapper}
[post-notification-run]
recipe = collective.recipe.template
diff-file = $${basedirectory:backup}/backup.diff
proof-signature-file = $${basedirectory:backup}/proof.signature
input = inline:
#!/${bash:location}/bin/bash
cd $${directory:backup}
find -type f ! -name backup.signature ! -wholename "./rdiff-backup-data/*" -print0 | xargs -P4 -0 sha256sum | LC_ALL=C sort -k 66 > $${:proof-signature-file}
diff -ruw backup.signature $${:proof-signature-file} > $${:diff-file}
output = $${rootdirectory:bin}/post-notification-run
mode = 0700
[check-backup-integrity-on-notification]
<= notifier
recipe = slapos.cookbook:notifier.callback
on-notification-id = $${slap-parameter:on-notification}
callback = $${post-notification-run:output}
[backup-transfer-integrity-promise]
recipe = slapos.recipe.template:jinja2
template = inline:
#!/${bash:location}/bin/bash
backup_diff_file=$${post-notification-run:diff-file}
if [ -f "$backup_diff_file" ]; then
if [ $(wc -l "$backup_diff_file" | cut -d \ -f1) -eq 0 ]; then
exit 0;
else
exit 1;
fi
else
# If the file doesn't exist, the promise should not raise a false positive
exit 0;
fi
rendered = $${basedirectory:promises}/backup-transfer-integrity-promise
mode = 700
###########
# Generate the takeover script
###########
......@@ -76,6 +124,7 @@ input = ${resilient-web-takeover-cgi-script-download:destination}
output = $${directory:cgi-bin}/web-takeover.cgi
password = $${resilient-web-takeover-password:passwd}
mode = 700
proof-signature-url = $${publish:monitor-base-url}/private/resilient/backup.signature
# XXX could it be something lighter?
# XXX Add SSL
......@@ -125,3 +174,9 @@ url = http://[$${resilient-web-takeover-httpd-configuration-file:listening-ip}]:
dash_path = ${dash:location}/bin/dash
curl_path = ${curl:location}/bin/curl
###########
# Symlinks
###########
[backup-signature-link]
recipe = cns.recipe.symlink
symlink = $${post-notification-run:proof-signature-file} = $${directory:monitor-resilient}/backup.signature
\ No newline at end of file
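Both verification paths introduced here follow the same scheme: hash every file of the backup, sort the list, and diff it against the backup.signature transferred with the data (the promise only fails when the diff is non-empty). A rough Python equivalent of the find | sha256sum | sort pipeline, for illustration only:

import hashlib
import os

def compute_signature(root, skip=('backup.signature', 'rdiff-backup-data')):
    """Return sorted '<sha256>  ./relative/path' lines for every file under root."""
    lines = []
    for dirpath, dirnames, filenames in os.walk(root):
        dirnames[:] = [d for d in dirnames if d not in skip]
        for name in filenames:
            if name in skip:
                continue
            path = os.path.join(dirpath, name)
            with open(path, 'rb') as f:
                digest = hashlib.sha256(f.read()).hexdigest()
            lines.append('%s  ./%s' % (digest, os.path.relpath(path, root)))
    # sort by path, like `sort -k 66` does on the sha256sum output
    return sorted(lines, key=lambda line: line.split('  ', 1)[1])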
......@@ -7,10 +7,11 @@ parts =
cron
cron-entry-logrotate
sshkeys-authority
dropbear-server
sshkeys-dropbear
resilient-sshkeys-dropbear-promise
dropbear-server-pbs-authorized-key
sshd-graceful
sshkeys-sshd
sshd-promise
resilient-sshkeys-sshd-promise
sshd-pbs-authorized-key
notifier
......@@ -30,7 +31,7 @@ recipe = slapos.cookbook:mkdirectory
log = $${rootdirectory:var}/log
services = $${rootdirectory:etc}/service
run = $${rootdirectory:var}/run
script = $${rootdirectory:etc}/script
scripts = $${rootdirectory:etc}/run
backup = $${rootdirectory:srv}/backup
promises = $${rootdirectory:etc}/promise
services = $${rootdirectory:etc}/service
......@@ -50,6 +51,7 @@ cronstamps = $${rootdirectory:etc}/cronstamps
logrotate-entries = $${rootdirectory:etc}/logrotate.d
logrotate-backup = $${basedirectory:backup}/logrotate
cgi-bin = $${rootdirectory:srv}/cgi-bin
monitor-resilient = $${monitor-directory:private}/resilient
#----------------
#--
......@@ -120,14 +122,14 @@ create = true
<= logrotate
recipe = slapos.cookbook:logrotate.d
name = equeue
log = $${equeue:log} $${dropbear-sshd:log}
log = $${equeue:log} $${sshd-server:log}
frequency = daily
rotate-num = 30
#----------------
#--
#-- Sets up an rdiff-backup server (with a dropbear server for ssh)
#-- Sets up an rdiff-backup server (with a openssh server for ssh)
[rdiff-backup-server]
recipe = slapos.cookbook:pbs
......@@ -163,40 +165,64 @@ callbacks = $${directory:notifier-callbacks}
command = ${buildout:bin-directory}/pubsubserver --callbacks $${directory:notifier-callbacks} --feeds $${directory:notifier-feeds} --equeue-socket $${equeue:socket} --logfile $${basedirectory:log}/notifier.log $${:host} $${:port}
notifier-binary = ${buildout:bin-directory}/pubsubnotifier
host = $${slap-network-information:global-ipv6}
port = 65534
port = $${notifier-port:port}
context =
key content notifier:command
#----------------
#--
#-- Dropbear.
[dropbear-server]
recipe = slapos.cookbook:dropbear
#-- OpenSSH.
[resilient-sshd-config]
# XXX: Add timeout support
recipe = slapos.recipe.template:jinja2
rendered = $${directory:etc}/resilient-sshd.conf
path_pid = $${directory:run}/resilient-sshd.pid
template = inline:
PidFile $${:path_pid}
Port $${sshd-port:port}
ListenAddress $${slap-network-information:global-ipv6}
Protocol 2
UsePrivilegeSeparation no
HostKey $${directory:ssh}/server_key.rsa
AuthorizedKeysFile $${directory:ssh}/.ssh/authorized_keys
PasswordAuthentication no
PubkeyAuthentication yes
ForceCommand $${rdiff-backup-server:wrapper}
[sshd-raw-server]
recipe = slapos.cookbook:wrapper
host = $${slap-network-information:global-ipv6}
# Deliberately does not define the "port" argument. It will be defined in
# pbsready-import.cfg.in and pbsready-export.cfg.in
home = $${directory:ssh}
wrapper = $${rootdirectory:bin}/raw_sshd
shell = $${rdiff-backup-server:wrapper}
rsa-keyfile = $${directory:ssh}/server_key.rsa
dropbear-binary = ${dropbear:location}/sbin/dropbear
home = $${directory:ssh}
command-line = ${openssh:location}/sbin/sshd -D -e -f $${resilient-sshd-config:rendered}
wrapper-path = $${rootdirectory:bin}/raw_sshd
[dropbear-server-pbs-authorized-key]
<= dropbear-server
[sshd-pbs-authorized-key]
<= sshd-raw-server
recipe = slapos.cookbook:dropbear.add_authorized_key
key = $${slap-parameter:authorized-key}
[dropbear-sshd]
[sshd-server]
recipe = collective.recipe.template
log = $${basedirectory:log}/sshd.log
input = inline:#!/bin/sh
exec $${dropbear-server:wrapper} >> $${:log} 2>&1
exec $${sshd-raw-server:wrapper-path} >> $${:log} 2>&1
output = $${rootdirectory:bin}/raw_sshd_log
mode = 700
[sshd-graceful]
recipe = slapos.cookbook:wrapper
command-line = $${directory:bin}/killpidfromfile $${resilient-sshd-config:path_pid} SIGHUP
wrapper-path = $${basedirectory:scripts}/sshd-graceful
[sshd-promise]
recipe = slapos.cookbook:check_port_listening
path = $${basedirectory:promises}/sshd
hostname = $${slap-network-information:global-ipv6}
port = $${sshd-port:port}
#----------------
#--
#-- sshkeys
......@@ -211,36 +237,39 @@ recipe = slapos.cookbook:sshkeys_authority
request-directory = $${sshkeys-directory:requests}
keys-directory = $${sshkeys-directory:keys}
wrapper = $${basedirectory:services}/sshkeys_authority
keygen-binary = ${dropbear:location}/bin/dropbearkey
keygen-binary = ${openssh:location}/bin/ssh-keygen
[sshkeys-dropbear]
[sshkeys-sshd]
<= sshkeys-authority
recipe = slapos.cookbook:sshkeys_authority.request
name = dropbear
name = sshd
type = rsa
executable = $${dropbear-sshd:output}
public-key = $${dropbear-server:rsa-keyfile}.pub
private-key = $${dropbear-server:rsa-keyfile}
executable = $${sshd-server:output}
public-key = $${sshd-raw-server:rsa-keyfile}.pub
private-key = $${sshd-raw-server:rsa-keyfile}
wrapper = $${basedirectory:services}/sshd
[resilient-sshkeys-dropbear-promise]
[resilient-sshkeys-sshd-promise]
# Check that public key file exists and is not empty
recipe = collective.recipe.template
input = inline:#!${bash:location}/bin/bash
PUBLIC_KEY_CONTENT="$${sshkeys-dropbear:public-key-value}"
PUBLIC_KEY_CONTENT="$${sshkeys-sshd:public-key-value}"
if [[ ! -n "$PUBLIC_KEY_CONTENT" || "$PUBLIC_KEY_CONTENT" == *None* ]]; then
exit 1
fi
output = $${basedirectory:promises}/public-key-existence
mode = 700
#----------------
#--
#-- Connection information to re-use.
[user-info]
recipe = slapos.cookbook:userinfo
# XXX-Cedric: when "aggregation" system is done in libslap, directly publish.
[resilient-publish-connection-parameter]
recipe = slapos.cookbook:publish
ssh-public-key = $${sshkeys-dropbear:public-key-value}
ssh-url = ssh://nobody@[$${dropbear-server:host}]:$${dropbear-server:port}/$${rdiff-backup-server:path}
ssh-public-key = $${sshkeys-sshd:public-key-value}
ssh-url = ssh://$${user-info:pw-name}@[$${sshd-raw-server:host}]:$${sshd-port:port}/$${rdiff-backup-server:path}
ip = $${slap-network-information:global-ipv6}
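The published ssh-url wraps the IPv6 address in brackets so the port separator stays unambiguous; a quick illustration with made-up values:
---- 8< ----
# Illustration only: how the published ssh:// URL is assembled (made-up values).
user, host, port, path = 'slapuser0', '2001:db8::1', 22222, 'srv/backup/pbs'
ssh_url = 'ssh://%s@[%s]:%s/%s' % (user, host, port, path)
# -> ssh://slapuser0@[2001:db8::1]:22222/srv/backup/pbs
---- 8< ----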
......@@ -13,6 +13,12 @@ import shutil
import subprocess
import sys
import tempfile
if os.path.exists('resilient_software_release_information.py'):
from resilient_software_release_information import main as resilient_main
else:
resilient_main = lambda: {}
cgitb.enable()
def getLatestBackupDate():
......@@ -27,12 +33,18 @@ def getLatestBackupDate():
# Usually, there is only one callback (so only one key
# in the db), but if there are several:
# Take the "oldest" one (oldest value).
if not db.keys():
result = False
else:
last_backup = db[db.keys()[0]]
for callback in db.keys():
timestamp = float(db[callback])
if timestamp < last_backup:
last_backup = timestamp
return datetime.datetime.fromtimestamp(last_backup)
result = datetime.datetime.fromtimestamp(last_backup)
db.close()
shutil.rmtree(temporary_directory)
return result
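For context, the database read by getLatestBackupDate() maps notifier callback names to the UNIX timestamp of their last successful run, and the oldest value wins. A hedged illustration of such a mapping (the gdbm module and the key names are assumptions made for the example, not the notifier's actual format):
---- 8< ----
# Illustration only: a callback -> timestamp mapping as read above (Python 2).
import gdbm
import time

db = gdbm.open('/tmp/example-callbacks.db', 'c')
db['callback-mysql'] = str(time.time())          # last successful run, stored as text
db['callback-files'] = str(time.time() - 3600)   # an older one

# The oldest value wins, mirroring getLatestBackupDate():
oldest = min(float(db[key]) for key in db.keys())
db.close()
---- 8< ----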
def isBackupInProgress():
"""
......@@ -42,6 +54,25 @@ def isBackupInProgress():
# XXX: check if file is valid
return os.path.exists(equeue_lockfile)
def getInformationFromSoftwareRelease():
result = resilient_main()
if isinstance(result, dict):
return result
else:
return {'Custom Information': 'Error, received information is malformed'}
def getSoftwareReleaseInformationFormatted():
result_string = ""
for key, value in getInformationFromSoftwareRelease().items():
result_string += "<p><b>%s:</b> %s</p>" % (key, value)
return result_string
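The optional import at the top of the script expects a sibling module resilient_software_release_information exposing a main() that returns a dict of label/value pairs; anything else is reported as malformed. A minimal sketch of what such a module could look like (the keys are invented examples):
---- 8< ----
# resilient_software_release_information.py -- sketch of the optional hook.
# getInformationFromSoftwareRelease() only requires main() to return a dict;
# the keys below are invented examples.
def main():
    return {
        'Takeover instance': 'example-clone-1',
        'Backup tool': 'rdiff-backup',
    }
---- 8< ----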
latest_backup_date = getLatestBackupDate()
if latest_backup_date == False:
latest_backup_message = "No backup downloaded yet, takeover should not happen now."
else:
latest_backup_message = latest_backup_date.strftime('%Y-%m-%d %H:%M:%S')
print "Content-Type: text/html"
print
......@@ -51,16 +82,19 @@ if "password" not in form:
<body>
<h1>This is takeover web interface.</h1>
<p>Calling takeover will stop and freeze the current main instance, and make this clone instance the new main instance, replacing the old one.</p>
<p><b>Warning: submit the form only if you understand what you are doing.</b></p>
<p><font size=\"+2\"><b>Warning: submit the form only if you understand what you are doing.</b></font></p>
<p>Note: the password asked here can be found within the parameters of your SlapOS instance page.</p>
<p>Last valid backup: %s</p>
<p>Importer script(s) of backup in progress: %s</p>
<hr />
<p><b>Last valid backup:</b> %s</p>
<p><b>Importer script(s) of backup in progress:</b> %s</p>
<p><b>Backup Signature:</b> <a href='${resilient-web-takeover-cgi-script:proof-signature-url}'>${resilient-web-takeover-cgi-script:proof-signature-url}</a></p>
%s
<form action="/">
Password: <input type="text" name="password">
<input type="submit" value="Take over" style="background: red;">
</form>
</body>
</html>""" % (getLatestBackupDate().strftime('%Y-%m-%d %H:%M:%S'), isBackupInProgress())
</html>""" % (latest_backup_message, isBackupInProgress(), getSoftwareReleaseInformationFormatted())
sys.exit(0)
if form['password'].value != '${:password}':
......
......@@ -31,7 +31,6 @@ exec-sitecustomize = false
# Add location for modified non-official slapos.buildout
find-links +=
http://dist.repoze.org
http://www.nexedi.org/static/packages/source/
http://www.nexedi.org/static/packages/source/hexagonit.recipe.download/
http://www.nexedi.org/static/packages/source/slapos.buildout/
......@@ -45,7 +44,6 @@ allow-hosts +=
alastairs-place.net
bitbucket.org
code.google.com
dist.repoze.org
effbot.org
github.com
launchpad.net
......@@ -109,9 +107,9 @@ hexagonit.recipe.download = 1.7.post4
Jinja2 = 2.8
PyYAML = 3.12
Werkzeug = 0.11.10
Werkzeug = 0.11.11
buildout-versions = 1.7
cffi = 1.7.0
cffi = 1.8.2
click = 6.6
cliff = 2.2.0
cmd2 = 0.6.8
......@@ -127,10 +125,10 @@ netaddr = 0.7.18
pbr = 1.10.0
plone.recipe.command = 1.1
prettytable = 0.7.2
psutil = 4.3.0
psutil = 4.3.1
pyOpenSSL = 16.1.0
pyasn1 = 0.1.9
pyparsing = 2.1.8
pyparsing = 2.1.9
pytz = 2016.6.1
requests = 2.11.1
setuptools = 19.6.2
......@@ -163,7 +161,7 @@ functools32 = 3.2.3.post2
# Required by:
# cryptography==1.5
ipaddress = 1.0.16
ipaddress = 1.0.17
# Required by:
# slapos.cookbook==1.0.31
......@@ -179,7 +177,7 @@ lock-file = 2.0
netifaces = 0.10.4
# Required by:
# cffi==1.7.0
# cffi==1.8.2
pycparser = 2.14
# Required by:
......