install && rm -f ${buildout:parts-directory}/${:_buildout_section_name_}/etc/ssl/certs/* && for i in ${ca-certificates:location}/certs/*/*.crt; do ln -sv $i ${buildout:parts-directory}/${:_buildout_section_name_}/etc/ssl/certs/`${buildout:parts-directory}/${:_buildout_section_name_}/bin/openssl x509 -hash -noout -in $i`.0; done; true
all install_sw && rm -f ${buildout:parts-directory}/${:_buildout_section_name_}/etc/ssl/certs/* && for i in ${ca-certificates:location}/certs/*/*.crt; do ln -sv $i ${buildout:parts-directory}/${:_buildout_section_name_}/etc/ssl/certs/`${buildout:parts-directory}/${:_buildout_section_name_}/bin/openssl x509 -hash -noout -in $i`.0; done; true
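Both make-targets variants above perform the same post-install step: once OpenSSL is installed into its buildout part, the bundled certificate directory is emptied and every certificate provided by the ca-certificates part is symlinked under its OpenSSL subject-hash name (the same layout c_rehash produces). A hedged, readable sketch of that hook, with PART and CERTS as illustrative stand-ins for the substituted buildout paths:

  # Illustrative only: PART  stands for ${buildout:parts-directory}/${:_buildout_section_name_}
  #                    CERTS stands for ${ca-certificates:location}/certs
  rm -f "$PART"/etc/ssl/certs/*
  for i in "$CERTS"/*/*.crt; do
    # link each CA certificate under its subject-hash name with a .0 suffix
    ln -sv "$i" "$PART"/etc/ssl/certs/"$("$PART"/bin/openssl x509 -hash -noout -in "$i")".0
  done
  true  # always exit 0 so buildout does not treat the hook as failed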
/*
This file must be valid JSON. But comments are allowed
Please edit settings.json, not settings.json.template
*/
{
// Name your instance!
"title": "Etherpad",
// favicon default name
// alternatively, set up a fully specified Url to your own favicon
"favicon": "favicon.ico",
//IP and port which etherpad should bind at
"ip": "${:ip}",
"port" : "${:port}",
// Session Key, used for reconnecting user sessions
// Set this to a secure string at least 10 characters long. Do not share this value.
"sessionKey" : "",
/*
// Node native SSL support
// this is disabled by default
//
// make sure to have the minimum and correct file access permissions set
// so that the Etherpad server can access them
"ssl" : {
"key" : "/path-to-your/epl-server.key",
"cert" : "/path-to-your/epl-server.crt"
},
*/
//The Type of the database. You can choose between dirty, postgres, sqlite and mysql
//You shouldn't use "dirty" for anything other than testing or development
"dbType" : "dirty",
//the database specific settings
"dbSettings" : {
"filename" : "${:dirtydb-location}"
},
/* An Example of MySQL Configuration
"dbType" : "mysql",
"dbSettings" : {
"user" : "root",
"host" : "localhost",
"password": "",
"database": "store"
},
*/
//the default text of a pad
"defaultPadText" : "Welcome to Etherpad!\n\nThis pad text is synchronized as you type, so that everyone viewing this page sees the same text. This allows you to collaborate seamlessly on documents!\n\nGet involved with Etherpad at http:\/\/etherpad.org\n",
/* Users must have a session to access pads. This effectively allows only group pads to be accessed. */
"requireSession" : false,
/* Users may edit pads but not create new ones. Pad creation is only via the API. This applies both to group pads and regular pads. */
"editOnly" : false,
/* if true, all css & js will be minified before sending to the client. This will improve the loading performance massively,
but makes it impossible to debug the javascript/css */
"minify" : true,
/* How long may clients use served javascript code (in seconds)? Without versioning this
may cause problems during deployment. Set to 0 to disable caching */
"maxAge" : 21600, // 60 * 60 * 6 = 6 hours
/* This is the path to the Abiword executable. Setting it to null disables abiword.
Abiword is needed for advanced import/export features of pads*/
"abiword" : null,
/* This setting is used if you require authentication of all users.
Note: /admin always requires authentication. */
"requireAuthentication": false,
/* Require authorization by a module, or a user with is_admin set, see below. */
"requireAuthorization": false,
/* Users for basic authentication. is_admin = true gives access to /admin.
If you do not uncomment this, /admin will not be available! */
/*
"users": {
"admin": {
"password": "changeme1",
"is_admin": true
},
"user": {
"password": "changeme1",
"is_admin": false
}
},
*/
// restrict socket.io transport methods
"socketTransportProtocols" : ["xhr-polling"],
/* The log level we are using, can be: DEBUG, INFO, WARN, ERROR */
"loglevel": "INFO",
//Logging configuration. See log4js documentation for further information
// https://github.com/nomiddlename/log4js-node
// You can add as many appenders as you want here:
"logconfig" :
{ "appenders": [
{ "type": "console",
// "category": "access"// only logs pad access
}
/*
, { "type": "file"
, "filename": "your-log-file-here.log"
, "maxLogSize": 1024
, "backups": 3 // how many log files there're gonna be at max
//, "category": "test" // only log a specific category
}*/
/*{ "type": "logLevelFilter",
"level": "warn" // filters out all log messages that have a lower level than "error"
, "appender":
{ Use whatever appender you want here }
}*/
/*
, { "type": "logLevelFilter"
, "level": "error" // filters out all log messages that have a lower level than "error"
, "appender":
{ "type": "smtp"
, "subject": "An error occured in your EPL instance!"
...
Checking that it works
----------------------
To check that your software instance is resilient, proceed as follows:
Once all instances are successfully deployed, go to your export instance, connect as the instance user and run:
$ ~/bin/exporter
It is the script responsible for triggering the resiliency stack on your instance. After doing a backup of your data, it will notify the pull-backup instances of a new backup, triggering the transfer of this data to the import instances.
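A quick, hedged way to confirm the run on the export instance (plain shell, using only the script already shown):

$ ~/bin/exporter
$ echo $?    # 0 means the script reported success; anything else means the backup or the notification failed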
Once this script has run successfully, go to your import instance, connect as its instance user and check ~/srv/backup/"your software"/, the location of the data you wanted to receive. The last part of the resiliency process is up to your import script.
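Still as the import instance user, a minimal check that the backup actually arrived ("your software" is a placeholder for your software type, as above):

$ ls -l ~/srv/backup/"your software"/
$ du -sh ~/srv/backup/"your software"/   # should be non-empty, with recent timestamps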
DEBUGGING:
Here is a partial list of things you can check to understand what is causing the problem:
- Check that your import script does not fail and successfully places your data in ~/srv/backup/"your software" (as the import instance user) by running:
$ ~/bin/"your software"-exporter
- Check that the export instance script runs successfully as this instance user by running:
$ ~/bin/exporter
- Check that the pull-backup system did its job by going to one of your pull-backup instances, connecting as its user and checking the log: ~/var/log/equeue.log
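For that last check, a minimal way to inspect the log (path as quoted above):

$ tail -n 50 ~/var/log/equeue.log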