Commit 218db596 authored by Jim Fulton

Merged the test_repozo branch, which converted the old basic manual
test into an automated test, and included Chris Withers' repozo fix
(as modified by Godefroid Chapelle) to avoid a deprecation warning.
parent 956ea8a6
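The deprecation fix mentioned above boils down to importing the md5 constructor from hashlib when it is available (Python 2.5+) and falling back to the old md5 module otherwise; the actual change is in the repozo.py hunks further down. A minimal sketch of the pattern and the resulting call-site spelling, using only the standard library:

# Prefer hashlib (available from Python 2.5); a bare "import md5" triggers a
# DeprecationWarning under Python 2.6.
try:
    from hashlib import md5
except ImportError:
    from md5 import new as md5

digest = md5()               # the old spelling was md5.new()
digest.update('some bytes')
print digest.hexdigest()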
#!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test repozo.py.

This is a by-hand test. It succeeds iff it doesn't blow up. Run it with
its home directory as the current directory. It will destroy all files
matching Data.* and Copy.* in this directory, and anything in a
subdirectory of name 'backup'.

Usage:

    python testrepozo.py [repozo_script]

    repozo_script, if provided, is a path to a script that runs repozo,
    such as that generated by buildout.

eg:

    $ ../../../../bin/py testrepozo.py ../../../../bin/repozo
"""

import os
import random
import time
import glob
import sys
import shutil

import ZODB
from ZODB import FileStorage
import transaction


def cleanup():
    for fname in glob.glob('Data.*') + glob.glob('Copy.*'):
        os.remove(fname)

    if os.path.isdir('backup'):
        for fname in os.listdir('backup'):
            os.remove(os.path.join('backup', fname))
        os.rmdir('backup')


class OurDB:

    def __init__(self):
        from BTrees.OOBTree import OOBTree
        self.getdb()
        conn = self.db.open()
        conn.root()['tree'] = OOBTree()
        transaction.commit()
        self.close()

    def getdb(self):
        storage = FileStorage.FileStorage('Data.fs')
        self.db = ZODB.DB(storage)

    def gettree(self):
        self.getdb()
        conn = self.db.open()
        return conn.root()['tree']

    def pack(self):
        self.getdb()
        self.db.pack()

    def close(self):
        if self.db is not None:
            self.db.close()
            self.db = None


# Do recovery to time 'when', and check that it's identical to correctpath.
def check(correctpath='Data.fs', when=None):
    if when is None:
        extra = ''
    else:
        extra = ' -D ' + when
    cmd = PYTHON + REPOZO + ' -vRr backup -o Copy.fs' + extra
    os.system(cmd)
    f = file(correctpath, 'rb')
    g = file('Copy.fs', 'rb')
    fguts = f.read()
    gguts = g.read()
    f.close()
    g.close()
    if fguts != gguts:
        raise ValueError("guts don't match\n"
                         "    correctpath=%r when=%r\n"
                         "    cmd=%r" % (correctpath, when, cmd))


def mutatedb(db):
    # Make random mutations to the btree in the database.
    tree = db.gettree()
    for dummy in range(100):
        if random.random() < 0.6:
            tree[random.randrange(100000)] = random.randrange(100000)
        else:
            keys = tree.keys()
            if keys:
                del tree[keys[0]]
    transaction.commit()
    db.close()


def main():
    cleanup()
    os.mkdir('backup')
    d = OurDB()
    # Every 9th time thru the loop, we save a full copy of Data.fs,
    # and at the end we ensure we can reproduce those too.
    saved_snapshots = []  # list of (name, time) pairs for copies.
    for i in range(100):
        # Make some mutations.
        mutatedb(d)
        # Pack about each tenth time.
        if random.random() < 0.1:
            print "packing"
            d.pack()
            d.close()
        # Make an incremental backup, half the time with gzip (-z).
        if random.random() < 0.5:
            os.system(PYTHON + REPOZO + ' -vBQr backup -f Data.fs')
        else:
            os.system(PYTHON + REPOZO + ' -zvBQr backup -f Data.fs')
        if i % 9 == 0:
            copytime = '%04d-%02d-%02d-%02d-%02d-%02d' % (time.gmtime()[:6])
            copyname = os.path.join('backup', "Data%d" % i) + '.fs'
            shutil.copyfile('Data.fs', copyname)
            saved_snapshots.append((copyname, copytime))
        # Make sure the clock moves at least a second.
        time.sleep(1.01)

    # Verify current Data.fs can be reproduced exactly.
    check()

    # Verify snapshots can be reproduced exactly.
    for copyname, copytime in saved_snapshots:
        print "Checking that", copyname, "at", copytime, "is reproducible."
        check(copyname, copytime)

    # Tear it all down.
    cleanup()
    print 'Test passed!'


if __name__ == '__main__':
    PYTHON = sys.executable + ' '
    if len(sys.argv) > 1:
        REPOZO = sys.argv[1]
    else:
        REPOZO = '../repozo.py'
    main()
@@ -65,7 +65,12 @@ Options for -R/--recover:
 
 import os
 import sys
-import md5
+try:
+    # the hashlib package is available from Python 2.5
+    from hashlib import md5
+except ImportError:
+    # the md5 package is deprecated in Python 2.6
+    from md5 import new as md5
 import gzip
 import time
 import errno
@@ -101,10 +106,10 @@ def log(msg, *args):
     print >> sys.stderr, msg % args
 
 
-def parseargs():
+def parseargs(argv):
     global VERBOSE
     try:
-        opts, args = getopt.getopt(sys.argv[1:], 'BRvhf:r:FD:o:Qz',
+        opts, args = getopt.getopt(argv, 'BRvhf:r:FD:o:Qz',
                                    ['backup', 'recover', 'verbose', 'help',
                                     'file=', 'repository=', 'full', 'date=',
                                     'output=', 'quick', 'gzip'])
@@ -210,7 +215,7 @@ def dofile(func, fp, n=None):
 
 def checksum(fp, n):
     # Checksum the first n bytes of the specified file
-    sum = md5.new()
+    sum = md5()
     def func(data):
         sum.update(data)
     dofile(func, fp, n)
@@ -221,7 +226,7 @@ def copyfile(options, dst, start, n):
     # Copy bytes from file src, to file dst, starting at offset start, for n
     # length of bytes. For robustness, we first write, flush and fsync
     # to a temp file, then rename the temp file at the end.
-    sum = md5.new()
+    sum = md5()
     ifp = open(options.file, 'rb')
     ifp.seek(start)
     tempname = os.path.join(os.path.dirname(dst), 'tmp.tmp')
@@ -248,7 +253,7 @@ def concat(files, ofp=None):
     # Concatenate a bunch of files from the repository, output to `outfile' if
     # given. Return the number of bytes written and the md5 checksum of the
     # bytes.
-    sum = md5.new()
+    sum = md5()
     def func(data):
         sum.update(data)
         if ofp:
@@ -504,8 +509,10 @@ def do_recover(options):
     log('Recovered %s bytes, md5: %s', reposz, reposum)
 
 
-def main():
-    options = parseargs()
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv[1:]
+    options = parseargs(argv)
     if options.mode == BACKUP:
         do_backup(options)
     else:
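The parseargs(argv) and main(argv=None) changes above are what allow the new automated test below to drive repozo in-process instead of shelling out via os.system(). A minimal sketch of that call style, using placeholder paths (the import path is the one used by the test's _callRepozoMain helper):

from ZODB.scripts.repozo import main

# incremental, quick backup into ./backup, equivalent to:
#   repozo -B -Q -r backup -f Data.fs
main(['-BQr', 'backup', '-f', 'Data.fs'])

# dated recovery into Copy.fs, equivalent to:
#   repozo -R -r backup -D 2009-01-01 -o Copy.fs
main(['-Rr', 'backup', '-D', '2009-01-01', '-o', 'Copy.fs'])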
#!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2004-2009 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
import os

import ZODB.tests.util

_NOISY = os.environ.get('NOISY_REPOZO_TEST_OUTPUT')


class OurDB:

    def __init__(self, dir):
        from BTrees.OOBTree import OOBTree
        import transaction
        self.dir = dir
        self.getdb()
        conn = self.db.open()
        conn.root()['tree'] = OOBTree()
        transaction.commit()
        self.close()

    def getdb(self):
        from ZODB import DB
        from ZODB.FileStorage import FileStorage
        storage_filename = os.path.join(self.dir, 'Data.fs')
        storage = FileStorage(storage_filename)
        self.db = DB(storage)

    def gettree(self):
        self.getdb()
        conn = self.db.open()
        return conn.root()['tree']

    def pack(self):
        self.getdb()
        self.db.pack()

    def close(self):
        if self.db is not None:
            self.db.close()
            self.db = None

    def mutate(self):
        # Make random mutations to the btree in the database.
        import random
        import transaction
        tree = self.gettree()
        for dummy in range(100):
            if random.random() < 0.6:
                tree[random.randrange(100000)] = random.randrange(100000)
            else:
                keys = tree.keys()
                if keys:
                    del tree[keys[0]]
        transaction.commit()
        self.close()


class RepozoTests(unittest.TestCase):

    layer = ZODB.tests.util.MininalTestLayer('repozo')

    def setUp(self):
        # compute directory names
        import tempfile
        self.basedir = tempfile.mkdtemp()
        self.backupdir = os.path.join(self.basedir, 'backup')
        self.datadir = os.path.join(self.basedir, 'data')
        self.restoredir = os.path.join(self.basedir, 'restore')
        self.copydir = os.path.join(self.basedir, 'copy')
        self.currdir = os.getcwd()
        # create empty directories
        os.mkdir(self.backupdir)
        os.mkdir(self.datadir)
        os.mkdir(self.restoredir)
        os.mkdir(self.copydir)
        os.chdir(self.datadir)
        self.db = OurDB(self.datadir)

    def tearDown(self):
        os.chdir(self.currdir)
        import shutil
        shutil.rmtree(self.basedir)

    def _callRepozoMain(self, argv):
        from ZODB.scripts.repozo import main
        main(argv)

    def testRepozo(self):
        self.saved_snapshots = []  # list of (name, time) pairs for copies.
        for i in range(100):
            self.mutate_pack_backup(i)
        # Verify snapshots can be reproduced exactly.
        for copyname, copytime in self.saved_snapshots:
            if _NOISY:
                print "Checking that", copyname,
                print "at", copytime, "is reproducible."
            self.assertRestored(copyname, copytime)

    def mutate_pack_backup(self, i):
        import random
        from shutil import copyfile
        from time import gmtime
        from time import sleep
        self.db.mutate()
        # Pack about each tenth time.
        if random.random() < 0.1:
            if _NOISY:
                print "packing"
            self.db.pack()
            self.db.close()
        # Make an incremental backup, half the time with gzip (-z).
        argv = ['-BQr', self.backupdir, '-f', 'Data.fs']
        if _NOISY:
            argv.insert(0, '-v')
        if random.random() < 0.5:
            argv.insert(0, '-z')
        self._callRepozoMain(argv)
        # Save snapshots to assert that dated restores are possible
        if i % 9 == 0:
            srcname = os.path.join(self.datadir, 'Data.fs')
            copytime = '%04d-%02d-%02d-%02d-%02d-%02d' % (gmtime()[:6])
            copyname = os.path.join(self.copydir, "Data%d.fs" % i)
            copyfile(srcname, copyname)
            self.saved_snapshots.append((copyname, copytime))
        # Make sure the clock moves at least a second.
        sleep(1.01)
        # Verify current Data.fs can be reproduced exactly.
        self.assertRestored()

    def assertRestored(self, correctpath='Data.fs', when=None):
        # Do recovery to time 'when', and check that it's identical to correctpath.
        # restore to Restored.fs
        restoredfile = os.path.join(self.restoredir, 'Restored.fs')
        argv = ['-Rr', self.backupdir, '-o', restoredfile]
        if _NOISY:
            argv.insert(0, '-v')
        if when is not None:
            argv.append('-D')
            argv.append(when)
        self._callRepozoMain(argv)
        # check restored file content is equal to file that was backed up
        f = file(correctpath, 'rb')
        g = file(restoredfile, 'rb')
        fguts = f.read()
        gguts = g.read()
        f.close()
        g.close()
        msg = ("guts don't match\ncorrectpath=%r when=%r\n cmd=%r" %
               (correctpath, when, ' '.join(argv)))
        self.assertEquals(fguts, gguts, msg)


def test_suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(RepozoTests))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
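For reference, the converted test can also be driven by hand; a minimal sketch, assuming the module ships alongside ZODB's other script tests and that ZODB is importable (the module path below is an assumption, not something stated in the commit):

import os
# _NOISY is read at import time, so set the variable before importing the module.
os.environ['NOISY_REPOZO_TEST_OUTPUT'] = '1'

import unittest
from ZODB.scripts.tests.test_repozo import test_suite  # hypothetical path

unittest.TextTestRunner(verbosity=2).run(test_suite())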