Commit 7b1864f5 authored by bescoto's avatar bescoto

Final check in for version 0.11.2


git-svn-id: http://svn.savannah.nongnu.org/svn/rdiff-backup@285 2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109
parent d7abfbe2
New in v0.11.2 (2003/02/07)
New in v0.11.2 (2003/03/01)
---------------------------
Fixed seg fault bug reported by a couple of sparc/openbsd users. Thanks
......@@ -35,12 +35,20 @@ time to write):
rdiff-backup makes no attempt to recover or clean up after
unrecoverable errors.
However, it now uses fsync() to increment the destination
directory in a reversible way. If there is an error, the next
backup will regress the destination directory into its state
before the aborted backup.
The above process can also be done without running a backup, using the
--check-destination-dir option.
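For example (the destination path here is illustrative), an interrupted
backup destination can be regressed by hand with:
    rdiff-backup --check-destination-dir /mnt/backups/mirror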
Improved error logging. Instead of the old haphazard reporting
method, which sometimes didn't indicate which file an error occurred on,
now all recoverable errors are reported in a standard format and also
written to the error_log.<time>.data file in the rdiff-backup-data
directory.
directory. Thanks to Dean Gaudet and others for repeatedly bugging me
about this.
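As an illustration (path prefix and timestamp are hypothetical), a session
whose recoverable errors were logged at 2003-03-01T00:00:00-05:00 would leave
them in a file such as
    /mnt/backups/mirror/rdiff-backup-data/error_log.2003-03-01T00:00:00-05:00.data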
New in v0.11.1 (2002/12/31)
......
Look at Kent Borg's suggestion for restore options and digests.
Write some better selection test cases to test new Iterate_fast func.
Work on killtest code - avoid returning a failure when a file is
simply skipped.
Work on WindowsTest code - avoid returning failure because symlinks,
etc., are skipped.
Look at error code, make sure filename is always mentioned (see Knops
email, Dean Gaudet's reminders).
Add --list-files-changed-between or similar option, to list files that
have changed between two times.
......
#!/usr/bin/env python
import os, re, shutil, time, sys, getopt
......@@ -97,9 +94,9 @@ def MakeTar():
"increment.py", "__init__.py", "iterfile.py",
"lazy.py", "librsync.py", "log.py", "Main.py",
"manage.py", "metadata.py", "Rdiff.py",
"restore.py", "robust.py", "rorpiter.py",
"rpath.py", "Security.py", "selection.py",
"SetConnections.py", "static.py",
"regress.py", "restore.py", "robust.py",
"rorpiter.py", "rpath.py", "Security.py",
"selection.py", "SetConnections.py", "static.py",
"statistics.py", "TempFile.py", "Time.py"]:
assert not os.system("cp %s/%s %s/rdiff_backup" %
(SourceDir, filename, tardir)), filename
......
......@@ -562,17 +562,19 @@ def ListChangedSince(rp):
def CheckDest(dest_rp):
"""Check the destination directory, """
if Globals.rbdir is None:
SetConnections.UpdateGlobal('rbdir',
dest_rp.append_path("rdiff-backup-data"))
need_check = checkdest_need_check(dest_rp)
if need_check is None:
Log.FatalError("No destination dir found at %s" % (dest_rp.path,))
elif need_check == 0:
Log.FatalError("Destination dir %s does not need checking" %
(dest_rp.path,))
regress.Regress(dest_rp)
dest_rp.conn.regress.Regress(dest_rp)
def checkdest_need_check(dest_rp):
"""Return None if no dest dir found, 1 if dest dir needs check, 0 o/w"""
assert dest_rp.conn is Globals.rbdir.conn
if not dest_rp.isdir() or not Globals.rbdir.isdir(): return None
curmirroot = Globals.rbdir.append("current_mirror")
curmir_incs = restore.get_inclist(curmirroot)
......@@ -600,4 +602,4 @@ def checkdest_if_necessary(dest_rp):
need_check = checkdest_need_check(dest_rp)
if need_check == 1:
Log("Previous backup seems to have failed, checking now.", 2)
regress.Regress(dest_rp)
dest_rp.conn.regress.Regress(dest_rp)
......@@ -57,8 +57,9 @@ def set_security_level(action, cmdpairs):
if Globals.server: return
cp1 = cmdpairs[0]
if len(cmdpairs) > 1: cp2 = cmdpairs[1]
else: cp2 = cp1
if action == "backup":
if action == "backup" or action == "check-destination-dir":
if bothlocal(cp1, cp2) or bothremote(cp1, cp2):
sec_level = "minimal"
rdir = tempfile.gettempdir()
......@@ -140,7 +141,9 @@ def set_allowed_requests(sec_level):
"backup.DestinationStruct.patch_and_finalize",
"backup.DestinationStruct.patch_increment_and_finalize",
"Main.backup_touch_curmirror_local",
"Globals.ITRB.increment_stat"])
"Globals.ITRB.increment_stat",
"statistics.record_error",
"log.ErrorLog.write_if_open"])
if Globals.server:
allowed_requests.extend(
["SetConnections.init_connection_remote",
......
......@@ -151,7 +151,9 @@ Couldn't start up the remote connection by executing
Remember that, under the default settings, rdiff-backup must be
installed in the PATH on the remote system. See the man page for more
information.""" % (exception, remote_cmd))
information on this. This message may also be displayed if the remote
version of rdiff-backup is quite different from the local version (%s)."""
% (exception, remote_cmd, Globals.version))
if remote_version != Globals.version:
Log("Warning: Local version %s does not match remote version %s."
......
......@@ -20,6 +20,7 @@
"""High level functions for mirroring and mirror+incrementing"""
from __future__ import generators
import errno
import Globals, metadata, rorpiter, TempFile, Hardlink, robust, increment, \
rpath, static, log, selection, Time, Rdiff, statistics
......@@ -153,7 +154,7 @@ class DestinationStruct:
Hardlink.islinked(src_rorp or dest_rorp)):
dest_sig = rpath.RORPath(index)
dest_sig.flaglinked(Hardlink.get_link_index(dest_sig))
elif dest_rorp:
elif dest_rorp:
dest_sig = dest_rorp.getRORPath()
if dest_rorp.isreg():
dest_rp = dest_base_rpath.new_index(index)
......@@ -196,10 +197,11 @@ class CacheCollatedPostProcess:
receives.
2. The metadata must match what is stored in the destination
directory. If there is an error we do not update the dest
directory for that file, and the old metadata is used. Thus
we cannot write any metadata until we know the file has been
processed correctly.
directory. If there is an error, either we do not update the
dest directory for that file and the old metadata is used, or
the file is deleted on the other end. Thus we cannot write
any metadata until we know the file has been processed
correctly.
The class caches older source_rorps and dest_rps so the patch
function can retrieve them if necessary. The patch function can
......@@ -218,8 +220,9 @@ class CacheCollatedPostProcess:
# the following should map indices to lists [source_rorp,
# dest_rorp, changed_flag, success_flag] where changed_flag
# should be true if the rorps are different, and success_flag
# should be true if dest_rorp has been successfully updated to
# source_rorp. They both default to false.
# should be 1 if dest_rorp has been successfully updated to
# source_rorp, and 2 if the destination file is deleted
# entirely. They both default to false (0).
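# Illustrative entry (index and variable names are hypothetical): a changed
# file whose mirror copy ended up deleted after a patch error would be cached
# as self.cache_dict[('subdir', 'file')] = [source_rorp, dest_rorp, 1, 2].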
self.cache_dict = {}
self.cache_indicies = []
......@@ -268,17 +271,26 @@ class CacheCollatedPostProcess:
if not changed or success:
if source_rorp: self.statfileobj.add_source_file(source_rorp)
if dest_rorp: self.statfileobj.add_dest_file(dest_rorp)
if success:
if success == 0: metadata_rorp = dest_rorp
elif success == 1:
self.statfileobj.add_changed(source_rorp, dest_rorp)
metadata_rorp = source_rorp
else: metadata_rorp = dest_rorp
else: metadata_rorp = None
if metadata_rorp and metadata_rorp.lstat():
metadata.WriteMetadata(metadata_rorp)
def in_cache(self, index):
"""Return true if given index is cached"""
return self.cache_dict.has_key(index)
def flag_success(self, index):
"""Signal that the file with given index was updated successfully"""
self.cache_dict[index][3] = 1
def flag_deleted(self, index):
"""Signal that the destination file was deleted"""
self.cache_dict[index][3] = 2
def flag_changed(self, index):
"""Signal that the file with given index has changed"""
self.cache_dict[index][2] = 1
......@@ -291,6 +303,10 @@ class CacheCollatedPostProcess:
"""Retrieve source_rorp with given index from cache"""
return self.cache_dict[index][0]
def get_mirror_rorp(self, index):
"""Retrieve mirror_rorp with given index from cache"""
return self.cache_dict[index][1]
def close(self):
"""Process the remaining elements in the cache"""
while self.cache_indicies: self.shorten_cache()
......@@ -335,9 +351,12 @@ class PatchITRB(rorpiter.ITRBranch):
rp = self.get_rp_from_root(index)
tf = TempFile.new(rp)
if self.patch_to_temp(rp, diff_rorp, tf):
if tf.lstat(): rpath.rename(tf, rp)
elif rp.lstat(): rp.delete()
self.CCPP.flag_success(index)
if tf.lstat():
rpath.rename(tf, rp)
self.CCPP.flag_success(index)
elif rp.lstat():
rp.delete()
self.CCPP.flag_deleted(index)
else:
tf.setdata()
if tf.lstat(): tf.delete()
......@@ -355,7 +374,23 @@ class PatchITRB(rorpiter.ITRBranch):
if robust.check_common_error(self.error_handler,
Rdiff.patch_local, (basis_rp, diff_rorp, new)) == 0: return 0
if new.lstat(): rpath.copy_attribs(diff_rorp, new)
return 1
return self.matches_cached_rorp(diff_rorp, new)
def matches_cached_rorp(self, diff_rorp, new_rp):
"""Return true if new_rp matches cached src rorp
This is a final check to make sure the temp file just written
matches the stats which we got earlier. If it doesn't, it
could confuse the regress operation. This is only necessary
for regular files.
"""
if not new_rp.isreg(): return 1
cached_rorp = self.CCPP.get_source_rorp(diff_rorp.index)
if cached_rorp.equal_loose(new_rp): return 1
log.ErrorLog.write_if_open("UpdateError", diff_rorp, "Updated mirror "
"temp file %s does not match source" % (new_rp.path,))
return 0
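# Sketch of the case this guards against (assumed scenario): if the source
# file changed between the initial scan and the patch, the freshly written
# temp file no longer matches the cached source stats, so the update is
# dropped and logged as an UpdateError instead of reaching the mirror, where
# it could later confuse a regress.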
def write_special(self, diff_rorp, new):
"""Write diff_rorp (which holds special file) to new"""
......@@ -370,7 +405,8 @@ class PatchITRB(rorpiter.ITRBranch):
base_rp = self.base_rp = self.get_rp_from_root(index)
assert diff_rorp.isdir() or base_rp.isdir() or not base_rp.index
if diff_rorp.isdir(): self.prepare_dir(diff_rorp, base_rp)
else: self.set_dir_replacement(diff_rorp, base_rp)
elif self.set_dir_replacement(diff_rorp, base_rp):
self.CCPP.flag_success(index)
def set_dir_replacement(self, diff_rorp, base_rp):
"""Set self.dir_replacement, which holds data until done with dir
......@@ -380,8 +416,15 @@ class PatchITRB(rorpiter.ITRBranch):
"""
assert diff_rorp.get_attached_filetype() == 'snapshot'
self.dir_replacement = TempFile.new(base_rp)
rpath.copy_with_attribs(diff_rorp, self.dir_replacement)
if not self.patch_to_temp(None, diff_rorp, self.dir_replacement):
if self.dir_replacement.lstat(): self.dir_replacement.delete()
# Was an error, so now restore original directory
rpath.copy_with_attribs(self.CCPP.get_mirror_rorp(diff_rorp.index),
self.dir_replacement)
success = 0
else: success = 1
if base_rp.isdir(): base_rp.chmod(0700)
return success
def prepare_dir(self, diff_rorp, base_rp):
"""Prepare base_rp to turn into a directory"""
......@@ -389,6 +432,10 @@ class PatchITRB(rorpiter.ITRBranch):
if not base_rp.isdir():
if base_rp.lstat(): base_rp.delete()
base_rp.mkdir()
self.CCPP.flag_success(diff_rorp.index)
else: # maybe no change, so query CCPP before tagging success
if self.CCPP.in_cache(diff_rorp.index):
self.CCPP.flag_success(diff_rorp.index)
base_rp.chmod(0700)
def end_process(self):
......@@ -401,7 +448,6 @@ class PatchITRB(rorpiter.ITRBranch):
self.base_rp.rmdir()
if self.dir_replacement.lstat():
rpath.rename(self.dir_replacement, self.base_rp)
self.CCPP.flag_success(self.base_rp.index)
class IncrementITRB(PatchITRB):
......@@ -421,25 +467,48 @@ class IncrementITRB(PatchITRB):
self.cached_incrp = self.inc_root_rp.new_index(index)
return self.cached_incrp
def inc_with_checking(self, new, old, inc_rp):
"""Produce increment taking new to old checking for errors"""
try: inc = increment.Increment(new, old, inc_rp)
except OSError, exc:
if (errno.errorcode.has_key(exc[0]) and
errno.errorcode[exc[0]] == 'ENAMETOOLONG'):
self.error_handler(exc, old)
return None
else: raise
return inc
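# Example of the error handled above (assumed case): writing an increment
# whose name (original filename plus timestamp and suffix) exceeds the
# filesystem's filename limit raises OSError with ENAMETOOLONG; it is passed
# to the error handler so the backup can continue instead of aborting.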
def fast_process(self, index, diff_rorp):
"""Patch base_rp with diff_rorp and write increment (neither is dir)"""
rp = self.get_rp_from_root(index)
tf = TempFile.new(rp)
self.patch_to_temp(rp, diff_rorp, tf)
increment.Increment(tf, rp, self.get_incrp(index))
if tf.lstat(): rpath.rename(tf, rp)
else: rp.delete()
self.CCPP.flag_success(index)
if self.patch_to_temp(rp, diff_rorp, tf):
inc = self.inc_with_checking(tf, rp, self.get_incrp(index))
if inc is not None:
if inc.isreg():
inc.fsync_with_dir() # Write inc before rp changed
if tf.lstat():
rpath.rename(tf, rp)
self.CCPP.flag_success(index)
elif rp.lstat():
rp.delete()
self.CCPP.flag_deleted(index)
return # normal return, otherwise error occurred
tf.setdata()
if tf.lstat(): tf.delete()
def start_process(self, index, diff_rorp):
"""Start processing directory"""
base_rp = self.base_rp = self.get_rp_from_root(index)
assert diff_rorp.isdir() or base_rp.isdir()
if diff_rorp.isdir():
increment.Increment(diff_rorp, base_rp, self.get_incrp(index))
inc = self.inc_with_checking(diff_rorp, base_rp,
self.get_incrp(index))
if inc and inc.isreg():
inc.fsync_with_dir() # must write inc before rp changed
self.prepare_dir(diff_rorp, base_rp)
else:
self.set_dir_replacement(diff_rorp, base_rp)
increment.Increment(self.dir_replacement, base_rp,
self.get_incrp(index))
elif (self.set_dir_replacement(diff_rorp, base_rp) and
self.inc_with_checking(self.dir_replacement, base_rp,
self.get_incrp(index))):
self.CCPP.flag_success(index)
......@@ -512,7 +512,7 @@ class VirtualFile:
import Globals, Time, Rdiff, Hardlink, FilenameMapping, C, Security, \
Main, rorpiter, selection, increment, statistics, manage, lazy, \
iterfile, rpath, robust, restore, manage, backup, connection, \
TempFile, SetConnections, librsync, log
TempFile, SetConnections, librsync, log, regress
Globals.local_connection = LocalConnection()
Globals.connections.append(Globals.local_connection)
......
......@@ -33,9 +33,6 @@ def Increment(new, mirror, incpref):
file to incpref.
"""
if not (new and new.lstat() or mirror.lstat()):
return None # Files deleted in meantime, do nothing
log.Log("Incrementing mirror file " + mirror.path, 5)
if ((new and new.isdir()) or mirror.isdir()) and not incpref.isdir():
incpref.mkdir()
......
......@@ -34,7 +34,7 @@ recovered.
"""
from __future__ import generators
import Globals, restore, log, rorpiter, journal, TempFile, metadata, rpath
import Globals, restore, log, rorpiter, TempFile, metadata, rpath, C, Time
# regress_time should be set to the time we want to regress back to
# (usually the time of the last successful backup)
......@@ -43,10 +43,6 @@ regress_time = None
# This should be set to the latest unsuccessful backup time
unsuccessful_backup_time = None
# This is set by certain tests and allows overriding of global time
# variables.
time_override_mode = None
class RegressException(Exception):
"""Raised on any exception in regress process"""
......@@ -71,6 +67,7 @@ def Regress(mirror_rp):
ITR = rorpiter.IterTreeReducer(RegressITRB, [])
for rf in iterate_meta_rfs(mirror_rp, inc_rpath): ITR(rf.index, rf)
ITR.Finish()
remove_rbdir_increments()
def set_regress_time():
"""Set global regress_time to previous sucessful backup
......@@ -80,18 +77,14 @@ def set_regress_time():
"""
global regress_time, unsuccessful_backup_time
if time_override_mode:
assert regress_time and unsuccessful_backup_time
return
curmir_incs = restore.get_inclist(Globals.rbdir.append("current_mirror"))
assert len(curmir_incs) == 2, \
"Found %s current_mirror flags, expected 2" % len(curmir_incs)
inctimes = [inc.getinctime() for inc in curmir_incs]
inctimes.sort()
regress_time = inctimes[0]
unsucessful_backup_time = inctimes[-1]
log.Log("Regressing to " + Time.timetopretty(regress_time), 5)
unsuccessful_backup_time = inctimes[-1]
log.Log("Regressing to " + Time.timetopretty(regress_time), 4)
def set_restore_times():
"""Set _rest_time and _mirror_time in the restore module
......@@ -103,6 +96,20 @@ def set_restore_times():
restore._mirror_time = unsuccessful_backup_time
restore._rest_time = regress_time
def remove_rbdir_increments():
"""Delete the increments in the rdiff-backup-data directory"""
old_current_mirror = None
for filename in Globals.rbdir.listdir():
rp = Globals.rbdir.append(filename)
if rp.isincfile() and rp.getinctime() == unsuccessful_backup_time:
if rp.getincbase_str() == "current_mirror": old_current_mirror = rp
else:
log.Log("Removing rdiff-backup-data increment " + rp.path, 5)
rp.delete()
if old_current_mirror:
C.sync() # Sync first, since we are marking dest dir as good now
old_current_mirror.delete()
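# Illustrative effect (the specific filenames are assumptions): regressing an
# aborted backup at time T deletes the rdiff-backup-data increments from that
# session, e.g. error_log.T.data or mirror_metadata.T.snapshot.gz, and removes
# current_mirror.T.data last, after a sync, so the directory is only marked
# good once the other increments are gone.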
def iterate_raw_rfs(mirror_rp, inc_rp):
"""Iterate all RegressFile objects in mirror/inc directory"""
root_rf = RegressFile(mirror_rp, inc_rp, restore.get_inclist(inc_rp))
......@@ -132,6 +139,11 @@ def iterate_meta_rfs(mirror_rp, inc_rp):
raw_rfs = iterate_raw_rfs(mirror_rp, inc_rp)
collated = rorpiter.Collate2Iters(raw_rfs, yield_metadata())
for raw_rf, metadata_rorp in collated:
if not raw_rf:
log.Log("Warning, metadata file has entry for %s,\n"
"but there are no associated files." %
(metadata_rorp.get_indexpath(),), 2)
continue
raw_rf.set_metadata_rorp(metadata_rorp)
yield raw_rf
......@@ -146,11 +158,8 @@ class RegressFile(restore.RestoreFile):
"""
def __init__(self, mirror_rp, inc_rp, inc_list):
restore.RestoreFile.__init__(self, mirror_rp, inc_rp, inc_list)
assert len(self.relevant_incs) <= 2, "Too many incs"
if len(self.relevant_incs) == 2:
self.regress_inc = self.relevant_incs[-1]
else: self.regress_inc = None
self.set_regress_inc()
def set_metadata_rorp(self, metadata_rorp):
"""Set self.metadata_rorp, creating empty if given None"""
if metadata_rorp: self.metadata_rorp = metadata_rorp
......@@ -161,6 +170,13 @@ class RegressFile(restore.RestoreFile):
return ((self.metadata_rorp and self.metadata_rorp.isdir()) or
(self.mirror_rp and self.mirror_rp.isdir()))
def set_regress_inc(self):
"""Set self.regress_inc to increment to be removed (or None)"""
newer_incs = self.get_newer_incs()
assert len(newer_incs) <= 1, "Too many recent increments"
if newer_incs: self.regress_inc = newer_incs[0] # first is mirror_rp
else: self.regress_inc = None
class RegressITRB(rorpiter.ITRBranch):
"""Turn back state of dest directory (use with IterTreeReducer)
......@@ -168,7 +184,7 @@ class RegressITRB(rorpiter.ITRBranch):
The arguments to the ITR will be RegressFiles. There are two main
assumptions this procedure makes (besides those mentioned above):
1. The mirror_rp and the metadata_rorp cmp_attribs correctly iff
1. The mirror_rp and the metadata_rorp equal_loose correctly iff
they contain the same data. If this is the case, then the inc
file is unnecessary and we can delete it.
......@@ -189,13 +205,16 @@ class RegressITRB(rorpiter.ITRBranch):
def fast_process(self, index, rf):
"""Process when nothing is a directory"""
if (not rf.metadata_rorp.lstat() or not rf.mirror_rp.lstat() or
not rpath.cmp_attribs(rf.metadata_rorp, rf.mirror_rp)):
if not rf.metadata_rorp.equal_loose(rf.mirror_rp):
log.Log("Regressing file %s" %
(rf.metadata_rorp.get_indexpath()), 5)
if rf.metadata_rorp.isreg(): self.restore_orig_regfile(rf)
else:
if rf.mirror_rp.lstat(): rf.mirror_rp.delete()
rpath.copy_with_attribs(rf.metadata_rorp, rf.mirror_rp)
if rf.regress_inc: rf.regress_inc.delete()
if rf.regress_inc:
log.Log("Deleting increment " + rf.regress_inc.path, 5)
rf.regress_inc.delete()
def restore_orig_regfile(self, rf):
"""Restore original regular file
......@@ -233,29 +252,21 @@ class RegressITRB(rorpiter.ITRBranch):
rf = self.rf
if rf.metadata_rorp.isdir():
if rf.mirror_rp.isdir():
if not rpath.cmp_attribs(rf.metadata_rorp, rf.mirror_rp):
rf.mirror_rp.setdata()
if not rf.metadata_rorp.equal_loose(rf.mirror_rp):
log.Log("Regressing attributes of " + rf.mirror_rp.path, 5)
rpath.copy_attribs(rf.metadata_rorp, rf.mirror_rp)
else:
rf.mirror_rp.delete()
log.Log("Regressing file " + rf.mirror_rp.path, 5)
rpath.copy_with_attribs(rf.metadata_rorp, rf.mirror_rp)
else: # replacing a dir with some other kind of file
assert rf.mirror_rp.isdir()
log.Log("Replacing directory " + rf.mirror_rp.path, 5)
if rf.metadata_rorp.isreg(): self.restore_orig_regfile(rf)
else:
rf.mirror_rp.delete()
rpath.copy_with_attribs(rf.metadata_rorp, rf.mirror_rp)
if rf.regress_inc: rf.regress_inc.delete()
def on_error(self, exc, *args):
"""This is run on any exception, raises RegressException
RegressException should be fatal. We don't want to tolerate
the kinds of errors we would when backing up.
"""
if args and args[0] and isinstance(args[0], tuple):
filename = "/".join(args[0])
elif self.index: filename = "/".join(*self.index)
else: filename = "."
log.Log("Error '%s' processing %s" % (exc, filename), 2)
raise RegressException("Error during Regress")
if rf.regress_inc:
log.Log("Deleting increment " + rf.regress_inc.path, 5)
rf.regress_inc.delete()
......@@ -369,7 +369,7 @@ class RestoreFile:
def get_restore_fp(self):
"""Return file object of restored data"""
assert self.relevant_incs[-1].isreg(), "Not a regular file"
assert self.relevant_incs[-1].isreg()
current_fp = self.get_first_fp()
for inc_diff in self.relevant_incs[1:]:
log.Log("Applying patch %s" % (inc_diff.get_indexpath(),), 7)
......
......@@ -185,22 +185,6 @@ def copy_with_attribs(rpin, rpout, compress = 0):
copy(rpin, rpout, compress)
if rpin.lstat(): copy_attribs(rpin, rpout)
def quick_cmp_with_attribs(rp1, rp2):
"""Quicker version of cmp_with_attribs
Instead of reading all of each file, assume that regular files
are the same if the attributes compare.
"""
if not cmp_attribs(rp1, rp2): return None
if rp1.isreg() and rp2.isreg() and (rp1.getlen() == rp2.getlen()):
return 1
return cmp(rp1, rp2)
def cmp_with_attribs(rp1, rp2):
"""Combine cmp and cmp_attribs"""
return cmp_attribs(rp1, rp2) and cmp(rp1, rp2)
def rename(rp_source, rp_dest):
"""Rename rp_source to rp_dest"""
assert rp_source.conn is rp_dest.conn
......@@ -209,15 +193,14 @@ def rename(rp_source, rp_dest):
if not rp_source.lstat(): rp_dest.delete()
else:
if rp_dest.lstat() and rp_source.getinode() == rp_dest.getinode():
assert 0, ("Rename over same inode: %s to %s" %
(rp_source.path, rp_dest.path))
log.Log("Warning: Attempt to rename over same inode: %s to %s"
% (rp_source.path, rp_dest.path), 2)
# You can't rename one hard linked file over another
rp_source.delete()
else: rp_source.conn.os.rename(rp_source.path, rp_dest.path)
rp_dest.data = rp_source.data
rp_source.data = {'type': None}
def tupled_lstat(filename):
"""Like os.lstat, but return only a tuple, or None if os.error
......@@ -273,6 +256,8 @@ class RORPath:
self.data = {'type': None}
self.file = None
def __nonzero__(self): return 1
def __eq__(self, other):
"""True iff the two rorpaths are equivalent"""
if self.index != other.index: return None
......@@ -284,12 +269,40 @@ class RORPath:
elif key == 'atime' and not Globals.preserve_atime: pass
elif key == 'devloc' or key == 'nlink': pass
elif key == 'size' and not self.isreg(): pass
elif key == 'inode' and (not self.isreg() or
not Globals.compare_inode): pass
elif (key == 'inode' and
(not self.isreg() or self.getnumlinks() == 1 or
not Globals.compare_inode)): pass
elif (not other.data.has_key(key) or
self.data[key] != other.data[key]): return None
return 1
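# Reading of the inode clause above: inode numbers are only compared for
# regular files with more than one link (and only when Globals.compare_inode
# is set), since files that are not hard-linked need no inode tracking.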
def equal_loose(self, other):
"""True iff the two rorpaths are kinda equivalent
Sometimes because of missing permissions, a file cannot be
replicated exactly on the remote side. This function tells
you whether the two files are close enough. self must be the
file with more information.
"""
for key in self.data.keys(): # compare dicts key by key
if ((key == 'uid' or key == 'gid') and
(self.issym() or not Globals.change_ownership)):
# Don't compare gid/uid for symlinks, and only root
# can change ownership
pass
elif (key == 'type' and self.isspecial() and
other.isreg() and other.getsize() == 0):
# Special files may be replaced with 0 len regular files
pass
elif key == 'atime' and not Globals.preserve_atime: pass
elif key == 'devloc' or key == 'nlink': pass
elif key == 'size' and not self.isreg(): pass
elif key == 'inode': pass
elif (not other.data.has_key(key) or
self.data[key] != other.data[key]): return 0
return 1
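# Example of the looseness (based on the checks above): a special file that
# could only be recreated on the mirror as an empty regular file still counts
# as equal, as do uid/gid differences when ownership is not being changed
# (or for symlinks).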
def equal_verbose(self, other, check_index = 1,
compare_inodes = 0, compare_ownership = 0):
"""Like __eq__, but log more information. Useful when testing"""
......
......@@ -154,7 +154,7 @@ class PathSetter(unittest.TestCase):
"testfiles/output/rdiff-backup-data/increments")
self.exec_rb(None, timbar_paths[0])
self.refresh(Local.timbar_in, Local.timbar_out)
assert rpath.cmp_with_attribs(Local.timbar_in, Local.timbar_out)
assert Local.timbar_in.equal_loose(Local.timbar_out)
self.exec_rb_restore(25000, 'testfiles/output/various_file_types',
'testfiles/vft2_out')
......@@ -207,14 +207,30 @@ class Final(PathSetter):
def testProcLocal(self):
"""Test initial backup of /proc locally"""
Myrm("testfiles/procoutput")
procout = rpath.RPath(Globals.local_connection, 'testfiles/procoutput')
self.set_connections(None, None, None, None)
self.exec_rb(None, '../../../../../../proc', 'testfiles/procoutput')
self.exec_rb(10000, '../../../../../../proc', procout.path)
time.sleep(1)
self.exec_rb(20000, '../../../../../../proc', procout.path)
time.sleep(1)
self.exec_rb(30000, Local.inc1rp.path, procout.path)
assert CompareRecursive(Local.inc1rp, procout)
time.sleep(1)
self.exec_rb(40000, '../../../../../../proc', procout.path)
def testProcRemote(self):
"""Test mirroring proc remote"""
Myrm("testfiles/procoutput")
procout = rpath.RPath(Globals.local_connection, 'testfiles/procoutput')
self.set_connections(None, None, "test2/tmp/", "../../")
self.exec_rb(None, '../../../../../../proc', 'testfiles/procoutput')
self.exec_rb(10000, '../../../../../../proc', procout.path)
time.sleep(1)
self.exec_rb(20000, '../../../../../../proc', procout.path)
time.sleep(1)
self.exec_rb(30000, Local.inc1rp.path, procout.path)
assert CompareRecursive(Local.inc1rp, procout)
time.sleep(1)
self.exec_rb(40000, '../../../../../../proc', procout.path)
def testProcRemote2(self):
"""Test mirroring proc, this time when proc is remote, dest local"""
......
......@@ -24,6 +24,7 @@ target = rpath.RPath(lc, "testfiles/output/out")
out2 = rpath.RPath(lc, "testfiles/output/out2")
out_gz = rpath.RPath(lc, "testfiles/output/out.gz")
Time.setcurtime(1000000000)
Time.setprevtime(999424113)
prevtimestr = "2001-09-02T02:48:33-07:00"
t_pref = "testfiles/output/out.2001-09-02T02:48:33-07:00"
......
import unittest, os, signal, sys, random, time
from commontest import *
from rdiff_backup.log import *
from rdiff_backup import Globals, Main
from rdiff_backup import Globals, Main, restore
"""Test consistency by killing rdiff-backup as it is backing up"""
......@@ -44,17 +44,8 @@ class ProcessFuncs(unittest.TestCase):
'testfiles/restoretarget* testfiles/vft_out '
'timbar.pyc testfiles/vft2_out')
def is_aborted_backup(self):
"""True if there are signs of aborted backup in output/"""
try: dirlist = os.listdir("testfiles/output/rdiff-backup-data")
except OSError:
raise TimingError("No data dir found, give process more time")
dirlist = filter(lambda f: f.startswith("last-file-incremented"),
dirlist)
return len(dirlist) != 0
def exec_rb(self, time, wait, *args):
"""Run rdiff-backup return pid"""
"""Run rdiff-backup return pid. Wait until done if wait is true"""
arglist = ['python', '../rdiff-backup', '-v3']
if time:
arglist.append("--current-time")
......@@ -65,19 +56,19 @@ class ProcessFuncs(unittest.TestCase):
if wait: return os.spawnvp(os.P_WAIT, 'python', arglist)
else: return os.spawnvp(os.P_NOWAIT, 'python', arglist)
def exec_and_kill(self, mintime, maxtime, backup_time, resume, arg1, arg2):
def exec_and_kill(self, min_max_pair, backup_time, arg1, arg2):
"""Run rdiff-backup, then kill and run again
Kill after a time between mintime and maxtime. First process
should not terminate before maxtime.
"""
mintime, maxtime = min_max_pair
pid = self.exec_rb(backup_time, None, arg1, arg2)
time.sleep(random.uniform(mintime, maxtime))
if os.waitpid(pid, os.WNOHANG)[0] != 0:
raise TimingError("Timing Error on %s, %s:\n"
"Process already quit - try lowering max time"
% (arg1, arg2))
# Timing problem, process already terminated (max time too big?)
return -1
os.kill(pid, self.killsignal)
while 1:
pid, exitstatus = os.waitpid(pid, os.WNOHANG)
......@@ -85,14 +76,7 @@ class ProcessFuncs(unittest.TestCase):
assert exitstatus != 0
break
time.sleep(0.2)
if not self.is_aborted_backup():
raise TimingError("Timing Error on %s, %s:\n"
"Process already finished or didn't "
"get a chance to start" % (arg1, arg2))
print "---------------------- killed"
os.system("ls -l %s/rdiff-backup-data" % arg1)
if resume: self.exec_rb(backup_time + 5, 1, '--resume', arg1, arg2)
else: self.exec_rb(backup_time + 5000, 1, '--no-resume', arg1, arg2)
def create_killtest_dirs(self):
"""Create testfiles/killtest? directories
......@@ -105,8 +89,8 @@ class ProcessFuncs(unittest.TestCase):
def copy_thrice(input, output):
"""Copy input directory to output directory three times"""
assert not os.system("cp -a %s %s" % (input, output))
assert not os.system("cp -a %s %s/killtest1" % (input, output))
assert not os.system("cp -a %s %s/killtest2" % (input, output))
assert not os.system("cp -a %s %s/killtesta" % (input, output))
assert not os.system("cp -a %s %s/killtestb" % (input, output))
if (Local.kt1rp.lstat() and Local.kt2rp.lstat() and
Local.kt3rp.lstat() and Local.kt4rp.lstat()): return
......@@ -116,39 +100,6 @@ class ProcessFuncs(unittest.TestCase):
copy_thrice("testfiles/increment%d" % i,
"testfiles/killtest%d" % i)
def verify_back_dirs(self):
"""Make sure testfiles/output/back? dirs exist"""
if (Local.back1.lstat() and Local.back2.lstat() and
Local.back3.lstat() and Local.back4.lstat() and
Local.back5.lstat()): return
os.system(MiscDir + "/myrm testfiles/backup[1-5]")
self.exec_rb(10000, 1, 'testfiles/killtest3', 'testfiles/backup1')
Local.back1.setdata()
self.exec_rb(10000, 1, 'testfiles/killtest3', 'testfiles/backup2')
self.exec_rb(20000, 1, 'testfiles/killtest1', 'testfiles/backup2')
Local.back2.setdata()
self.exec_rb(10000, 1, 'testfiles/killtest3', 'testfiles/backup3')
self.exec_rb(20000, 1, 'testfiles/killtest1', 'testfiles/backup3')
self.exec_rb(30000, 1, 'testfiles/killtest2', 'testfiles/backup3')
Local.back3.setdata()
self.exec_rb(10000, 1, 'testfiles/killtest3', 'testfiles/backup4')
self.exec_rb(20000, 1, 'testfiles/killtest1', 'testfiles/backup4')
self.exec_rb(30000, 1, 'testfiles/killtest2', 'testfiles/backup4')
self.exec_rb(40000, 1, 'testfiles/killtest3', 'testfiles/backup4')
Local.back4.setdata()
self.exec_rb(10000, 1, 'testfiles/killtest3', 'testfiles/backup5')
self.exec_rb(20000, 1, 'testfiles/killtest1', 'testfiles/backup5')
self.exec_rb(30000, 1, 'testfiles/killtest2', 'testfiles/backup5')
self.exec_rb(40000, 1, 'testfiles/killtest3', 'testfiles/backup5')
self.exec_rb(50000, 1, 'testfiles/killtest4', 'testfiles/backup5')
Local.back5.setdata()
def runtest_sequence(self, total_tests,
exclude_rbdir, ignore_tmp, compare_links,
stop_on_error = None):
......@@ -169,125 +120,137 @@ class ProcessFuncs(unittest.TestCase):
(timing_problems, failures,
total_tests - timing_problems - failures)
class KillTest(ProcessFuncs):
"""Test rdiff-backup by killing it, recovering, and then comparing"""
killsignal = signal.SIGTERM
# The following are lower and upper bounds on the amount of time
# rdiff-backup is expected to run. They are used to determine how
# long to wait before killing the rdiff-backup process
time_pairs = [(0.0, 3.7), (0.0, 5.7), (0.0, 3.0), (0.0, 5.0), (0.0, 5.0)]
class Resume(ProcessFuncs):
"""Test for graceful recovery after resumed backup"""
def setUp(self):
"""Create killtest? and backup? directories"""
self.create_killtest_dirs()
self.verify_back_dirs()
"""Create killtest? and backup? directories if necessary"""
Local.kt1rp.setdata()
Local.kt2rp.setdata()
Local.kt3rp.setdata()
Local.kt4rp.setdata()
if (not Local.kt1rp.lstat() or not Local.kt2rp.lstat() or
not Local.kt3rp.lstat() or not Local.kt4rp.lstat()):
self.create_killtest_dirs()
def testTiming(self):
"""Run each rdiff-backup sequence 10 times, printing average time"""
time_list = [[], [], [], [], []] # List of time lists
iterations = 10
def run_once(current_time, input_rp, index):
start_time = time.time()
self.exec_rb(current_time, 1, input_rp.path, Local.rpout.path)
time_list[index].append(time.time() - start_time)
for i in range(iterations):
self.delete_tmpdirs()
run_once(10000, Local.kt3rp, 0)
run_once(20000, Local.kt1rp, 1)
run_once(30000, Local.kt3rp, 2)
run_once(40000, Local.kt3rp, 3)
run_once(50000, Local.kt3rp, 4)
for i in range(len(time_list)):
print "%s -> %s" % (i, " ".join(map(str, time_list[i])))
def runtest(self, exclude_rbdir, ignore_tmp_files, compare_links):
"""Run the actual test, returning 1 if passed and 0 otherwise"""
self.delete_tmpdirs()
# Backing up killtest3
def mark_incomplete(self, curtime, rp):
"""Check the date of current mirror
Return 1 if there are two current_mirror incs and last one has
time curtime. Return 0 if only one with time curtime, and
then add a current_mirror marker. Return -1 if only one and
time is not curtime.
"""
rbdir = rp.append_path("rdiff-backup-data")
inclist = restore.get_inclist(rbdir.append("current_mirror"))
assert 1 <= len(inclist) <= 2, str(map(lambda x: x.path, inclist))
inc_date_pairs = map(lambda inc: (inc.getinctime(), inc), inclist)
inc_date_pairs.sort()
assert inc_date_pairs[-1][0] == curtime, \
(inc_date_pairs[-1][0], curtime)
if len(inclist) == 2: return 1
if inc_date_pairs[-1][0] == curtime:
result = 0
marker_time = curtime - 10000
else:
assert inc_date_pairs[-1][0] == curtime - 10000
marker_time = curtime
result = -1
cur_mirror_rp = rbdir.append("current_mirror.%s.data" %
(Time.timetostring(marker_time),))
assert not cur_mirror_rp.lstat()
cur_mirror_rp.touch()
return result
def testTerm(self):
"""Run rdiff-backup, terminating and regressing each time
Because rdiff-backup must be killed, the timing should be
updated
"""
count, killed_too_soon, killed_too_late = 5, [0]*4, [0]*4
self.delete_tmpdirs()
# Start with killtest3 because it is big and the first case
# Back up killtest3 first because it is big and the first case
# is kind of special (there's no incrementing, so different
# code)
self.exec_and_kill(0.7, 4.0, 10000, 1,
'testfiles/killtest3', 'testfiles/output')
if not CompareRecursive(Local.back1, Local.rpout, compare_links,
None, exclude_rbdir, ignore_tmp_files):
return 0
# Backing up killtest1
self.exec_and_kill(0.8, 5.0, 20000, 1,
'testfiles/killtest1', 'testfiles/output')
if not CompareRecursive(Local.back2, Local.rpout, compare_links,
None, exclude_rbdir, ignore_tmp_files):
return 0
# Backing up killtest2
self.exec_and_kill(0.7, 0.8, 30000, 1,
'testfiles/killtest2', 'testfiles/output')
if not CompareRecursive(Local.back3, Local.rpout, compare_links,
None, exclude_rbdir, ignore_tmp_files):
return 0
# Backing up killtest3
self.exec_and_kill(0.7, 4.0, 40000, 1,
'testfiles/killtest3', 'testfiles/output')
if not CompareRecursive(Local.back4, Local.rpout, compare_links,
None, exclude_rbdir, ignore_tmp_files):
return 0
# Backing up killtest4
self.exec_and_kill(1.0, 8.0, 50000, 1,
'testfiles/killtest4', 'testfiles/output')
if not CompareRecursive(Local.back5, Local.rpout, compare_links,
None, exclude_rbdir, ignore_tmp_files):
return 0
return 1
def testTERM(self, total_tests = 3):
"""Test sending local processes a TERM signal"""
self.killsignal = signal.SIGTERM
self.runtest_sequence(total_tests, None, None, 1)
def testKILL(self, total_tests = 10):
"""Send local backup process a KILL signal"""
self.killsignal = signal.SIGKILL
self.runtest_sequence(total_tests, None, 1, None)
class NoResume(ProcessFuncs):
"""Test for consistent backup after abort and then no resume"""
def runtest(self, exclude_rbdir, ignore_tmp_files, compare_links):
self.delete_tmpdirs()
# Back up each killtest to output
self.exec_and_kill(0.7, 1.5, 10000, 1,
'testfiles/killtest3', 'testfiles/output')
self.exec_and_kill(0.6, 0.6, 20000, 1,
'testfiles/killtest1', 'testfiles/output')
self.exec_and_kill(0.7, 1.0, 30000, 1,
'testfiles/killtest2', 'testfiles/output')
self.exec_and_kill(0.7, 2.0, 40000, 1,
'testfiles/killtest3', 'testfiles/output')
self.exec_and_kill(1.0, 5.0, 50000, 1,
'testfiles/killtest4', 'testfiles/output')
# Now restore each and compare
InternalRestore(1, 1, "testfiles/output", "testfiles/restoretarget1",
15000)
if not CompareRecursive(Local.kt3rp, Local.rpout1, compare_links,
None, exclude_rbdir, ignore_tmp_files):
return 0
InternalRestore(1, 1, "testfiles/output", "testfiles/restoretarget2",
25000)
if not CompareRecursive(Local.kt1rp, Local.rpout2, compare_links,
None, exclude_rbdir, ignore_tmp_files):
return 0
InternalRestore(1, 1, "testfiles/output", "testfiles/restoretarget3",
35000)
if not CompareRecursive(Local.kt2rp, Local.rpout3, compare_links,
None, exclude_rbdir, ignore_tmp_files):
return 0
InternalRestore(1, 1, "testfiles/output", "testfiles/restoretarget4",
45000)
if not CompareRecursive(Local.kt3rp, Local.rpout4, compare_links,
None, exclude_rbdir, ignore_tmp_files):
return 0
InternalRestore(1, 1, "testfiles/output", "testfiles/restoretarget5",
55000)
if not CompareRecursive(Local.kt4rp, Local.rpout5, compare_links,
None, exclude_rbdir, ignore_tmp_files):
return 0
return 1
def testTERM(self, total_tests = 5):
self.killsignal = signal.SIGTERM
self.runtest_sequence(total_tests, 1, None, 1)
def testKILL(self, total_tests = 5):
self.killsignal = signal.SIGKILL
self.runtest_sequence(total_tests, 1, 1, None)
self.exec_rb(10000, 1, Local.kt3rp.path, Local.rpout.path)
assert CompareRecursive(Local.kt3rp, Local.rpout)
def cycle_once(min_max_time_pair, curtime, input_rp, old_rp):
"""Backup input_rp, kill, regress, and then compare"""
time.sleep(1)
self.exec_and_kill(min_max_time_pair, curtime,
input_rp.path, Local.rpout.path)
result = self.mark_incomplete(curtime, Local.rpout)
assert not self.exec_rb(None, 1, '--check-destination-dir',
Local.rpout.path)
assert CompareRecursive(old_rp, Local.rpout, compare_hardlinks = 0)
return result
# Keep backing up kt1rp, and then regressing to kt3rp. Then go to kt1rp
for i in range(count):
result = cycle_once(self.time_pairs[1], 20000,
Local.kt1rp, Local.kt3rp)
if result == 0: killed_too_late[0] += 1
elif result == -1: killed_too_soon[0] += 1
self.exec_rb(20000, 1, Local.kt1rp.path, Local.rpout.path)
# Now keep regressing from kt2rp, only staying there at the end
for i in range(count):
result = cycle_once(self.time_pairs[2], 30000,
Local.kt2rp, Local.kt1rp)
if result == 0: killed_too_late[1] += 1
elif result == -1: killed_too_soon[1] += 1
self.exec_rb(30000, 1, Local.kt2rp.path, Local.rpout.path)
# Now keep regressing from kt3rp, only staying there at the end
for i in range(count):
result = cycle_once(self.time_pairs[3], 40000,
Local.kt3rp, Local.kt2rp)
if result == 0: killed_too_late[2] += 1
elif result == -1: killed_too_soon[2] += 1
self.exec_rb(40000, 1, Local.kt3rp.path, Local.rpout.path)
# Now keep regressing from kt4rp back to kt3rp each time
for i in range(count):
result = cycle_once(self.time_pairs[4], 50000,
Local.kt4rp, Local.kt3rp)
if result == 0: killed_too_late[3] += 1
elif result == -1: killed_too_soon[3] += 1
print "Killed too soon out of %s: %s" % (count, killed_too_soon)
print "Killed too late out of %s: %s" % (count, killed_too_late)
if __name__ == "__main__": unittest.main()
......@@ -152,6 +152,12 @@ class IncrementTest1(unittest.TestCase):
"""Increment/Restore when both directories are remote"""
BackupRestoreSeries(None, None, self.dirlist)
def test_long_filenames_local(self):
"""Test backing up a directory with lots of long filenames in it"""
Myrm(Local.rpout.path)
InternalBackup(1, 1, "testfiles/longfilenames1", Local.rpout.path, 100)
InternalBackup(1, 1, "testfiles/longfilenames2", Local.rpout.path, 200)
def testNoWrite(self):
"""Test backup/restore on dirs without write permissions"""
def write_string(rp, s = ""):
......
......@@ -6,70 +6,91 @@ Not to be confused with the regression tests.
import unittest
from commontest import *
from rdiff_backup import regress
from rdiff_backup import regress, Time
Log.setverbosity(7)
Log.setverbosity(3)
class RegressTest(unittest.TestCase):
regress_rp1 = rpath.RPath(Globals.local_connection,
"testfiles/regress_output1")
regress_rp2 = rpath.RPath(Globals.local_connection,
"testfiles/regress_output2")
def make_output(self, level):
"""Set up two rdiff-backup destination dir of level and level+1
testfiles/regress_output1 will be a copy of
testfiles/increment1 through testfiles/increment{level}
testfiles/regress_output2 will have everything backed up
in testfiles/regress_output1 + testfiles/increment{level+1}.
The time of each increment will be 10000*level.
"""
assert 1 <= level <= 3
if self.regress_rp1.lstat(): Myrm(self.regress_rp1.path)
if self.regress_rp2.lstat(): Myrm(self.regress_rp2.path)
# Make regress_output1
Log("Making %s" % (self.regress_rp1.path,), 4)
for i in range(1, level+1):
rdiff_backup(1, 1,
"testfiles/increment%s" % (i,),
self.regress_rp1.path,
current_time = 10000*i)
# Now make regress_output2
Log("Making %s" % (self.regress_rp2.path,), 4)
assert not os.system("cp -a %s %s" %
(self.regress_rp1.path, self.regress_rp2.path))
rdiff_backup(1, 1,
"testfiles/increment%s" % (level+1),
self.regress_rp2.path,
current_time = 10000*(level+1))
self.regress_rp1.setdata()
self.regress_rp2.setdata()
output_rp = rpath.RPath(Globals.local_connection, "testfiles/output")
output_rbdir_rp = output_rp.append_path("rdiff-backup-data")
inc1_rp = rpath.RPath(Globals.local_connection, "testfiles/increment1")
inc2_rp = rpath.RPath(Globals.local_connection, "testfiles/increment2")
inc3_rp = rpath.RPath(Globals.local_connection, "testfiles/increment3")
inc4_rp = rpath.RPath(Globals.local_connection, "testfiles/increment4")
def test_full(self):
"""Test regressing a full directory to older state
Make two directories, one with one more backup in it. Then
regress the bigger one, and then make sure they compare the
same.
"""
for level in range(1, 4):
self.make_output(level)
regress.regress_time = 10000*level
regress.unsuccessful_backup_time = 10000*(level+1)
regress.time_override_mode = 1
Globals.rbdir = self.regress_rp2.append_path("rdiff-backup-data")
Log("######### Beginning regress ###########", 5)
regress.Regress(self.regress_rp2)
assert CompareRecursive(self.regress_rp1, self.regress_rp2,
exclude_rbdir = 0)
def runtest(self, regress_function):
"""Test regressing a full directory to older state
Regress_function takes a time and should regress
self.output_rp back to that time.
"""
self.output_rp.setdata()
if self.output_rp.lstat(): Myrm(self.output_rp.path)
rdiff_backup(1, 1, self.inc1_rp.path, self.output_rp.path,
current_time = 10000)
assert CompareRecursive(self.inc1_rp, self.output_rp)
rdiff_backup(1, 1, self.inc2_rp.path, self.output_rp.path,
current_time = 20000)
assert CompareRecursive(self.inc2_rp, self.output_rp)
rdiff_backup(1, 1, self.inc3_rp.path, self.output_rp.path,
current_time = 30000)
assert CompareRecursive(self.inc3_rp, self.output_rp)
rdiff_backup(1, 1, self.inc4_rp.path, self.output_rp.path,
current_time = 40000)
assert CompareRecursive(self.inc4_rp, self.output_rp)
Globals.rbdir = self.output_rbdir_rp
regress_function(30000)
assert CompareRecursive(self.inc3_rp, self.output_rp,
compare_hardlinks = 0)
regress_function(20000)
assert CompareRecursive(self.inc2_rp, self.output_rp,
compare_hardlinks = 0)
regress_function(10000)
assert CompareRecursive(self.inc1_rp, self.output_rp,
compare_hardlinks = 0)
def regress_to_time_local(self, time):
"""Regress self.output_rp to time by running regress locally"""
self.output_rp.setdata()
self.output_rbdir_rp.setdata()
self.add_current_mirror(time)
regress.Regress(self.output_rp)
def add_current_mirror(self, time):
"""Add current_mirror marker at given time"""
cur_mirror_rp = self.output_rbdir_rp.append(
"current_mirror.%s.data" % (Time.timetostring(time),))
cur_mirror_rp.touch()
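# For a marker time of 999424113 (hypothetical value), Time.timetostring
# yields a string like "2001-09-02T02:48:33-07:00" depending on timezone, so
# the file created here would be named
# current_mirror.2001-09-02T02:48:33-07:00.data inside rdiff-backup-data.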
def regress_to_time_remote(self, time):
"""Like test_full above, but run regress remotely"""
self.output_rp.setdata()
self.output_rbdir_rp.setdata()
self.add_current_mirror(time)
cmdline = (SourceDir +
"/../rdiff-backup -v3 --check-destination-dir "
"--remote-schema './chdir-wrapper2 %s' "
"test1::../" + self.output_rp.path)
print "Running:", cmdline
assert not os.system(cmdline)
def test_local(self):
"""Run regress test locally"""
self.runtest(self.regress_to_time_local)
def test_remote(self):
"""Run regress test remotely"""
self.runtest(self.regress_to_time_remote)
if __name__ == "__main__": unittest.main()
......@@ -361,8 +361,8 @@ class FileAttributes(FileCopying):
"""Test attribute comparison success"""
testpairs = [(self.hl1, self.hl2)]
for a, b in testpairs:
assert rpath.cmp_attribs(a, b), "Err with %s %s" % (a.path, b.path)
assert rpath.cmp_attribs(b, a), "Err with %s %s" % (b.path, a.path)
assert a.equal_loose(b), "Err with %s %s" % (a.path, b.path)
assert b.equal_loose(a), "Err with %s %s" % (b.path, a.path)
def testCompFail(self):
"""Test attribute comparison failures"""
......@@ -370,17 +370,15 @@ class FileAttributes(FileCopying):
(self.exec1, self.exec2),
(self.rf, self.hl1)]
for a, b in testpairs:
assert not rpath.cmp_attribs(a, b), \
"Err with %s %s" % (a.path, b.path)
assert not rpath.cmp_attribs(b, a), \
"Err with %s %s" % (b.path, a.path)
assert not a.equal_loose(b), "Err with %s %s" % (a.path, b.path)
assert not b.equal_loose(a), "Err with %s %s" % (b.path, a.path)
def testCompRaise(self):
def testCheckRaise(self):
"""Should raise exception when file missing"""
self.assertRaises(RPathException, rpath.cmp_attribs,
self.assertRaises(RPathException, rpath.check_for_files,
self.nothing, self.hl1)
self.assertRaises(RPathException, rpath.cmp_attribs,
self.noperms, self.nothing)
self.assertRaises(RPathException, rpath.check_for_files,
self.hl1, self.nothing)
def testCopyAttribs(self):
"""Test copying attributes"""
......@@ -402,7 +400,7 @@ class FileAttributes(FileCopying):
self.exec2, self.hl1, self.dir, self.sym]:
rpath.copy_with_attribs(rp, out)
assert rpath.cmp(rp, out)
assert rpath.cmp_attribs(rp, out)
assert rp.equal_loose(out)
out.delete()
def testCopyRaise(self):
......