Commit cedfc767 authored by joshn

Gah. Fix unicode support for Linux systems where the destination Python installation doesn't support unicode filenames.


git-svn-id: http://svn.savannah.nongnu.org/svn/rdiff-backup@1057 2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109
parent e9e15d9e
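
For context, the patch gates every UTF-8 decode/encode behind the Globals.use_unicode_paths flag instead of doing it unconditionally. Below is a minimal, self-contained sketch of that gate rewritten for Python 3 (the patch itself is Python 2); use_unicode_paths mirrors the Globals setting from the diff, while the class and variable names here are illustrative only, not rdiff-backup's API.

import io

use_unicode_paths = True  # in rdiff-backup this lives in Globals

class MaybeUnicodeSketch:
    """Wrap a binary file object; convert to/from UTF-8 text only if the flag is on."""
    def __init__(self, fileobj):
        self.fileobj = fileobj

    def read(self, length=-1):
        data = self.fileobj.read(length)
        if use_unicode_paths:
            data = data.decode('utf-8')   # bytes -> text only when the destination supports it
        return data

    def write(self, buf):
        if use_unicode_paths and isinstance(buf, str):
            buf = buf.encode('utf-8')     # the underlying file always receives bytes
        return self.fileobj.write(buf)

    def close(self):
        return self.fileobj.close()

# With the flag off, raw bytes pass through untouched in both directions.
fp = MaybeUnicodeSketch(io.BytesIO('métadonnées\n'.encode('utf-8')))
print(fp.read())   # 'métadonnées\n' when the flag is on, raw UTF-8 bytes otherwise
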
@@ -180,8 +180,11 @@ def Record2EA(record):
         raise metadata.ParsingError("Bad record beginning: " + first[:8])
     filename = first[8:]
     if filename == '.': index = ()
-    else: index = tuple(unicode(C.acl_unquote(encode(filename)),
-                                'utf-8').split('/'))
+    else:
+        unquoted_filename = C.acl_unquote(encode(filename))
+        if Globals.use_unicode_paths:
+            unquoted_filename = unicode(unquoted_filename, 'utf-8')
+        index = tuple(unquoted_filename.split('/'))
     ea = ExtendedAttributes(index)
     for line in lines:

@@ -556,8 +559,11 @@ def Record2ACL(record):
         raise metadata.ParsingError("Bad record beginning: "+ first_line)
     filename = first_line[8:]
     if filename == '.': index = ()
-    else: index = tuple(unicode(C.acl_unquote(encode(filename)),
-                                'utf-8').split('/'))
+    else:
+        unquoted_filename = C.acl_unquote(encode(filename))
+        if Globals.use_unicode_paths:
+            unquoted_filename = unicode(unquoted_filename, 'utf-8')
+        index = tuple(unquoted_filename.split('/'))
     return AccessControlLists(index, record[newline_pos:])

 class ACLExtractor(EAExtractor):

@@ -376,17 +376,17 @@ class FlatFile:
             compress = 1
         if mode == 'r':
             self.rp = rp_base
-            self.fileobj = rpath.UnicodeFile(self.rp.open("rb", compress))
+            self.fileobj = rpath.MaybeUnicode(self.rp.open("rb", compress))
         else:
             assert mode == 'w'
             if compress and check_path and not rp_base.isinccompressed():
                 def callback(rp): self.rp = rp
-                self.fileobj = rpath.UnicodeFile(rpath.MaybeGzip(rp_base,
-                                                 callback))
+                self.fileobj = rpath.MaybeUnicode(rpath.MaybeGzip(rp_base,
+                                                  callback))
             else:
                 self.rp = rp_base
                 assert not self.rp.lstat(), self.rp
-                self.fileobj = rpath.UnicodeFile(self.rp.open("wb",
-                                                 compress = compress))
+                self.fileobj = rpath.MaybeUnicode(self.rp.open("wb",
+                                                  compress = compress))

     def write_record(self, record):

@@ -1417,19 +1417,24 @@ class RPath(RORPath):
         write_win_acl(self, acl)
         self.data['win_acl'] = acl

-class UnicodeFile:
-    """ Wraps a RPath and reads/writes unicode. """
+class MaybeUnicode:
+    """ Wraps a RPath and reads/writes unicode if Globals.use_unicode_paths is on. """
     def __init__(self, fileobj):
         self.fileobj = fileobj

     def read(self, length = -1):
-        return unicode(self.fileobj.read(length), 'utf-8')
+        data = self.fileobj.read(length)
+        if Globals.use_unicode_paths:
+            data = unicode(data, 'utf-8')
+        return data

     def write(self, buf):
-        if type(buf) != unicode:
-            buf = unicode(buf, 'utf-8')
-        return self.fileobj.write(buf.encode('utf-8'))
+        if Globals.use_unicode_paths:
+            if type(buf) != unicode:
+                buf = unicode(buf, 'utf-8')
+            buf = buf.encode('utf-8')
+        return self.fileobj.write(buf)

     def close(self):
         return self.fileobj.close()

@@ -1463,7 +1468,7 @@ class GzipFile(gzip.GzipFile):
         unicode with the filename."""
         if mode and 'b' not in mode:
             mode += 'b'
-        if type(filename) != unicode:
+        if type(filename) != unicode and Globals.use_unicode_paths:
             filename = unicode(filename, 'utf-8')
         fileobj = open(filename, mode or 'rb')
         gzip.GzipFile.__init__(self, filename.encode('utf-8'),

@@ -219,13 +219,13 @@ class StatsObj:
     def write_stats_to_rp(self, rp):
         """Write statistics string to given rpath"""
-        fp = rpath.UnicodeFile(rp.open("wb"))
+        fp = rpath.MaybeUnicode(rp.open("wb"))
         fp.write(self.get_stats_string())
         assert not fp.close()

     def read_stats_from_rp(self, rp):
         """Set statistics from rpath, return self for convenience"""
-        fp = rpath.UnicodeFile(rp.open("r"))
+        fp = rpath.MaybeUnicode(rp.open("r"))
         self.set_stats_from_string(fp.read())
         fp.close()
         return self

@@ -364,7 +364,7 @@ class FileStats:
         suffix = Globals.compression and 'data.gz' or 'data'
         cls._rp = increment.get_inc(rpbase, suffix, Time.curtime)
         assert not cls._rp.lstat()
-        cls._fileobj = rpath.UnicodeFile(cls._rp.open("wb",
-                                         compress = Globals.compression))
+        cls._fileobj = rpath.MaybeUnicode(cls._rp.open("wb",
+                                          compress = Globals.compression))
         cls._line_sep = Globals.null_separator and '\0' or '\n'
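
The Record2EA/Record2ACL hunks above apply the same switch when turning a record's filename into the tuple index rdiff-backup keys on. A rough Python 3 sketch of just that step, with the C.acl_unquote/encode helpers (rdiff-backup internals) left out; the function name and the bytes handling here are assumptions for illustration only.

def record_filename_to_index(filename, use_unicode_paths=True):
    """Split an (already unquoted) record filename into an index tuple."""
    if filename == b'.':
        return ()                              # '.' marks the root of the backup set
    if use_unicode_paths:
        return tuple(filename.decode('utf-8').split('/'))
    return tuple(filename.split(b'/'))         # flag off: keep raw byte components

print(record_filename_to_index(b'docs/r\xc3\xa9sum\xc3\xa9.txt'))
# ('docs', 'résumé.txt') with the flag on; byte components when it is off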