Commit 9931b1da authored by Chris McDonough

Minor cleanups and addition of comments, plus fleshing out of TODO.txt.

parent a3f73429
@@ -32,6 +32,7 @@ class BlobStorage(ProxyBase):
     implements(IBlobStorage)
     __slots__ = ('base_directory', 'dirty_oids')
+    # XXX CM: what is the purpose of specifying __slots__ here?
     def __new__(self, base_directory, storage):
         return ProxyBase.__new__(self, storage)
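A point of reference for the XXX question above: a minimal sketch (not part of the commit) of what __slots__ does in general. It suppresses the per-instance __dict__ and restricts instances to the named attributes; whether that is the intent for a ProxyBase subclass like BlobStorage is exactly what the comment asks. The class names below are made up for illustration.

    class WithSlots(object):
        __slots__ = ('base_directory', 'dirty_oids')

    class WithoutSlots(object):
        pass

    w = WithSlots()
    w.base_directory = '/tmp/blobs'    # allowed: named in __slots__
    try:
        w.extra = 1                    # not in __slots__ -> AttributeError
    except AttributeError:
        pass
    assert not hasattr(w, '__dict__')  # no per-instance dict is created

    v = WithoutSlots()
    v.extra = 1                        # ordinary instances accept any attribute
    assert hasattr(v, '__dict__')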
@@ -42,7 +43,8 @@ class BlobStorage(ProxyBase):
         self.base_directory = base_directory
         self.dirty_oids = []
-    def storeBlob(self, oid, oldserial, data, blobfilename, version, transaction):
+    def storeBlob(self, oid, oldserial, data, blobfilename, version,
+                  transaction):
         """Stores data that has a BLOB attached."""
         serial = self.store(oid, oldserial, data, version, transaction)
         assert isinstance(serial, str) # XXX in theory serials could be
@@ -55,19 +57,7 @@ class BlobStorage(ProxyBase):
             os.makedirs(targetpath, 0700)
         targetname = self._getCleanFilename(oid, serial)
-        try:
-            os.rename(blobfilename, targetname)
-        except OSError:
-            # XXX CM: I don't think this is a good idea; maybe just fail
-            # here instead of doing a brute force copy? This is awfully
-            # expensive and people won't know it's happening without
-            # at least a warning.
-            target = file(targetname, "wb")
-            source = file(blobfilename, "rb")
-            utils.cp(blobfile, target)
-            target.close()
-            source.close()
-            os.unlink(blobfilename)
+        utils.best_rename(blobfilename, targetname)
         # XXX if oid already in there, something is really hosed.
         # The underlying storage should have complained anyway
...
@@ -2,4 +2,24 @@
 Tests
 -----
+- Test packing.
+- Test import/export.
+- Test conflict behavior.
+- Test shared client usage of blob cache dir.
+- More ZEO tests.
+
+Features
+--------
+
+- Ensure we detect and play a failed txn involving blobs forward or
+  backward at startup.
 - Importing backward compatible ZEXP files (no \0BLOBSTART) used
+- More options for blob directory structures (e.g. dirstorage's
+  bushy/chunky/lawn/flat).
@@ -29,6 +29,3 @@ class IBlobStorage(Interface):
         Raises POSKeyError if the blobfile cannot be found.
         """
-    def getBlobDirectory():
-        """
-        """
@@ -351,6 +351,9 @@ class Connection(ExportImport, object):
                                             obj._p_blob_uncommitted,
                                             self._version, transaction)
                 obj._p_invalidate()
+                # XXX CM: do we invalidate the object here in order to
+                # ensure that the next attribute access of its
+                # name unghostifies it?
             else:
                 s = self._storage.store(oid, serial, p, self._version,
                                         transaction)
...
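For reference on the XXX question in the Connection hunk above: a hedged sketch (not part of the commit, assuming a standard ZODB/persistent install) of the ghosting behavior the comment refers to. _p_invalidate() drops the object's in-memory state, and the next attribute access reloads it from the storage. The Note class is a made-up example.

    import transaction
    from persistent import Persistent
    from ZODB.DB import DB
    from ZODB.MappingStorage import MappingStorage

    class Note(Persistent):
        def __init__(self, text):
            self.text = text

    db = DB(MappingStorage())
    conn = db.open()
    root = conn.root()
    root['note'] = Note('hello')
    transaction.commit()

    note = root['note']
    note._p_invalidate()              # ghostify: cached state is dropped
    assert note._p_changed is None    # None marks a ghost
    print(note.text)                  # attribute access unghostifies -> 'hello'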
@@ -329,14 +329,22 @@ def mktemp():
     return filename
 def best_rename(sourcename, targetname):
+    """ Try to rename via os.rename, but if we can't (for instance, if the
+    source and target are on separate partitions/volumes), fall back to copying
+    the file and unlinking the original. """
     try:
         os.rename(sourcename, targetname)
     except OSError:
-        # XXX This creates a race condition for un-locked return above
+        # XXX CM: I don't think this is a good idea; maybe just fail
+        # here instead of doing a brute force copy? This is awfully
+        # expensive and people won't know it's happening without
+        # at least a warning. It also increases the possibility of a race
+        # condition: both the source and target filenames exist at the
+        # same time.
         source = open(sourcename, "rb")
         target = open(targetname, "wb")
         while True:
-            chunk = source.read(4096)
+            chunk = source.read(1<<16)
             if not chunk:
                 break
             target.write(chunk)
...
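The XXX comment added to best_rename argues for failing rather than silently falling back to a bulk copy. For comparison, a hedged sketch (not part of the commit) of a stricter variant that copies only when os.rename fails specifically because source and target live on different filesystems (errno.EXDEV) and re-raises everything else; shutil.copyfile is used here purely for brevity, whereas the commit uses a manual read/write loop.

    import errno
    import os
    import shutil

    def strict_rename(sourcename, targetname):
        try:
            os.rename(sourcename, targetname)
        except OSError as e:
            if e.errno != errno.EXDEV:
                raise                   # fail loudly on anything unexpected
            # cross-device move: copy the bytes, then remove the original
            shutil.copyfile(sourcename, targetname)
            os.unlink(sourcename)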