Commit 71f7573b authored by Jim Fulton

Use binary pickles to deal with new oids generated by demo storage.

Added a new test class for testing packing without gc.

Moved some useful tests to PackableStorage so they are run by storages
without undo.
parent 9dd06e68
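Editorial sketch, not part of the commit: the dumps() hunk below switches the test helper's pickler to protocol 1 ("binary pickles") because the new demo-storage oids can contain arbitrary bytes. The snippet is a minimal, self-contained Python 3 illustration of the underlying issue; Stub, OidPickler, OidUnpickler and roundtrip are made-up names, not ZODB code. Protocol 0 writes a persistent id as newline-terminated text, so an id containing a newline byte cannot round-trip, while protocol 1 pickles the id itself.

# Illustrative sketch (not from this commit; Python 3, all names made up):
# protocol 0 writes a persistent id as newline-terminated text, so an id
# containing '\n' cannot round-trip; protocol 1 pickles the id itself.
import io
import pickle


class Stub:
    """Stand-in for a stored object identified by an oid-like string."""
    def __init__(self, oid):
        self.oid = oid


class OidPickler(pickle.Pickler):
    def persistent_id(self, obj):
        # Treat Stub instances as persistent references.
        return obj.oid if isinstance(obj, Stub) else None


class OidUnpickler(pickle.Unpickler):
    def persistent_load(self, pid):
        return pid  # hand the id back so we can inspect it


def roundtrip(oid, protocol):
    buf = io.BytesIO()
    OidPickler(buf, protocol).dump(Stub(oid))
    return OidUnpickler(io.BytesIO(buf.getvalue())).load()


tricky = "obj\n1"  # an id with an embedded newline, as a raw oid might have

print(repr(roundtrip(tricky, 1)))   # 'obj\n1' -- the binary pickle is fine
try:
    print(repr(roundtrip(tricky, 0)))
except Exception as exc:
    # A text pickle cannot safely carry the newline: depending on the
    # Python version this fails at dump or load time, or yields a
    # truncated id.
    print("protocol 0 failed:", exc)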
@@ -418,6 +418,7 @@ class DemoStorageTests(
     def checkPackWithMultiDatabaseReferences(self):
         pass # DemoStorage pack doesn't do gc
+    checkPackAllRevisions = checkPackWithMultiDatabaseReferences
 
 class HeartbeatTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
     """Make sure a heartbeat is being sent and that it does no harm
......
@@ -86,7 +86,7 @@ def dumps(obj):
             return obj.getoid()
         return None
     s = StringIO()
-    p = pickle.Pickler(s)
+    p = pickle.Pickler(s, 1)
     p.persistent_id = getpersid
     p.dump(obj)
     p.dump(None)
@@ -322,11 +322,6 @@ class PackableStorage(PackableStorageBase):
         transaction.commit()
         db.pack(time.time()+1)
         self.assertEqual(len(self._storage), 1)
-
-class PackableUndoStorage(PackableStorageBase):
-
     def checkPackAllRevisions(self):
         self._initroot()
@@ -443,11 +438,9 @@ class PackableUndoStorage(PackableStorageBase):
         # Create a persistent object, with some initial state
         obj1 = self._newobj()
         oid1 = obj1.getoid()
-        # Create another persistent object, with some initial state. Make
-        # sure it's oid is greater than the first object's oid.
+        # Create another persistent object, with some initial state.
         obj2 = self._newobj()
         oid2 = obj2.getoid()
-        self.failUnless(oid2 > oid1)
         # Link the root object to the persistent objects, in order to keep
         # them alive. Store the root object.
         root.obj1 = obj1
@@ -517,6 +510,50 @@ class PackableUndoStorage(PackableStorageBase):
         eq(pobj.getoid(), oid2)
         eq(pobj.value, 11)
+
+
+class PackableStorageWithOptionalGC(PackableStorage):
+
+    def checkPackAllRevisionsNoGC(self):
+        self._initroot()
+        eq = self.assertEqual
+        raises = self.assertRaises
+        # Create a `persistent' object
+        obj = self._newobj()
+        oid = obj.getoid()
+        obj.value = 1
+        # Commit three different revisions
+        revid1 = self._dostoreNP(oid, data=pdumps(obj))
+        obj.value = 2
+        revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
+        obj.value = 3
+        revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
+        # Now make sure all three revisions can be extracted
+        data = self._storage.loadSerial(oid, revid1)
+        pobj = pickle.loads(data)
+        eq(pobj.getoid(), oid)
+        eq(pobj.value, 1)
+        data = self._storage.loadSerial(oid, revid2)
+        pobj = pickle.loads(data)
+        eq(pobj.getoid(), oid)
+        eq(pobj.value, 2)
+        data = self._storage.loadSerial(oid, revid3)
+        pobj = pickle.loads(data)
+        eq(pobj.getoid(), oid)
+        eq(pobj.value, 3)
+        # Now pack all transactions; need to sleep a second to make
+        # sure that the pack time is greater than the last commit time.
+        now = packtime = time.time()
+        while packtime <= now:
+            packtime = time.time()
+        self._storage.pack(packtime, referencesf, gc=False)
+        # Only old revisions of the object should be gone. We don't gc
+        raises(KeyError, self._storage.loadSerial, oid, revid1)
+        raises(KeyError, self._storage.loadSerial, oid, revid2)
+        self._storage.loadSerial(oid, revid3)
+
+
+class PackableUndoStorage(PackableStorageBase):
 
     def checkPackUnlinkedFromRoot(self):
         eq = self.assertEqual
         db = DB(self._storage)
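A toy model (editorial sketch, not ZODB code; every name below is illustrative) of the distinction the new checkPackAllRevisionsNoGC test above exercises with pack(packtime, referencesf, gc=False): packing always drops non-current revisions, but only a pack with gc also drops objects unreachable from the root.

# Toy sketch of pack semantics; not the real storage API.
from typing import Dict, List, Set


def pack(revisions: Dict[str, List[bytes]],
         references: Dict[str, Set[str]],
         root: str,
         gc: bool) -> Dict[str, List[bytes]]:
    """Return a packed copy of revisions (oid -> list of revision data)."""
    if gc:
        # With gc, keep only objects reachable from the root.
        keep, stack = set(), [root]
        while stack:
            oid = stack.pop()
            if oid in keep:
                continue
            keep.add(oid)
            stack.extend(references.get(oid, ()))
    else:
        # Without gc, every object survives, referenced or not.
        keep = set(revisions)
    # Either way, only the current (latest) revision of a kept object remains.
    return {oid: revs[-1:] for oid, revs in revisions.items() if oid in keep}


revisions = {"root": [b"r1"], "a": [b"a1", b"a2", b"a3"], "orphan": [b"o1"]}
references = {"root": {"a"}}

print(pack(revisions, references, "root", gc=False))
# {'root': [b'r1'], 'a': [b'a3'], 'orphan': [b'o1']}
print(pack(revisions, references, "root", gc=True))
# {'root': [b'r1'], 'a': [b'a3']}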
......
@@ -77,8 +77,6 @@ class DemoStorageTests(
         pass # we don't support undo yet
     checkUndoZombie = checkLoadBeforeUndo
 
-    def checkPackWithMultiDatabaseReferences(self):
-        pass # we never do gc
 
 class DemoStorageWrappedBase(DemoStorageTests):
@@ -94,6 +92,13 @@ class DemoStorageWrappedBase(DemoStorageTests):
     def _makeBaseStorage(self):
         raise NotImplementedError
 
+    def checkPackOnlyOneObject(self):
+        pass # Wrapping demo storages don't do gc
+
+    def checkPackWithMultiDatabaseReferences(self):
+        pass # we never do gc
+
+    checkPackAllRevisions = checkPackWithMultiDatabaseReferences
 
 class DemoStorageWrappedAroundMappingStorage(DemoStorageWrappedBase):
 
     def _makeBaseStorage(self):
......
@@ -34,7 +34,7 @@ class MappingStorageTests(
     IteratorStorage.ExtendedIteratorStorage,
     IteratorStorage.IteratorStorage,
     MTStorage.MTStorage,
-    PackableStorage.PackableStorage,
+    PackableStorage.PackableStorageWithOptionalGC,
     RevisionStorage.RevisionStorage,
     Synchronization.SynchronizedStorage,
     ):
......