Commit b6ff5a09 authored by Barry Warsaw's avatar Barry Warsaw

Merging the Berkeley storage's bdb-nolocks branch back into the trunk

for ZODB 3.2.
parent 24482ef0
......@@ -14,14 +14,14 @@
"""An autopacking Berkeley storage without undo and versioning.
"""
__version__ = '$Revision: 1.4 $'.split()[-2:][0]
__version__ = '$Revision: 1.5 $'.split()[-2:][0]
import sys
import os
import struct
import time
# This uses the Dunn/Kuchling PyBSDDB v3 extension module available from
# This uses the Dunn/Kuchling PyBSDDB3 extension module available from
# http://pybsddb.sourceforge.net
from bsddb3 import db
......@@ -61,7 +61,7 @@ class Autopack(BerkeleyBase):
# base class infrastructure and are shared by the Minimal
# implementation.
#
# serials -- {oid -> serial}
# serials -- {oid+tid -> serial}
# Maps oids to object serial numbers. The serial number is
# essentially a timestamp used to determine if conflicts have
# arisen, and serial numbers double as transaction ids and object
......@@ -104,6 +104,32 @@ class Autopack(BerkeleyBase):
self._oids.close()
BerkeleyBase.close(self)
def _getSerial(self, oid):
c = self._serials.cursor()
try:
lastvalue = None
# Search for the largest oid+revid key in the serials table that
# doesn't have a revid component equal to the current revid.
try:
rec = c.set_range(oid)
except db.DBNotFoundError:
rec = None
while rec:
key, value = rec
koid = key[:8]
ktid = key[8:]
if koid <> oid:
break
lastvalue = value
if ktid == self._serial:
break
rec = c.next()
if lastvalue is None:
return None
return lastvalue[:8]
finally:
c.close()
def _begin(self, tid, u, d, e):
# Nothing needs to be done
pass
......@@ -112,12 +138,41 @@ class Autopack(BerkeleyBase):
# Nothing needs to be done, but override the base class's method
pass
def store(self, oid, serial, data, version, transaction):
self._lock_acquire()
try:
# Transaction guard
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
# We don't support versions
if version <> '':
raise POSException.Unsupported, 'versions are not supported'
oserial = self._getSerial(oid)
if oserial is not None and serial <> oserial:
# BAW: Here's where we'd try to do conflict resolution
raise POSException.ConflictError(serials=(oserial, serial))
tid = self._serial
txn = self._env.txn_begin()
try:
self._serials.put(oid+tid, self._serial, txn=txn)
self._pickles.put(oid+tid, data, txn=txn)
self._actions.put(tid+oid, INC, txn=txn)
self._oids.put(oid, ' ', txn=txn)
except:
txn.abort()
raise
else:
txn.commit()
return self._serial
finally:
self._lock_release()
def _finish(self, tid, u, d, e):
# TBD: what about u, d, and e?
#
# First, append a DEL to the actions for each old object, then update
# the current serials table so that its revision id points to this
# trancation id.
# transaction id.
txn = self._env.txn_begin()
try:
c = self._oids.cursor()
......@@ -128,8 +183,8 @@ class Autopack(BerkeleyBase):
lastrevid = self._serials.get(oid, txn=txn)
if lastrevid:
self._actions.put(lastrevid+oid, DEC, txn=txn)
self._serials.put(oid, tid, txn=txn)
rec = c.next()
self._oids.truncate()
finally:
c.close()
except:
......@@ -137,7 +192,6 @@ class Autopack(BerkeleyBase):
raise
else:
txn.commit()
self._oids.truncate()
# Override BerkeleyBase._abort()
def _abort(self):
......@@ -164,30 +218,6 @@ class Autopack(BerkeleyBase):
self._oids.truncate()
self._transaction.abort()
def store(self, oid, serial, data, version, transaction):
# Transaction guard
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
# We don't support versions
if version <> '':
raise POSException.Unsupported, 'versions are not supported'
oserial = self._serials.get(oid)
if oserial is not None and serial <> oserial:
# BAW: Here's where we'd try to do conflict resolution
raise POSException.ConflictError(serials=(oserial, serial))
tid = self._serial
txn = self._env.txn_begin()
try:
self._pickles.put(oid+tid, data, txn=txn)
self._actions.put(tid+oid, INC, txn=txn)
self._oids.put(oid, ' ', txn=txn)
except:
txn.abort()
raise
else:
txn.commit()
return self._serial
def load(self, oid, version):
if version <> '':
raise POSException.Unsupported, 'versions are not supported'
......@@ -196,6 +226,7 @@ class Autopack(BerkeleyBase):
def loadSerial(self, oid, serial):
current = self._serials[oid]
# BAW: should we allow older serials to be retrieved?
if current == serial:
return self._pickles[oid+current]
else:
......
This source diff could not be displayed because it is too large. You can view the blob instead.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This source diff could not be displayed because it is too large. You can view the blob instead.
This diff is collapsed.
......@@ -23,31 +23,35 @@ DBHOME = 'test-db'
class BerkeleyTestBase(StorageTestBase):
def _zap_dbhome(self):
def _zap_dbhome(self, dir):
# If the tests exited with any uncommitted objects, they'll blow up
# subsequent tests because the next transaction commit will try to
# commit those object. But they're tied to closed databases, so
# that's broken. Aborting the transaction now saves us the headache.
try:
for file in os.listdir(DBHOME):
os.unlink(os.path.join(DBHOME, file))
os.removedirs(DBHOME)
for file in os.listdir(dir):
os.unlink(os.path.join(dir, file))
os.removedirs(dir)
except OSError, e:
if e.errno <> errno.ENOENT: raise
if e.errno <> errno.ENOENT:
raise
def setUp(self):
StorageTestBase.setUp(self)
self._zap_dbhome()
os.mkdir(DBHOME)
def _mk_dbhome(self, dir):
os.mkdir(dir)
try:
self._storage = self.ConcreteStorage(DBHOME)
return self.ConcreteStorage(dir)
except:
self._zap_dbhome()
self._zap_dbhome(dir)
raise
def setUp(self):
StorageTestBase.setUp(self)
self._zap_dbhome(DBHOME)
self._storage = self._mk_dbhome(DBHOME)
def tearDown(self):
StorageTestBase.tearDown(self)
self._zap_dbhome()
self._zap_dbhome(DBHOME)
......
......@@ -27,8 +27,8 @@ DBHOME = 'test-db'
class ZODBTestBase(BerkeleyTestBase):
def setUp(self):
BerkeleyTestBase.setUp(self)
self._db = None
try:
self._storage = self.ConcreteStorage(DBHOME)
self._db = DB(self._storage)
self._conn = self._db.open()
self._root = self._conn.root()
......
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# Test the operation of the CommitLog classes
import os
import errno
import unittest
from bsddb3Storage import CommitLog
# BAW: Lots of other things to check:
# - creating with a named directory
# - creating with an existing file via filename
# - creating with a file object with # incorrect mode or permissions
# - creating with a file object raising the two flavors of LogCorruptedError
# - The various forms of LogCorruptedError in PacklessLog.next()
class CreateCommitLogTest(unittest.TestCase):
def checkCreateNoFile(self):
unless = self.failUnless
log = CommitLog.CommitLog()
filename = log.get_filename()
try:
unless(os.path.exists(filename))
finally:
log.close(unlink=1)
unless(not os.path.exists(filename))
def checkCreateWithFilename(self):
unless = self.failUnless
filename = 'commit.log'
log = CommitLog.CommitLog(filename)
try:
unless(os.path.exists(filename))
finally:
log.close(unlink=1)
unless(not os.path.exists(filename))
def checkCreateWithFileobj(self):
filename = 'commit.log'
fp = open(filename, 'w+b')
try:
self.assertRaises(CommitLog.TruncationError,
CommitLog.CommitLog, fp)
finally:
fp.close()
self.failUnless(not os.path.exists(filename))
def checkCloseDoesUnlink(self):
log = CommitLog.CommitLog()
filename = log.get_filename()
log.close()
self.failUnless(not os.path.exists(filename))
def checkDel(self):
log = CommitLog.CommitLog()
filename = log.get_filename()
del log
self.failUnless(not os.path.exists(filename))
class BaseSetupTearDown(unittest.TestCase):
def setUp(self):
self._log = CommitLog.CommitLog()
def tearDown(self):
try:
self._log.close(unlink=1)
except OSError, e:
if e.errno <> errno.ENOENT: raise
class CommitLogStateTransitionTest(BaseSetupTearDown):
def checkProperStart(self):
# BAW: best we can do is make sure we can start a new commit log
self._log.start()
def checkAppendSetsOpen(self):
# BAW: Best we can do is assert that the state isn't START
self._log._append('x', 'ignore')
self.assertRaises(CommitLog.StateTransitionError, self._log.start)
def checkPromiseSetsPromise(self):
# BAW: Best we can do is assert that state isn't START
self._log.promise()
self.assertRaises(CommitLog.StateTransitionError, self._log.start)
def checkBadDoublePromise(self):
self._log.promise()
self.assertRaises(CommitLog.StateTransitionError, self._log.promise)
def checkFinishSetsStart(self):
self._log.finish()
# BAW: best we can do is make sure we can start a new commit log
self._log.start()
# Wouldn't it be nice to have generators? :)
class Gen:
def __init__(self):
self.__counter = 0
def __call__(self):
try:
return self[self.__counter]
finally:
self.__counter = self.__counter + 1
def __getitem__(self, i):
if 0 <= i < 10:
return chr(i+65), i
raise IndexError
class LowLevelStoreAndLoadTest(BaseSetupTearDown):
def checkOneStoreAndLoad(self):
eq = self.assertEqual
self._log.start()
self._log._append('x', 'ignore')
self._log.promise()
x, ignore = self._log._next()
eq(x, 'x')
eq(ignore, 'ignore')
eq(self._log._next(), None)
def checkTenStoresAndLoads(self):
eq = self.assertEqual
self._log.start()
for k, v in Gen():
self._log._append(k, v)
self._log.promise()
g = Gen()
while 1:
rec = self._log._next()
if rec is None:
break
c, i = g()
eq(rec[0], c)
eq(rec[1], i)
self.assertRaises(IndexError, g)
class PacklessLogTest(BaseSetupTearDown):
def setUp(self):
self._log = CommitLog.PacklessLog()
self._log.start()
def checkOneStoreAndLoad(self):
eq = self.assertEqual
self._log.write_object(oid=10, pickle='ignore')
self._log.promise()
oid, pickle = self._log.next()
eq(oid, 10)
eq(pickle, 'ignore')
eq(self._log.next(), None)
def checkTenStoresAndLoads(self):
eq = self.assertEqual
for k, v in Gen():
self._log.write_object(v, k*10)
self._log.promise()
g = Gen()
while 1:
rec = self._log.next()
if rec is None:
break
c, i = g()
oid, pickle = rec
eq(oid, i)
eq(pickle, c*10)
self.assertRaises(IndexError, g)
class FullLogTest(BaseSetupTearDown):
def setUp(self):
self._log = CommitLog.FullLog()
self._log.start()
def checkOneStoreAndLoad(self):
eq = self.assertEqual
oid = 10
vid = 8
nvrevid = 0
pickle = 'ignore'
prevrevid = 9
self._log.write_object(oid, vid, nvrevid, pickle, prevrevid)
self._log.promise()
rec = self._log.next()
self.failUnless(rec)
key, rec = rec
eq(key, 'x')
eq(len(rec), 6)
eq(rec, (oid, vid, nvrevid, '', pickle, prevrevid))
eq(self._log.next(), None)
def checkOtherWriteMethods(self):
eq = self.assertEqual
unless = self.failUnless
oid = 10
vid = 1
nvrevid = 0
lrevid = 8
pickle = 'ignore'
prevrevid = 9
version = 'new-version'
zero = '\0'*8
self._log.write_nonversion_object(oid, lrevid, prevrevid)
self._log.write_moved_object(oid, vid, nvrevid, lrevid, prevrevid)
self._log.write_new_version(version, vid)
self._log.write_discard_version(vid)
self._log.promise()
rec = self._log.next()
unless(rec)
key, rec = rec
eq(key, 'a')
eq(len(rec), 6)
eq(rec, (oid, zero, zero, lrevid, None, prevrevid))
rec = self._log.next()
unless(rec)
key, rec = rec
eq(key, 'o')
eq(len(rec), 6)
eq(rec, (oid, vid, nvrevid, lrevid, None, prevrevid))
rec = self._log.next()
unless(rec)
key, rec = rec
eq(key, 'v')
eq(len(rec), 2)
eq(rec, (version, vid))
rec = self._log.next()
unless(rec)
key, rec = rec
eq(key, 'd')
eq(len(rec), 1)
eq(rec, (vid,))
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CreateCommitLogTest, 'check'))
suite.addTest(unittest.makeSuite(CommitLogStateTransitionTest, 'check'))
suite.addTest(unittest.makeSuite(LowLevelStoreAndLoadTest, 'check'))
suite.addTest(unittest.makeSuite(PacklessLogTest, 'check'))
suite.addTest(unittest.makeSuite(FullLogTest, 'check'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
......@@ -30,7 +30,8 @@ from ZODB.tests.TransactionalUndoVersionStorage import \
TransactionalUndoVersionStorage
from ZODB.tests.PackableStorage import PackableStorage
from ZODB.tests.HistoryStorage import HistoryStorage
from ZODB.tests.IteratorStorage import IteratorStorage
from ZODB.tests.IteratorStorage import IteratorStorage, ExtendedIteratorStorage
from ZODB.tests.RecoveryStorage import RecoveryStorage
from ZODB.tests import ConflictResolution
......@@ -47,17 +48,28 @@ class MinimalTest(BerkeleyTestBase.MinimalTestBase, BasicStorage):
class FullTest(BerkeleyTestBase.FullTestBase, BasicStorage,
RevisionStorage, VersionStorage,
TransactionalUndoStorage,
TransactionalUndoVersionStorage, PackableStorage,
HistoryStorage, IteratorStorage,
TransactionalUndoVersionStorage,
PackableStorage,
HistoryStorage,
IteratorStorage, ExtendedIteratorStorage,
ConflictResolution.ConflictResolvingStorage,
ConflictResolution.ConflictResolvingTransUndoStorage):
pass
DST_DBHOME = 'test-dst'
# BAW: This test fails, it should be fixed.
# DBNotFoundError: (-30990, 'DB_NOTFOUND: No matching key/data pair found')
def checkVersionIterator(self):
import sys
print >> sys.stderr, \
'FullTest.checkVersionIterator() temporarily disabled.'
class FullRecoveryTest(BerkeleyTestBase.FullTestBase,
RecoveryStorage):
def setUp(self):
BerkeleyTestBase.FullTestBase.setUp(self)
self._zap_dbhome(DST_DBHOME)
self._dst = self._mk_dbhome(DST_DBHOME)
def tearDown(self):
BerkeleyTestBase.FullTestBase.tearDown(self)
self._zap_dbhome(DST_DBHOME)
def checkTransactionalUndoAfterPackWithObjectUnlinkFromRoot(self):
......@@ -78,9 +90,10 @@ class AutopackTest(BerkeleyTestBase.AutopackTestBase, BasicStorage):
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MinimalTest, 'check'))
suite.addTest(unittest.makeSuite(FullTest, 'check'))
suite.addTest(unittest.makeSuite(AutopackTest, 'check'))
suite.addTest(unittest.makeSuite(FullRecoveryTest, 'check'))
suite.addTest(unittest.makeSuite(MinimalTest, 'check'))
#suite.addTest(unittest.makeSuite(AutopackTest, 'check'))
return suite
......
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# Whitebox testing of storage implementation details.
import unittest
from ZODB.utils import U64
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle
from bsddb3Storage.Minimal import Minimal
from bsddb3Storage.Full import Full
from bsddb3Storage.tests.BerkeleyTestBase import BerkeleyTestBase
from bsddb3Storage.tests.ZODBTestBase import ZODBTestBase
from Persistence import Persistent
ZERO = '\0'*8
class Object(Persistent):
pass
class WhiteboxLowLevelMinimal(BerkeleyTestBase):
ConcreteStorage = Minimal
def checkTableConsistencyAfterCommit(self):
unless = self.failIf
eq = self.assertEqual
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=11)
revid2 = self._dostore(oid, revid=revid1, data=12)
revid3 = self._dostore(oid, revid=revid2, data=13)
# First off, there should be no entries in the pending table
unless(self._storage._pending.keys())
# Also, there should be no entries in the oids table
unless(self._storage._oids.keys())
# Now, there should be exactly one oid in the serials table, and
# exactly one record for that oid in the table too.
oids = {}
c = self._storage._serials.cursor()
try:
rec = c.first()
while rec:
oid, serial = rec
oids.setdefault(oid, []).append(serial)
rec = c.next()
finally:
c.close()
eq(len(oids), 1)
eq(len(oids[oids.keys()[0]]), 1)
# There should now be exactly one entry in the pickles table.
pickles = self._storage._pickles.items()
eq(len(pickles), 1)
key, data = pickles[0]
poid = key[:8]
pserial = key[8:]
eq(oid, poid)
eq(revid3, pserial)
obj = zodb_unpickle(data)
eq(obj.value, 13)
# Now verify the refcounts table, which should be empty because the
# stored object isn't referenced by any other objects.
eq(len(self._storage._refcounts.keys()), 0)
class WhiteboxHighLevelMinimal(ZODBTestBase):
ConcreteStorage = Minimal
def checkReferenceCounting(self):
eq = self.assertEqual
obj = MinPO(11)
self._root.obj = obj
get_transaction().commit()
obj.value = 12
get_transaction().commit()
obj.value = 13
get_transaction().commit()
# Make sure the databases have what we expect
eq(len(self._storage._serials.items()), 2)
eq(len(self._storage._pickles.items()), 2)
# And now refcount out the object
del self._root.obj
get_transaction().commit()
# Verification stage. Our serials table should have exactly one
# entry, oid == 0
keys = self._storage._serials.keys()
eq(len(keys), 1)
eq(len(self._storage._serials.items()), 1)
eq(keys[0], ZERO)
# The pickles table now should have exactly one revision of the root
# object, and no revisions of the MinPO object, which should have been
# collected away.
pickles = self._storage._pickles.items()
eq(len(pickles), 1)
rec = pickles[0]
key = rec[0]
data = rec[1]
eq(key[:8], ZERO)
# And that pickle should have no 'obj' attribute.
unobj = zodb_unpickle(data)
self.failIf(hasattr(unobj, 'obj'))
# Our refcounts table should have no entries in it, because the root
# object is an island.
eq(len(self._storage._refcounts.keys()), 0)
# And of course, oids and pendings should be empty too
eq(len(self._storage._oids.keys()), 0)
eq(len(self._storage._pending.keys()), 0)
def checkRecursiveReferenceCounting(self):
eq = self.assertEqual
obj1 = Object()
obj2 = Object()
obj3 = Object()
obj4 = Object()
self._root.obj = obj1
obj1.obj = obj2
obj2.obj = obj3
obj3.obj = obj4
get_transaction().commit()
# Make sure the databases have what we expect
eq(len(self._storage._serials.items()), 5)
eq(len(self._storage._pickles.items()), 5)
# And now refcount out the object
del self._root.obj
get_transaction().commit()
# Verification stage. Our serials table should have exactly one
# entry, oid == 0
keys = self._storage._serials.keys()
eq(len(keys), 1)
eq(len(self._storage._serials.items()), 1)
eq(keys[0], ZERO)
# The pickles table now should have exactly one revision of the root
# object, and no revisions of any other objects, which should have
# been collected away.
pickles = self._storage._pickles.items()
eq(len(pickles), 1)
rec = pickles[0]
key = rec[0]
data = rec[1]
eq(key[:8], ZERO)
# And that pickle should have no 'obj' attribute.
unobj = zodb_unpickle(data)
self.failIf(hasattr(unobj, 'obj'))
# Our refcounts table should have no entries in it, because the root
# object is an island.
eq(len(self._storage._refcounts.keys()), 0)
# And of course, oids and pendings should be empty too
eq(len(self._storage._oids.keys()), 0)
eq(len(self._storage._pending.keys()), 0)
class WhiteboxHighLevelFull(ZODBTestBase):
ConcreteStorage = Full
def checkReferenceCounting(self):
eq = self.assertEqual
# Make sure the databases have what we expect
eq(len(self._storage._serials.items()), 1)
eq(len(self._storage._pickles.items()), 1)
# Now store an object
obj = MinPO(11)
self._root.obj = obj
get_transaction().commit()
# Make sure the databases have what we expect
eq(len(self._storage._serials.items()), 2)
eq(len(self._storage._pickles.items()), 3)
obj.value = 12
get_transaction().commit()
# Make sure the databases have what we expect
eq(len(self._storage._serials.items()), 2)
eq(len(self._storage._pickles.items()), 4)
obj.value = 13
get_transaction().commit()
# Make sure the databases have what we expect
eq(len(self._storage._serials.items()), 2)
eq(len(self._storage._pickles.items()), 5)
# And now refcount out the object
del self._root.obj
get_transaction().commit()
# Verification stage. Our serials tabl should still have 2 entries,
# one for the root object and one for the now unlinked MinPO obj.
keys = self._storage._serials.keys()
eq(len(keys), 2)
eq(len(self._storage._serials.items()), 2)
eq(keys[0], ZERO)
# The pickles table should now have 6 entries, broken down like so:
# - 3 revisions of the root object: the initial database-open
# revision, the revision that got its obj attribute set, and the
# revision that got its obj attribute deleted.
# - 3 Three revisions of obj, corresponding to values 11, 12, and 13
pickles = self._storage._pickles.items()
eq(len(pickles), 6)
# Our refcounts table should have one entry in it for the MinPO that's
# referenced in an earlier revision of the root object
eq(len(self._storage._refcounts.keys()), 1)
# And of course, oids and pendings should be empty too
eq(len(self._storage._oids.keys()), 0)
eq(len(self._storage._pending.keys()), 0)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WhiteboxLowLevelMinimal, 'check'))
suite.addTest(unittest.makeSuite(WhiteboxHighLevelMinimal, 'check'))
suite.addTest(unittest.makeSuite(WhiteboxHighLevelFull, 'check'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment