Commit 88221040 authored by Jim Fulton's avatar Jim Fulton Committed by GitHub

Merge pull request #77 from zopefoundation/handle-serial-4

Better support of the new API to notify of resolved conflicts (store/tpc_finish)
parents 05be22e0 09423921
...@@ -269,7 +269,7 @@ class BaseStorage(UndoLogCompatible): ...@@ -269,7 +269,7 @@ class BaseStorage(UndoLogCompatible):
if transaction is not self._transaction: if transaction is not self._transaction:
raise POSException.StorageTransactionError( raise POSException.StorageTransactionError(
"tpc_vote called with wrong transaction") "tpc_vote called with wrong transaction")
self._vote() return self._vote()
finally: finally:
self._lock_release() self._lock_release()
...@@ -398,8 +398,8 @@ def copy(source, dest, verbose=0): ...@@ -398,8 +398,8 @@ def copy(source, dest, verbose=0):
r.data_txn, transaction) r.data_txn, transaction)
else: else:
pre = preget(oid, None) pre = preget(oid, None)
s = dest.store(oid, pre, r.data, r.version, transaction) dest.store(oid, pre, r.data, r.version, transaction)
preindex[oid] = s preindex[oid] = tid
dest.tpc_vote(transaction) dest.tpc_vote(transaction)
dest.tpc_finish(transaction) dest.tpc_finish(transaction)
......
...@@ -613,16 +613,17 @@ class Connection(ExportImport, object): ...@@ -613,16 +613,17 @@ class Connection(ExportImport, object):
raise InvalidObjectReference(obj, obj._p_jar) raise InvalidObjectReference(obj, obj._p_jar)
elif oid in self._added: elif oid in self._added:
assert obj._p_serial == z64 assert obj._p_serial == z64
elif obj._p_changed: elif obj._p_changed and oid not in self._creating:
if oid in self._invalidated: if oid in self._invalidated:
resolve = getattr(obj, "_p_resolveConflict", None) resolve = getattr(obj, "_p_resolveConflict", None)
if resolve is None: if resolve is None:
raise ConflictError(object=obj) raise ConflictError(object=obj)
self._modified.append(oid)
else: else:
# Nothing to do. It's been said that it's legal, e.g., for # Nothing to do. It's been said that it's legal, e.g., for
# an object to set _p_changed to false after it's been # an object to set _p_changed to false after it's been
# changed and registered. # changed and registered.
# And new objects that are registered after any referrer are
# already processed.
continue continue
self._store_objects(ObjectWriter(obj), transaction) self._store_objects(ObjectWriter(obj), transaction)
...@@ -793,7 +794,7 @@ class Connection(ExportImport, object): ...@@ -793,7 +794,7 @@ class Connection(ExportImport, object):
raise raise
if s: if s:
if type(s[0]) is bytes: if type(next(iter(s))) is bytes:
for oid in s: for oid in s:
self._handle_serial(oid) self._handle_serial(oid)
return return
......
...@@ -1010,7 +1010,12 @@ class TransactionalUndo(object): ...@@ -1010,7 +1010,12 @@ class TransactionalUndo(object):
self._oids.update(result[1]) self._oids.update(result[1])
def tpc_vote(self, transaction): def tpc_vote(self, transaction):
for oid, _ in self._storage.tpc_vote(transaction) or (): result = self._storage.tpc_vote(transaction)
if result:
if isinstance(next(iter(result)), bytes):
self._oids.update(result)
else:
for oid, _ in result:
self._oids.add(oid) self._oids.add(oid)
def tpc_finish(self, transaction): def tpc_finish(self, transaction):
......
...@@ -713,9 +713,8 @@ class BlobStorageMixin(object): ...@@ -713,9 +713,8 @@ class BlobStorageMixin(object):
"""Stores data that has a BLOB attached.""" """Stores data that has a BLOB attached."""
assert not version, "Versions aren't supported." assert not version, "Versions aren't supported."
serial = self.store(oid, oldserial, data, '', transaction) serial = self.store(oid, oldserial, data, '', transaction)
self._blob_storeblob(oid, serial, blobfilename) self._blob_storeblob(oid, self._tid, blobfilename)
return serial
return self._tid
def temporaryDirectory(self): def temporaryDirectory(self):
return self.fshelper.temp_dir return self.fshelper.temp_dir
...@@ -764,8 +763,9 @@ class BlobStorage(BlobStorageMixin): ...@@ -764,8 +763,9 @@ class BlobStorage(BlobStorageMixin):
# We need to override the base storage's tpc_finish instead of # We need to override the base storage's tpc_finish instead of
# providing a _finish method because methods found on the proxied # providing a _finish method because methods found on the proxied
# object aren't rebound to the proxy # object aren't rebound to the proxy
self.__storage.tpc_finish(*arg, **kw) tid = self.__storage.tpc_finish(*arg, **kw)
self._blob_tpc_finish() self._blob_tpc_finish()
return tid
def tpc_abort(self, *arg, **kw): def tpc_abort(self, *arg, **kw):
# We need to override the base storage's abort instead of # We need to override the base storage's abort instead of
......
...@@ -975,7 +975,7 @@ class IStorageUndoable(IStorage): ...@@ -975,7 +975,7 @@ class IStorageUndoable(IStorage):
two-phase commit (after tpc_begin but before tpc_vote). It two-phase commit (after tpc_begin but before tpc_vote). It
returns a serial (transaction id) and a sequence of object ids returns a serial (transaction id) and a sequence of object ids
for objects affected by the transaction. The serial is ignored for objects affected by the transaction. The serial is ignored
and may be None. and may be None. The return from this method may be None.
""" """
# Used by DB (Actually, by TransactionalUndo) # Used by DB (Actually, by TransactionalUndo)
......
"""Adapt non-IMultiCommitStorage storages to IMultiCommitStorage
"""
import zope.interface
from .ConflictResolution import ResolvedSerial
class MultiCommitAdapter:
    """Adapt a non-IMultiCommitStorage storage to IMultiCommitStorage.

    The new two-phase-commit API reports conflict-resolved oids from
    ``tpc_vote`` and the committed tid from ``tpc_finish``.  This adapter
    wraps an old-style storage (which reports ``ResolvedSerial`` per
    store call and passes the tid only to the ``tpc_finish`` callback)
    and presents the new behavior.
    """

    def __init__(self, storage):
        self._storage = storage
        # Re-advertise every interface the wrapped storage provides so
        # the adapter is a drop-in replacement for it.
        ifaces = zope.interface.providedBy(storage)
        zope.interface.alsoProvides(self, ifaces)
        self._resolved = set()  # {OID}, here to make linters happy

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped storage, caching
        # the result so later lookups bypass __getattr__ entirely.
        v = getattr(self._storage, name)
        self.__dict__[name] = v
        return v

    def tpc_begin(self, *args):
        # Start collecting conflict-resolved oids for this transaction.
        self._storage.tpc_begin(*args)
        self._resolved = set()

    def store(self, oid, *args):
        # Old API: store() returns ResolvedSerial when a conflict was
        # resolved for this oid.  Record it; return nothing (new API).
        if self._storage.store(oid, *args) == ResolvedSerial:
            self._resolved.add(oid)

    def storeBlob(self, oid, *args):
        # Old API: storeBlob() may return a bare serial (bytes) or a
        # sequence of (oid, serial) pairs.  Normalize to pairs.
        s = self._storage.storeBlob(oid, *args)
        if s:
            if isinstance(s, bytes):
                s = ((oid, s), )
            for oid, serial in s:
                # Bug fix: compare the per-oid *serial*; the original
                # compared the whole sequence ``s``, so resolved oids
                # were never recorded here.
                if serial == ResolvedSerial:
                    self._resolved.add(oid)

    def undo(self, transaction_id, transaction):
        # Old API returns (tid, oids); the affected oids must be
        # reported from tpc_vote under the new API.
        r = self._storage.undo(transaction_id, transaction)
        if r:
            self._resolved.update(r[1])

    def tpc_vote(self, *args):
        # Merge any resolutions reported by the old-style vote with
        # those collected from store/storeBlob/undo, and return the
        # full set of resolved oids (new API).
        s = self._storage.tpc_vote(*args)
        for (oid, serial) in (s or ()):
            if serial == ResolvedSerial:
                self._resolved.add(oid)
        return self._resolved

    def tpc_finish(self, transaction, f=lambda tid: None):
        # Capture the committed tid from the callback so it can be
        # *returned*, as the new API requires.
        t = []

        def func(tid):
            t.append(tid)
            f(tid)

        self._storage.tpc_finish(transaction, func)
        return t[0]

    def __len__(self):
        return len(self._storage)
...@@ -117,7 +117,7 @@ class MVCCMappingStorage(MappingStorage): ...@@ -117,7 +117,7 @@ class MVCCMappingStorage(MappingStorage):
def tpc_finish(self, transaction, func = lambda tid: None): def tpc_finish(self, transaction, func = lambda tid: None):
self._data_snapshot = None self._data_snapshot = None
MappingStorage.tpc_finish(self, transaction, func) return MappingStorage.tpc_finish(self, transaction, func)
def tpc_abort(self, transaction): def tpc_abort(self, transaction):
self._data_snapshot = None self._data_snapshot = None
......
...@@ -111,7 +111,12 @@ class TransactionalUndoStorage: ...@@ -111,7 +111,12 @@ class TransactionalUndoStorage:
undo_result = self._storage.undo(tid, t) undo_result = self._storage.undo(tid, t)
if undo_result: if undo_result:
oids.extend(undo_result[1]) oids.extend(undo_result[1])
oids.extend(oid for (oid, _) in self._storage.tpc_vote(t) or ()) v = self._storage.tpc_vote(t)
if v:
if isinstance(next(iter(v)), bytes):
oids.extend(v)
else:
oids.extend(oid for (oid, _) in v)
return oids return oids
def undo(self, tid, note): def undo(self, tid, note):
......
...@@ -31,41 +31,20 @@ Put some revisions of a blob object in our database and on the filesystem: ...@@ -31,41 +31,20 @@ Put some revisions of a blob object in our database and on the filesystem:
>>> import os >>> import os
>>> tids = [] >>> tids = []
>>> times = [] >>> times = []
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> blob = Blob() >>> blob = Blob()
>>> with blob.open('w') as file:
... _ = file.write(b'this is blob data 0')
>>> root['blob'] = blob
>>> transaction.commit()
>>> tids.append(blob._p_serial)
>>> nothing = transaction.begin() >>> for i in range(5):
>>> times.append(new_time()) ... _ = transaction.begin()
>>> with root['blob'].open('w') as file: ... times.append(new_time())
... _ = file.write(b'this is blob data 1') ... with blob.open('w') as file:
>>> transaction.commit() ... _ = file.write(b'this is blob data ' + str(i).encode())
>>> tids.append(blob._p_serial) ... if i:
... tids.append(blob._p_serial)
>>> nothing = transaction.begin() ... else:
>>> times.append(new_time()) ... root['blob'] = blob
>>> with root['blob'].open('w') as file: ... transaction.commit()
... _ = file.write(b'this is blob data 2')
>>> transaction.commit() >>> blob._p_activate()
>>> tids.append(blob._p_serial)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 3')
>>> transaction.commit()
>>> tids.append(blob._p_serial)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 4')
>>> transaction.commit()
>>> tids.append(blob._p_serial) >>> tids.append(blob._p_serial)
>>> oid = root['blob']._p_oid >>> oid = root['blob']._p_oid
......
...@@ -390,10 +390,6 @@ stored are discarded. ...@@ -390,10 +390,6 @@ stored are discarded.
... '', t) ... '', t)
>>> serials = blob_storage.tpc_vote(t) >>> serials = blob_storage.tpc_vote(t)
>>> if s1 is None:
... s1 = [s for (oid, s) in serials if oid == blob._p_oid][0]
>>> if s2 is None:
... s2 = [s for (oid, s) in serials if oid == new_oid][0]
>>> blob_storage.tpc_abort(t) >>> blob_storage.tpc_abort(t)
...@@ -402,14 +398,7 @@ Now, the serial for the existing blob should be the same: ...@@ -402,14 +398,7 @@ Now, the serial for the existing blob should be the same:
>>> blob_storage.load(blob._p_oid, '') == (olddata, oldserial) >>> blob_storage.load(blob._p_oid, '') == (olddata, oldserial)
True True
And we shouldn't be able to read the data that we saved: The old data should be unaffected:
>>> blob_storage.loadBlob(blob._p_oid, s1)
Traceback (most recent call last):
...
POSKeyError: 'No blob file at <BLOB STORAGE PATH>'
Of course the old data should be unaffected:
>>> with open(blob_storage.loadBlob(blob._p_oid, oldserial)) as fp: >>> with open(blob_storage.loadBlob(blob._p_oid, oldserial)) as fp:
... fp.read() ... fp.read()
...@@ -422,11 +411,6 @@ Similarly, the new object wasn't added to the storage: ...@@ -422,11 +411,6 @@ Similarly, the new object wasn't added to the storage:
... ...
POSKeyError: 0x... POSKeyError: 0x...
>>> blob_storage.loadBlob(blob._p_oid, s2)
Traceback (most recent call last):
...
POSKeyError: 'No blob file at <BLOB STORAGE PATH>'
.. clean up .. clean up
>>> tm1.abort() >>> tm1.abort()
......
...@@ -1025,9 +1025,14 @@ def doctest_lp485456_setattr_in_setstate_doesnt_cause_multiple_stores(): ...@@ -1025,9 +1025,14 @@ def doctest_lp485456_setattr_in_setstate_doesnt_cause_multiple_stores():
storing '\x00\x00\x00\x00\x00\x00\x00\x00' storing '\x00\x00\x00\x00\x00\x00\x00\x00'
storing '\x00\x00\x00\x00\x00\x00\x00\x01' storing '\x00\x00\x00\x00\x00\x00\x00\x01'
>>> conn.add(C()) Retry with the new object registered before its referrer.
>>> z = C()
>>> conn.add(z)
>>> conn.root.z = z
>>> transaction.commit() >>> transaction.commit()
storing '\x00\x00\x00\x00\x00\x00\x00\x02' storing '\x00\x00\x00\x00\x00\x00\x00\x02'
storing '\x00\x00\x00\x00\x00\x00\x00\x00'
We still see updates: We still see updates:
......
...@@ -36,6 +36,7 @@ from ZODB.tests import ReadOnlyStorage, RecoveryStorage ...@@ -36,6 +36,7 @@ from ZODB.tests import ReadOnlyStorage, RecoveryStorage
from ZODB.tests.StorageTestBase import MinPO, zodb_pickle from ZODB.tests.StorageTestBase import MinPO, zodb_pickle
from ZODB._compat import dump, dumps, _protocol from ZODB._compat import dump, dumps, _protocol
from .. import multicommitadapter
class FileStorageTests( class FileStorageTests(
StorageTestBase.StorageTestBase, StorageTestBase.StorageTestBase,
...@@ -322,6 +323,12 @@ class FileStorageHexTests(FileStorageTests): ...@@ -322,6 +323,12 @@ class FileStorageHexTests(FileStorageTests):
self._storage = ZODB.tests.hexstorage.HexStorage( self._storage = ZODB.tests.hexstorage.HexStorage(
ZODB.FileStorage.FileStorage('FileStorageTests.fs',**kwargs)) ZODB.FileStorage.FileStorage('FileStorageTests.fs',**kwargs))
class MultiFileStorageTests(FileStorageTests):
def open(self, **kwargs):
self._storage = multicommitadapter.MultiCommitAdapter(
ZODB.FileStorage.FileStorage('FileStorageTests.fs', **kwargs))
class FileStorageTestsWithBlobsEnabled(FileStorageTests): class FileStorageTestsWithBlobsEnabled(FileStorageTests):
...@@ -331,6 +338,7 @@ class FileStorageTestsWithBlobsEnabled(FileStorageTests): ...@@ -331,6 +338,7 @@ class FileStorageTestsWithBlobsEnabled(FileStorageTests):
kwargs['blob_dir'] = 'blobs' kwargs['blob_dir'] = 'blobs'
FileStorageTests.open(self, **kwargs) FileStorageTests.open(self, **kwargs)
class FileStorageHexTestsWithBlobsEnabled(FileStorageTests): class FileStorageHexTestsWithBlobsEnabled(FileStorageTests):
def open(self, **kwargs): def open(self, **kwargs):
...@@ -340,6 +348,16 @@ class FileStorageHexTestsWithBlobsEnabled(FileStorageTests): ...@@ -340,6 +348,16 @@ class FileStorageHexTestsWithBlobsEnabled(FileStorageTests):
FileStorageTests.open(self, **kwargs) FileStorageTests.open(self, **kwargs)
self._storage = ZODB.tests.hexstorage.HexStorage(self._storage) self._storage = ZODB.tests.hexstorage.HexStorage(self._storage)
class MultiFileStorageTestsWithBlobsEnabled(MultiFileStorageTests):
def open(self, **kwargs):
if 'blob_dir' not in kwargs:
kwargs = kwargs.copy()
kwargs['blob_dir'] = 'blobs'
MultiFileStorageTests.open(self, **kwargs)
class FileStorageRecoveryTest( class FileStorageRecoveryTest(
StorageTestBase.StorageTestBase, StorageTestBase.StorageTestBase,
RecoveryStorage.RecoveryStorage, RecoveryStorage.RecoveryStorage,
...@@ -702,6 +720,7 @@ def test_suite(): ...@@ -702,6 +720,7 @@ def test_suite():
FileStorageNoRestoreRecoveryTest, FileStorageNoRestoreRecoveryTest,
FileStorageTestsWithBlobsEnabled, FileStorageHexTestsWithBlobsEnabled, FileStorageTestsWithBlobsEnabled, FileStorageHexTestsWithBlobsEnabled,
AnalyzeDotPyTest, AnalyzeDotPyTest,
MultiFileStorageTests, MultiFileStorageTestsWithBlobsEnabled,
]: ]:
suite.addTest(unittest.makeSuite(klass, "check")) suite.addTest(unittest.makeSuite(klass, "check"))
suite.addTest(doctest.DocTestSuite( suite.addTest(doctest.DocTestSuite(
...@@ -723,6 +742,14 @@ def test_suite(): ...@@ -723,6 +742,14 @@ def test_suite():
test_blob_storage_recovery=True, test_blob_storage_recovery=True,
test_packing=True, test_packing=True,
)) ))
suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
'BlobMultiFileStorage',
lambda name, blob_dir:
multicommitadapter.MultiCommitAdapter(
ZODB.FileStorage.FileStorage('%s.fs' % name, blob_dir=blob_dir)),
test_blob_storage_recovery=True,
test_packing=True,
))
suite.addTest(PackableStorage.IExternalGC_suite( suite.addTest(PackableStorage.IExternalGC_suite(
lambda : ZODB.FileStorage.FileStorage( lambda : ZODB.FileStorage.FileStorage(
'data.fs', blob_dir='blobs', pack_gc=False))) 'data.fs', blob_dir='blobs', pack_gc=False)))
......
...@@ -706,8 +706,8 @@ def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop(): ...@@ -706,8 +706,8 @@ def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
>>> blob = ZODB.blob.Blob(b'blah') >>> blob = ZODB.blob.Blob(b'blah')
>>> conn.add(blob) >>> conn.add(blob)
>>> transaction.commit() >>> transaction.commit()
>>> old_serial = blob._p_serial
>>> blob._p_changed = True >>> blob._p_changed = True
>>> old_serial = blob._p_serial
>>> transaction.commit() >>> transaction.commit()
>>> with blob.open() as fp: fp.read() >>> with blob.open() as fp: fp.read()
'blah' 'blah'
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment