Commit 151e859a authored by Jim Fulton

FileStorages previously saved indexes after a certain
number of writes.  This was done during the last phase of two-phase
commit, which made this critical phase more subject to errors than
it should have been.  Also, for large databases, saves were done so
infrequently as to be useless.  The feature was removed to reduce
the chance for errors during the last phase of two-phase commit.
parent 5241e53f
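
For context, the removed mechanism amounts to the pattern sketched below (a
minimal reconstruction from the diff that follows, not the actual FileStorage
code): a counter of committed records is advanced at the end of each
transaction, and once it crosses a threshold the in-memory index is pickled
to disk, with the threshold growing along with the index so that saves stay
roughly proportional to database size.

    import pickle

    class IndexSavingStorage:
        # Minimal sketch of the removed periodic-index-save pattern.
        # Names mirror the diff below; this is illustrative, not ZODB API.

        _records_before_save = 10000   # save threshold (class default)
        _records_written = 0           # records committed since last save

        def __init__(self, index_path):
            self._index_path = index_path
            self._index = {}    # oid -> file position of current record
            self._tindex = {}   # per-transaction entries, merged in _finish

        def _save_index(self):
            # Pickle the in-memory index to a side file so that the next
            # open can skip scanning the entire data file.
            with open(self._index_path, 'wb') as f:
                pickle.dump(self._index, f)

        def _finish(self, tid):
            # Last phase of two-phase commit: merge this transaction's
            # index entries, then (the removed part) maybe save the index.
            self._index.update(self._tindex)
            self._records_written += len(self._tindex) + 1  # +1: txn record
            if self._records_written >= self._records_before_save:
                self._save_index()   # disk I/O inside tpc_finish: the hazard
                self._records_written = 0
                # Grow the threshold with the index so that saves do not
                # become relatively more frequent as the database grows.
                self._records_before_save = max(self._records_before_save,
                                                len(self._index))
            self._tindex.clear()

The hazard the message describes is visible in the sketch: _save_index()
does disk I/O inside _finish(), i.e. during the last phase of two-phase
commit, where a failure leaves the storage in an ambiguous state.  And
because the threshold only ever grows, a large database could go through
very many writes between saves.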
@@ -114,6 +114,18 @@ Bugs Fixed
 Bugs Fixed:
 
+  - FileStorages previously saved indexes after a certain
+    number of writes.  This was done during the last phase of two-phase
+    commit, which made this critical phase more subject to errors than
+    it should have been.  Also, for large databases, saves were done so
+    infrequently as to be useless.  The feature was removed to reduce
+    the chance for errors during the last phase of two-phase commit.
+  - File storages previously kept an internal object id to
+    transaction id mapping as an optimization.  This mapping caused
+    excessive memory usage and failures during the last phase of
+    two-phase commit.  This optimization has been removed.
+  - Refactored handling of invalidations on ZEO clients to fix
+    a possible ordering problem for invalidation messages.
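
The second entry above refers to a cache mapping each object id to the id of
the transaction that last wrote it.  A hypothetical sketch of why such a
cache is memory-hungry (names invented for illustration; this is not the
removed ZODB code): the mapping holds one entry per object ever stored, so
it scales with the size of the whole database rather than the working set.

    class OidToTidCache:
        # Hypothetical sketch of an oid -> tid mapping kept as an
        # optimization; nothing here is ZODB API.

        def __init__(self):
            self._oid2tid = {}  # one entry per stored object -- unbounded

        def record_commit(self, tid, oids):
            # Every committed object adds or overwrites an entry; nothing
            # is ever evicted, so memory grows with the whole database.
            for oid in oids:
                self._oid2tid[oid] = tid

        def last_tid(self, oid):
            # Answers "which transaction last modified oid?" without a
            # file read -- the lookup the mapping existed to speed up.
            return self._oid2tid.get(oid)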
@@ -95,8 +95,6 @@ class FileStorage(BaseStorage.BaseStorage,
     # Set True while a pack is in progress; undo is blocked for the duration.
     _pack_is_in_progress = False
 
-    _records_before_save = 10000
-
     def __init__(self, file_name, create=False, read_only=False, stop=None,
                  quota=None):
@@ -171,8 +169,6 @@ class FileStorage(BaseStorage.BaseStorage,
                 )
             self._save_index()
 
-        self._records_before_save = max(self._records_before_save,
-                                        len(self._index))
         self._ltid = tid
 
         # self._pos should always point just past the last
@@ -660,9 +656,6 @@ class FileStorage(BaseStorage.BaseStorage,
         finally:
             self._lock_release()
 
-    # Keep track of the number of records that we've written
-    _records_written = 0
-
     def _finish(self, tid, u, d, e):
         nextpos=self._nextpos
         if nextpos:
@@ -679,15 +672,6 @@ class FileStorage(BaseStorage.BaseStorage,
             self._index.update(self._tindex)
 
-            # Update the number of records that we've written
-            # +1 for the transaction record
-            self._records_written += len(self._tindex) + 1
-            if self._records_written >= self._records_before_save:
-                self._save_index()
-                self._records_written = 0
-                self._records_before_save = max(self._records_before_save,
-                                                len(self._index))
-
         self._ltid = tid
 
     def _abort(self):
@@ -179,26 +179,6 @@ class FileStorageTests(
         self.open()
         self.assertEqual(self._storage._saved, 1)
 
-        # This would make the unit tests too slow
-        # check_save_after_load_that_worked_hard(self)
-
-    def check_periodic_save_index(self):
-        # Check the basic algorithm
-        oldsaved = self._storage._saved
-        self._storage._records_before_save = 10
-        for i in range(4):
-            self._dostore()
-        self.assertEqual(self._storage._saved, oldsaved)
-        self._dostore()
-        self.assertEqual(self._storage._saved, oldsaved+1)
-
-        # Now make sure the parameter changes as we get bigger
-        for i in range(20):
-            self._dostore()
-        self.failUnless(self._storage._records_before_save > 20)
-
     def checkStoreBumpsOid(self):
         # If .store() is handed an oid bigger than the storage knows
         # about already, it's crucial that the storage bump its notion
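
The numbers in the removed check_periodic_save_index test line up with the
sketch shown after the commit message: each _dostore() commits one data
record, and _finish() adds one more for the transaction record, so the
counter advances by two per store.  A quick check against that sketch
(IndexSavingStorage is the illustrative class from above, not ZODB code):

    class CountingStorage(IndexSavingStorage):
        saves = 0
        def _save_index(self):
            self.saves += 1   # count saves instead of touching the disk

    st = CountingStorage('unused.index')
    st._records_before_save = 10

    for i in range(4):        # 4 stores -> 8 records: below the threshold
        st._tindex = {i: 0}
        st._finish(tid=i)
    assert st.saves == 0

    st._tindex = {4: 0}
    st._finish(tid=4)         # 10th record reaches the threshold
    assert st.saves == 1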