Commit eee5f974 authored by Tim Peters

Merge rev 28685 from 3.3 branch.

Collector 1536:  ClientStorage.__init__ ignores cache_size.

Also split test checkRollover into two tests,
checkDisconnectedCacheWorks (persistent ZEO cache large enough
to satisfy all requests) and checkDisconnectedCacheFails (cache
too small to satisfy all requests while disconnected).
parent 3c9f4c38
...@@ -58,6 +58,12 @@ Release date: xx-xxx-2004 ...@@ -58,6 +58,12 @@ Release date: xx-xxx-2004
ZEO client cache ZEO client cache
---------------- ----------------
- Collector 1536: The ``cache-size`` configuration option for ZEO clients
was being ignored. Worse, the client cache size was only one megabyte,
much smaller than the advertised default of 20MB. Note that the default
is carried over from a time when gigabyte disks were expensive and rare;
20MB is also too small on most modern machines.
- Fixed a bug wherein an object removed from the client cache didn't - Fixed a bug wherein an object removed from the client cache didn't
properly mark the file slice it occupied as being available for reuse. properly mark the file slice it occupied as being available for reuse.
......
...@@ -309,7 +309,7 @@ class ClientStorage(object): ...@@ -309,7 +309,7 @@ class ClientStorage(object):
cache_path = os.path.join(dir, "%s-%s.zec" % (client, storage)) cache_path = os.path.join(dir, "%s-%s.zec" % (client, storage))
else: else:
cache_path = None cache_path = None
self._cache = self.ClientCacheClass(cache_path) self._cache = self.ClientCacheClass(cache_path, size=cache_size)
# XXX When should it be opened? # XXX When should it be opened?
self._cache.open() self._cache.open()
......
...@@ -72,7 +72,11 @@ class ClientCache: ...@@ -72,7 +72,11 @@ class ClientCache:
# @param path path of persistent snapshot of cache state (a file path) # @param path path of persistent snapshot of cache state (a file path)
# @param size size of cache file, in bytes # @param size size of cache file, in bytes
def __init__(self, path=None, size=None, trace=False): # The default size of 200MB makes a lot more sense than the traditional
# default of 20MB. The default here is misleading, though, since
# ClientStorage is the only user of ClientCache, and it always passes an
# explicit size of its own choosing.
def __init__(self, path=None, size=200*1024**2, trace=False):
self.path = path self.path = path
self.size = size self.size = size
...@@ -105,7 +109,7 @@ class ClientCache: ...@@ -105,7 +109,7 @@ class ClientCache:
# A FileCache instance does all the low-level work of storing # A FileCache instance does all the low-level work of storing
# and retrieving objects to/from the cache file. # and retrieving objects to/from the cache file.
self.fc = FileCache(size or 10**6, self.path, self) self.fc = FileCache(size, self.path, self)
def open(self): def open(self):
self.fc.scan(self.install) self.fc.scan(self.install)
...@@ -693,7 +697,6 @@ class FileCache(object): ...@@ -693,7 +697,6 @@ class FileCache(object):
# file that exists, that pre-existing file is used (persistent # file that exists, that pre-existing file is used (persistent
# cache). In all other cases a new file is created: a temp # cache). In all other cases a new file is created: a temp
# file if fpath is None, else with path fpath. # file if fpath is None, else with path fpath.
assert maxsize >= 1000 # although 1000 is still absurdly low
self.maxsize = maxsize self.maxsize = maxsize
self.parent = parent self.parent = parent
...@@ -879,7 +882,7 @@ class FileCache(object): ...@@ -879,7 +882,7 @@ class FileCache(object):
# freed (starting at currentofs when _makeroom returns, and # freed (starting at currentofs when _makeroom returns, and
# spanning the number of bytes returned by _makeroom). # spanning the number of bytes returned by _makeroom).
def _makeroom(self, nbytes): def _makeroom(self, nbytes):
assert 0 < nbytes <= self.maxsize assert 0 < nbytes <= self.maxsize - ZEC3_HEADER_SIZE
if self.currentofs + nbytes > self.maxsize: if self.currentofs + nbytes > self.maxsize:
self.currentofs = ZEC3_HEADER_SIZE self.currentofs = ZEC3_HEADER_SIZE
ofs = self.currentofs ofs = self.currentofs
......
...@@ -418,24 +418,51 @@ class ConnectionTests(CommonSetupTearDown): ...@@ -418,24 +418,51 @@ class ConnectionTests(CommonSetupTearDown):
self.assertEqual(revid1, revid2) self.assertEqual(revid1, revid2)
self._storage.close() self._storage.close()
def checkRollover(self): def checkDisconnectedCacheWorks(self):
# Check that the cache works when the files are swapped. # Check that the cache works when the client is disconnected.
self._storage = self.openClientStorage('test')
oid1 = self._storage.new_oid()
obj1 = MinPO("1" * 500)
self._dostore(oid1, data=obj1)
oid2 = self._storage.new_oid()
obj2 = MinPO("2" * 500)
self._dostore(oid2, data=obj2)
expected1 = self._storage.load(oid1, '')
expected2 = self._storage.load(oid2, '')
# In this case, only one object fits in a cache file. When the # Shut it all down, and try loading from the persistent cache file
# cache files swap, the first object is effectively uncached. # without a server present.
self._storage.close()
self.shutdownServer()
self._storage = self.openClientStorage('test', wait=False)
self.assertEqual(expected1, self._storage.load(oid1, ''))
self.assertEqual(expected2, self._storage.load(oid2, ''))
self._storage.close()
self._storage = self.openClientStorage('test', 1000) def checkDisconnectedCacheFails(self):
# Like checkDisconnectedCacheWorks above, except the cache
# file is so small that only one object can be remembered.
self._storage = self.openClientStorage('test', cache_size=900)
oid1 = self._storage.new_oid() oid1 = self._storage.new_oid()
obj1 = MinPO("1" * 500) obj1 = MinPO("1" * 500)
self._dostore(oid1, data=obj1) self._dostore(oid1, data=obj1)
oid2 = self._storage.new_oid() oid2 = self._storage.new_oid()
obj2 = MinPO("2" * 500) obj2 = MinPO("2" * 500)
# The cache file is so small that adding oid2 will evict oid1.
self._dostore(oid2, data=obj2) self._dostore(oid2, data=obj2)
expected2 = self._storage.load(oid2, '')
# Shut it all down, and try loading from the persistent cache file
# without a server present.
self._storage.close() self._storage.close()
self.shutdownServer() self.shutdownServer()
self._storage = self.openClientStorage('test', 1000, wait=0) self._storage = self.openClientStorage('test', cache_size=900,
self._storage.load(oid1, '') wait=False)
self._storage.load(oid2, '') # oid2 should still be in cache.
self.assertEqual(expected2, self._storage.load(oid2, ''))
# But oid1 should have been purged, so that trying to load it will
# try to fetch it from the (non-existent) ZEO server.
self.assertRaises(ClientDisconnected, self._storage.load, oid1, '')
self._storage.close() self._storage.close()
def checkReconnection(self): def checkReconnection(self):
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment