Commit ba955c39 authored by Jim Fulton's avatar Jim Fulton

Updated the cache to not store version information.

Also updated the format to allow cache files larger than 4G in size.
parent ddc41882
......@@ -572,11 +572,11 @@ class ClientStorage(object):
# it should set self._server. If it goes through full cache
# verification, then endVerify() should set self._server.
# if not self._cache:
# log2("No verification necessary -- empty cache")
# self._server = server
# self._ready.set()
# return "full verification"
if not self._cache:
log2("No verification necessary -- empty cache")
self._server = server
self._ready.set()
return "full verification"
last_inval_tid = self._cache.getLastTid()
......@@ -608,7 +608,7 @@ class ClientStorage(object):
# TODO: should batch these operations for efficiency; would need
# to acquire lock ...
for oid, tid, version in self._cache.contents():
for oid, tid in self._cache.contents():
server.verify(oid, tid)
self._pending_server = server
server.endZeoVerify()
......@@ -719,7 +719,7 @@ class ClientStorage(object):
"""
self._lock.acquire() # for atomic processing of invalidations
try:
t = self._cache.load(oid, '')
t = self._cache.load(oid)
if t:
return t[:2] # XXX strip version
finally:
......@@ -742,7 +742,7 @@ class ClientStorage(object):
self._lock.acquire() # for atomic processing of invalidations
try:
if self._load_status:
self._cache.store(oid, '', tid, None, data)
self._cache.store(oid, tid, None, data)
self._load_oid = None
finally:
self._lock.release()
......@@ -781,7 +781,7 @@ class ClientStorage(object):
return data, start, end
self._lock.acquire()
try:
self._cache.store(oid, "", start, end, data)
self._cache.store(oid, start, end, data)
finally:
self._lock.release()
......@@ -1112,13 +1112,13 @@ class ClientStorage(object):
return
for oid, data in self._tbuf:
self._cache.invalidate(oid, '', tid)
self._cache.invalidate(oid, tid)
# If data is None, we just invalidate.
if data is not None:
s = self._seriald[oid]
if s != ResolvedSerial:
assert s == tid, (s, tid)
self._cache.store(oid, '', s, None, data)
self._cache.store(oid, s, None, data)
if self.fshelper is not None:
......@@ -1195,7 +1195,7 @@ class ClientStorage(object):
for oid in oids:
if oid == self._load_oid:
self._load_status = 0
self._cache.invalidate(oid, '', tid)
self._cache.invalidate(oid, tid)
if self._db is not None:
self._db.invalidate(tid, oids)
......
......@@ -33,6 +33,8 @@ from ZODB.utils import z64, u64
logger = logging.getLogger("ZEO.cache")
max32 = (1 << 32) - 1
##
# A disk-based cache for ZEO clients.
# <p>
......@@ -98,10 +100,6 @@ class ClientCache(object):
# Used to find matching key for load of non-current data.
self.noncurrent = {}
# Map oid to (version, tid) pair. If there is no entry, the object
# is not modified in a version.
self.version = {}
# A FileCache instance does all the low-level work of storing
# and retrieving objects to/from the cache file.
self.fc = FileCache(size, self.path, self)
......@@ -122,9 +120,7 @@ class ClientCache(object):
if o is None:
return
oid = o.key[0]
if o.version:
self.version[oid] = o.version, o.start_tid
elif o.end_tid is None:
if o.end_tid is None:
self.current[oid] = o.start_tid
else:
assert o.start_tid < o.end_tid
......@@ -162,36 +158,25 @@ class ClientCache(object):
return self.fc.tid
##
# Return the current data record for oid and version.
# Return the current data record for oid.
# @param oid object id
# @param version a version string
# @return (data record, serial number, tid), or None if the object is not
# @return (data record, serial number), or None if the object is not
# in the cache
# @defreturn 3-tuple: (string, string, string)
# @defreturn 2-tuple: (string, string)
def load(self, oid, version=""):
def load(self, oid):
tid = None
if version:
p = self.version.get(oid)
if p is None:
self._trace(0x20, oid, version)
return None
elif p[0] == version:
tid = p[1]
# Otherwise, we know the cache has version data but not
# for the requested version. Thus, we know it is safe
# to return the non-version data from the cache.
if tid is None:
tid = self.current.get(oid)
if tid is None:
self._trace(0x20, oid, version)
self._trace(0x20, oid)
return None
o = self.fc.access((oid, tid))
if o is None:
self._trace(0x20, oid, version)
self._trace(0x20, oid)
return None
self._trace(0x22, oid, version, o.start_tid, o.end_tid, len(o.data))
return o.data, tid, o.version
self._trace(0x22, oid, o.start_tid, o.end_tid, len(o.data))
return o.data, tid
##
# Return a non-current revision of oid that was current before tid.
......@@ -225,53 +210,22 @@ class ClientCache(object):
self._trace(0x26, oid, "", tid)
return o.data, o.start_tid, o.end_tid
##
# Return the version an object is modified in, or None for an
# object that is not modified in a version.
# @param oid object id
# @return name of version in which the object is modified
# @defreturn string or None
def modifiedInVersion(self, oid):
p = self.version.get(oid)
if p is None:
return None
version, tid = p
return version
##
# Store a new data record in the cache.
# @param oid object id
# @param version name of version that oid was modified in. The cache
# only stores current version data, so end_tid should
# be None if version is not the empty string.
# @param start_tid the id of the transaction that wrote this revision
# @param end_tid the id of the transaction that created the next
# revision of oid. If end_tid is None, the data is
# current.
# @param data the actual data
# @exception ValueError tried to store non-current version data
def store(self, oid, version, start_tid, end_tid, data):
def store(self, oid, start_tid, end_tid, data):
# It's hard for the client to avoid storing the same object
# more than once. One case is when the client requests
# version data that doesn't exist. It checks the cache for
# the requested version, doesn't find it, then asks the server
# for that data. The server returns the non-version data,
# which may already be in the cache.
# more than once.
if (oid, start_tid) in self.fc:
return
o = Object((oid, start_tid), version, data, start_tid, end_tid)
if version:
if end_tid is not None:
raise ValueError("cache only stores current version data")
if oid in self.version:
if self.version[oid] != (version, start_tid):
raise ValueError("data already exists for version %r"
% self.version[oid][0])
self.version[oid] = version, start_tid
self._trace(0x50, oid, version, start_tid, dlen=len(data))
else:
o = Object((oid, start_tid), data, start_tid, end_tid)
if end_tid is None:
_cur_start = self.current.get(oid)
if _cur_start:
......@@ -281,22 +235,21 @@ class ClientCache(object):
else:
return
self.current[oid] = start_tid
self._trace(0x52, oid, version, start_tid, dlen=len(data))
self._trace(0x52, oid, start_tid, dlen=len(data))
else:
L = self.noncurrent.setdefault(oid, [])
p = start_tid, end_tid
if p in L:
return # duplicate store
bisect.insort_left(L, p)
self._trace(0x54, oid, version, start_tid, end_tid,
dlen=len(data))
self._trace(0x54, oid, start_tid, end_tid, dlen=len(data))
self.fc.add(o)
##
# Remove all knowledge of noncurrent revisions of oid, both in
# self.noncurrent and in our FileCache. `version` and `tid` are used
# self.noncurrent and in our FileCache. `tid` is used
# only for trace records.
def _remove_noncurrent_revisions(self, oid, version, tid):
def _remove_noncurrent_revisions(self, oid, tid):
noncurrent_list = self.noncurrent.get(oid)
if noncurrent_list:
# Note: must iterate over a copy of noncurrent_list. The
......@@ -304,7 +257,7 @@ class ClientCache(object):
# mutates the list.
for old_tid, dummy in noncurrent_list[:]:
# 0x1E = invalidate (hit, discarding current or non-current)
self._trace(0x1E, oid, version, tid)
self._trace(0x1E, oid, tid)
self.fc.remove((oid, old_tid))
# fc.remove() calling back to _evicted() should have removed
# the list from noncurrent when the last non-current revision
......@@ -312,53 +265,38 @@ class ClientCache(object):
assert oid not in self.noncurrent
##
# If `tid` is None, or we have data for `oid` in a (non-empty) version,
# forget all knowledge of `oid`. (`tid` can be None only for
# invalidations generated by startup cache verification.) If `tid`
# isn't None, we don't have version data for `oid`, and we had current
# data for `oid`, stop believing we have current data, and mark the
# data we had as being valid only up to `tid`. In all other cases, do
# If `tid` is None, forget all knowledge of `oid`. (`tid` can be
# None only for invalidations generated by startup cache
# verification.) If `tid` isn't None, and we had current data for
# `oid`, stop believing we have current data, and mark the data we
# had as being valid only up to `tid`. In all other cases, do
# nothing.
# @param oid object id
# @param version name of version to invalidate.
# @param tid the id of the transaction that wrote a new revision of oid,
# or None to forget all cached info about oid (version, current
# or None to forget all cached info about oid (current
# revision, and non-current revisions)
def invalidate(self, oid, version, tid):
def invalidate(self, oid, tid):
if tid > self.fc.tid and tid is not None:
self.fc.settid(tid)
remove_all_knowledge_of_oid = tid is None
if oid in self.version:
# Forget we know about the version data.
# 0x1A = invalidate (hit, version)
self._trace(0x1A, oid, version, tid)
dllversion, dlltid = self.version[oid]
assert not version or version == dllversion, (version, dllversion)
self.fc.remove((oid, dlltid))
assert oid not in self.version # .remove() got rid of it
# And continue: we must also remove any non-version data from
# the cache. Or, at least, I have such a poor understanding of
# versions that anything less drastic would probably be wrong.
remove_all_knowledge_of_oid = True
if remove_all_knowledge_of_oid:
self._remove_noncurrent_revisions(oid, version, tid)
self._remove_noncurrent_revisions(oid, tid)
# Only current, non-version data remains to be handled.
# Only current data remains to be handled.
cur_tid = self.current.get(oid)
if not cur_tid:
# 0x10 == invalidate (miss)
self._trace(0x10, oid, version, tid)
self._trace(0x10, oid, tid)
return
# We had current data for oid, but no longer.
if remove_all_knowledge_of_oid:
# 0x1E = invalidate (hit, discarding current or non-current)
self._trace(0x1E, oid, version, tid)
self._trace(0x1E, oid, tid)
self.fc.remove((oid, cur_tid))
assert cur_tid not in self.current # .remove() got rid of it
return
......@@ -366,7 +304,7 @@ class ClientCache(object):
# Add the data we have to the list of non-current data for oid.
assert tid is not None and cur_tid <= tid
# 0x1C = invalidate (hit, saving non-current)
self._trace(0x1C, oid, version, tid)
self._trace(0x1C, oid, tid)
del self.current[oid] # because we no longer have current data
# Update the end_tid half of oid's validity range on disk.
......@@ -389,32 +327,28 @@ class ClientCache(object):
#
# Or maybe better to just return len(self.cache)? Needs clearer use case.
def __len__(self):
n = len(self.current) + len(self.version)
n = len(self.current)
if self.noncurrent:
n += sum(map(len, self.noncurrent))
return n
##
# Generates (oid, serial, version) triples for all objects in the
# Generates (oid, serial) pairs for all objects in the
# cache. This generator is used by cache verification.
def contents(self):
# May need to materialize list instead of iterating;
# depends on whether the caller may change the cache.
for o in self.fc:
oid, tid = o.key
if oid in self.version:
obj = self.fc.access(o.key)
yield oid, tid, obj.version
else:
yield oid, tid, ""
yield oid, tid
def dump(self):
from ZODB.utils import oid_repr
print "cache size", len(self)
L = list(self.contents())
L.sort()
for oid, tid, version in L:
print oid_repr(oid), oid_repr(tid), repr(version)
for oid, tid in L:
print oid_repr(oid), oid_repr(tid)
print "dll contents"
L = list(self.fc)
L.sort(lambda x, y: cmp(x.key, y.key))
......@@ -427,9 +361,6 @@ class ClientCache(object):
# Called by the FileCache to signal that Object o has been evicted.
oid, tid = o.key
if o.end_tid is None:
if o.version:
del self.version[oid]
else:
del self.current[oid]
else:
# Although we use bisect to keep the list sorted,
......@@ -471,15 +402,13 @@ class ClientCache(object):
self._trace = notrace
def _trace(self,
code, oid="", version="", tid=z64, end_tid=z64, dlen=0,
code, oid="", tid=z64, end_tid=z64, dlen=0,
# The next two are just speed hacks.
time_time=time.time, struct_pack=struct.pack):
# The code argument is two hex digits; bits 0 and 7 must be zero.
# The first hex digit shows the operation, the second the outcome.
# This method has been carefully tuned to be as fast as possible.
# Note: when tracing is disabled, this method is hidden by a dummy.
if version:
code |= 0x80
encoded = (dlen + 255) & 0x7fffff00 | code
if tid is None:
tid = z64
......@@ -499,11 +428,11 @@ class ClientCache(object):
##
# An Object stores the cached data for a single object.
# <p>
# The cached data includes the actual object data, the key, and three
# data fields that describe the validity period and version of the
# The cached data includes the actual object data, the key, and two
# data fields that describe the validity period of the
# object. The key contains the oid and a redundant start_tid. The
# actual size of an object is variable, depending on the size of the
# data and whether it is in a version.
# data.
# <p>
# The serialized format does not include the key, because it is stored
# in the header used by the cache file's storage format.
......@@ -524,14 +453,11 @@ class Object(object):
# greater than start_tid
"end_tid",
# string, name of version
"version",
# string, the actual data record for the object
"data",
# total size of serialized object; this includes the
# data, version, and all overhead (header) bytes.
# data and all overhead (header) bytes.
"size",
)
......@@ -540,41 +466,36 @@ class Object(object):
# offset # bytes value
# ------ ------- -----
# 0 8 end_tid; string
# 8 2 len(version); 2-byte signed int
# 10 4 len(data); 4-byte signed int
# 14 len(version) version; string
# 14+len(version) len(data) the object pickle; string
# 14+len(version)+
# len(data) 8 oid; string
# 8 4 len(data); 4-byte signed int
# 12 len(data) the object pickle; string
# 12+len(data) 8 oid; string
# The serialization format uses an end tid of "\0"*8 (z64), the least
# 8-byte string, to represent None. It isn't possible for an end_tid
# to be 0, because it must always be strictly greater than the start_tid.
fmt = ">8shi" # end_tid, len(self.version), len(self.data)
fmt = ">8si" # end_tid, len(self.data)
FIXED_HEADER_SIZE = struct.calcsize(fmt)
assert FIXED_HEADER_SIZE == 14
assert FIXED_HEADER_SIZE == 12
TOTAL_FIXED_SIZE = FIXED_HEADER_SIZE + 8 # +8 for the oid at the end
def __init__(self, key, version, data, start_tid, end_tid):
def __init__(self, key, data, start_tid, end_tid):
self.key = key
self.version = version
self.data = data
self.start_tid = start_tid
self.end_tid = end_tid
# The size of the serialized object on disk, including the
# 14-byte header, the lengths of data and version, and a
# 12-byte header, the length of data, and a
# copy of the 8-byte oid.
if data is not None:
self.size = self.TOTAL_FIXED_SIZE + len(data) + len(version)
self.size = self.TOTAL_FIXED_SIZE + len(data)
##
# Return the fixed-sized serialization header as a string: pack end_tid,
# and the lengths of the .version and .data members.
# and the length of the .data members.
def get_header(self):
return struct.pack(self.fmt,
self.end_tid or z64,
len(self.version),
len(self.data))
##
......@@ -582,7 +503,6 @@ class Object(object):
# position.
def serialize(self, f):
f.writelines([self.get_header(),
self.version,
self.data,
self.key[0]])
......@@ -609,14 +529,10 @@ class Object(object):
return None
oid, start_tid = key
end_tid, vlen, dlen = struct.unpack(cls.fmt, s)
end_tid, dlen = struct.unpack(cls.fmt, s)
if end_tid == z64:
end_tid = None
version = f.read(vlen)
if vlen != len(version):
raise ValueError("corrupted record, version")
if skip_data:
data = None
f.seek(dlen, 1)
......@@ -629,7 +545,7 @@ class Object(object):
if s != oid:
raise ValueError("corrupted record, oid")
return cls((oid, start_tid), version, data, start_tid, end_tid)
return cls((oid, start_tid), data, start_tid, end_tid)
fromFile = classmethod(fromFile)
......@@ -660,11 +576,11 @@ class Entry(object):
# On-disk cache structure.
#
# The file begins with a 12-byte header. The first four bytes are the
# file's magic number - ZEC3 - indicating zeo cache version 3. The
# file's magic number - ZEC4 - indicating zeo cache version 4. The
# next eight bytes are the last transaction id.
magic = "ZEC3"
ZEC3_HEADER_SIZE = 12
magic = "ZEC4"
ZEC4_HEADER_SIZE = 12
# After the header, the file contains a contiguous sequence of blocks. All
# blocks begin with a one-byte status indicator:
......@@ -674,11 +590,11 @@ ZEC3_HEADER_SIZE = 12
# format total block size.
#
# 'f'
# Free. The block is free; the next 4 bytes are >I format total
# Free. The block is free; the next 8 bytes are >Q format total
# block size.
#
# '1', '2', '3', '4'
# The block is free, and consists of 1, 2, 3 or 4 bytes total.
# '1', '2', '3', '4', '5', '6', '7', '8'
# The block is free, and consists of 1-8 bytes total.
#
# "Total" includes the status byte, and size bytes. There are no
# empty (size 0) blocks.
......@@ -702,7 +618,7 @@ OBJECT_HEADER_SIZE = 1 + 4 + 16
# blocks needed to make enough room for the new object are evicted,
# starting at currentofs. Exception: if currentofs is close enough
# to the end of the file that the new object can't fit in one
# contiguous chunk, currentofs is reset to ZEC3_HEADER_SIZE first.
# contiguous chunk, currentofs is reset to ZEC4_HEADER_SIZE first.
# Do all possible to ensure that the bytes we wrote to file f are really on
# disk.
......@@ -757,7 +673,7 @@ class FileCache(object):
# Always the offset into the file of the start of a block.
# New and relocated objects are always written starting at
# currentofs.
self.currentofs = ZEC3_HEADER_SIZE
self.currentofs = ZEC4_HEADER_SIZE
# self.f is the open file object.
# When we're not reusing an existing file, self.f is left None
......@@ -785,10 +701,10 @@ class FileCache(object):
self.f.write(magic)
self.f.write(z64)
# and one free block.
self.f.write('f' + struct.pack(">I", self.maxsize -
ZEC3_HEADER_SIZE))
self.f.write('f' + struct.pack(">Q", self.maxsize -
ZEC4_HEADER_SIZE))
self.sync()
self.filemap[ZEC3_HEADER_SIZE] = (self.maxsize - ZEC3_HEADER_SIZE,
self.filemap[ZEC4_HEADER_SIZE] = (self.maxsize - ZEC4_HEADER_SIZE,
None)
# Statistics: _n_adds, _n_added_bytes,
......@@ -822,7 +738,7 @@ class FileCache(object):
# Remember the location of the largest free block. That seems a
# decent place to start currentofs.
max_free_size = max_free_offset = 0
ofs = ZEC3_HEADER_SIZE
ofs = ZEC4_HEADER_SIZE
while ofs < fsize:
self.f.seek(ofs)
ent = None
......@@ -834,8 +750,8 @@ class FileCache(object):
self.key2entry[key] = ent = Entry(key, ofs)
install(self.f, ent)
elif status == 'f':
size, = struct.unpack(">I", self.f.read(4))
elif status in '1234':
size, = struct.unpack(">Q", self.f.read(8))
elif status in '12345678':
size = int(status)
else:
raise ValueError("unknown status byte value %s in client "
......@@ -898,7 +814,7 @@ class FileCache(object):
##
# Evict objects as necessary to free up at least nbytes bytes,
# starting at currentofs. If currentofs is closer than nbytes to
# the end of the file, currentofs is reset to ZEC3_HEADER_SIZE first.
# the end of the file, currentofs is reset to ZEC4_HEADER_SIZE first.
# The number of bytes actually freed may be (and probably will be)
# greater than nbytes, and is _makeroom's return value. The file is not
# altered by _makeroom. filemap and key2entry are updated to reflect the
......@@ -907,15 +823,22 @@ class FileCache(object):
# freed (starting at currentofs when _makeroom returns, and
# spanning the number of bytes retured by _makeroom).
def _makeroom(self, nbytes):
assert 0 < nbytes <= self.maxsize - ZEC3_HEADER_SIZE
assert 0 < nbytes <= self.maxsize - ZEC4_HEADER_SIZE
assert nbytes <= max32
if self.currentofs + nbytes > self.maxsize:
self.currentofs = ZEC3_HEADER_SIZE
self.currentofs = ZEC4_HEADER_SIZE
ofs = self.currentofs
while nbytes > 0:
size, e = self.filemap.pop(ofs)
if e is not None:
del self.key2entry[e.key]
self._evictobj(e, size)
self._n_evicts += 1
self._n_evicted_bytes += size
# Load the object header into memory so we know how to
# update the parent's in-memory data structures.
self.f.seek(e.offset + OBJECT_HEADER_SIZE)
o = Object.fromFile(self.f, e.key, skip_data=True)
self.parent._evicted(o)
ofs += size
nbytes -= size
return ofs - self.currentofs
......@@ -935,10 +858,10 @@ class FileCache(object):
# expensive -- it's all a contiguous write.
if excess == 0:
extra = ''
elif excess < 5:
extra = "01234"[excess]
elif excess < 9:
extra = "012345678"[excess]
else:
extra = 'f' + struct.pack(">I", excess)
extra = 'f' + struct.pack(">Q", excess)
self.f.seek(self.currentofs)
self.f.writelines(('a',
......@@ -967,7 +890,7 @@ class FileCache(object):
# 2nd-level ZEO cache got a much higher hit rate if "very large"
# objects simply weren't cached. For now, we ignore the request
# only if the entire cache file is too small to hold the object.
if size > self.maxsize - ZEC3_HEADER_SIZE:
if size > self.maxsize - ZEC4_HEADER_SIZE:
return
assert object.key not in self.key2entry
......@@ -980,21 +903,6 @@ class FileCache(object):
available = self._makeroom(size)
self._writeobj(object, available)
##
# Evict the object represented by Entry `e` from the cache, freeing
# `size` bytes in the file for reuse. `size` is used only for summary
# statistics. This does not alter the file, or self.filemap or
# self.key2entry (those are the caller's responsibilities). It does
# invoke _evicted(Object) on our parent.
def _evictobj(self, e, size):
self._n_evicts += 1
self._n_evicted_bytes += size
# Load the object header into memory so we know how to
# update the parent's in-memory data structures.
self.f.seek(e.offset + OBJECT_HEADER_SIZE)
o = Object.fromFile(self.f, e.key, skip_data=True)
self.parent._evicted(o)
##
# Return Object for key, or None if not in cache.
def access(self, key):
......@@ -1029,12 +937,9 @@ class FileCache(object):
self.filemap[offset] = size, None
self.f.seek(offset + OBJECT_HEADER_SIZE)
o = Object.fromFile(self.f, key, skip_data=True)
assert size >= 5 # only free blocks are tiny
# Because `size` >= 5, we can change an allocated block to a free
# block just by overwriting the 'a' status byte with 'f' -- the
# size field stays the same.
assert size >= 9 # only free blocks are tiny
self.f.seek(offset)
self.f.write('f')
self.f.write('f' + struct.pack(">Q", size))
self.f.flush()
self.parent._evicted(o)
......@@ -1070,13 +975,15 @@ class FileCache(object):
# This debug method marches over the entire cache file, verifying that
# the current contents match the info in self.filemap and self.key2entry.
def _verify_filemap(self, display=False):
a = ZEC3_HEADER_SIZE
a = ZEC4_HEADER_SIZE
f = self.f
while a < self.maxsize:
f.seek(a)
status = f.read(1)
if status in 'af':
if status == 'a':
size, = struct.unpack(">I", f.read(4))
elif status == 'f':
size, = struct.unpack(">Q", f.read(8))
else:
size = int(status)
if display:
......
......@@ -35,14 +35,14 @@ Objects are represented in the cache using a special `Object` object. Let's
start with an object of the size 100 bytes:
>>> from ZEO.cache import Object
>>> obj1_1 = Object(key=(oid(1), tid(1)), version='', data='#'*100,
>>> obj1_1 = Object(key=(oid(1), tid(1)), data='#'*100,
... start_tid=tid(1), end_tid=None)
Notice that the actual object size is a bit larger because of the headers that
are written for each object:
>>> obj1_1.size
122
120
Initially the object is not in the cache:
......@@ -113,120 +113,120 @@ format and look at the cache after each step.
The current state is a cache with two records: the one object which we removed
from the cache and another free record the reaches to the end of the file.
The first record has a size of 143 bytes:
The first record has a size of 141 bytes:
143 = 1 ('f') + 4 (size) + 8 (OID) + 8 (TID) + 8 (end_tid) + 2 (version length) +
141 = 1 ('f') + 4 (size) + 8 (OID) + 8 (TID) + 8 (end_tid) +
4 (data length) + 100 (old data) + 8 (OID)
The second record has a size of 45 bytes:
The second record has a size of 47 bytes:
45 = 1 ('f') + 4 (size) + 40 (free space)
47 = 1 ('f') + 8 (size) + 38 (free space)
Note that the last byte is an 'x' because the initialisation of the cache file
forced the absolute size of the file by seeking to byte 200 and writing an 'x'.
>>> from ZEO.tests.test_cache import hexprint
>>> hexprint(fc.f)
00000000 5a 45 43 33 00 00 00 00 00 00 00 00 66 00 00 00 |ZEC3........f...|
00000010 8f 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 02 00 00 00 00 00 64 23 |..............d#|
00000000 5a 45 43 34 00 00 00 00 00 00 00 00 66 00 00 00 |ZEC4........f...|
00000010 00 00 00 00 8d 00 00 00 01 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 02 00 00 00 64 23 23 23 |............d###|
00000030 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000040 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000050 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000060 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000070 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000080 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000090 23 23 23 00 00 00 00 00 00 00 01 66 00 00 00 2d |###........f...-|
000000a0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
00000090 23 00 00 00 00 00 00 00 01 66 00 00 00 00 00 00 |#........f......|
000000a0 00 2f 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |./..............|
000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000000c0 00 00 00 00 00 00 00 78 |.......x |
Case 1: Allocating a new block that fits after the last used one
>>> obj2_1 = Object(key=(oid(2), tid(1)), version='', data='**',
>>> obj2_1 = Object(key=(oid(2), tid(1)), data='******',
... start_tid=tid(1), end_tid=None)
>>> fc.add(obj2_1)
The new block fits exactly in the remaining 45 bytes (43 bytes header + 2
bytes payload) so the beginning of the data is the same except for the last 45
The new block fits exactly in the remaining 47 bytes (41 bytes header + 6
bytes payload) so the beginning of the data is the same except for the last 47
bytes:
>>> hexprint(fc.f) # doctest: +REPORT_NDIFF
00000000 5a 45 43 33 00 00 00 00 00 00 00 00 66 00 00 00 |ZEC3........f...|
00000010 8f 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 02 00 00 00 00 00 64 23 |..............d#|
>>> hexprint(fc.f)
00000000 5a 45 43 34 00 00 00 00 00 00 00 00 66 00 00 00 |ZEC4........f...|
00000010 00 00 00 00 8d 00 00 00 01 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 02 00 00 00 64 23 23 23 |............d###|
00000030 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000040 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000050 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000060 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000070 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000080 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 |################|
00000090 23 23 23 00 00 00 00 00 00 00 01 61 00 00 00 2d |###........a...-|
000000a0 00 00 00 00 00 00 00 02 00 00 00 00 00 00 00 01 |................|
000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 02 2a 2a |..............**|
00000090 23 00 00 00 00 00 00 00 01 61 00 00 00 2f 00 00 |#........a.../..|
000000a0 00 00 00 00 00 02 00 00 00 00 00 00 00 01 00 00 |................|
000000b0 00 00 00 00 00 00 00 00 00 06 2a 2a 2a 2a 2a 2a |..........******|
000000c0 00 00 00 00 00 00 00 02 |........ |
Case 2: Allocating a block that wraps around and frees *exactly* one block
>>> obj3_1 = Object(key=(oid(3), tid(1)), version='', data='@'*100,
>>> obj3_1 = Object(key=(oid(3), tid(1)), data='@'*100,
... start_tid=tid(1), end_tid=None)
>>> fc.add(obj3_1)
>>> hexprint(fc.f) # doctest: +REPORT_NDIFF
00000000 5a 45 43 33 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC3........a...|
00000010 8f 00 00 00 00 00 00 00 03 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 00 00 64 40 |..............d@|
>>> hexprint(fc.f)
00000000 5a 45 43 34 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC4........a...|
00000010 8d 00 00 00 00 00 00 00 03 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 64 40 40 40 |............d@@@|
00000030 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000040 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000050 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000060 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000070 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000080 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000090 40 40 40 00 00 00 00 00 00 00 03 61 00 00 00 2d |@@@........a...-|
000000a0 00 00 00 00 00 00 00 02 00 00 00 00 00 00 00 01 |................|
000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 02 2a 2a |..............**|
00000090 40 00 00 00 00 00 00 00 03 61 00 00 00 2f 00 00 |@........a.../..|
000000a0 00 00 00 00 00 02 00 00 00 00 00 00 00 01 00 00 |................|
000000b0 00 00 00 00 00 00 00 00 00 06 2a 2a 2a 2a 2a 2a |..........******|
000000c0 00 00 00 00 00 00 00 02 |........ |
Case 3: Allocating a block that requires 1 byte less than the next block
>>> obj4_1 = Object(key=(oid(4), tid(1)), version='', data='~',
>>> obj4_1 = Object(key=(oid(4), tid(1)), data='~~~~~',
... start_tid=tid(1), end_tid=None)
>>> fc.add(obj4_1)
>>> hexprint(fc.f) # doctest: +REPORT_NDIFF
00000000 5a 45 43 33 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC3........a...|
00000010 8f 00 00 00 00 00 00 00 03 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 00 00 64 40 |..............d@|
>>> hexprint(fc.f)
00000000 5a 45 43 34 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC4........a...|
00000010 8d 00 00 00 00 00 00 00 03 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 64 40 40 40 |............d@@@|
00000030 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000040 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000050 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000060 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000070 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000080 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 40 |@@@@@@@@@@@@@@@@|
00000090 40 40 40 00 00 00 00 00 00 00 03 61 00 00 00 2c |@@@........a...,|
000000a0 00 00 00 00 00 00 00 04 00 00 00 00 00 00 00 01 |................|
000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 01 7e 00 |..............~.|
00000090 40 00 00 00 00 00 00 00 03 61 00 00 00 2e 00 00 |@........a......|
000000a0 00 00 00 00 00 04 00 00 00 00 00 00 00 01 00 00 |................|
000000b0 00 00 00 00 00 00 00 00 00 05 7e 7e 7e 7e 7e 00 |..........~~~~~.|
000000c0 00 00 00 00 00 00 04 31 |.......1 |
Case 4: Allocating a block that requires 2 bytes less than the next block
>>> obj4_1 = Object(key=(oid(5), tid(1)), version='', data='^'*98,
>>> obj4_1 = Object(key=(oid(5), tid(1)), data='^'*98,
... start_tid=tid(1), end_tid=None)
>>> fc.add(obj4_1)
>>> hexprint(fc.f) # doctest: +REPORT_NDIFF
00000000 5a 45 43 33 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC3........a...|
00000010 8d 00 00 00 00 00 00 00 05 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 00 00 62 5e |..............b^|
>>> hexprint(fc.f)
00000000 5a 45 43 34 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC4........a...|
00000010 8b 00 00 00 00 00 00 00 05 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 62 5e 5e 5e |............b^^^|
00000030 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e |^^^^^^^^^^^^^^^^|
00000040 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e |^^^^^^^^^^^^^^^^|
00000050 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e |^^^^^^^^^^^^^^^^|
00000060 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e |^^^^^^^^^^^^^^^^|
00000070 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e |^^^^^^^^^^^^^^^^|
00000080 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e |^^^^^^^^^^^^^^^^|
00000090 5e 00 00 00 00 00 00 00 05 32 03 61 00 00 00 2c |^........2.a...,|
000000a0 00 00 00 00 00 00 00 04 00 00 00 00 00 00 00 01 |................|
000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 01 7e 00 |..............~.|
00000080 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 5e 00 |^^^^^^^^^^^^^^^.|
00000090 00 00 00 00 00 00 05 32 03 61 00 00 00 2e 00 00 |.......2.a......|
000000a0 00 00 00 00 00 04 00 00 00 00 00 00 00 01 00 00 |................|
000000b0 00 00 00 00 00 00 00 00 00 05 7e 7e 7e 7e 7e 00 |..........~~~~~.|
000000c0 00 00 00 00 00 00 04 31 |.......1 |
Case 5: Allocating a block that requires 3 bytes less than the next block
......@@ -236,74 +236,73 @@ situation to work on. We create an entry with the size of 95 byte which will
be inserted at the beginning of the file, leaving a 3 byte free space after
it.
>>> obj4_1 = Object(key=(oid(6), tid(1)), version='', data='+'*95,
>>> obj4_1 = Object(key=(oid(6), tid(1)), data='+'*95,
... start_tid=tid(1), end_tid=None)
>>> fc.add(obj4_1)
>>> hexprint(fc.f) # doctest: +REPORT_NDIFF
00000000 5a 45 43 33 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC3........a...|
00000010 8a 00 00 00 00 00 00 00 06 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 00 00 5f 2b |.............._+|
>>> hexprint(fc.f)
00000000 5a 45 43 34 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC4........a...|
00000010 88 00 00 00 00 00 00 00 06 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 5f 2b 2b 2b |............_+++|
00000030 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b |++++++++++++++++|
00000040 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b |++++++++++++++++|
00000050 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b |++++++++++++++++|
00000060 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b |++++++++++++++++|
00000070 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b |++++++++++++++++|
00000080 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 00 00 |++++++++++++++..|
00000090 00 00 00 00 00 06 33 00 05 32 03 61 00 00 00 2c |......3..2.a...,|
000000a0 00 00 00 00 00 00 00 04 00 00 00 00 00 00 00 01 |................|
000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 01 7e 00 |..............~.|
00000080 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 2b 00 00 00 00 |++++++++++++....|
00000090 00 00 00 06 33 00 05 32 03 61 00 00 00 2e 00 00 |....3..2.a......|
000000a0 00 00 00 00 00 04 00 00 00 00 00 00 00 01 00 00 |................|
000000b0 00 00 00 00 00 00 00 00 00 05 7e 7e 7e 7e 7e 00 |..........~~~~~.|
000000c0 00 00 00 00 00 00 04 31 |.......1 |
Case 6: Allocating a block that requires 4 bytes less than the next block
Case 6: Allocating a block that requires 6 bytes less than the next block
As in our previous case, we'll write a block that only fits in the first
block's place to avoid dealing with the cluttering at the end of the cache
file.
>>> obj4_1 = Object(key=(oid(7), tid(1)), version='', data='-'*91,
>>> obj4_1 = Object(key=(oid(7), tid(1)), data='-'*89,
... start_tid=tid(1), end_tid=None)
>>> fc.add(obj4_1)
>>> hexprint(fc.f) # doctest: +REPORT_NDIFF
00000000 5a 45 43 33 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC3........a...|
00000010 86 00 00 00 00 00 00 00 07 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 00 00 5b 2d |..............[-|
>>> hexprint(fc.f)
00000000 5a 45 43 34 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC4........a...|
00000010 82 00 00 00 00 00 00 00 07 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 59 2d 2d 2d |............Y---|
00000030 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d |----------------|
00000040 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d |----------------|
00000050 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d |----------------|
00000060 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d |----------------|
00000070 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d |----------------|
00000080 2d 2d 2d 2d 2d 2d 2d 2d 2d 2d 00 00 00 00 00 00 |----------......|
00000090 00 07 34 00 00 06 33 00 05 32 03 61 00 00 00 2c |..4...3..2.a...,|
000000a0 00 00 00 00 00 00 00 04 00 00 00 00 00 00 00 01 |................|
000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 01 7e 00 |..............~.|
00000080 2d 2d 2d 2d 2d 2d 00 00 00 00 00 00 00 07 36 00 |------........6.|
00000090 00 00 00 06 33 00 05 32 03 61 00 00 00 2e 00 00 |....3..2.a......|
000000a0 00 00 00 00 00 04 00 00 00 00 00 00 00 01 00 00 |................|
000000b0 00 00 00 00 00 00 00 00 00 05 7e 7e 7e 7e 7e 00 |..........~~~~~.|
000000c0 00 00 00 00 00 00 04 31 |.......1 |
Case 7: Allocating a block that requires >= 5 bytes less than the next block
Again, we replace the block at the beginning of the cache.
>>> obj4_1 = Object(key=(oid(8), tid(1)), version='', data='='*86,
>>> obj4_1 = Object(key=(oid(8), tid(1)), data='='*80,
... start_tid=tid(1), end_tid=None)
>>> fc.add(obj4_1)
>>> hexprint(fc.f) # doctest: +REPORT_NDIFF
00000000 5a 45 43 33 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC3........a...|
00000010 81 00 00 00 00 00 00 00 08 00 00 00 00 00 00 00 |................|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 00 00 56 3d |..............V=|
>>> hexprint(fc.f)
00000000 5a 45 43 34 00 00 00 00 00 00 00 00 61 00 00 00 |ZEC4........a...|
00000010 79 00 00 00 00 00 00 00 08 00 00 00 00 00 00 00 |y...............|
00000020 01 00 00 00 00 00 00 00 00 00 00 00 50 3d 3d 3d |............P===|
00000030 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d |================|
00000040 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d |================|
00000050 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d |================|
00000060 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d |================|
00000070 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d |================|
00000080 3d 3d 3d 3d 3d 00 00 00 00 00 00 00 08 66 00 00 |=====........f..|
00000090 00 05 34 00 00 06 33 00 05 32 03 61 00 00 00 2c |..4...3..2.a...,|
000000a0 00 00 00 00 00 00 00 04 00 00 00 00 00 00 00 01 |................|
000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 01 7e 00 |..............~.|
00000070 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 3d 00 00 00 |=============...|
00000080 00 00 00 00 08 66 00 00 00 00 00 00 00 09 36 00 |.....f........6.|
00000090 00 00 00 06 33 00 05 32 03 61 00 00 00 2e 00 00 |....3..2.a......|
000000a0 00 00 00 00 00 04 00 00 00 00 00 00 00 01 00 00 |................|
000000b0 00 00 00 00 00 00 00 00 00 05 7e 7e 7e 7e 7e 00 |..........~~~~~.|
000000c0 00 00 00 00 00 00 04 31 |.......1 |
Statistic functions
===================
......@@ -311,7 +310,7 @@ The `getStats` method talks about the added objects, added bytes, evicted
objects, evicted bytes and accesses to the cache:
>>> fc.getStats()
(8, 917, 5, 601, 2)
(8, 901, 5, 593, 2)
We can reset the stats by calling the `clearStats` method:
......
......@@ -85,86 +85,67 @@ class CacheTests(unittest.TestCase):
self.assertEqual(self.cache.getLastTid(), None)
self.cache.setLastTid(n2)
self.assertEqual(self.cache.getLastTid(), n2)
self.cache.invalidate(None, "", n1)
self.cache.invalidate(None, n1)
self.assertEqual(self.cache.getLastTid(), n2)
self.cache.invalidate(None, "", n3)
self.cache.invalidate(None, n3)
self.assertEqual(self.cache.getLastTid(), n3)
self.assertRaises(ValueError, self.cache.setLastTid, n2)
def testLoad(self):
data1 = "data for n1"
self.assertEqual(self.cache.load(n1, ""), None)
self.cache.store(n1, "", n3, None, data1)
self.assertEqual(self.cache.load(n1, ""), (data1, n3, ""))
# The cache doesn't know whether version exists, because it
# only has non-version data.
self.assertEqual(self.cache.modifiedInVersion(n1), None)
self.assertEqual(self.cache.load(n1), None)
self.cache.store(n1, n3, None, data1)
self.assertEqual(self.cache.load(n1), (data1, n3))
def testInvalidate(self):
data1 = "data for n1"
self.cache.store(n1, "", n3, None, data1)
self.cache.invalidate(n1, "", n4)
self.cache.invalidate(n2, "", n2)
self.assertEqual(self.cache.load(n1, ""), None)
self.assertEqual(self.cache.loadBefore(n1, n4),
(data1, n3, n4))
def testVersion(self):
data1 = "data for n1"
data1v = "data for n1 in version"
self.cache.store(n1, "version", n3, None, data1v)
self.assertEqual(self.cache.load(n1, ""), None)
self.assertEqual(self.cache.load(n1, "version"),
(data1v, n3, "version"))
self.assertEqual(self.cache.load(n1, "random"), None)
self.assertEqual(self.cache.modifiedInVersion(n1), "version")
self.cache.invalidate(n1, "version", n4)
self.assertEqual(self.cache.load(n1, "version"), None)
self.cache.store(n1, n3, None, data1)
self.cache.invalidate(n1, n4)
self.cache.invalidate(n2, n2)
self.assertEqual(self.cache.load(n1), None)
self.assertEqual(self.cache.loadBefore(n1, n4), (data1, n3, n4))
def testNonCurrent(self):
data1 = "data for n1"
data2 = "data for n2"
self.cache.store(n1, "", n4, None, data1)
self.cache.store(n1, "", n2, n3, data2)
self.cache.store(n1, n4, None, data1)
self.cache.store(n1, n2, n3, data2)
# can't say anything about state before n2
self.assertEqual(self.cache.loadBefore(n1, n2), None)
# n3 is the upper bound of non-current record n2
self.assertEqual(self.cache.loadBefore(n1, n3), (data2, n2, n3))
# no data for between n2 and n3
self.assertEqual(self.cache.loadBefore(n1, n4), None)
self.cache.invalidate(n1, "", n5)
self.cache.invalidate(n1, n5)
self.assertEqual(self.cache.loadBefore(n1, n5), (data1, n4, n5))
self.assertEqual(self.cache.loadBefore(n2, n4), None)
def testException(self):
self.cache.store(n1, n2, None, "data")
self.assertRaises(ValueError,
self.cache.store,
n1, "version", n2, n3, "data")
self.cache.store(n1, "", n2, None, "data")
self.assertRaises(ValueError,
self.cache.store,
n1, "", n3, None, "data")
n1, n3, None, "data")
def testEviction(self):
# Manually override the current maxsize
maxsize = self.cache.size = self.cache.fc.maxsize = 3395 # 1245
self.cache.fc = ZEO.cache.FileCache(3395, None, self.cache)
maxsize = self.cache.size = self.cache.fc.maxsize = 3295 # 1245
self.cache.fc = ZEO.cache.FileCache(3295, None, self.cache)
# Trivial test of eviction code. Doesn't test non-current
# eviction.
data = ["z" * i for i in range(100)]
for i in range(50):
n = p64(i)
self.cache.store(n, "", n, None, data[i])
self.cache.store(n, n, None, data[i])
self.assertEquals(len(self.cache), i + 1)
# The cache now uses 1225 bytes. The next insert
# should delete some objects.
n = p64(50)
self.cache.store(n, "", n, None, data[51])
self.cache.store(n, n, None, data[51])
self.assert_(len(self.cache) < 51)
# TODO: Need to make sure eviction of non-current data
# and of version data are handled correctly.
# are handled correctly.
def _run_fuzzing(self):
current_tid = 1
......@@ -183,13 +164,13 @@ class CacheTests(unittest.TestCase):
current_oid += 1
key = (oid(current_oid), tid(current_tid))
object = ZEO.cache.Object(
key=key, version='', data='*'*random.randint(1,60*1024),
key=key, data='*'*random.randint(1,60*1024),
start_tid=tid(current_tid), end_tid=None)
assert key not in objects
log(key, len(object.data), current_tid)
cache.add(object)
if (object.size + ZEO.cache.OBJECT_HEADER_SIZE >
cache.maxsize - ZEO.cache.ZEC3_HEADER_SIZE):
cache.maxsize - ZEO.cache.ZEC4_HEADER_SIZE):
assert key not in cache
else:
objects[key] = object
......@@ -237,10 +218,9 @@ class CacheTests(unittest.TestCase):
raise
def testSerialization(self):
self.cache.store(n1, "", n2, None, "data for n1")
self.cache.store(n2, "version", n2, None, "version data for n2")
self.cache.store(n3, "", n3, n4, "non-current data for n3")
self.cache.store(n3, "", n4, n5, "more non-current data for n3")
self.cache.store(n1, n2, None, "data for n1")
self.cache.store(n3, n3, n4, "non-current data for n3")
self.cache.store(n3, n4, n5, "more non-current data for n3")
path = tempfile.mktemp()
# Copy data from self.cache into path, reaching into the cache
......@@ -258,7 +238,6 @@ class CacheTests(unittest.TestCase):
eq = self.assertEqual
eq(copy.getLastTid(), self.cache.getLastTid())
eq(len(copy), len(self.cache))
eq(copy.version, self.cache.version)
eq(copy.current, self.cache.current)
eq(copy.noncurrent, self.cache.noncurrent)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment