Commit 045ba9bd authored by Jim Fulton

Added an optimization to make sure there's always a free block after an
allocated block, so that the first free block is right after the last
allocated block and, on restart, we always continue writing after the
last written block.
parent 7f59134b
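
In rough terms (a sketch of the idea only -- the record model, `HEADER_SIZE`,
and `scan_first_free` below are illustrative stand-ins, not ZEO's actual data
structures or API): if every write leaves a free block immediately behind the
block it just allocated, a restart only needs a linear scan that remembers the
offset of the last record it saw; the final record is guaranteed to be free
space, and that offset is where writing resumes.

# Illustrative sketch only: a simplified record model, not ZEO's file format.
HEADER_SIZE = 12   # stand-in for ZEC_HEADER_SIZE; the real value is not assumed here

def scan_first_free(records, header_size=HEADER_SIZE):
    """Walk (status, size) pairs in file order and return the offset of the
    free block that follows the last allocated block.

    `records` holds ('a', n) for allocated and ('f', n) for free blocks.
    Because the writer always leaves a free block behind the block it just
    wrote, the last record seen by the scan is free space, and its offset is
    where writing should resume after a restart.
    """
    ofs = header_size
    last = ofs
    status = 'f'
    for status, size in records:
        last = ofs          # remember where the current record starts
        ofs += size
    assert status == 'f', "writer should always leave trailing free space"
    return last

if __name__ == '__main__':
    # [header][a:100][a:40][a:64][f:300] -> resume at offset 12+100+40+64 = 216
    print(scan_first_free([('a', 100), ('a', 40), ('a', 64), ('f', 300)]))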
@@ -159,9 +159,9 @@ class ClientCache(object):
         #   a temp file will be created)
         self.path = path
 
-        # - `maxsize`:  total size of the cache file, in bytes; this is
-        #   ignored if path names an existing file; perhaps we should attempt
-        #   to change the cache size in that case
+        # - `maxsize`:  total size of the cache file
+        #   We set to the minimum size if less than the minimum.
+        size = max(size, ZEC_HEADER_SIZE)
         self.maxsize = size
 
         # The number of records in the cache.
@@ -249,9 +249,10 @@ class ClientCache(object):
         self.current = ZODB.fsIndex.fsIndex()
         self.noncurrent = BTrees.LOBTree.LOBTree()
         l = 0
-        ofs = ZEC_HEADER_SIZE
+        last = ofs = ZEC_HEADER_SIZE
         first_free_offset = 0
         current = self.current
+        status = ' '
         while ofs < fsize:
             seek(ofs)
             status = read(1)
@@ -288,23 +289,24 @@ class ClientCache(object):
                 raise ValueError("unknown status byte value %s in client "
                                  "cache file" % 0, hex(ord(status)))
 
-            if ofs + size >= maxsize:
+            last = ofs
+            ofs += size
+            if ofs >= maxsize:
                 # Oops, the file was bigger before.
-                if ofs+size > maxsize:
+                if ofs > maxsize:
                     # The last record is too big. Replace it with a smaller
                     # free record
-                    size = maxsize-ofs
-                    seek(ofs)
+                    size = maxsize-last
+                    seek(last)
                     if size > 4:
                         write('f'+pack(">I", size))
                     else:
                         write("012345"[size])
                     sync(f)
-                ofs += size
+                    ofs = maxsize
                 break
 
-            ofs += size
-
         if fsize < maxsize:
             assert ofs==fsize
             # Make sure the OS really saves enough bytes for the file.
@@ -319,7 +321,10 @@ class ClientCache(object):
                 write('f' + pack(">I", block_size))
                 seek(block_size-5, 1)
             sync(self.f)
-            first_free_offset = ofs
+
+            # There is always data to read and
+            assert last and status in ' f1234'
+            first_free_offset = last
         else:
             assert ofs==maxsize
             if maxsize < fsize:
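
The truncation branch above writes a free-block header in the same form the
scan loop reads back: a status byte 'f' followed by a 4-byte big-endian length
for blocks of five bytes or more, or a single ASCII digit for tiny blocks,
where the digit is the block size (write("012345"[size])). Judging by the
scan's ofs += size, the length appears to count the whole block, header byte
included. A small round-trip sketch of that encoding, inferred from this diff
rather than copied from ZEO; the helper names are illustrative:

# Round-trip sketch of the free-block header encoding, as inferred from the
# write()/read() calls in this diff; helper names are illustrative.
from struct import pack, unpack

def encode_free(size):
    """Header bytes for a free block of `size` bytes total (header included).

    Blocks of five bytes or more get 'f' plus a 4-byte big-endian length;
    blocks of four bytes or fewer are a single ASCII digit equal to their
    size, mirroring write("012345"[size]) above.
    """
    if size > 4:
        return b'f' + pack(">I", size)
    return b"012345"[size:size + 1]

def decode_free(header):
    """Given the first bytes of a free block, return its total size."""
    status = header[:1]
    if status == b'f':
        return unpack(">I", header[1:5])[0]
    if status in b'01234':
        return b"012345".index(status)
    raise ValueError("not a free-block header: %r" % (status,))

if __name__ == '__main__':
    for n in (0, 1, 4, 5, 300):
        assert decode_free(encode_free(n)) == n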
@@ -551,14 +556,19 @@ class ClientCache(object):
         # 2nd-level ZEO cache got a much higher hit rate if "very large"
         # objects simply weren't cached. For now, we ignore the request
         # only if the entire cache file is too small to hold the object.
-        if size > min(max_block_size, self.maxsize - ZEC_HEADER_SIZE):
+        if size >= min(max_block_size, self.maxsize - ZEC_HEADER_SIZE):
             return
 
         self._n_adds += 1
         self._n_added_bytes += size
         self._len += 1
 
-        nfreebytes = self._makeroom(size)
+
+        # In the next line, we ask for an extra to make sure we always
+        # have a free block after the new allocated block. This free
+        # block acts as a ring pointer, so that on restart, we start
+        # where we left off.
+        nfreebytes = self._makeroom(size+1)
         assert size <= nfreebytes, (size, nfreebytes)
         excess = nfreebytes - size
         # If there's any excess (which is likely), we need to record a
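
The comment added above explains the size+1: by asking _makeroom for one byte
more than the new record needs, store() guarantees excess = nfreebytes - size
>= 1, so there is always room to write a free record, however small, right
behind the freshly allocated one; that trailing free record is the marker the
restart scan stops at. A toy model of that bookkeeping (the list-of-blocks
representation and the store_record helper below are hypothetical, not
ClientCache's real methods):

# Toy model of the "ask for one extra byte" trick. The names and the
# list-of-blocks representation are hypothetical; the real logic lives in
# ClientCache._makeroom and ClientCache.store.

def store_record(blocks, record_size):
    """Append an allocated block and keep a free block right after it.

    `blocks` is a list of ('a', n) / ('f', n) pairs in file order, always
    ending in a free block.  We "make room" for record_size + 1 bytes (this
    stand-in simply requires the trailing free block to be big enough, where
    the real _makeroom would evict old records), then split that space into
    the new allocated block plus a free block of whatever is left, which is
    at least one byte by construction.
    """
    assert blocks and blocks[-1][0] == 'f', "file must end in free space"
    free = blocks.pop()[1]
    assert free >= record_size + 1, "not enough room; _makeroom would evict"
    excess = free - record_size        # >= 1 thanks to the extra byte
    blocks.append(('a', record_size))
    blocks.append(('f', excess))       # the marker the restart scan finds
    return blocks

if __name__ == '__main__':
    blocks = [('a', 64), ('f', 1000)]
    store_record(blocks, 100)
    store_record(blocks, 200)
    assert blocks[-1][0] == 'f' and blocks[-1][1] >= 1
    print(blocks)    # [('a', 64), ('a', 100), ('a', 200), ('f', 700)]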
@@ -229,7 +229,7 @@ class CacheTests(ZODB.tests.util.TestCase):
         data = 'x'
         recsize = ZEO.cache.allocated_record_overhead+len(data)
-        for extra in (0, 2, recsize-2):
+        for extra in (2, recsize-2):
             cache = ZEO.cache.ClientCache(
                 'cache', size=ZEO.cache.ZEC_HEADER_SIZE+100*recsize+extra)
@@ -251,8 +251,13 @@ class CacheTests(ZODB.tests.util.TestCase):
                              set(range(small)))
             for i in range(100, 110):
                 cache.store(p64(i), n1, None, data)
-            self.assertEquals(len(cache), small)
-            expected_oids = set(range(10, 50)+range(100, 110))
+
+            # We use small-1 below because an extra object gets
+            # evicted because of the optimization to assure that we
+            # always get a free block after a new allocated block.
+            expected_len = small - 1
+            self.assertEquals(len(cache), expected_len)
+            expected_oids = set(range(11, 50)+range(100, 110))
             self.assertEquals(
                 set(u64(oid) for (oid, tid) in cache.contents()),
                 expected_oids)
@@ -261,7 +266,7 @@ class CacheTests(ZODB.tests.util.TestCase):
             cache.close()
             cache = ZEO.cache.ClientCache(
                 'cache', size=ZEO.cache.ZEC_HEADER_SIZE+small*recsize+extra)
-            self.assertEquals(len(cache), small)
+            self.assertEquals(len(cache), expected_len)
             self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
                               expected_oids)
@@ -270,16 +275,20 @@ class CacheTests(ZODB.tests.util.TestCase):
             large = 150
             cache = ZEO.cache.ClientCache(
                 'cache', size=ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
-            self.assertEquals(len(cache), small)
+            self.assertEquals(len(cache), expected_len)
             self.assertEquals(os.path.getsize(
                 'cache'), ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
             self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
                               expected_oids)
             for i in range(200, 305):
                 cache.store(p64(i), n1, None, data)
-            self.assertEquals(len(cache), large)
-            expected_oids = set(range(10, 50)+range(105, 110)+range(200, 305))
+
+            # We use large-2 for the same reason we used small-1 above.
+            expected_len = large-2
+            self.assertEquals(len(cache), expected_len)
+            expected_oids = set(range(11, 50)+range(106, 110)+range(200, 305))
             self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
                               expected_oids)
@@ -287,7 +296,7 @@ class CacheTests(ZODB.tests.util.TestCase):
             cache.close()
             cache = ZEO.cache.ClientCache(
                 'cache', size=ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
-            self.assertEquals(len(cache), large)
+            self.assertEquals(len(cache), expected_len)
            self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
                              expected_oids)