Commit 1831e638 authored by Jim Fulton

Bug Fixed

- When using multi-databases, cache-management operations on a
  connection, cacheMinimize and cacheGC, weren't applied to
  subconnections.
parent 0998ba83
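
As context for the diffs below, a minimal sketch of the behavior this fix establishes. It mirrors the doctest added later in this commit; the in-memory databases created with ZODB.DB(None), the database name '2', and the cache_size value are illustrative choices, not part of the change itself:

    import ZODB
    import transaction

    # Two databases forming one multi-database; '2' gets a small object cache.
    db1 = ZODB.DB(None)
    db2 = ZODB.DB(None, databases=db1.databases, database_name='2',
                  cache_size=10)

    conn1 = db1.open()
    conn2 = conn1.get_connection('2')   # subconnection reached through conn1

    # Put more objects into database '2' than its cache is meant to hold.
    for i in range(100):
        conn2.root()[i] = conn2.root().__class__()
    transaction.commit()

    # Before this fix, cache management on conn1 ignored conn2; with the fix,
    # cacheMinimize ghostifies unmodified objects in every connection of the
    # group, so the subconnection's cache ends up empty as well.
    conn1.cacheMinimize()
    assert conn2._cache.cache_non_ghost_count == 0

    conn1.close()
    db1.close()
    db2.close()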
@@ -18,6 +18,10 @@ Bugs Fixed
 - Logrotation/reopening via a SIGUSR2 signal wasn't implemented.
   (https://bugs.launchpad.net/zodb/+bug/143600)
 
+- When using multi-databases, cache-management operations on a
+  connection, cacheMinimize and cacheGC, weren't applied to
+  subconnections.
+
 3.9.6 (2010-09-21)
 ==================
@@ -262,13 +262,17 @@ class Connection(ExportImport, object):
         return obj
 
     def cacheMinimize(self):
-        """Deactivate all unmodified objects in the cache."""
-        self._cache.minimize()
+        """Deactivate all unmodified objects in the cache.
+        """
+        for connection in self.connections.itervalues():
+            connection._cache.minimize()
 
     # TODO: we should test what happens when cacheGC is called mid-transaction.
     def cacheGC(self):
-        """Reduce cache size to target size."""
-        self._cache.incrgc()
+        """Reduce cache size to target size.
+        """
+        for connection in self.connections.itervalues():
+            connection._cache.incrgc()
 
     __onCloseCallbacks = None
     def onCloseCallback(self, f):
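
The new loops iterate over the `connections` attribute, which maps database names to the Connection objects of one multi-database group, including the connection the method is called on, so the primary connection's own cache is still minimized exactly as before. A small sketch of that structure, again using in-memory databases and the illustrative name '2':

    import ZODB

    db1 = ZODB.DB(None)                 # default database_name is 'unnamed'
    db2 = ZODB.DB(None, databases=db1.databases, database_name='2')

    conn1 = db1.open()
    conn2 = conn1.get_connection('2')

    # conn1's `connections` mapping includes both the primary connection and
    # the subconnection; this is what cacheMinimize and cacheGC now iterate
    # over with itervalues().
    assert conn1.connections['unnamed'] is conn1
    assert conn1.connections['2'] is conn2

    conn1.close()
    db1.close()
    db2.close()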
@@ -556,6 +556,75 @@ def connection_root_convenience():
     <root: rather_long_name rather_long_name2 rather_long_name4 ...>
     """
 
+def cache_management_of_subconnections():
+    """Make sure that cache management works for subconnections.
+
+    When we use multi-databases, we open a connection in one database and
+    access connections to other databases through it.  This test verifies
+    that cache management is applied to all of the connections.
+
+    Set up a multi-database:
+
+    >>> db1 = ZODB.DB('1')
+    >>> db2 = ZODB.DB('2', databases=db1.databases, database_name='2',
+    ...                cache_size=10)
+
+    >>> conn1 = db1.open()
+    >>> conn2 = conn1.get_connection('2')
+
+    Populate it with some data, more than will fit in the cache:
+
+    >>> for i in range(100):
+    ...     conn2.root()[i] = conn2.root().__class__()
+
+    Upon commit, the cache is reduced to the cache size:
+
+    >>> transaction.commit()
+    >>> conn2._cache.cache_non_ghost_count
+    10
+
+    Fill it back up:

+    >>> for i in range(100):
+    ...     _ = str(conn2.root()[i])
+    >>> conn2._cache.cache_non_ghost_count
+    101
+
+    Doing cache GC on the primary also does it on the secondary:
+
+    >>> conn1.cacheGC()
+    >>> conn2._cache.cache_non_ghost_count
+    10
+
+    Ditto for cache minimize:
+
+    >>> conn1.cacheMinimize()
+    >>> conn2._cache.cache_non_ghost_count
+    0
+
+    Fill it back up:
+
+    >>> for i in range(100):
+    ...     _ = str(conn2.root()[i])
+    >>> conn2._cache.cache_non_ghost_count
+    101
+
+    GC is done on reopen:
+
+    >>> conn1.close()
+    >>> db1.open() is conn1
+    True
+    >>> conn2 is conn1.get_connection('2')
+    True
+
+    >>> conn2._cache.cache_non_ghost_count
+    10
+
+    >>> db1.close()
+    >>> db2.close()
+    """
+
 class C_invalidations_of_new_objects_work_after_savepoint(Persistent):
     def __init__(self):
         self.settings = 1