Commit 28e4533a authored by Christian Theune's avatar Christian Theune

- Merged to ZODB trunk again

parent 6e9247ba
This source diff could not be displayed because it is too large. You can view the blob instead.
...@@ -13,5 +13,5 @@ ...@@ -13,5 +13,5 @@
setup.py - setup.py -
src - src -
zpkg.conf - zpkg.conf -
zpkgsetup - buildsupport -
</collection> </collection>
ZODB 3.6 ZODB 3.7
======== ========
Introduction Introduction
...@@ -28,15 +28,14 @@ ZoneAlarm. Many particularly slow tests are skipped unless you pass ...@@ -28,15 +28,14 @@ ZoneAlarm. Many particularly slow tests are skipped unless you pass
Compatibility Compatibility
------------- -------------
ZODB 3.6 requires Python 2.3.4 or later. For best results, we recommend ZODB 3.7 requires Python 2.4.2 or later.
Python 2.3.5. Python 2.4.1 can also be used.
The Zope 2.8 release, and Zope3 releases, should be compatible with this The Zope 2.8 release, and Zope3 releases, should be compatible with this
version of ZODB. Note that Zope 2.7 and higher includes ZEO, so this package version of ZODB. Note that Zope 2.7 and higher includes ZEO, so this package
should only be needed to run a ZEO server. should only be needed to run a ZEO server.
ZEO servers and clients are wholly compatible among 3.3, 3.3.1, 3.4, 3.5, and ZEO servers and clients are wholly compatible among 3.3, 3.4, 3.5, 3.6 and
3.6; a ZEO client from any of those versions can talk with a ZEO server from 3.7; a ZEO client from any of those versions can talk with a ZEO server from
any. any.
Trying to mix ZEO clients and servers from 3.3 or later from ZODB releases Trying to mix ZEO clients and servers from 3.3 or later from ZODB releases
...@@ -93,12 +92,14 @@ script:: ...@@ -93,12 +92,14 @@ script::
This should now make all of ZODB accessible to your Python programs. This should now make all of ZODB accessible to your Python programs.
Testing Testing for Developers
------- ----------------------
When working from a ZODB checkout, do an in-place build instead::
% python setup.py build_ext -i
ZODB comes with a large test suite that can be run from the source followed by::
directory before ZODB is installed. The simplest way to run the tests
is::
% python test.py -v % python test.py -v
......
\documentclass{howto} \documentclass{howto}
\title{ZODB/ZEO Programming Guide} \title{ZODB/ZEO Programming Guide}
\release{3.6.0a3} \release{3.7.0a0}
\date{\today} \date{\today}
\author{A.M.\ Kuchling} \author{A.M.\ Kuchling}
......
...@@ -34,7 +34,7 @@ import zpkgsetup.publication ...@@ -34,7 +34,7 @@ import zpkgsetup.publication
import zpkgsetup.setup import zpkgsetup.setup
# Note that release.py must be able to recognize the VERSION line. # Note that release.py must be able to recognize the VERSION line.
VERSION = "3.6.0a3" VERSION = "3.7.0a0"
context = zpkgsetup.setup.SetupContext( context = zpkgsetup.setup.SetupContext(
"ZODB", VERSION, __file__) "ZODB", VERSION, __file__)
......
This diff is collapsed.
This diff is collapsed.
...@@ -45,7 +45,7 @@ typedef unsigned char char6[6]; ...@@ -45,7 +45,7 @@ typedef unsigned char char6[6];
#define DECREF_KEY(KEY) #define DECREF_KEY(KEY)
#define INCREF_KEY(k) #define INCREF_KEY(k)
#define COPY_KEY(KEY, E) (*(KEY)=*(E), (KEY)[1]=(E)[1]) #define COPY_KEY(KEY, E) (*(KEY)=*(E), (KEY)[1]=(E)[1])
#define COPY_KEY_TO_OBJECT(O, K) O=PyString_FromStringAndSize(K,2) #define COPY_KEY_TO_OBJECT(O, K) O=PyString_FromStringAndSize((const char*)K,2)
#define COPY_KEY_FROM_ARG(TARGET, ARG, STATUS) \ #define COPY_KEY_FROM_ARG(TARGET, ARG, STATUS) \
if (KEY_CHECK(ARG)) memcpy(TARGET, PyString_AS_STRING(ARG), 2); else { \ if (KEY_CHECK(ARG)) memcpy(TARGET, PyString_AS_STRING(ARG), 2); else { \
PyErr_SetString(PyExc_TypeError, "expected two-character string key"); \ PyErr_SetString(PyExc_TypeError, "expected two-character string key"); \
...@@ -59,7 +59,7 @@ typedef unsigned char char6[6]; ...@@ -59,7 +59,7 @@ typedef unsigned char char6[6];
#define DECREF_VALUE(k) #define DECREF_VALUE(k)
#define INCREF_VALUE(k) #define INCREF_VALUE(k)
#define COPY_VALUE(V, E) (memcpy(V, E, 6)) #define COPY_VALUE(V, E) (memcpy(V, E, 6))
#define COPY_VALUE_TO_OBJECT(O, K) O=PyString_FromStringAndSize(K,6) #define COPY_VALUE_TO_OBJECT(O, K) O=PyString_FromStringAndSize((const char*)K,6)
#define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \ #define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \
if ((PyString_Check(ARG) && PyString_GET_SIZE(ARG)==6)) \ if ((PyString_Check(ARG) && PyString_GET_SIZE(ARG)==6)) \
memcpy(TARGET, PyString_AS_STRING(ARG), 6); else { \ memcpy(TARGET, PyString_AS_STRING(ARG), 6); else { \
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
from ZODB.POSException import StorageError from ZODB.POSException import StorageError
class ClientStorageError(StorageError): class ClientStorageError(StorageError):
"""An error occured in the ZEO Client Storage.""" """An error occurred in the ZEO Client Storage."""
class UnrecognizedResult(ClientStorageError): class UnrecognizedResult(ClientStorageError):
"""A server call returned an unrecognized result.""" """A server call returned an unrecognized result."""
......
...@@ -64,7 +64,7 @@ def log(message, level=logging.INFO, label=None, exc_info=False): ...@@ -64,7 +64,7 @@ def log(message, level=logging.INFO, label=None, exc_info=False):
logger.log(level, message, exc_info=exc_info) logger.log(level, message, exc_info=exc_info)
class StorageServerError(StorageError): class StorageServerError(StorageError):
"""Error reported when an unpickleable exception is raised.""" """Error reported when an unpicklable exception is raised."""
class ZEOStorage: class ZEOStorage:
"""Proxy to underlying storage for a single remote client.""" """Proxy to underlying storage for a single remote client."""
......
...@@ -22,4 +22,4 @@ ZEO is now part of ZODB; ZODB's home on the web is ...@@ -22,4 +22,4 @@ ZEO is now part of ZODB; ZODB's home on the web is
""" """
# The next line must use double quotes, so release.py recognizes it. # The next line must use double quotes, so release.py recognizes it.
version = "3.6.0a3" version = "3.7.0a0"
...@@ -176,8 +176,14 @@ def shutdown_zeo_server(adminaddr): ...@@ -176,8 +176,14 @@ def shutdown_zeo_server(adminaddr):
# superstition. # superstition.
for i in range(3): for i in range(3):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(.3)
try: try:
s.connect(adminaddr) s.connect(adminaddr)
except socket.timeout:
# On FreeBSD 5.3 the connection just timed out
if i > 0:
break
raise
except socket.error, e: except socket.error, e:
if e[0] == errno.ECONNREFUSED and i > 0: if e[0] == errno.ECONNREFUSED and i > 0:
break break
......
...@@ -460,9 +460,13 @@ class Connection(smac.SizedMessageAsyncConnection, object): ...@@ -460,9 +460,13 @@ class Connection(smac.SizedMessageAsyncConnection, object):
return hasattr(self.obj, name) return hasattr(self.obj, name)
def send_reply(self, msgid, ret): def send_reply(self, msgid, ret):
# encode() can pass on a wide variety of exceptions from cPickle.
# While a bare `except` is generally poor practice, in this case
# it's acceptable -- we really do want to catch every exception
# cPickle may raise.
try: try:
msg = self.marshal.encode(msgid, 0, REPLY, ret) msg = self.marshal.encode(msgid, 0, REPLY, ret)
except self.marshal.errors: except: # see above
try: try:
r = short_repr(ret) r = short_repr(ret)
except: except:
...@@ -480,9 +484,13 @@ class Connection(smac.SizedMessageAsyncConnection, object): ...@@ -480,9 +484,13 @@ class Connection(smac.SizedMessageAsyncConnection, object):
if type(err_value) is not types.InstanceType: if type(err_value) is not types.InstanceType:
err_value = err_type, err_value err_value = err_type, err_value
# encode() can pass on a wide variety of exceptions from cPickle.
# While a bare `except` is generally poor practice, in this case
# it's acceptable -- we really do want to catch every exception
# cPickle may raise.
try: try:
msg = self.marshal.encode(msgid, 0, REPLY, (err_type, err_value)) msg = self.marshal.encode(msgid, 0, REPLY, (err_type, err_value))
except self.marshal.errors: except: # see above
try: try:
r = short_repr(err_value) r = short_repr(err_value)
except: except:
......
...@@ -34,6 +34,7 @@ class BaseStorage(UndoLogCompatible): ...@@ -34,6 +34,7 @@ class BaseStorage(UndoLogCompatible):
A subclass must define the following methods: A subclass must define the following methods:
load() load()
store()
close() close()
cleanup() cleanup()
lastSerial() lastSerial()
...@@ -53,7 +54,6 @@ class BaseStorage(UndoLogCompatible): ...@@ -53,7 +54,6 @@ class BaseStorage(UndoLogCompatible):
If the subclass wants to implement undo, it should implement the If the subclass wants to implement undo, it should implement the
multiple revision methods and: multiple revision methods and:
loadSerial()
undo() undo()
undoInfo() undoInfo()
undoLog() undoLog()
...@@ -94,9 +94,9 @@ class BaseStorage(UndoLogCompatible): ...@@ -94,9 +94,9 @@ class BaseStorage(UndoLogCompatible):
self._commit_lock_acquire = l.acquire self._commit_lock_acquire = l.acquire
self._commit_lock_release = l.release self._commit_lock_release = l.release
t=time.time() t = time.time()
t=self._ts=apply(TimeStamp,(time.gmtime(t)[:5]+(t%60,))) t = self._ts = TimeStamp(*(time.gmtime(t)[:5] + (t%60,)))
self._tid = `t` self._tid = repr(t)
# ._oid is the highest oid in use (0 is always in use -- it's # ._oid is the highest oid in use (0 is always in use -- it's
# a reserved oid for the root object). Our new_oid() method # a reserved oid for the root object). Our new_oid() method
...@@ -189,10 +189,12 @@ class BaseStorage(UndoLogCompatible): ...@@ -189,10 +189,12 @@ class BaseStorage(UndoLogCompatible):
try: try:
if transaction is not self._transaction: if transaction is not self._transaction:
return return
self._abort() try:
self._clear_temp() self._abort()
self._transaction = None self._clear_temp()
self._commit_lock_release() self._transaction = None
finally:
self._commit_lock_release()
finally: finally:
self._lock_release() self._lock_release()
...@@ -226,7 +228,7 @@ class BaseStorage(UndoLogCompatible): ...@@ -226,7 +228,7 @@ class BaseStorage(UndoLogCompatible):
now = time.time() now = time.time()
t = TimeStamp(*(time.gmtime(now)[:5] + (now % 60,))) t = TimeStamp(*(time.gmtime(now)[:5] + (now % 60,)))
self._ts = t = t.laterThan(self._ts) self._ts = t = t.laterThan(self._ts)
self._tid = `t` self._tid = repr(t)
else: else:
self._ts = TimeStamp(tid) self._ts = TimeStamp(tid)
self._tid = tid self._tid = tid
......
...@@ -45,7 +45,6 @@ from ZODB.POSException import ConflictError, ReadConflictError ...@@ -45,7 +45,6 @@ from ZODB.POSException import ConflictError, ReadConflictError
from ZODB.POSException import Unsupported from ZODB.POSException import Unsupported
from ZODB.POSException import POSKeyError from ZODB.POSException import POSKeyError
from ZODB.serialize import ObjectWriter, ObjectReader, myhasattr from ZODB.serialize import ObjectWriter, ObjectReader, myhasattr
from ZODB.utils import DEPRECATED_ARGUMENT, deprecated36
from ZODB.utils import p64, u64, z64, oid_repr, positive_id from ZODB.utils import p64, u64, z64, oid_repr, positive_id
from ZODB import utils from ZODB import utils
...@@ -132,6 +131,9 @@ class Connection(ExportImport, object): ...@@ -132,6 +131,9 @@ class Connection(ExportImport, object):
# will execute atomically by virtue of the GIL. But some storage # will execute atomically by virtue of the GIL. But some storage
# might generate oids where hash or compare invokes Python code. In # might generate oids where hash or compare invokes Python code. In
# that case, the GIL can't save us. # that case, the GIL can't save us.
# Note: since that was written, it was officially declared that the
# type of an oid is str. TODO: remove the related now-unnecessary
# critical sections (if any -- this needs careful thought).
self._inv_lock = threading.Lock() self._inv_lock = threading.Lock()
self._invalidated = d = {} self._invalidated = d = {}
...@@ -213,10 +215,8 @@ class Connection(ExportImport, object): ...@@ -213,10 +215,8 @@ class Connection(ExportImport, object):
self._cache[oid] = obj self._cache[oid] = obj
return obj return obj
def cacheMinimize(self, dt=DEPRECATED_ARGUMENT): def cacheMinimize(self):
"""Deactivate all unmodified objects in the cache.""" """Deactivate all unmodified objects in the cache."""
if dt is not DEPRECATED_ARGUMENT:
deprecated36("cacheMinimize() dt= is ignored.")
self._cache.minimize() self._cache.minimize()
# TODO: we should test what happens when cacheGC is called mid-transaction. # TODO: we should test what happens when cacheGC is called mid-transaction.
...@@ -266,7 +266,7 @@ class Connection(ExportImport, object): ...@@ -266,7 +266,7 @@ class Connection(ExportImport, object):
# Return the connection to the pool. # Return the connection to the pool.
if self._opened is not None: if self._opened is not None:
self._db._returnToPool(self) self._db._returnToPool(self)
# _returnToPool() set self._opened to None. # _returnToPool() set self._opened to None.
# However, we can't assert that here, because self may # However, we can't assert that here, because self may
# have been reused (by another thread) by the time we # have been reused (by another thread) by the time we
...@@ -323,7 +323,7 @@ class Connection(ExportImport, object): ...@@ -323,7 +323,7 @@ class Connection(ExportImport, object):
This is used in a check to avoid implicitly adding an object This is used in a check to avoid implicitly adding an object
to a database in a multi-database situation. to a database in a multi-database situation.
See serialize.ObjectWriter.persistent_id. See serialize.ObjectWriter.persistent_id.
""" """
return (self._creating.get(oid, 0) return (self._creating.get(oid, 0)
or or
...@@ -546,11 +546,11 @@ class Connection(ExportImport, object): ...@@ -546,11 +546,11 @@ class Connection(ExportImport, object):
# Because obj was added, it is now in _creating, so it # Because obj was added, it is now in _creating, so it
# can be removed from _added. If oid wasn't in # can be removed from _added. If oid wasn't in
# adding, then we are adding it implicitly. # adding, then we are adding it implicitly.
implicitly_adding = self._added.pop(oid, None) is None implicitly_adding = self._added.pop(oid, None) is None
self._creating[oid] = implicitly_adding self._creating[oid] = implicitly_adding
else: else:
if (oid in self._invalidated if (oid in self._invalidated
and not hasattr(obj, '_p_resolveConflict')): and not hasattr(obj, '_p_resolveConflict')):
...@@ -647,8 +647,8 @@ class Connection(ExportImport, object): ...@@ -647,8 +647,8 @@ class Connection(ExportImport, object):
self._storage.tpc_abort(transaction) self._storage.tpc_abort(transaction)
# Note: If we invalidate a non-justifiable object (i.e. a # Note: If we invalidate a non-ghostifiable object (i.e. a
# persistent class), the object will immediately reread it's # persistent class), the object will immediately reread its
# state. That means that the following call could result in a # state. That means that the following call could result in a
# call to self.setstate, which, of course, must succeed. In # call to self.setstate, which, of course, must succeed. In
# general, it would be better if the read could be delayed # general, it would be better if the read could be delayed
...@@ -787,10 +787,8 @@ class Connection(ExportImport, object): ...@@ -787,10 +787,8 @@ class Connection(ExportImport, object):
# dict update could go on in another thread, but we don't care # dict update could go on in another thread, but we don't care
# because we have to check again after the load anyway. # because we have to check again after the load anyway.
if (obj._p_oid in self._invalidated if (obj._p_oid in self._invalidated and
and not myhasattr(obj, "_p_independent") not myhasattr(obj, "_p_independent")):
and not self._invalidated
):
# If the object has _p_independent(), we will handle it below. # If the object has _p_independent(), we will handle it below.
self._load_before_or_conflict(obj) self._load_before_or_conflict(obj)
return return
...@@ -883,16 +881,11 @@ class Connection(ExportImport, object): ...@@ -883,16 +881,11 @@ class Connection(ExportImport, object):
""" """
assert obj._p_jar is self assert obj._p_jar is self
if obj._p_oid is None: if obj._p_oid is None:
# There is some old Zope code that assigns _p_jar
# directly. That is no longer allowed, but we need to
# provide support for old code that still does it.
# The actual complaint here is that an object without # The actual complaint here is that an object without
# an oid is being registered. I can't think of any way to # an oid is being registered. I can't think of any way to
# achieve that without assignment to _p_jar. If there is # achieve that without assignment to _p_jar. If there is
# a way, this will be a very confusing warning. # a way, this will be a very confusing exception.
deprecated36("Assigning to _p_jar is deprecated, and will be " raise ValueError("assigning to _p_jar is not supported")
"changed to raise an exception.")
elif obj._p_oid in self._added: elif obj._p_oid in self._added:
# It was registered before it was added to _added. # It was registered before it was added to _added.
return return
...@@ -961,7 +954,7 @@ class Connection(ExportImport, object): ...@@ -961,7 +954,7 @@ class Connection(ExportImport, object):
if transaction_manager is None: if transaction_manager is None:
transaction_manager = transaction.manager transaction_manager = transaction.manager
self.transaction_manager = transaction_manager self.transaction_manager = transaction_manager
if self._reset_counter != global_reset_counter: if self._reset_counter != global_reset_counter:
...@@ -1036,48 +1029,7 @@ class Connection(ExportImport, object): ...@@ -1036,48 +1029,7 @@ class Connection(ExportImport, object):
########################################################################## ##########################################################################
# DEPRECATED methods # DEPRECATED methods
def cacheFullSweep(self, dt=None): # None at present.
deprecated36("cacheFullSweep is deprecated. "
"Use cacheMinimize instead.")
if dt is None:
self._cache.full_sweep()
else:
self._cache.full_sweep(dt)
def getTransaction(self):
"""Get the current transaction for this connection.
:deprecated:
The transaction manager's get method works the same as this
method. You can pass a transaction manager (TM) to DB.open()
to control which TM the Connection uses.
"""
deprecated36("getTransaction() is deprecated. "
"Use the transaction_manager argument "
"to DB.open() instead, or access "
".transaction_manager directly on the Connection.")
return self.transaction_manager.get()
def setLocalTransaction(self):
"""Use a transaction bound to the connection rather than the thread.
:deprecated:
Returns the transaction manager used by the connection. You
can pass a transaction manager (TM) to DB.open() to control
which TM the Connection uses.
"""
deprecated36("setLocalTransaction() is deprecated. "
"Use the transaction_manager argument "
"to DB.open() instead.")
if self.transaction_manager is transaction.manager:
if self._synch:
self.transaction_manager.unregisterSynch(self)
self.transaction_manager = transaction.TransactionManager()
if self._synch:
self.transaction_manager.registerSynch(self)
return self.transaction_manager
# DEPRECATED methods # DEPRECATED methods
########################################################################## ##########################################################################
......
...@@ -25,7 +25,6 @@ from ZODB.utils import z64 ...@@ -25,7 +25,6 @@ from ZODB.utils import z64
from ZODB.Connection import Connection from ZODB.Connection import Connection
from ZODB.serialize import referencesf from ZODB.serialize import referencesf
from ZODB.utils import WeakSet from ZODB.utils import WeakSet
from ZODB.utils import DEPRECATED_ARGUMENT, deprecated36
from zope.interface import implements from zope.interface import implements
from ZODB.interfaces import IDatabase from ZODB.interfaces import IDatabase
...@@ -119,6 +118,19 @@ class _ConnectionPool(object): ...@@ -119,6 +118,19 @@ class _ConnectionPool(object):
while len(self.available) > target: while len(self.available) > target:
c = self.available.pop(0) c = self.available.pop(0)
self.all.remove(c) self.all.remove(c)
# While application code may still hold a reference to `c`,
# there's little useful that can be done with this Connection
# anymore. Its cache may be holding on to limited resources,
# and we replace the cache with an empty one now so that we
# don't have to wait for gc to reclaim it. Note that it's not
# possible for DB.open() to return `c` again: `c` can never
# be in an open state again.
# TODO: Perhaps it would be better to break the reference
# cycles between `c` and `c._cache`, so that refcounting reclaims
# both right now. But if user code _does_ have a strong
# reference to `c` now, breaking the cycle would not reclaim `c`
# now, and `c` would be left in a user-visible crazy state.
c._resetCache()
# Pop an available connection and return it, or return None if none are # Pop an available connection and return it, or return None if none are
# available. In the latter case, the caller should create a new # available. In the latter case, the caller should create a new
...@@ -177,9 +189,6 @@ class DB(object): ...@@ -177,9 +189,6 @@ class DB(object):
cacheFullSweep, cacheLastGCTime, cacheMinimize, cacheSize, cacheFullSweep, cacheLastGCTime, cacheMinimize, cacheSize,
cacheDetailSize, getCacheSize, getVersionCacheSize, setCacheSize, cacheDetailSize, getCacheSize, getVersionCacheSize, setCacheSize,
setVersionCacheSize setVersionCacheSize
- `Deprecated Methods`: getCacheDeactivateAfter,
setCacheDeactivateAfter,
getVersionCacheDeactivateAfter, setVersionCacheDeactivateAfter
""" """
implements(IDatabase) implements(IDatabase)
...@@ -189,12 +198,10 @@ class DB(object): ...@@ -189,12 +198,10 @@ class DB(object):
def __init__(self, storage, def __init__(self, storage,
pool_size=7, pool_size=7,
cache_size=400, cache_size=400,
cache_deactivate_after=DEPRECATED_ARGUMENT,
version_pool_size=3, version_pool_size=3,
version_cache_size=100, version_cache_size=100,
database_name='unnamed', database_name='unnamed',
databases=None, databases=None,
version_cache_deactivate_after=DEPRECATED_ARGUMENT,
): ):
"""Create an object database. """Create an object database.
...@@ -206,8 +213,6 @@ class DB(object): ...@@ -206,8 +213,6 @@ class DB(object):
version) version)
- `version_cache_size`: target size of Connection object cache for - `version_cache_size`: target size of Connection object cache for
version connections version connections
- `cache_deactivate_after`: ignored
- `version_cache_deactivate_after`: ignored
""" """
# Allocate lock. # Allocate lock.
x = threading.RLock() x = threading.RLock()
...@@ -222,12 +227,6 @@ class DB(object): ...@@ -222,12 +227,6 @@ class DB(object):
self._version_pool_size = version_pool_size self._version_pool_size = version_pool_size
self._version_cache_size = version_cache_size self._version_cache_size = version_cache_size
# warn about use of deprecated arguments
if cache_deactivate_after is not DEPRECATED_ARGUMENT:
deprecated36("cache_deactivate_after has no effect")
if version_cache_deactivate_after is not DEPRECATED_ARGUMENT:
deprecated36("version_cache_deactivate_after has no effect")
self._miv_cache = {} self._miv_cache = {}
# Setup storage # Setup storage
...@@ -494,10 +493,7 @@ class DB(object): ...@@ -494,10 +493,7 @@ class DB(object):
def objectCount(self): def objectCount(self):
return len(self._storage) return len(self._storage)
def open(self, version='', def open(self, version='', mvcc=True,
transaction=DEPRECATED_ARGUMENT, temporary=DEPRECATED_ARGUMENT,
force=DEPRECATED_ARGUMENT, waitflag=DEPRECATED_ARGUMENT,
mvcc=True, txn_mgr=DEPRECATED_ARGUMENT,
transaction_manager=None, synch=True): transaction_manager=None, synch=True):
"""Return a database Connection for use by application code. """Return a database Connection for use by application code.
...@@ -518,29 +514,6 @@ class DB(object): ...@@ -518,29 +514,6 @@ class DB(object):
register for afterCompletion() calls. register for afterCompletion() calls.
""" """
if temporary is not DEPRECATED_ARGUMENT:
deprecated36("DB.open() temporary= ignored. "
"open() no longer blocks.")
if force is not DEPRECATED_ARGUMENT:
deprecated36("DB.open() force= ignored. "
"open() no longer blocks.")
if waitflag is not DEPRECATED_ARGUMENT:
deprecated36("DB.open() waitflag= ignored. "
"open() no longer blocks.")
if transaction is not DEPRECATED_ARGUMENT:
deprecated36("DB.open() transaction= ignored.")
if txn_mgr is not DEPRECATED_ARGUMENT:
deprecated36("use transaction_manager= instead of txn_mgr=")
if transaction_manager is None:
transaction_manager = txn_mgr
else:
raise ValueError("cannot specify both transaction_manager= "
"and txn_mgr=")
self._a() self._a()
try: try:
# pool <- the _ConnectionPool for this version # pool <- the _ConnectionPool for this version
...@@ -567,7 +540,7 @@ class DB(object): ...@@ -567,7 +540,7 @@ class DB(object):
# Tell the connection it belongs to self. # Tell the connection it belongs to self.
result.open(transaction_manager, mvcc, synch) result.open(transaction_manager, mvcc, synch)
# A good time to do some cache cleanup. # A good time to do some cache cleanup.
self._connectionMap(lambda c: c.cacheGC()) self._connectionMap(lambda c: c.cacheGC())
...@@ -706,23 +679,8 @@ class DB(object): ...@@ -706,23 +679,8 @@ class DB(object):
def versionEmpty(self, version): def versionEmpty(self, version):
return self._storage.versionEmpty(version) return self._storage.versionEmpty(version)
# The following methods are deprecated and have no effect resource_counter_lock = threading.Lock()
resource_counter = 0
def getCacheDeactivateAfter(self):
"""Deprecated"""
deprecated36("getCacheDeactivateAfter has no effect")
def getVersionCacheDeactivateAfter(self):
"""Deprecated"""
deprecated36("getVersionCacheDeactivateAfter has no effect")
def setCacheDeactivateAfter(self, v):
"""Deprecated"""
deprecated36("setCacheDeactivateAfter has no effect")
def setVersionCacheDeactivateAfter(self, v):
"""Deprecated"""
deprecated36("setVersionCacheDeactivateAfter has no effect")
class ResourceManager(object): class ResourceManager(object):
"""Transaction participation for a version or undo resource.""" """Transaction participation for a version or undo resource."""
...@@ -734,8 +692,20 @@ class ResourceManager(object): ...@@ -734,8 +692,20 @@ class ResourceManager(object):
self.tpc_finish = self._db._storage.tpc_finish self.tpc_finish = self._db._storage.tpc_finish
self.tpc_abort = self._db._storage.tpc_abort self.tpc_abort = self._db._storage.tpc_abort
# Get a number from a simple thread-safe counter, then
# increment it, for the purpose of sorting ResourceManagers by
# creation order. This ensures that multiple ResourceManagers
# within a transaction commit in a predictable sequence.
resource_counter_lock.acquire()
try:
global resource_counter
self._count = resource_counter
resource_counter += 1
finally:
resource_counter_lock.release()
def sortKey(self): def sortKey(self):
return "%s:%s" % (self._db._storage.sortKey(), id(self)) return "%s:%016x" % (self._db._storage.sortKey(), self._count)
def tpc_begin(self, txn, sub=False): def tpc_begin(self, txn, sub=False):
if sub: if sub:
......
...@@ -21,7 +21,7 @@ The Demo storage serves two purposes: ...@@ -21,7 +21,7 @@ The Demo storage serves two purposes:
- Provide a volatile storage that is useful for giving demonstrations. - Provide a volatile storage that is useful for giving demonstrations.
The demo storage can have a "base" storage that is used in a The demo storage can have a "base" storage that is used in a
read-only fashion. The base storage must not not to contain version read-only fashion. The base storage must not contain version
data. data.
There are three main data structures: There are three main data structures:
......
...@@ -890,7 +890,7 @@ class FileStorage(BaseStorage.BaseStorage, ...@@ -890,7 +890,7 @@ class FileStorage(BaseStorage.BaseStorage,
self._file.write(p64(tl)) self._file.write(p64(tl))
self._file.flush() self._file.flush()
except: except:
# Hm, an error occured writing out the data. Maybe the # Hm, an error occurred writing out the data. Maybe the
# disk is full. We don't want any turd at the end. # disk is full. We don't want any turd at the end.
self._file.truncate(self._pos) self._file.truncate(self._pos)
raise raise
...@@ -993,7 +993,11 @@ class FileStorage(BaseStorage.BaseStorage, ...@@ -993,7 +993,11 @@ class FileStorage(BaseStorage.BaseStorage,
return "", None return "", None
def _transactionalUndoRecord(self, oid, pos, tid, pre, version): def _transactionalUndoRecord(self, oid, pos, tid, pre, version):
"""Get the indo information for a data record """Get the undo information for a data record
'pos' points to the data header for 'oid' in the transaction
being undone. 'tid' refers to the transaction being undone.
'pre' is the 'prev' field of the same data header.
Return a 5-tuple consisting of a pickle, data pointer, Return a 5-tuple consisting of a pickle, data pointer,
version, packed non-version data pointer, and current version, packed non-version data pointer, and current
......
...@@ -31,7 +31,7 @@ class POSKeyError(KeyError, POSError): ...@@ -31,7 +31,7 @@ class POSKeyError(KeyError, POSError):
return oid_repr(self.args[0]) return oid_repr(self.args[0])
class TransactionError(POSError): class TransactionError(POSError):
"""An error occured due to normal transaction processing.""" """An error occurred due to normal transaction processing."""
class TransactionFailedError(POSError): class TransactionFailedError(POSError):
"""Cannot perform an operation on a transaction that previously failed. """Cannot perform an operation on a transaction that previously failed.
...@@ -252,7 +252,7 @@ class UndoError(POSError): ...@@ -252,7 +252,7 @@ class UndoError(POSError):
return _fmt_undo(self._oid, self._reason) return _fmt_undo(self._oid, self._reason)
class MultipleUndoErrors(UndoError): class MultipleUndoErrors(UndoError):
"""Several undo errors occured during a single transaction.""" """Several undo errors occurred during a single transaction."""
def __init__(self, errs): def __init__(self, errs):
# provide a reason and oid for clients that only look at that # provide a reason and oid for clients that only look at that
......
...@@ -13,10 +13,9 @@ ...@@ -13,10 +13,9 @@
############################################################################## ##############################################################################
# The next line must use double quotes, so release.py recognizes it. # The next line must use double quotes, so release.py recognizes it.
__version__ = "3.6.0a3" __version__ = "3.7.0a0"
import sys import sys
import __builtin__
from persistent import TimeStamp from persistent import TimeStamp
from persistent import list from persistent import list
...@@ -30,9 +29,3 @@ sys.modules['ZODB.PersistentList'] = sys.modules['persistent.list'] ...@@ -30,9 +29,3 @@ sys.modules['ZODB.PersistentList'] = sys.modules['persistent.list']
del mapping, list, sys del mapping, list, sys
from DB import DB from DB import DB
# TODO: get_transaction() scheduled to go away in ZODB 3.6.
from transaction import get_transaction
__builtin__.get_transaction = get_transaction
del __builtin__
=======================
Collaboration Diagrams
=======================
This file contains several collaboration diagrams for the ZODB.
Simple fetch, modify, commit
============================
Participants Participants
DB: ZODB.DB.DB ------------
C: ZODB.Connection.Connection
S: ZODB.FileStorage.FileStorage - ``DB``: ``ZODB.DB.DB``
T: transaction.interfaces.ITransaction - ``C``: ``ZODB.Connection.Connection``
TM: transaction.interfaces.ITransactionManager - ``S``: ``ZODB.FileStorage.FileStorage``
o1, o2, ...: pre-existing persistent objects - ``T``: ``transaction.interfaces.ITransaction``
- ``TM``: ``transaction.interfaces.ITransactionManager``
- ``o1``, ``o2``, ...: pre-existing persistent objects
Scenario Scenario
"""Simple fetch, modify, commit.""" --------
::
DB.open() DB.open()
create C create C
...@@ -50,16 +63,23 @@ Scenario ...@@ -50,16 +63,23 @@ Scenario
# transactions. # transactions.
Simple fetch, modify, abort
===========================
Participants Participants
DB: ZODB.DB.DB ------------
C: ZODB.Connection.Connection
S: ZODB.FileStorage.FileStorage - ``DB``: ``ZODB.DB.DB``
T: transaction.interfaces.ITransaction - ``C``: ``ZODB.Connection.Connection``
TM: transaction.interfaces.ITransactionManager - ``S``: ``ZODB.FileStorage.FileStorage``
o1, o2, ...: pre-existing persistent objects - ``T``: ``transaction.interfaces.ITransaction``
- ``TM``: ``transaction.interfaces.ITransactionManager``
- ``o1``, ``o2``, ...: pre-existing persistent objects
Scenario Scenario
"""Simple fetch, modify, abort.""" --------
::
DB.open() DB.open()
create C create C
...@@ -91,15 +111,22 @@ Scenario ...@@ -91,15 +111,22 @@ Scenario
# transactions. # transactions.
Participants: Rollback of a savepoint
T: ITransaction =======================
o1, o2, o3: some persistent objects
C1, C2, C3: resource managers Participants
S1, S2: Transaction savepoint objects ------------
s11, s21, s22: resource-manager savepoints
- ``T``: ``transaction.interfaces.ITransaction``
- ``o1``, ``o2``, ``o3``: some persistent objects
- ``C1``, ``C2``, ``C3``: resource managers
- ``S1``, ``S2``: Transaction savepoint objects
- ``s11``, ``s21``, ``s22``: resource-manager savepoints
Scenario Scenario
"""Rollback of a savepoint""" --------
::
create T create T
o1.modify() o1.modify()
...@@ -140,8 +167,8 @@ Scenario ...@@ -140,8 +167,8 @@ Scenario
o2.invalidate() o2.invalidate()
# truncates temporary storage to beginning, because # truncates temporary storage to beginning, because
# s22 was the first savepoint. (Perhaps connection # savepoints record the log position before the
# savepoints record the log position before the # savepoints record the log position before the
# data were written, which is 0 in this case. # data were written, which is 0 in this case.
T.commit() T.commit()
C1.beforeCompletion(T) C1.beforeCompletion(T)
C2.beforeCompletion(T) C2.beforeCompletion(T)
......
...@@ -158,9 +158,40 @@ ...@@ -158,9 +158,40 @@
implements="ZODB.database"> implements="ZODB.database">
<section type="ZODB.storage" name="*" attribute="storage"/> <section type="ZODB.storage" name="*" attribute="storage"/>
<key name="cache-size" datatype="integer" default="5000"/> <key name="cache-size" datatype="integer" default="5000"/>
<description>
Target size, in number of objects, of each connection's
object cache.
</description>
<key name="pool-size" datatype="integer" default="7"/> <key name="pool-size" datatype="integer" default="7"/>
<description>
The expected maximum number of simultaneously open connections.
There is no hard limit (as many connections as are requested
will be opened, until system resources are exhausted). Exceeding
pool-size connections causes a warning message to be logged,
and exceeding twice pool-size connections causes a critical
message to be logged.
</description>
<key name="version-pool-size" datatype="integer" default="3"/> <key name="version-pool-size" datatype="integer" default="3"/>
<description>
The expected maximum number of connections simultaneously open
per version.
</description>
<key name="version-cache-size" datatype="integer" default="100"/> <key name="version-cache-size" datatype="integer" default="100"/>
<description>
Target size, in number of objects, of each version connection's
object cache.
</description>
<key name="database-name" default="unnamed"/>
<description>
When multidatabases are in use, this is the name given to this
database in the collection. The name must be unique across all
databases in the collection. The collection must also be given
a mapping from its databases' names to their databases, but that
cannot be specified in a ZODB config file. Applications using
multidatabases typically supply a way to configure the mapping in
their own config files, using the "databases" parameter of a DB
constructor.
</description>
</sectiontype> </sectiontype>
<sectiontype name="blobstorage" datatype=".BlobStorage" <sectiontype name="blobstorage" datatype=".BlobStorage"
......
...@@ -92,7 +92,7 @@ class BaseConfig: ...@@ -92,7 +92,7 @@ class BaseConfig:
class ZODBDatabase(BaseConfig): class ZODBDatabase(BaseConfig):
def open(self, database_name='unnamed', databases=None): def open(self, databases=None):
section = self.config section = self.config
storage = section.storage.open() storage = section.storage.open()
try: try:
...@@ -101,8 +101,8 @@ class ZODBDatabase(BaseConfig): ...@@ -101,8 +101,8 @@ class ZODBDatabase(BaseConfig):
cache_size=section.cache_size, cache_size=section.cache_size,
version_pool_size=section.version_pool_size, version_pool_size=section.version_pool_size,
version_cache_size=section.version_cache_size, version_cache_size=section.version_cache_size,
databases=databases, database_name=section.database_name,
database_name=database_name) databases=databases)
except: except:
storage.close() storage.close()
raise raise
......
=========================
Cross-Database References Cross-Database References
========================= =========================
...@@ -36,7 +37,7 @@ We'll have a reference to the first object: ...@@ -36,7 +37,7 @@ We'll have a reference to the first object:
>>> tm.commit() >>> tm.commit()
Now, let's open a separate connection to database 2. We use it to Now, let's open a separate connection to database 2. We use it to
read p2, use p2 to get to p1, and verify that it is in database 1: read `p2`, use `p2` to get to `p1`, and verify that it is in database 1:
>>> conn = db2.open() >>> conn = db2.open()
>>> p2x = conn.root()['p'] >>> p2x = conn.root()['p']
...@@ -77,8 +78,8 @@ happens. Consider: ...@@ -77,8 +78,8 @@ happens. Consider:
>>> p1.p4 = p4 >>> p1.p4 = p4
>>> p2.p4 = p4 >>> p2.p4 = p4
In this example, the new object is reachable from both p1 in database In this example, the new object is reachable from both `p1` in database
1 and p2 in database 2. If we commit, which database will p4 end up 1 and `p2` in database 2. If we commit, which database will `p4` end up
in? This sort of ambiguity can lead to subtle bugs. For that reason, in? This sort of ambiguity can lead to subtle bugs. For that reason,
an error is generated if we commit changes when new objects are an error is generated if we commit changes when new objects are
reachable from multiple databases: reachable from multiple databases:
...@@ -126,7 +127,7 @@ to explicitly say what database an object belongs to: ...@@ -126,7 +127,7 @@ to explicitly say what database an object belongs to:
>>> p1.p5 = p5 >>> p1.p5 = p5
>>> p2.p5 = p5 >>> p2.p5 = p5
>>> conn1.add(p5) >>> conn1.add(p5)
>>> tm.commit() >>> tm.commit()
>>> p5._p_jar.db().database_name >>> p5._p_jar.db().database_name
'1' '1'
...@@ -141,6 +142,7 @@ cross-database references, however, there are a number of facilities ...@@ -141,6 +142,7 @@ cross-database references, however, there are a number of facilities
missing: missing:
cross-database garbage collection cross-database garbage collection
Garbage collection is done on a database by database basis. Garbage collection is done on a database by database basis.
If an object on a database only has references to it from other If an object on a database only has references to it from other
databases, then the object will be garbage collected when its databases, then the object will be garbage collected when its
...@@ -148,11 +150,13 @@ cross-database garbage collection ...@@ -148,11 +150,13 @@ cross-database garbage collection
broken. broken.
cross-database undo cross-database undo
Undo is only applied to a single database. Fixing this for Undo is only applied to a single database. Fixing this for
multiple databases is going to be extremely difficult. Undo multiple databases is going to be extremely difficult. Undo
currently poses consistency problems, so it is not (or should not currently poses consistency problems, so it is not (or should not
be) widely used. be) widely used.
Cross-database aware (tolerant) export/import Cross-database aware (tolerant) export/import
The export/import facility needs to be aware, at least, of cross-database The export/import facility needs to be aware, at least, of cross-database
references. references.
==================
Persistent Classes Persistent Classes
================== ==================
...@@ -39,7 +40,7 @@ functions to make them picklable. ...@@ -39,7 +40,7 @@ functions to make them picklable.
Also note that we explicitly set the module. Persistent classes don't Also note that we explicitly set the module. Persistent classes don't
live in normal Python modules. Rather, they live in the database. We live in normal Python modules. Rather, they live in the database. We
use information in __module__ to record where in the database. When use information in ``__module__`` to record where in the database. When
we want to use a database, we will need to supply a custom class we want to use a database, we will need to supply a custom class
factory to load instances of the class. factory to load instances of the class.
...@@ -176,7 +177,7 @@ until we sync: ...@@ -176,7 +177,7 @@ until we sync:
Instances of Persistent Classes Instances of Persistent Classes
------------------------------- -------------------------------
We can, of course, store instances of perstent classes in the We can, of course, store instances of persistent classes in the
database: database:
>>> c.color = 'blue' >>> c.color = 'blue'
...@@ -189,7 +190,7 @@ database: ...@@ -189,7 +190,7 @@ database:
NOTE: If a non-persistent instance of a persistent class is copied, NOTE: If a non-persistent instance of a persistent class is copied,
the class may be copied as well. This is usually not the desired the class may be copied as well. This is usually not the desired
result. result.
Persistent instances of persistent classes Persistent instances of persistent classes
...@@ -228,10 +229,10 @@ Now, if we try to load it, we get a broken object: ...@@ -228,10 +229,10 @@ Now, if we try to load it, we get a broken object:
>>> connection2.root()['obs']['p'] >>> connection2.root()['obs']['p']
<persistent broken __zodb__.P instance '\x00\x00\x00\x00\x00\x00\x00\x04'> <persistent broken __zodb__.P instance '\x00\x00\x00\x00\x00\x00\x00\x04'>
because the module, "__zodb__" can't be loaded. We need to provide a because the module, `__zodb__` can't be loaded. We need to provide a
class factory that knows about this special module. Here we'll supply a class factory that knows about this special module. Here we'll supply a
sample class factory that looks up a class name in the database root sample class factory that looks up a class name in the database root
if the module is "__zodb__". It falls back to the normal class lookup if the module is `__zodb__`. It falls back to the normal class lookup
for other modules: for other modules:
>>> from ZODB.broken import find_global >>> from ZODB.broken import find_global
...@@ -242,7 +243,7 @@ for other modules: ...@@ -242,7 +243,7 @@ for other modules:
>>> some_database.classFactory = classFactory >>> some_database.classFactory = classFactory
Normally, the classFactory should be set before a database is opened. Normally, the classFactory should be set before a database is opened.
We'll reopen the connections we're using. We'll assign the old We'll reopen the connections we're using. We'll assign the old
connections to a variable first to prevent getting them from the connections to a variable first to prevent getting them from the
connection pool: connection pool:
...@@ -250,7 +251,7 @@ connection pool: ...@@ -250,7 +251,7 @@ connection pool:
>>> old = connection, connection2 >>> old = connection, connection2
>>> connection = some_database.open(transaction_manager=tm) >>> connection = some_database.open(transaction_manager=tm)
>>> connection2 = some_database.open(transaction_manager=tm2) >>> connection2 = some_database.open(transaction_manager=tm2)
Now, we can read the object: Now, we can read the object:
>>> connection2.root()['obs']['p'].color >>> connection2.root()['obs']['p'].color
......
...@@ -340,7 +340,7 @@ class ObjectWriter: ...@@ -340,7 +340,7 @@ class ObjectWriter:
if self._jar.get_connection(database_name) is not obj._p_jar: if self._jar.get_connection(database_name) is not obj._p_jar:
raise InvalidObjectReference( raise InvalidObjectReference(
"Attempt to store a reference to an object from " "Attempt to store a reference to an object from "
"a separate onnection to the same database or " "a separate connection to the same database or "
"multidatabase" "multidatabase"
) )
......
...@@ -8,44 +8,44 @@ subtransactions. When a transaction is committed, a flag is passed ...@@ -8,44 +8,44 @@ subtransactions. When a transaction is committed, a flag is passed
indicating whether it is a subtransaction or a top-level transaction. indicating whether it is a subtransaction or a top-level transaction.
Consider the following example commit calls: Consider the following example commit calls:
- commit() - ``commit()``
A regular top-level transaction is committed. A regular top-level transaction is committed.
- commit(1) - ``commit(1)``
A subtransaction is committed. There is now one subtransaction of A subtransaction is committed. There is now one subtransaction of
the current top-level transaction. the current top-level transaction.
- commit(1) - ``commit(1)``
A subtransaction is committed. There are now two subtransactions of A subtransaction is committed. There are now two subtransactions of
the current top-level transaction. the current top-level transaction.
- abort(1) - ``abort(1)``
A subtransaction is aborted. There are still two subtransactions of A subtransaction is aborted. There are still two subtransactions of
the current top-level transaction; work done since the last the current top-level transaction; work done since the last
commit(1) call is discarded. ``commit(1)`` call is discarded.
- commit() - ``commit()``
We now commit a top-level transaction. The work done in the previous We now commit a top-level transaction. The work done in the previous
two subtransactions *plus* work done since the last abort(1) call two subtransactions *plus* work done since the last ``abort(1)`` call
is saved. is saved.
- commit(1) - ``commit(1)``
A subtransaction is committed. There is now one subtransaction of A subtransaction is committed. There is now one subtransaction of
the current top-level transaction. the current top-level transaction.
- commit(1) - ``commit(1)``
A subtransaction is committed. There are now two subtransactions of A subtransaction is committed. There are now two subtransactions of
the current top-level transaction. the current top-level transaction.
- abort() - ``abort()``
We now abort a top-level transaction. We discard the work done in We now abort a top-level transaction. We discard the work done in
the previous two subtransactions *plus* work done since the last the previous two subtransactions *plus* work done since the last
commit(1) call. ``commit(1)`` call.
...@@ -272,6 +272,54 @@ first popped: ...@@ -272,6 +272,54 @@ first popped:
>>> len(pool.available), len(pool.all) >>> len(pool.available), len(pool.all)
(0, 2) (0, 2)
Next: when a closed Connection is removed from .available due to exceeding
pool_size, that Connection's cache is cleared (this behavior was new in
ZODB 3.6b6). While user code may still hold a reference to that
Connection, once it vanishes from .available it's really not usable for
anything sensible (it can never be in the open state again). Waiting for
gc to reclaim the Connection and its cache eventually works, but that can
take "a long time" and caches can hold on to many objects, and limited
resources (like RDB connections), for the duration.
>>> st.close()
>>> st = Storage()
>>> db = DB(st, pool_size=2)
>>> conn0 = db.open()
>>> len(conn0._cache) # empty now
0
>>> import transaction
>>> conn0.root()['a'] = 1
>>> transaction.commit()
>>> len(conn0._cache) # but now the cache holds the root object
1
Now open more connections so that the total exceeds pool_size (2):
>>> conn1 = db.open()
>>> conn2 = db.open()
>>> pool = db._pools['']
>>> len(pool.all), len(pool.available) # all Connections are in use
(3, 0)
Return pool_size (2) Connections to the pool:
>>> conn0.close()
>>> conn1.close()
>>> len(pool.all), len(pool.available)
(3, 2)
>>> len(conn0._cache) # nothing relevant has changed yet
1
When we close the third connection, conn0 will be booted from .all, and
we expect its cache to be cleared then:
>>> conn2.close()
>>> len(pool.all), len(pool.available)
(2, 2)
>>> len(conn0._cache) # conn0's cache is empty again
0
>>> del conn0, conn1, conn2
Clean up. Clean up.
>>> st.close() >>> st.close()
......
############################################################################## ==================
# Multiple Databases
# Copyright (c) 2005 Zope Corporation and Contributors. ==================
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
Multi-database tests
====================
Multi-database support adds the ability to tie multiple databases into a Multi-database support adds the ability to tie multiple databases into a
collection. The original proposal is in the fishbowl: collection. The original proposal is in the fishbowl:
...@@ -25,29 +12,29 @@ by Jim Fulton, Christian Theune, and Tim Peters. Overview: ...@@ -25,29 +12,29 @@ by Jim Fulton, Christian Theune, and Tim Peters. Overview:
No private attributes were added, and one new method was introduced. No private attributes were added, and one new method was introduced.
DB: ``DB``:
- a new .database_name attribute holds the name of this database - a new ``.database_name`` attribute holds the name of this database.
- a new .databases attribute maps from database name to DB object; all DBs - a new ``.databases`` attribute maps from database name to ``DB`` object; all
in a multi-database collection share the same .databases object databases in a multi-database collection share the same ``.databases`` object
- the DB constructor has new optional arguments with the same names - the ``DB`` constructor has new optional arguments with the same names
(database_name= and databases=). (``database_name=`` and ``databases=``).
Connection: ``Connection``:
- a new .connections attribute maps from database name to a Connection for - a new ``.connections`` attribute maps from database name to a ``Connection``
the database with that name; the .connections mapping object is also for the database with that name; the ``.connections`` mapping object is also
shared among databases in a collection shared among databases in a collection.
- a new .get_connection(database_name) method returns a Connection for a - a new ``.get_connection(database_name)`` method returns a ``Connection`` for
database in the collection; if a connection is already open, it's returned a database in the collection; if a connection is already open, it's returned
(this is the value .connections[database_name]), else a new connection is (this is the value ``.connections[database_name]``), else a new connection
opened (and stored as .connections[database_name]) is opened (and stored as ``.connections[database_name]``)
Creating a multi-database starts with creating a named DB: Creating a multi-database starts with creating a named ``DB``:
>>> from ZODB.tests.test_storage import MinimalMemoryStorage >>> from ZODB.tests.test_storage import MinimalMemoryStorage
>>> from ZODB import DB >>> from ZODB import DB
...@@ -69,7 +56,8 @@ Adding another database to the collection works like this: ...@@ -69,7 +56,8 @@ Adding another database to the collection works like this:
... database_name='notroot', ... database_name='notroot',
... databases=dbmap) ... databases=dbmap)
The new db2 now shares the 'databases' dictionary with db and has two entries: The new ``db2`` now shares the ``databases`` dictionary with db and has two
entries:
>>> db2.databases is db.databases is dbmap >>> db2.databases is db.databases is dbmap
True True
...@@ -87,7 +75,7 @@ It's an error to try to insert a database with a name already in use: ...@@ -87,7 +75,7 @@ It's an error to try to insert a database with a name already in use:
... ...
ValueError: database_name 'root' already in databases ValueError: database_name 'root' already in databases
Because that failed, db.databases wasn't changed: Because that failed, ``db.databases`` wasn't changed:
>>> len(db.databases) # still 2 >>> len(db.databases) # still 2
2 2
...@@ -127,7 +115,7 @@ Now there are two connections in that collection: ...@@ -127,7 +115,7 @@ Now there are two connections in that collection:
>>> names = cn.connections.keys(); names.sort(); print names >>> names = cn.connections.keys(); names.sort(); print names
['notroot', 'root'] ['notroot', 'root']
So long as this database group remains open, the same Connection objects So long as this database group remains open, the same ``Connection`` objects
are returned: are returned:
>>> cn.get_connection('root') is cn >>> cn.get_connection('root') is cn
...@@ -151,3 +139,59 @@ Clean up: ...@@ -151,3 +139,59 @@ Clean up:
>>> for a_db in dbmap.values(): >>> for a_db in dbmap.values():
... a_db.close() ... a_db.close()
Configuration from File
-----------------------
The database name can also be specified in a config file, starting in
ZODB 3.6:
>>> from ZODB.config import databaseFromString
>>> config = """
... <zodb>
... <mappingstorage/>
... database-name this_is_the_name
... </zodb>
... """
>>> db = databaseFromString(config)
>>> print db.database_name
this_is_the_name
>>> db.databases.keys()
['this_is_the_name']
However, the ``.databases`` attribute cannot be configured from file. It
can be passed to the `ZConfig` factory. I'm not sure of the clearest way
to test that here; this is ugly:
>>> from ZODB.config import getDbSchema
>>> import ZConfig
>>> from cStringIO import StringIO
Derive a new `config2` string from the `config` string, specifying a
different database_name:
>>> config2 = config.replace("this_is_the_name", "another_name")
Now get a `ZConfig` factory from `config2`:
>>> f = StringIO(config2)
>>> zconfig, handle = ZConfig.loadConfigFile(getDbSchema(), f)
>>> factory = zconfig.database
The desired ``databases`` mapping can be passed to this factory:
>>> db2 = factory.open(databases=db.databases)
>>> print db2.database_name # has the right name
another_name
>>> db.databases is db2.databases # shares .databases with `db`
True
>>> all = db2.databases.keys()
>>> all.sort()
>>> all # and db.database_name & db2.database_name are the keys
['another_name', 'this_is_the_name']
Cleanup.
>>> db.close()
>>> db2.close()
Here are some tests that storage sync() methods get called at appropriate =============
Synchronizers
=============
Here are some tests that storage ``sync()`` methods get called at appropriate
times in the life of a transaction. The tested behavior is new in ZODB 3.4. times in the life of a transaction. The tested behavior is new in ZODB 3.4.
First define a lightweight storage with a sync() method: First define a lightweight storage with a ``sync()`` method:
>>> import ZODB >>> import ZODB
>>> from ZODB.MappingStorage import MappingStorage >>> from ZODB.MappingStorage import MappingStorage
...@@ -27,14 +31,14 @@ Sync should not have been called yet. ...@@ -27,14 +31,14 @@ Sync should not have been called yet.
False False
sync is called by the Connection's afterCompletion() hook after the commit ``sync()`` is called by the Connection's ``afterCompletion()`` hook after the
completes. commit completes.
>>> transaction.commit() >>> transaction.commit()
>>> st.sync_called # False before 3.4 >>> st.sync_called # False before 3.4
True True
sync is also called by the afterCompletion() hook after an abort. ``sync()`` is also called by the ``afterCompletion()`` hook after an abort.
>>> st.sync_called = False >>> st.sync_called = False
>>> rt['b'] = 2 >>> rt['b'] = 2
...@@ -42,8 +46,8 @@ sync is also called by the afterCompletion() hook after an abort. ...@@ -42,8 +46,8 @@ sync is also called by the afterCompletion() hook after an abort.
>>> st.sync_called # False before 3.4 >>> st.sync_called # False before 3.4
True True
And sync is called whenever we explicitly start a new txn, via the And ``sync()`` is called whenever we explicitly start a new transaction, via
newTransaction() hook. the ``newTransaction()`` hook.
>>> st.sync_called = False >>> st.sync_called = False
>>> dummy = transaction.begin() >>> dummy = transaction.begin()
...@@ -51,19 +55,19 @@ newTransaction() hook. ...@@ -51,19 +55,19 @@ newTransaction() hook.
True True
Clean up. Closing db isn't enough -- closing a DB doesn't close its Clean up. Closing db isn't enough -- closing a DB doesn't close its
Connections. Leaving our Connection open here can cause the `Connections`. Leaving our `Connection` open here can cause the
SimpleStorage.sync() method to get called later, during another test, and ``SimpleStorage.sync()`` method to get called later, during another test, and
our doctest-synthesized module globals no longer exist then. You get our doctest-synthesized module globals no longer exist then. You get a weird
a weird traceback then ;-) traceback then ;-)
>>> cn.close() >>> cn.close()
One more, very obscure. It was the case that if the first action a new One more, very obscure. It was the case that if the first action a new
threaded transaction manager saw was a begin() call, then synchronizers threaded transaction manager saw was a ``begin()`` call, then synchronizers
registered after that in the same transaction weren't communicated to registered after that in the same transaction weren't communicated to the
the Transaction object, and so the synchronizers' afterCompletion() hooks `Transaction` object, and so the synchronizers' ``afterCompletion()`` hooks
weren't called when the transaction committed. None of the test suites weren't called when the transaction committed. None of the test suites
(ZODB's, Zope 2.8's, or Zope3's) caught that, but apparently Zope3 takes this (ZODB's, Zope 2.8's, or Zope3's) caught that, but apparently Zope 3 takes this
path at some point when serving pages. path at some point when serving pages.
>>> tm = transaction.ThreadTransactionManager() >>> tm = transaction.ThreadTransactionManager()
...@@ -75,14 +79,14 @@ path at some point when serving pages. ...@@ -75,14 +79,14 @@ path at some point when serving pages.
>>> st.sync_called >>> st.sync_called
False False
Now ensure that cn.afterCompletion() -> st.sync() gets called by commit Now ensure that ``cn.afterCompletion() -> st.sync()`` gets called by commit
despite that the Connection registered after the transaction began: despite that the `Connection` registered after the transaction began:
>>> tm.commit() >>> tm.commit()
>>> st.sync_called >>> st.sync_called
True True
And try the same thing with a non-threaded TM: And try the same thing with a non-threaded transaction manager:
>>> cn.close() >>> cn.close()
>>> tm = transaction.TransactionManager() >>> tm = transaction.TransactionManager()
......
...@@ -390,11 +390,11 @@ class UserMethodTests(unittest.TestCase): ...@@ -390,11 +390,11 @@ class UserMethodTests(unittest.TestCase):
""" """
def test_cache(self): def test_cache(self):
r"""doctest of cacheMinimize() and cacheFullSweep() methods. r"""doctest of cacheMinimize().
These tests are fairly minimal, just verifying that the This test is minimal, just verifying that the method can be called
methods can be called and have some effect. We need other and has some effect. We need other tests that verify the cache works
tests that verify the cache works as intended. as intended.
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>") >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> cn = db.open() >>> cn = db.open()
...@@ -403,71 +403,12 @@ class UserMethodTests(unittest.TestCase): ...@@ -403,71 +403,12 @@ class UserMethodTests(unittest.TestCase):
>>> r._p_state >>> r._p_state
-1 -1
The next couple of tests are involved because they have to
cater to backwards compatibility issues. The cacheMinimize()
method used to take an argument, but now ignores it.
cacheFullSweep() used to do something different than
cacheMinimize(), but it doesn't anymore. We want to verify
that these methods do something, but all cause deprecation
warnings. To do that, we need a warnings hook.
>>> hook = WarningsHook()
>>> hook.install()
More problems in case this test is run more than once: fool the
warnings module into delivering the warnings despite that they've
been seen before.
>>> import warnings
>>> warnings.filterwarnings("always", category=DeprecationWarning)
>>> r._p_activate() >>> r._p_activate()
>>> cn.cacheMinimize(12) >>> r._p_state # up to date
>>> r._p_state
-1
>>> len(hook.warnings)
1
>>> message, category, filename, lineno = hook.warnings[0]
>>> print message
This will be removed in ZODB 3.6:
cacheMinimize() dt= is ignored.
>>> category.__name__
'DeprecationWarning'
>>> hook.clear()
cacheFullSweep() is a doozy. It generates two deprecation
warnings, one from the Connection and one from the
cPickleCache. Maybe we should drop the cPickleCache warning,
but it's there for now. When passed an argument, it acts like
cacheGC(). When it isn't passed an argument it acts like
cacheMinimize().
>>> r._p_activate()
>>> cn.cacheFullSweep(12)
>>> r._p_state
0 0
>>> len(hook.warnings) >>> cn.cacheMinimize()
2 >>> r._p_state # ghost again
>>> message, category, filename, lineno = hook.warnings[0] -1
>>> print message
This will be removed in ZODB 3.6:
cacheFullSweep is deprecated. Use cacheMinimize instead.
>>> category.__name__
'DeprecationWarning'
>>> message, category, filename, lineno = hook.warnings[1]
>>> message
'No argument expected'
>>> category.__name__
'DeprecationWarning'
We have to uninstall the hook so that other warnings don't get lost.
>>> hook.uninstall()
Obscure: There is no API call for removing the filter we added, but
filters appears to be a public variable.
>>> del warnings.filters[0]
""" """
class InvalidationTests(unittest.TestCase): class InvalidationTests(unittest.TestCase):
......
==========
Savepoints Savepoints
========== ==========
Savepoints provide a way to save to disk intermediate work done during Savepoints provide a way to save to disk intermediate work done during a
a transaction allowing: transaction allowing:
- partial transaction (subtransaction) rollback (abort) - partial transaction (subtransaction) rollback (abort)
- state of saved objects to be freed, freeing on-line memory for other - state of saved objects to be freed, freeing on-line memory for other
uses uses
Savepoints make it possible to write atomic subroutines that don't Savepoints make it possible to write atomic subroutines that don't make
make top-level transaction commitments. top-level transaction commitments.
Applications Applications
------------ ------------
...@@ -39,13 +41,13 @@ and abort changes: ...@@ -39,13 +41,13 @@ and abort changes:
>>> root['name'] >>> root['name']
'bob' 'bob'
Now, let's look at an application that manages funds for people. Now, let's look at an application that manages funds for people. It allows
It allows deposits and debits to be entered for multiple people. deposits and debits to be entered for multiple people. It accepts a sequence
It accepts a sequence of entries and generates a sequence of status of entries and generates a sequence of status messages. For each entry, it
messages. For each entry, it applies the change and then validates applies the change and then validates the user's account. If the user's
the user's account. If the user's account is invalid, we roll back account is invalid, we roll back the change for that entry. The success or
the change for that entry. The success or failure of an entry is failure of an entry is indicated in the output status. First we'll initialize
indicated in the output status. First we'll initialize some accounts: some accounts:
>>> root['bob-balance'] = 0.0 >>> root['bob-balance'] = 0.0
>>> root['bob-credit'] = 0.0 >>> root['bob-credit'] = 0.0
...@@ -59,8 +61,8 @@ Now, we'll define a validation function to validate an account: ...@@ -59,8 +61,8 @@ Now, we'll define a validation function to validate an account:
... if root[name+'-balance'] + root[name+'-credit'] < 0: ... if root[name+'-balance'] + root[name+'-credit'] < 0:
... raise ValueError('Overdrawn', name) ... raise ValueError('Overdrawn', name)
And a function to apply entries. If the function fails in some And a function to apply entries. If the function fails in some unexpected
unexpected way, it rolls back all of its changes and prints the error: way, it rolls back all of its changes and prints the error:
>>> def apply_entries(entries): >>> def apply_entries(entries):
... savepoint = transaction.savepoint() ... savepoint = transaction.savepoint()
...@@ -114,9 +116,9 @@ If we provide entries that cause an unexpected error: ...@@ -114,9 +116,9 @@ If we provide entries that cause an unexpected error:
Updated sally Updated sally
Unexpected exception unsupported operand type(s) for +=: 'float' and 'str' Unexpected exception unsupported operand type(s) for +=: 'float' and 'str'
Because the apply_entries used a savepoint for the entire function, Because the apply_entries used a savepoint for the entire function, it was
it was able to rollback the partial changes without rolling back able to rollback the partial changes without rolling back changes made in the
changes made in the previous call to apply_entries: previous call to ``apply_entries``:
>>> root['bob-balance'] >>> root['bob-balance']
30.0 30.0
...@@ -135,6 +137,7 @@ away: ...@@ -135,6 +137,7 @@ away:
>>> root['sally-balance'] >>> root['sally-balance']
0.0 0.0
Savepoint invalidation Savepoint invalidation
---------------------- ----------------------
......
...@@ -54,15 +54,6 @@ class DBTests(unittest.TestCase): ...@@ -54,15 +54,6 @@ class DBTests(unittest.TestCase):
# make sure the basic methods are callable # make sure the basic methods are callable
def testSets(self): def testSets(self):
# test set methods that have non-trivial implementations
warnings.filterwarnings("error", category=DeprecationWarning)
self.assertRaises(DeprecationWarning,
self.db.setCacheDeactivateAfter, 12)
self.assertRaises(DeprecationWarning,
self.db.setVersionCacheDeactivateAfter, 12)
# Obscure: There is no API call for removing the warning we just
# added, but filters appears to be a public variable.
del warnings.filters[0]
self.db.setCacheSize(15) self.db.setCacheSize(15)
self.db.setVersionCacheSize(15) self.db.setVersionCacheSize(15)
......
...@@ -213,58 +213,6 @@ class ZODBTests(unittest.TestCase): ...@@ -213,58 +213,6 @@ class ZODBTests(unittest.TestCase):
conn1.close() conn1.close()
conn2.close() conn2.close()
def checkLocalTransactions(self):
# Test of transactions that apply to only the connection,
# not the thread.
conn1 = self._db.open()
conn2 = self._db.open()
hook = WarningsHook()
hook.install()
try:
conn1.setLocalTransaction()
conn2.setLocalTransaction()
r1 = conn1.root()
r2 = conn2.root()
if r1.has_key('item'):
del r1['item']
conn1.getTransaction().commit()
r1.get('item')
r2.get('item')
r1['item'] = 1
conn1.getTransaction().commit()
self.assertEqual(r1['item'], 1)
# r2 has not seen a transaction boundary,
# so it should be unchanged.
self.assertEqual(r2.get('item'), None)
conn2.sync()
# Now r2 is updated.
self.assertEqual(r2['item'], 1)
# Now, for good measure, send an update in the other direction.
r2['item'] = 2
conn2.getTransaction().commit()
self.assertEqual(r1['item'], 1)
self.assertEqual(r2['item'], 2)
conn1.sync()
conn2.sync()
self.assertEqual(r1['item'], 2)
self.assertEqual(r2['item'], 2)
for msg, obj, filename, lineno in hook.warnings:
self.assert_(msg in [
"This will be removed in ZODB 3.6:\n"
"setLocalTransaction() is deprecated. "
"Use the transaction_manager argument "
"to DB.open() instead.",
"This will be removed in ZODB 3.6:\n"
"getTransaction() is deprecated. "
"Use the transaction_manager argument "
"to DB.open() instead, or access "
".transaction_manager directly on the Connection."])
finally:
conn1.close()
conn2.close()
hook.uninstall()
def checkReadConflict(self): def checkReadConflict(self):
self.obj = P() self.obj = P()
self.readConflict() self.readConflict()
...@@ -584,57 +532,8 @@ class ZODBTests(unittest.TestCase): ...@@ -584,57 +532,8 @@ class ZODBTests(unittest.TestCase):
# transaction, and, in fact, when this test was written, # transaction, and, in fact, when this test was written,
# Transaction.begin() didn't do anything (everything from here # Transaction.begin() didn't do anything (everything from here
# down failed). # down failed).
# Later (ZODB 3.6): Transaction.begin() no longer exists, so the
# Oh, bleech. Since Transaction.begin is also deprecated, we have # rest of this test was tossed.
# to goof around suppressing the deprecation warning.
import warnings
# First verify that Transaction.begin *is* deprecated, by turning
# the warning into an error.
warnings.filterwarnings("error", category=DeprecationWarning)
self.assertRaises(DeprecationWarning, transaction.get().begin)
del warnings.filters[0]
# Now ignore DeprecationWarnings for the duration. Use a
# try/finally block to ensure we reenable DeprecationWarnings
# no matter what.
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
cn = self._db.open()
rt = cn.root()
rt['a'] = 1
transaction.get().begin() # should abort adding 'a' to the root
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
# A longstanding bug: this didn't work if changes were only in
# subtransactions.
transaction.get().begin()
rt = cn.root()
rt['a'] = 2
transaction.get().commit(1)
transaction.get().begin()
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
# One more time, mixing "top level" and subtransaction changes.
transaction.get().begin()
rt = cn.root()
rt['a'] = 3
transaction.get().commit(1)
rt['b'] = 4
transaction.get().begin()
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
self.assertRaises(KeyError, rt.__getitem__, 'b')
cn.close()
finally:
del warnings.filters[0]
def checkFailingCommitSticks(self): def checkFailingCommitSticks(self):
# See also checkFailingSubtransactionCommitSticks. # See also checkFailingSubtransactionCommitSticks.
...@@ -829,6 +728,42 @@ class ZODBTests(unittest.TestCase): ...@@ -829,6 +728,42 @@ class ZODBTests(unittest.TestCase):
cn.close() cn.close()
cn2.close() cn2.close()
def checkMultipleUndoInOneTransaction(self):
    # Verify that it's possible to perform multiple undo
    # operations within a transaction.  If ZODB performs the undo
    # operations in a nondeterministic order, this test will often
    # fail.
    conn = self._db.open()
    try:
        root = conn.root()

        # Add transactions that set root["state"] to (0..5)
        for state_num in range(6):
            transaction.begin()
            root['state'] = state_num
            transaction.get().note('root["state"] = %d' % state_num)
            transaction.commit()

        # Undo all but the first. Note that no work is actually
        # performed yet.
        # NOTE(review): this assumes undoLog() lists the most recent
        # transactions first, so entries 0..4 are the five later
        # "state" commits -- the sanity check below depends on that.
        transaction.begin()
        log = self._db.undoLog()
        for i in range(5):
            self._db.undo(log[i]['id'])
        transaction.get().note('undo states 1 through 5')

        # Now attempt all those undo operations.
        transaction.commit()

        # Sanity check: we should be back to the first state.
        self.assertEqual(root['state'], 0)
    finally:
        # Abort unconditionally so a failure above cannot leave a
        # half-finished transaction behind for the next test.
        transaction.abort()
        conn.close()
class PoisonedError(Exception): class PoisonedError(Exception):
pass pass
......
...@@ -56,7 +56,7 @@ database open function, but this doesn't work: ...@@ -56,7 +56,7 @@ database open function, but this doesn't work:
Traceback (most recent call last): Traceback (most recent call last):
... ...
InvalidObjectReference: Attempt to store a reference to an object InvalidObjectReference: Attempt to store a reference to an object
from a separate onnection to the same database or multidatabase from a separate connection to the same database or multidatabase
>>> tm.abort() >>> tm.abort()
...@@ -72,7 +72,7 @@ different connections to the same database. ...@@ -72,7 +72,7 @@ different connections to the same database.
Traceback (most recent call last): Traceback (most recent call last):
... ...
InvalidObjectReference: Attempt to store a reference to an object InvalidObjectReference: Attempt to store a reference to an object
from a separate onnection to the same database or multidatabase from a separate connection to the same database or multidatabase
>>> tm.abort() >>> tm.abort()
......
...@@ -37,9 +37,9 @@ MinimalMemoryStorage that implements MVCC support, but not much else. ...@@ -37,9 +37,9 @@ MinimalMemoryStorage that implements MVCC support, but not much else.
>>> from ZODB import DB >>> from ZODB import DB
>>> db = DB(MinimalMemoryStorage()) >>> db = DB(MinimalMemoryStorage())
We will use two different connections with the experimental We will use two different connections with different transaction managers
setLocalTransaction() method to make sure that the connections act to make sure that the connections act independently, even though they'll
independently, even though they'll be run from a single thread. be run from a single thread.
>>> import transaction >>> import transaction
>>> tm1 = transaction.TransactionManager() >>> tm1 = transaction.TransactionManager()
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
"""Tools to simplify transactions within applications.""" """Tools to simplify transactions within applications."""
from ZODB.POSException import ReadConflictError, ConflictError from ZODB.POSException import ReadConflictError, ConflictError
import transaction
def _commit(note): def _commit(note):
t = transaction.get() t = transaction.get()
......
...@@ -39,7 +39,6 @@ __all__ = ['z64', ...@@ -39,7 +39,6 @@ __all__ = ['z64',
'readable_tid_repr', 'readable_tid_repr',
'WeakSet', 'WeakSet',
'DEPRECATED_ARGUMENT', 'DEPRECATED_ARGUMENT',
'deprecated36',
'deprecated37', 'deprecated37',
'deprecated38', 'deprecated38',
'get_pickle_metadata', 'get_pickle_metadata',
...@@ -54,13 +53,6 @@ __all__ = ['z64', ...@@ -54,13 +53,6 @@ __all__ = ['z64',
# dance. # dance.
DEPRECATED_ARGUMENT = object() DEPRECATED_ARGUMENT = object()
# Raise DeprecationWarning, noting that the deprecated thing will go
# away in ZODB 3.6. Point to the caller of our caller (i.e., at the
# code using the deprecated thing).
def deprecated36(msg):
warnings.warn("This will be removed in ZODB 3.6:\n%s" % msg,
DeprecationWarning, stacklevel=3)
# Raise DeprecationWarning, noting that the deprecated thing will go # Raise DeprecationWarning, noting that the deprecated thing will go
# away in ZODB 3.7. Point to the caller of our caller (i.e., at the # away in ZODB 3.7. Point to the caller of our caller (i.e., at the
# code using the deprecated thing). # code using the deprecated thing).
......
...@@ -39,6 +39,10 @@ class Prefix: ...@@ -39,6 +39,10 @@ class Prefix:
def __cmp__(self, o): def __cmp__(self, o):
other_path = o.split('/') other_path = o.split('/')
if other_path and ' ' in other_path[-1]:
# don't include logged username in comparison
pos = other_path[-1].rfind(' ')
other_path[-1] = other_path[-1][:pos]
return cmp(other_path[:self.length], self.path) return cmp(other_path[:self.length], self.path)
def __repr__(self): def __repr__(self):
......
...@@ -28,5 +28,19 @@ class PrefixTest(unittest.TestCase): ...@@ -28,5 +28,19 @@ class PrefixTest(unittest.TestCase):
for equal in ("", "/", "/def", "/a/b", "/a/b/c", "/a/b/c/d"): for equal in ("", "/", "/def", "/a/b", "/a/b/c", "/a/b/c/d"):
self.assertEqual(p2, equal) self.assertEqual(p2, equal)
def test_username_info(self):
    # Zope Collector 1810; user paths have username appended.
    # Prefix.__cmp__ strips a trailing " username" token from the
    # last path component before comparing, so each of these should
    # compare exactly as its bare-path counterpart would.
    p1 = Prefix('/a/b')
    for equal in ('/a/b spam', '/a/b/c spam', '/a/b/c/b spam'):
        self.assertEqual(p1, equal)
    for notEqual in (" spam", "/a/c spam", "/a/bbb spam", "/// spam"):
        self.assertNotEqual(p1, notEqual)
    # The empty prefix matches any path, with or without a username.
    p2 = Prefix("")
    for equal in (" eggs", "/ eggs", "/def eggs", "/a/b eggs",
                  "/a/b/c eggs", "/a/b/c/d eggs"):
        self.assertEqual(p2, equal)
def test_suite(): def test_suite():
return unittest.makeSuite(PrefixTest) return unittest.makeSuite(PrefixTest)
...@@ -2,18 +2,18 @@ ...@@ -2,18 +2,18 @@
Persistence support Persistence support
=================== ===================
(This document is under construction. More basic documentation will (This document is under construction. More basic documentation will eventually
eventually appear here.) appear here.)
Overriding __getattr__, __getattribute__, __setattr__, and __delattr__ Overriding `__getattr__`, `__getattribute__`, `__setattr__`, and `__delattr__`
----------------------------------------------------------------------- ------------------------------------------------------------------------------
Subclasses can override the attribute-management methods. For the Subclasses can override the attribute-management methods. For the
__getattr__ method, the behavior is like that for regular Python `__getattr__` method, the behavior is like that for regular Python
classes and for earlier versions of ZODB 3. classes and for earlier versions of ZODB 3.
For __getattribute__, __setattr__, and __delattr__, it is necessary to For `__getattribute__`, __setattr__`, and `__delattr__`, it is necessary
call certain methods defined by persistent.Persistent. Detailed to call certain methods defined by `persistent.Persistent`. Detailed
examples and documentation is provided in the test module, examples and documentation is provided in the test module,
persistent.tests.test_overriding_attrs. `persistent.tests.test_overriding_attrs`.
...@@ -218,7 +218,7 @@ TimeStamp_timeTime(TimeStamp *self) ...@@ -218,7 +218,7 @@ TimeStamp_timeTime(TimeStamp *self)
static PyObject * static PyObject *
TimeStamp_raw(TimeStamp *self) TimeStamp_raw(TimeStamp *self)
{ {
return PyString_FromStringAndSize(self->data, 8); return PyString_FromStringAndSize((const char*)self->data, 8);
} }
static PyObject * static PyObject *
...@@ -261,7 +261,7 @@ TimeStamp_laterThan(TimeStamp *self, PyObject *obj) ...@@ -261,7 +261,7 @@ TimeStamp_laterThan(TimeStamp *self, PyObject *obj)
new[i] = 0; new[i] = 0;
else { else {
new[i]++; new[i]++;
return TimeStamp_FromString(new); return TimeStamp_FromString((const char*)new);
} }
} }
......
...@@ -39,6 +39,7 @@ class PersistentDict(persistent.Persistent, IterableUserDict): ...@@ -39,6 +39,7 @@ class PersistentDict(persistent.Persistent, IterableUserDict):
__super_clear = IterableUserDict.clear __super_clear = IterableUserDict.clear
__super_update = IterableUserDict.update __super_update = IterableUserDict.update
__super_setdefault = IterableUserDict.setdefault __super_setdefault = IterableUserDict.setdefault
__super_pop = IterableUserDict.pop
__super_popitem = IterableUserDict.popitem __super_popitem = IterableUserDict.popitem
__super_p_init = persistent.Persistent.__init__ __super_p_init = persistent.Persistent.__init__
...@@ -72,6 +73,10 @@ class PersistentDict(persistent.Persistent, IterableUserDict): ...@@ -72,6 +73,10 @@ class PersistentDict(persistent.Persistent, IterableUserDict):
self._p_changed = True self._p_changed = True
return self.__super_setdefault(key, failobj) return self.__super_setdefault(key, failobj)
def pop(self, key, *args):
    # Flag the object as changed *before* delegating to
    # IterableUserDict.pop, so the removal is registered with the
    # persistence machinery even though the underlying dict is
    # mutated in place.  *args optionally carries a default value,
    # mirroring dict.pop(key[, default]).
    self._p_changed = True
    return self.__super_pop(key, *args)
def popitem(self): def popitem(self):
self._p_changed = True self._p_changed = True
return self.__super_popitem() return self.__super_popitem()
...@@ -167,7 +167,9 @@ class IPersistent(Interface): ...@@ -167,7 +167,9 @@ class IPersistent(Interface):
It is up to the data manager to assign this. It is up to the data manager to assign this.
The special value None is reserved to indicate that an object The special value None is reserved to indicate that an object
id has not been assigned. Non-None object ids must be strings. id has not been assigned. Non-None object ids must be non-empty
strings. The 8-byte string '\0'*8 (8 NUL bytes) is reserved to
identify the database root object.
""") """)
_p_changed = Attribute( _p_changed = Attribute(
......
...@@ -41,6 +41,8 @@ class PersistentMapping(UserDict, persistent.Persistent): ...@@ -41,6 +41,8 @@ class PersistentMapping(UserDict, persistent.Persistent):
__super_clear = UserDict.clear __super_clear = UserDict.clear
__super_update = UserDict.update __super_update = UserDict.update
__super_setdefault = UserDict.setdefault __super_setdefault = UserDict.setdefault
__super_pop = UserDict.pop
__super_popitem = UserDict.popitem
def __delitem__(self, key): def __delitem__(self, key):
self.__super_delitem(key) self.__super_delitem(key)
...@@ -66,14 +68,13 @@ class PersistentMapping(UserDict, persistent.Persistent): ...@@ -66,14 +68,13 @@ class PersistentMapping(UserDict, persistent.Persistent):
self._p_changed = 1 self._p_changed = 1
return self.__super_setdefault(key, failobj) return self.__super_setdefault(key, failobj)
try: def pop(self, key, *args):
__super_popitem = UserDict.popitem self._p_changed = 1
except AttributeError: return self.__super_pop(key, *args)
pass
else: def popitem(self):
def popitem(self): self._p_changed = 1
self._p_changed = 1 return self.__super_popitem()
return self.__super_popitem()
# __iter__ was added in ZODB 3.4.2, but should have been added long # __iter__ was added in ZODB 3.4.2, but should have been added long
# before. We could inherit from Python's IterableUserDict instead # before. We could inherit from Python's IterableUserDict instead
......
This diff is collapsed.
...@@ -16,6 +16,9 @@ import unittest ...@@ -16,6 +16,9 @@ import unittest
from persistent import Persistent from persistent import Persistent
from persistent.interfaces import IPersistent from persistent.interfaces import IPersistent
# Confusing: ZODB doesn't use this file. It appears to be used only
# by Zope3, where it's imported by zope/app/schema/tests/test_wrapper.py.
try: try:
import zope.interface import zope.interface
except ImportError: except ImportError:
...@@ -115,8 +118,10 @@ class Test(unittest.TestCase): ...@@ -115,8 +118,10 @@ class Test(unittest.TestCase):
self.assertEqual(dm.called, 1) self.assertEqual(dm.called, 1)
def testGhostChanged(self): def testGhostChanged(self):
# An object is a ghost, and it's _p_changed it set to True. # If an object is a ghost and its _p_changed is set to True (any
# This assignment should have no effect. # true value), it should activate (unghostify) the object. This
# behavior is new in ZODB 3.6; before then, an attempt to do
# "ghost._p_changed = True" was ignored.
p = self.klass() p = self.klass()
p._p_oid = 1 p._p_oid = 1
dm = DM() dm = DM()
...@@ -124,7 +129,7 @@ class Test(unittest.TestCase): ...@@ -124,7 +129,7 @@ class Test(unittest.TestCase):
p._p_deactivate() p._p_deactivate()
self.assertEqual(p._p_changed, None) self.assertEqual(p._p_changed, None)
p._p_changed = True p._p_changed = True
self.assertEqual(p._p_changed, None) self.assertEqual(p._p_changed, 1)
def testRegistrationFailure(self): def testRegistrationFailure(self):
p = self.klass() p = self.klass()
......
##############################################################################
#
# Copyright (c) 2005 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the mapping interface to PersistentMapping
"""
import unittest
from persistent.mapping import PersistentMapping
# Sample plain dicts of increasing size; the tests below compare a
# PersistentMapping built from each against the plain dict itself.
l0 = {}
l1 = {0:0}
l2 = {0:0, 1:1}
class TestPMapping(unittest.TestCase):
    """Exercise the full mapping protocol of PersistentMapping.

    Compares PersistentMapping instances built from the module-level
    sample dicts (l0, l1, l2) against the plain dicts themselves.
    """

    def testTheWorld(self):
        # Test constructors
        u = PersistentMapping()
        u0 = PersistentMapping(l0)
        u1 = PersistentMapping(l1)
        u2 = PersistentMapping(l2)

        uu = PersistentMapping(u)
        uu0 = PersistentMapping(u0)
        uu1 = PersistentMapping(u1)
        uu2 = PersistentMapping(u2)

        # Constructing from an arbitrary object that only supplies
        # items(), and from a sequence of key/value pairs.
        class OtherMapping:
            def __init__(self, initmapping):
                self.__data = initmapping
            def items(self):
                return self.__data.items()

        v0 = PersistentMapping(OtherMapping(u0))
        vv = PersistentMapping([(0, 0), (1, 1)])

        # Test __repr__
        eq = self.assertEqual

        eq(str(u0), str(l0), "str(u0) == str(l0)")
        eq(repr(u1), repr(l1), "repr(u1) == repr(l1)")
        # Was written with backquotes (`u2`); repr() is equivalent and
        # doesn't rely on the deprecated backquote syntax.
        eq(repr(u2), repr(l2), "repr(u2) == repr(l2)")

        # Test __cmp__ and __len__
        def mycmp(a, b):
            # Normalize cmp()'s result to exactly -1/0/1 so values from
            # different comparisons can be compared to each other.
            r = cmp(a, b)
            if r < 0: return -1
            if r > 0: return 1
            return r

        all = [l0, l1, l2, u, u0, u1, u2, uu, uu0, uu1, uu2]
        for a in all:
            for b in all:
                eq(mycmp(a, b), mycmp(len(a), len(b)),
                   "mycmp(a, b) == mycmp(len(a), len(b))")

        # Test __getitem__
        for i in range(len(u2)):
            eq(u2[i], i, "u2[i] == i")

        # Test get
        for i in range(len(u2)):
            eq(u2.get(i), i, "u2.get(i) == i")
            eq(u2.get(i, 5), i, "u2.get(i, 5) == i")
        for i in min(u2)-1, max(u2)+1:
            eq(u2.get(i), None, "u2.get(i) == None")
            eq(u2.get(i, 5), 5, "u2.get(i, 5) == 5")

        # Test __setitem__
        uu2[0] = 0
        uu2[1] = 100
        uu2[2] = 200

        # Test __delitem__
        del uu2[1]
        del uu2[0]
        try:
            del uu2[0]
        except KeyError:
            pass
        else:
            # Was `raise TestFailed(...)`: TestFailed is not defined or
            # imported anywhere in this module, which turned a genuine
            # test failure into a NameError.  Use the unittest API.
            self.fail("uu2[0] shouldn't be deletable")

        # Test __contains__
        for i in u2:
            self.failUnless(i in u2, "i in u2")
        for i in min(u2)-1, max(u2)+1:
            self.failUnless(i not in u2, "i not in u2")

        # Test update
        l = {"a":"b"}
        u = PersistentMapping(l)
        u.update(u2)
        for i in u:
            self.failUnless(i in l or i in u2, "i in l or i in u2")
        for i in l:
            self.failUnless(i in u, "i in u")
        for i in u2:
            self.failUnless(i in u, "i in u")

        # Test setdefault
        x = u2.setdefault(0, 5)
        eq(x, 0, "u2.setdefault(0, 5) == 0")
        x = u2.setdefault(5, 5)
        eq(x, 5, "u2.setdefault(5, 5) == 5")
        self.failUnless(5 in u2, "5 in u2")

        # Test pop
        x = u2.pop(1)
        eq(x, 1, "u2.pop(1) == 1")
        self.failUnless(1 not in u2, "1 not in u2")
        try:
            u2.pop(1)
        except KeyError:
            pass
        else:
            # See the note above: self.fail, not the undefined TestFailed.
            self.fail("1 should not be poppable from u2")
        x = u2.pop(1, 7)
        eq(x, 7, "u2.pop(1, 7) == 7")

        # Test popitem
        # Materialize the items *before* popitem mutates the mapping, so
        # the membership check below is not affected by the mutation.
        items = list(u2.items())
        key, value = u2.popitem()
        self.failUnless((key, value) in items, "key, value in items")
        self.failUnless(key not in u2, "key not in u2")

        # Test clear
        u2.clear()
        eq(u2, {}, "u2 == {}")
def test_suite():
    """Build and return the suite of every test in TestPMapping."""
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestPMapping)
if __name__ == "__main__":
    # Allow running this module directly as a test script.
    unittest.main(testLoader=unittest.TestLoader())
...@@ -27,6 +27,7 @@ You must specify either -p and -h or -U. ...@@ -27,6 +27,7 @@ You must specify either -p and -h or -U.
""" """
import getopt import getopt
import logging
import socket import socket
import sys import sys
import time import time
...@@ -41,6 +42,18 @@ from ZEO.ClientStorage import ClientStorage ...@@ -41,6 +42,18 @@ from ZEO.ClientStorage import ClientStorage
ZEO_VERSION = 2 ZEO_VERSION = 2
def setup_logging():
    """Configure the root logger to report ERROR-and-above to stderr.

    Messages are prefixed with a separator line and a timestamp so
    that server-check failures stand out in the output.
    """
    formatter = logging.Formatter(
        "------\n%(asctime)s %(levelname)s %(name)s %(message)s",
        "%Y-%m-%dT%H:%M:%S")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.ERROR)
    root_logger.addHandler(stream_handler)
def check_server(addr, storage, write): def check_server(addr, storage, write):
t0 = time.time() t0 = time.time()
if ZEO_VERSION == 2: if ZEO_VERSION == 2:
...@@ -122,6 +135,7 @@ def main(): ...@@ -122,6 +135,7 @@ def main():
usage() usage()
addr = host, port addr = host, port
setup_logging()
check_server(addr, storage, write) check_server(addr, storage, write)
if __name__ == "__main__": if __name__ == "__main__":
......
This package is currently a facade of the ZODB.Transaction module. ============
Transactions
============
It exists to support: This package contains a generic transaction implementation for Python. It is
mainly used by the ZODB, though.
- Application code that uses the ZODB 4 transaction API Note that the data manager API, ``transaction.interfaces.IDataManager``,
- ZODB4-style data managers (transaction.interfaces.IDataManager)
Note that the data manager API, transaction.interfaces.IDataManager,
is syntactically simple, but semantically complex. The semantics is syntactically simple, but semantically complex. The semantics
were not easy to express in the interface. This could probably use were not easy to express in the interface. This could probably use
more work. The semantics are presented in detail through examples of more work. The semantics are presented in detail through examples of
a sample data manager in transaction.tests.test_SampleDataManager. a sample data manager in ``transaction.tests.test_SampleDataManager``.
...@@ -25,10 +25,3 @@ begin = manager.begin ...@@ -25,10 +25,3 @@ begin = manager.begin
commit = manager.commit commit = manager.commit
abort = manager.abort abort = manager.abort
savepoint = manager.savepoint savepoint = manager.savepoint
def get_transaction():
from ZODB.utils import deprecated36
deprecated36(""" use transaction.get() instead of get_transaction().
transaction.commit() is a shortcut spelling of transaction.get().commit(),
and transaction.abort() of transaction.get().abort().""")
return get()
...@@ -30,7 +30,7 @@ registers its _p_jar attribute. TODO: explain adapter ...@@ -30,7 +30,7 @@ registers its _p_jar attribute. TODO: explain adapter
Subtransactions Subtransactions
--------------- ---------------
Note: Suntransactions are deprecated! Use savepoint/rollback instead. Note: Subtransactions are deprecated! Use savepoint/rollback instead.
A subtransaction applies the transaction notion recursively. It A subtransaction applies the transaction notion recursively. It
allows a set of modifications within a transaction to be committed or allows a set of modifications within a transaction to be committed or
...@@ -115,6 +115,20 @@ pre-commit hook is available for such use cases: use addBeforeCommitHook(), ...@@ -115,6 +115,20 @@ pre-commit hook is available for such use cases: use addBeforeCommitHook(),
passing it a callable and arguments. The callable will be called with its passing it a callable and arguments. The callable will be called with its
arguments at the start of the commit (but not for substransaction commits). arguments at the start of the commit (but not for substransaction commits).
After-commit hook
------------------
Sometimes, applications want to execute code after a transaction is
committed or aborted.  For example, one might want to launch
non-transactional code after a successful commit, or to launch
asynchronous code afterwards.  A post-commit hook is available for
such use cases: use addAfterCommitHook(), passing it a callable and
arguments.  The callable will be called with a Boolean value
representing the status of the commit operation as first argument
(true if successful, false if aborted) preceding its arguments at the
start of the commit (but not for subtransaction commits).
Error handling Error handling
-------------- --------------
...@@ -241,6 +255,9 @@ class Transaction(object): ...@@ -241,6 +255,9 @@ class Transaction(object):
# List of (hook, args, kws) tuples added by addBeforeCommitHook(). # List of (hook, args, kws) tuples added by addBeforeCommitHook().
self._before_commit = [] self._before_commit = []
# List of (hook, args, kws) tuples added by addAfterCommitHook().
self._after_commit = []
# Raise TransactionFailedError, due to commit()/join()/register() # Raise TransactionFailedError, due to commit()/join()/register()
# getting called when the current transaction has already suffered # getting called when the current transaction has already suffered
# a commit/savepoint failure. # a commit/savepoint failure.
...@@ -292,7 +309,7 @@ class Transaction(object): ...@@ -292,7 +309,7 @@ class Transaction(object):
savepoint = Savepoint(self, optimistic, *self._resources) savepoint = Savepoint(self, optimistic, *self._resources)
except: except:
self._cleanup(self._resources) self._cleanup(self._resources)
self._saveCommitishError() # reraises! self._saveAndRaiseCommitishError() # reraises!
if self._savepoint2index is None: if self._savepoint2index is None:
self._savepoint2index = weakref.WeakKeyDictionary() self._savepoint2index = weakref.WeakKeyDictionary()
...@@ -345,32 +362,25 @@ class Transaction(object): ...@@ -345,32 +362,25 @@ class Transaction(object):
assert id(obj) not in map(id, adapter.objects) assert id(obj) not in map(id, adapter.objects)
adapter.objects.append(obj) adapter.objects.append(obj)
def begin(self):
from ZODB.utils import deprecated36
deprecated36("Transaction.begin() should no longer be used; use "
"the begin() method of a transaction manager.")
if (self._resources or self._synchronizers):
self.abort()
# Else aborting wouldn't do anything, except if _manager is non-None,
# in which case it would do nothing besides uselessly free() this
# transaction.
def commit(self, subtransaction=_marker, deprecation_wng=True): def commit(self, subtransaction=_marker, deprecation_wng=True):
if subtransaction is _marker: if subtransaction is _marker:
subtransaction = 0 subtransaction = 0
elif deprecation_wng: elif deprecation_wng:
from ZODB.utils import deprecated37 from ZODB.utils import deprecated37
deprecated37("subtransactions are deprecated; use " deprecated37("subtransactions are deprecated; instead of "
"transaction.savepoint() instead of " "transaction.commit(1), use "
"transaction.commit(1)") "transaction.savepoint(optimistic=True) in "
"contexts where a subtransaction abort will never "
"occur, or sp=transaction.savepoint() if later "
"rollback is possible and then sp.rollback() "
"instead of transaction.abort(1)")
if self._savepoint2index: if self._savepoint2index:
self._invalidate_all_savepoints() self._invalidate_all_savepoints()
if subtransaction: if subtransaction:
# TODO deprecate subtransactions # TODO deprecate subtransactions
self._subtransaction_savepoint = self.savepoint(1) self._subtransaction_savepoint = self.savepoint(optimistic=True)
return return
if self.status is Status.COMMITFAILED: if self.status is Status.COMMITFAILED:
...@@ -383,16 +393,19 @@ class Transaction(object): ...@@ -383,16 +393,19 @@ class Transaction(object):
try: try:
self._commitResources() self._commitResources()
self.status = Status.COMMITTED
except: except:
self._saveCommitishError() # This raises! t, v, tb = self._saveAndGetCommitishError()
self._callAfterCommitHooks(status=False)
self.status = Status.COMMITTED raise t, v, tb
if self._manager: else:
self._manager.free(self) if self._manager:
self._synchronizers.map(lambda s: s.afterCompletion(self)) self._manager.free(self)
self._synchronizers.map(lambda s: s.afterCompletion(self))
self._callAfterCommitHooks(status=True)
self.log.debug("commit") self.log.debug("commit")
def _saveCommitishError(self): def _saveAndGetCommitishError(self):
self.status = Status.COMMITFAILED self.status = Status.COMMITFAILED
# Save the traceback for TransactionFailedError. # Save the traceback for TransactionFailedError.
ft = self._failure_traceback = StringIO() ft = self._failure_traceback = StringIO()
...@@ -403,6 +416,10 @@ class Transaction(object): ...@@ -403,6 +416,10 @@ class Transaction(object):
traceback.print_tb(tb, None, ft) traceback.print_tb(tb, None, ft)
# Append the exception type and value. # Append the exception type and value.
ft.writelines(traceback.format_exception_only(t, v)) ft.writelines(traceback.format_exception_only(t, v))
return t, v, tb
def _saveAndRaiseCommitishError(self):
t, v, tb = self._saveAndGetCommitishError()
raise t, v, tb raise t, v, tb
def getBeforeCommitHooks(self): def getBeforeCommitHooks(self):
...@@ -428,6 +445,44 @@ class Transaction(object): ...@@ -428,6 +445,44 @@ class Transaction(object):
hook(*args, **kws) hook(*args, **kws)
self._before_commit = [] self._before_commit = []
def getAfterCommitHooks(self):
return iter(self._after_commit)
def addAfterCommitHook(self, hook, args=(), kws=None):
if kws is None:
kws = {}
self._after_commit.append((hook, tuple(args), kws))
def _callAfterCommitHooks(self, status=True):
# Avoid to abort anything at the end if no hooks are registred.
if not self._after_commit:
return
# Call all hooks registered, allowing further registrations
# during processing. Note that calls to addAterCommitHook() may
# add additional hooks while hooks are running, and iterating over a
# growing list is well-defined in Python.
for hook, args, kws in self._after_commit:
# The first argument passed to the hook is a Boolean value,
# true if the commit succeeded, or false if the commit aborted.
try:
hook(status, *args, **kws)
except:
# We need to catch the exceptions if we want all hooks
# to be called
self.log.error("Error in after commit hook exec in %s ",
hook, exc_info=sys.exc_info())
# The transaction is already committed. It must not have
# further effects after the commit.
for rm in self._resources:
try:
rm.abort(self)
except:
# XXX should we take further actions here ?
self.log.error("Error in abort() on manager %s",
rm, exc_info=sys.exc_info())
self._after_commit = []
self._before_commit = []
def _commitResources(self): def _commitResources(self):
# Execute the two-phase commit protocol. # Execute the two-phase commit protocol.
...@@ -450,7 +505,7 @@ class Transaction(object): ...@@ -450,7 +505,7 @@ class Transaction(object):
# TODO: do we need to make this warning stronger? # TODO: do we need to make this warning stronger?
# TODO: It would be nice if the system could be configured # TODO: It would be nice if the system could be configured
# to stop committing transactions at this point. # to stop committing transactions at this point.
self.log.critical("A storage error occured during the second " self.log.critical("A storage error occurred during the second "
"phase of the two-phase commit. Resources " "phase of the two-phase commit. Resources "
"may be in an inconsistent state.") "may be in an inconsistent state.")
raise raise
...@@ -694,7 +749,7 @@ class Savepoint: ...@@ -694,7 +749,7 @@ class Savepoint:
savepoint.rollback() savepoint.rollback()
except: except:
# Mark the transaction as failed. # Mark the transaction as failed.
transaction._saveCommitishError() # reraises! transaction._saveAndRaiseCommitishError() # reraises!
class AbortSavepoint: class AbortSavepoint:
......
...@@ -156,7 +156,7 @@ class ITransaction(zope.interface.Interface): ...@@ -156,7 +156,7 @@ class ITransaction(zope.interface.Interface):
"""Add extension data to the transaction. """Add extension data to the transaction.
name is the name of the extension property to set, of Python type name is the name of the extension property to set, of Python type
str; value must be pickleable. Multiple calls may be made to set str; value must be picklable. Multiple calls may be made to set
multiple extension properties, provided the names are distinct. multiple extension properties, provided the names are distinct.
Storages record the extension data, as meta-data, when a transaction Storages record the extension data, as meta-data, when a transaction
...@@ -232,6 +232,43 @@ class ITransaction(zope.interface.Interface): ...@@ -232,6 +232,43 @@ class ITransaction(zope.interface.Interface):
by a top-level transaction commit. by a top-level transaction commit.
""" """
def addAfterCommitHook(hook, args=(), kws=None):
"""Register a hook to call after a transaction commit attempt.
The specified hook function will be called after the transaction
commit succeeds or aborts. The first argument passed to the hook
is a Boolean value, true if the commit succeeded, or false if the
commit aborted. `args` specifies additional positional, and `kws`
keyword, arguments to pass to the hook. `args` is a sequence of
positional arguments to be passed, defaulting to an empty tuple
(only the true/false success argument is passed). `kws` is a
dictionary of keyword argument names and values to be passed, or
the default None (no keyword arguments are passed).
Multiple hooks can be registered and will be called in the order they
were registered (first registered, first called). This method can
also be called from a hook: an executing hook can register more
hooks. Applications should take care to avoid creating infinite loops
by recursively registering hooks.
Hooks are called only for a top-level commit. A subtransaction
commit or savepoint creation does not call any hooks. Calling a
hook "consumes" its registration: hook registrations do not
persist across transactions. If it's desired to call the same
hook on every transaction commit, then addAfterCommitHook() must be
called with that hook during every transaction; in such a case
consider registering a synchronizer object via a TransactionManager's
registerSynch() method instead.
"""
def getAfterCommitHooks():
"""Return iterable producing the registered addAfterCommit hooks.
A triple (hook, args, kws) is produced for each registered hook.
The hooks are produced in the order in which they would be invoked
by a top-level transaction commit.
"""
class ITransactionDeprecated(zope.interface.Interface): class ITransactionDeprecated(zope.interface.Interface):
"""Deprecated parts of the transaction API.""" """Deprecated parts of the transaction API."""
......
...@@ -12,17 +12,17 @@ a transaction allowing: ...@@ -12,17 +12,17 @@ a transaction allowing:
Savepoints make it possible to write atomic subroutines that don't Savepoints make it possible to write atomic subroutines that don't
make top-level transaction commitments. make top-level transaction commitments.
Applications Applications
------------ ------------
To demonstrate how savepoints work with transactions, we've provided a To demonstrate how savepoints work with transactions, we've provided a sample
sample data manager implementation that provides savepoint support. data manager implementation that provides savepoint support. The primary
The primary purpose of this data manager is to provide code that can purpose of this data manager is to provide code that can be read to understand
be read to understand how savepoints work. The secondary purpose is to how savepoints work. The secondary purpose is to provide support for
provide support for demonstrating the correct operation of savepoint demonstrating the correct operation of savepoint support within the
support within the transaction system. This data manager is very transaction system. This data manager is very simple. It provides flat
simple. It provides flat storage of named immutable values, like strings storage of named immutable values, like strings and numbers.
and numbers.
>>> import transaction.tests.savepointsample >>> import transaction.tests.savepointsample
>>> dm = transaction.tests.savepointsample.SampleSavepointDataManager() >>> dm = transaction.tests.savepointsample.SampleSavepointDataManager()
...@@ -43,13 +43,13 @@ and abort changes: ...@@ -43,13 +43,13 @@ and abort changes:
>>> dm['name'] >>> dm['name']
'bob' 'bob'
Now, let's look at an application that manages funds for people. Now, let's look at an application that manages funds for people. It allows
It allows deposits and debits to be entered for multiple people. deposits and debits to be entered for multiple people. It accepts a sequence
It accepts a sequence of entries and generates a sequence of status of entries and generates a sequence of status messages. For each entry, it
messages. For each entry, it applies the change and then validates applies the change and then validates the user's account. If the user's
the user's account. If the user's account is invalid, we roll back account is invalid, we roll back the change for that entry. The success or
the change for that entry. The success or failure of an entry is failure of an entry is indicated in the output status. First we'll initialize
indicated in the output status. First we'll initialize some accounts: some accounts:
>>> dm['bob-balance'] = 0.0 >>> dm['bob-balance'] = 0.0
>>> dm['bob-credit'] = 0.0 >>> dm['bob-credit'] = 0.0
...@@ -63,8 +63,8 @@ Now, we'll define a validation function to validate an account: ...@@ -63,8 +63,8 @@ Now, we'll define a validation function to validate an account:
... if dm[name+'-balance'] + dm[name+'-credit'] < 0: ... if dm[name+'-balance'] + dm[name+'-credit'] < 0:
... raise ValueError('Overdrawn', name) ... raise ValueError('Overdrawn', name)
And a function to apply entries. If the function fails in some And a function to apply entries. If the function fails in some unexpected
unexpected way, it rolls back all of its changes and prints the error: way, it rolls back all of its changes and prints the error:
>>> def apply_entries(entries): >>> def apply_entries(entries):
... savepoint = transaction.savepoint() ... savepoint = transaction.savepoint()
...@@ -118,9 +118,9 @@ If we provide entries that cause an unexpected error: ...@@ -118,9 +118,9 @@ If we provide entries that cause an unexpected error:
Updated sally Updated sally
Unexpected exception unsupported operand type(s) for +=: 'float' and 'str' Unexpected exception unsupported operand type(s) for +=: 'float' and 'str'
Because the apply_entries used a savepoint for the entire function, Because the apply_entries used a savepoint for the entire function, it was
it was able to rollback the partial changes without rolling back able to rollback the partial changes without rolling back changes made in the
changes made in the previous call to apply_entries: previous call to ``apply_entries``:
>>> dm['bob-balance'] >>> dm['bob-balance']
30.0 30.0
...@@ -195,11 +195,12 @@ However, using a savepoint invalidates any savepoints that come after it: ...@@ -195,11 +195,12 @@ However, using a savepoint invalidates any savepoints that come after it:
>>> transaction.abort() >>> transaction.abort()
Databases without savepoint support Databases without savepoint support
----------------------------------- -----------------------------------
Normally it's an error to use savepoints with databases that don't Normally it's an error to use savepoints with databases that don't support
support savepoints: savepoints:
>>> dm_no_sp = transaction.tests.savepointsample.SampleDataManager() >>> dm_no_sp = transaction.tests.savepointsample.SampleDataManager()
>>> dm_no_sp['name'] = 'bob' >>> dm_no_sp['name'] = 'bob'
...@@ -212,10 +213,10 @@ support savepoints: ...@@ -212,10 +213,10 @@ support savepoints:
>>> transaction.abort() >>> transaction.abort()
However, a flag can be passed to the transaction savepoint method to However, a flag can be passed to the transaction savepoint method to indicate
indicate that databases without savepoint support should be tolerated that databases without savepoint support should be tolerated until a savepoint
until a savepoint is rolled back. This allows transactions to proceed is rolled back. This allows transactions to proceed if there are no reasons
if there are no reasons to roll back: to roll back:
>>> dm_no_sp['name'] = 'sally' >>> dm_no_sp['name'] = 'sally'
>>> savepoint = transaction.savepoint(1) >>> savepoint = transaction.savepoint(1)
...@@ -231,13 +232,14 @@ if there are no reasons to roll back: ...@@ -231,13 +232,14 @@ if there are no reasons to roll back:
... ...
TypeError: ('Savepoints unsupported', {'name': 'sam'}) TypeError: ('Savepoints unsupported', {'name': 'sam'})
Failures Failures
-------- --------
If a failure occurs when creating or rolling back a savepoint, the If a failure occurs when creating or rolling back a savepoint, the transaction
transaction state will be uncertain and the transaction will become state will be uncertain and the transaction will become uncommitable. From
uncommitable. From that point on, most transaction operations, that point on, most transaction operations, including commit, will fail until
including commit, will fail until the transaction is aborted. the transaction is aborted.
In the previous example, we got an error when we tried to rollback the In the previous example, we got an error when we tried to rollback the
savepoint. If we try to commit the transaction, the commit will fail: savepoint. If we try to commit the transaction, the commit will fail:
...@@ -254,8 +256,8 @@ We have to abort it to make any progress: ...@@ -254,8 +256,8 @@ We have to abort it to make any progress:
>>> transaction.abort() >>> transaction.abort()
Similarly, in our earlier example, where we tried to take a savepoint Similarly, in our earlier example, where we tried to take a savepoint with a
with a data manager that didn't support savepoints: data manager that didn't support savepoints:
>>> dm_no_sp['name'] = 'sally' >>> dm_no_sp['name'] = 'sally'
>>> dm['name'] = 'sally' >>> dm['name'] = 'sally'
......
############################################################################## ##############################################################################
# #
# Copyright (c) 2001, 2002 Zope Corporation and Contributors. # Copyright (c) 2001, 2002, 2005 Zope Corporation and Contributors.
# All Rights Reserved. # All Rights Reserved.
# #
# This software is subject to the provisions of the Zope Public License, # This software is subject to the provisions of the Zope Public License,
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
# FOR A PARTICULAR PURPOSE # FOR A PARTICULAR PURPOSE
# #
############################################################################## ##############################################################################
"""Test tranasction behavior for variety of cases. """Test transaction behavior for variety of cases.
I wrote these unittests to investigate some odd transaction I wrote these unittests to investigate some odd transaction
behavior when doing unittests of integrating non sub transaction behavior when doing unittests of integrating non sub transaction
...@@ -241,7 +241,6 @@ class TransactionTests(unittest.TestCase): ...@@ -241,7 +241,6 @@ class TransactionTests(unittest.TestCase):
assert self.nosub1._p_jar.ctpc_abort == 1 assert self.nosub1._p_jar.ctpc_abort == 1
# last test, check the hosing mechanism # last test, check the hosing mechanism
## def testHoserStoppage(self): ## def testHoserStoppage(self):
...@@ -728,6 +727,268 @@ def test_addBeforeCommitHook(): ...@@ -728,6 +727,268 @@ def test_addBeforeCommitHook():
"arg '-' kw1 'no_kw1' kw2 'no_kw2'", "arg '-' kw1 'no_kw1' kw2 'no_kw2'",
'rec0'] 'rec0']
>>> reset_log() >>> reset_log()
When modifing persitent objects within before commit hooks
modifies the objects, of course :)
Start a new transaction
>>> t = transaction.begin()
Create a DB instance and add a IOBTree within
>>> from ZODB.tests.util import DB
>>> from ZODB.tests.util import P
>>> db = DB()
>>> con = db.open()
>>> root = con.root()
>>> root['p'] = P('julien')
>>> p = root['p']
>>> p.name
'julien'
This hook will get the object from the `DB` instance and change
the flag attribute.
>>> def hookmodify(status, arg=None, kw1='no_kw1', kw2='no_kw2'):
... p.name = 'jul'
Now register this hook and commit.
>>> t.addBeforeCommitHook(hookmodify, (p, 1))
>>> transaction.commit()
Nothing should have changed since it should have been aborted.
>>> p.name
'jul'
>>> db.close()
"""
def test_addAfterCommitHook():
"""Test addAfterCommitHook.
Let's define a hook to call, and a way to see that it was called.
>>> log = []
>>> def reset_log():
... del log[:]
>>> def hook(status, arg='no_arg', kw1='no_kw1', kw2='no_kw2'):
... log.append("%r arg %r kw1 %r kw2 %r" % (status, arg, kw1, kw2))
Now register the hook with a transaction.
>>> import transaction
>>> t = transaction.begin()
>>> t.addAfterCommitHook(hook, '1')
We can see that the hook is indeed registered.
>>> [(hook.func_name, args, kws)
... for hook, args, kws in t.getAfterCommitHooks()]
[('hook', ('1',), {})]
When transaction commit is done, the hook is called, with its
arguments.
>>> log
[]
>>> t.commit()
>>> log
["True arg '1' kw1 'no_kw1' kw2 'no_kw2'"]
>>> reset_log()
A hook's registration is consumed whenever the hook is called. Since
the hook above was called, it's no longer registered:
>>> len(list(t.getAfterCommitHooks()))
0
>>> transaction.commit()
>>> log
[]
The hook is only called after a full commit, not for a savepoint or
subtransaction.
>>> t = transaction.begin()
>>> t.addAfterCommitHook(hook, 'A', dict(kw1='B'))
>>> dummy = t.savepoint()
>>> log
[]
>>> t.commit(subtransaction=True)
>>> log
[]
>>> t.commit()
>>> log
["True arg 'A' kw1 'B' kw2 'no_kw2'"]
>>> reset_log()
If a transaction is aborted, no hook is called.
>>> t = transaction.begin()
>>> t.addAfterCommitHook(hook, ["OOPS!"])
>>> transaction.abort()
>>> log
[]
>>> transaction.commit()
>>> log
[]
The hook is called after the commit is done, so even if the
commit fails the hook will have been called. To provoke failures in
commit, we'll add failing resource manager to the transaction.
>>> class CommitFailure(Exception):
... pass
>>> class FailingDataManager:
... def tpc_begin(self, txn, sub=False):
... raise CommitFailure
... def abort(self, txn):
... pass
>>> t = transaction.begin()
>>> t.join(FailingDataManager())
>>> t.addAfterCommitHook(hook, '2')
>>> t.commit()
Traceback (most recent call last):
...
CommitFailure
>>> log
["False arg '2' kw1 'no_kw1' kw2 'no_kw2'"]
>>> reset_log()
Let's register several hooks.
>>> t = transaction.begin()
>>> t.addAfterCommitHook(hook, '4', dict(kw1='4.1'))
>>> t.addAfterCommitHook(hook, '5', dict(kw2='5.2'))
They are returned in the same order by getAfterCommitHooks.
>>> [(hook.func_name, args, kws) #doctest: +NORMALIZE_WHITESPACE
... for hook, args, kws in t.getAfterCommitHooks()]
[('hook', ('4',), {'kw1': '4.1'}),
('hook', ('5',), {'kw2': '5.2'})]
And commit also calls them in this order.
>>> t.commit()
>>> len(log)
2
>>> log #doctest: +NORMALIZE_WHITESPACE
["True arg '4' kw1 '4.1' kw2 'no_kw2'",
"True arg '5' kw1 'no_kw1' kw2 '5.2'"]
>>> reset_log()
While executing, a hook can itself add more hooks, and they will all
be called before the real commit starts.
>>> def recurse(status, txn, arg):
... log.append('rec' + str(arg))
... if arg:
... txn.addAfterCommitHook(hook, '-')
... txn.addAfterCommitHook(recurse, (txn, arg-1))
>>> t = transaction.begin()
>>> t.addAfterCommitHook(recurse, (t, 3))
>>> transaction.commit()
>>> log #doctest: +NORMALIZE_WHITESPACE
['rec3',
"True arg '-' kw1 'no_kw1' kw2 'no_kw2'",
'rec2',
"True arg '-' kw1 'no_kw1' kw2 'no_kw2'",
'rec1',
"True arg '-' kw1 'no_kw1' kw2 'no_kw2'",
'rec0']
>>> reset_log()
If an after commit hook is raising an exception then it will log a
message at error level so that if other hooks are registered they
can be executed. We don't support execution dependencies at this level.
>>> mgr = transaction.TransactionManager()
>>> do = DataObject(mgr)
>>> def hookRaise(status, arg='no_arg', kw1='no_kw1', kw2='no_kw2'):
... raise TypeError("Fake raise")
>>> t = transaction.begin()
>>> t.addAfterCommitHook(hook, ('-', 1))
>>> t.addAfterCommitHook(hookRaise, ('-', 2))
>>> t.addAfterCommitHook(hook, ('-', 3))
>>> transaction.commit()
>>> log
["True arg '-' kw1 1 kw2 'no_kw2'", "True arg '-' kw1 3 kw2 'no_kw2'"]
>>> reset_log()
Test that the associated transaction manager has been cleanup when
after commit hooks are registered
>>> mgr = transaction.TransactionManager()
>>> do = DataObject(mgr)
>>> t = transaction.begin()
>>> len(t._manager._txns)
1
>>> t.addAfterCommitHook(hook, ('-', 1))
>>> transaction.commit()
>>> log
["True arg '-' kw1 1 kw2 'no_kw2'"]
>>> len(t._manager._txns)
0
>>> reset_log()
The transaction is already committed when the after commit hooks
will be executed. Executing the hooks must not have further
effects on persistent objects.
Start a new transaction
>>> t = transaction.begin()
Create a DB instance and add a IOBTree within
>>> from ZODB.tests.util import DB
>>> from ZODB.tests.util import P
>>> db = DB()
>>> con = db.open()
>>> root = con.root()
>>> root['p'] = P('julien')
>>> p = root['p']
>>> p.name
'julien'
This hook will get the object from the `DB` instance and change
the flag attribute.
>>> def badhook(status, arg=None, kw1='no_kw1', kw2='no_kw2'):
... p.name = 'jul'
Now register this hook and commit.
>>> t.addAfterCommitHook(badhook, (p, 1))
>>> transaction.commit()
Nothing should have changed since it should have been aborted.
>>> p.name
'julien'
>>> db.close()
""" """
def test_suite(): def test_suite():
......
...@@ -36,7 +36,11 @@ if os.path.isdir(LIB_DIR): ...@@ -36,7 +36,11 @@ if os.path.isdir(LIB_DIR):
path = LIB_DIR path = LIB_DIR
print "Running tests from", path print "Running tests from", path
sys.path.append(path) # Insert the ZODB src dir first in the sys.path to avoid a name conflict
# with zope.whatever librairies that might be installed on the Python
# version used to launch these tests.
sys.path.insert(0, path)
from zope.testing import testrunner from zope.testing import testrunner
# Persistence/__init__.py generates a long warning message about the # Persistence/__init__.py generates a long warning message about the
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment