Commit 7f42c6c7 authored by Christian Theune's avatar Christian Theune

- different cleanups

 - merged from head
 - added configuration methods to configure a blobfilestorage
 - made stuff work ;)
parent 6f66bbcb
......@@ -128,21 +128,37 @@ cZopeInterface = Extension(
sources= ['src/zope/interface/_zope_interface_coptimizations.c']
)
exts += [cPersistence, cPickleCache, TimeStamp, winlock, cZopeInterface]
cZopeProxy = Extension(
name = 'zope.proxy._zope_proxy_proxy',
sources= ['src/zope/proxy/_zope_proxy_proxy.c']
)
exts += [cPersistence,
cPickleCache,
TimeStamp,
winlock,
cZopeInterface,
cZopeProxy,
]
# The ZODB.zodb4 code is not being packaged, because it is only
# needed to convert early versions of Zope3 databases to ZODB3.
packages = ["BTrees", "BTrees.tests",
"ZEO", "ZEO.auth", "ZEO.zrpc", "ZEO.tests",
"ZODB", "ZODB.FileStorage", "ZODB.Blobs",
"ZODB", "ZODB.FileStorage", "ZODB.Blobs", "ZODB.Blobs.tests",
"ZODB.tests",
"Persistence", "Persistence.tests",
"persistent", "persistent.tests",
"transaction", "transaction.tests",
"ThreadedAsync",
"zdaemon", "zdaemon.tests",
"zope", "zope.interface", "zope.testing",
"zope",
"zope.interface", "zope.interface.tests",
"zope.proxy", "zope.proxy.tests",
"zope.testing",
"ZopeUndo", "ZopeUndo.tests",
"ZConfig", "ZConfig.tests",
"ZConfig.components",
......@@ -187,6 +203,7 @@ def copy_other_files(cmd, outputbase):
"ZODB/tests",
"zdaemon",
"zdaemon/tests",
"zope/interface", "zope/interface/tests",
]:
dir = convert_path(dir)
inputdir = os.path.join("src", dir)
......
......@@ -252,6 +252,12 @@ class BaseStorage(UndoLogCompatible):
pass
def tpc_finish(self, transaction, f=None):
# It's important that the storage calls the function we pass
# while it still has its lock. We don't want another thread
# to be able to read any updated data until we've had a chance
# to send an invalidation message to all of the other
# connections!
self._lock_acquire()
try:
if transaction is not self._transaction:
......
import os
import tempfile
from zope.interface import implements
......@@ -8,33 +9,22 @@ from ZODB.Blobs.exceptions import BlobError
from ZODB import utils
from persistent import Persistent
class TempFileHandler(object):
    """Handles holding a tempfile around.

    The tempfile is unlinked when the TempFileHandler is GCed.
    """

    def __init__(self, directory, mode):
        # Bug fix: the original `def __init__(...)` was missing the
        # trailing colon (a syntax error).
        # mkstemp returns an OS-level file descriptor plus the path.
        self.handle, self.filename = tempfile.mkstemp(dir=directory,
                                                      text=mode)

    def __del__(self):
        # Bug fix: the original bare `self.handle` expression was a
        # no-op, so the descriptor leaked.  Close it, then remove the
        # file from disk.
        os.close(self.handle)
        os.unlink(self.filename)
try:
from ZPublisher.Iterators import IStreamIterator
except ImportError:
IStreamIterator = None
class Blob(Persistent):
implements(IBlob)
def __init__(self):
self._p_blob_readers = 0
self._p_blob_writers = 0
self._p_blob_uncommitted = None
self._p_blob_data = None
_p_blob_readers = 0
_p_blob_writers = 0
_p_blob_uncommitted = None
_p_blob_data = None
def open(self, mode):
"""Returns a file(-like) object for handling the blob data."""
if mode == "r":
if self._current_filename() is None:
raise BlobError, "Blob does not exist."
......@@ -43,17 +33,17 @@ class Blob(Persistent):
raise BlobError, "Already opened for writing."
self._p_blob_readers += 1
return BlobTempFile(self._current_filename(), "rb", self)
return BlobFile(self._current_filename(), "rb", self)
if mode == "w":
if self._p_blob_readers != 0:
raise BlobError, "Already opened for reading."
if self._p_blob_uncommitted is None:
self._p_blob_uncommitted = self._get_uncommitted_filename()
self._p_blob_uncommitted = utils.mktemp()
self._p_blob_writers += 1
return BlobTempFile(self._p_blob_uncommitted, "wb", self)
return BlobFile(self._p_blob_uncommitted, "wb", self)
if mode =="a":
if self._current_filename() is None:
......@@ -62,15 +52,15 @@ class Blob(Persistent):
if self._p_blob_readers != 0:
raise BlobError, "Already opened for reading."
if not self._p_blob_uncommitted:
if self._p_blob_uncommitted is None:
# Create a new working copy
self._p_blob_uncommitted = self._get_uncommitted_filename()
uncommitted = BlobTempFile(self._p_blob_uncommitted, "wb", self)
self._p_blob_uncommitted = utils.mktmp()
uncommitted = BlobFile(self._p_blob_uncommitted, "wb", self)
utils.cp(file(self._p_blob_data), uncommitted)
uncommitted.seek(0)
else:
# Re-use existing working copy
uncommitted = BlobTempFile(self._p_blob_uncommitted, "ab", self)
uncommitted = BlobFile(self._p_blob_uncommitted, "ab", self)
self._p_blob_writers +=1
return uncommitted
......@@ -80,28 +70,29 @@ class Blob(Persistent):
def _current_filename(self):
return self._p_blob_uncommitted or self._p_blob_data
def _get_uncommitted_filename(self):
return os.tempnam()
class BlobFileBase:
class BlobFile(file):
# XXX those files should be created in the same partition as
# the storage later puts them to avoid copying them ...
if IStreamIterator is not None:
__implements__ = (IStreamIterator,)
def __init__(self, name, mode, blob):
file.__init__(self, name, mode)
super(BlobFile, self).__init__(name, mode)
self.blob = blob
self.streamsize = 1<<16
def write(self, data):
file.write(self, data)
super(BlobFile, self).write(data)
self.blob._p_changed = 1
def writelines(self, lines):
file.writelines(self, lines)
super(BlobFile, self).writelines(lines)
self.blob._p_changed = 1
def truncate(self, size):
file.truncate(self, size)
super(BlobFile, self).truncate(size)
self.blob._p_changed = 1
def close(self):
......@@ -110,15 +101,20 @@ class BlobFileBase:
self.blob._p_blob_writers -= 1
else:
self.blob._p_blob_readers -= 1
file.close(self)
super(BlobFile, self).close()
def next(self):
data = self.read(self.streamsize)
if not data:
self.blob._p_blob_readers -= 1
raise StopIteration
return data
class BlobFile(BlobFileBase, file):
pass
def __len__(self):
cur_pos = self.tell()
self.seek(0, 2)
size = self.tell()
self.seek(cur_pos, 0)
return size
class BlobTempFile(BlobFileBase, NamedTempFile)
pass
def copy_file(old, new):
    """Copy all data from file object `old` to `new` in 4 KiB chunks,
    then rewind `new` to the beginning.

    Bug fix: the original iterated `for chunk in old.read(4096)`, which
    loops over the *characters* of a single read and silently truncates
    anything past the first 4096 bytes.
    """
    while True:
        chunk = old.read(4096)
        if not chunk:
            break
        new.write(chunk)
    # Leave the copy positioned at the start so callers can read it back.
    new.seek(0)
......@@ -12,41 +12,85 @@
#
##############################################################################
import os
from zope.interface import implements
from zope.proxy import ProxyBase
from zope.proxy import ProxyBase, getProxiedObject
from ZODB.interfaces import \
IStorageAdapter, IUndoableStorage, IVersioningStorage, IBlobStorage
from ZODB import utils
from ZODB.Blobs.interfaces import IBlobStorage, IBlob
class BlobStorage(ProxyBase):
"""A storage to support blobs."""
implements(IBlobStorage)
__slots__ = ('base_directory',)
__slots__ = ('base_directory', 'dirty_oids')
def __new__(self, base_directory, storage):
return ProxyBase.__new__(self, storage)
def __init__(self, base_directory, storage):
def __init__(self, base_directory, storage):
# TODO Complain if storage is ClientStorage
ProxyBase.__init__(self, storage)
self.base_directory = base_directory
self.dirty_oids = []
def storeBlob(oid, serial, data, blob, version, transaction):
def storeBlob(self, oid, oldserial, data, blobfilename, version, transaction):
"""Stores data that has a BLOB attached."""
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
serial = self.store(oid, oldserial, data, version, transaction)
assert isinstance(serial, str) # XXX in theory serials could be
# something else
self._lock_acquire()
try:
#
targetname = self._getCleanFilename(oid, serial)
try:
os.rename(blobfilename, targetname)
except OSError:
target = file(targetname, "wb")
source = file(blobfilename, "rb")
utils.cp(blobfile, target)
target.close()
source.close()
os.unlink(blobfilename)
# XXX if oid already in there, something is really hosed.
# The underlying storage should have complained anyway
self.dirty_oids.append((oid, serial))
finally:
self._lock_release()
return self._tid
def _getDirtyFilename(self, oid):
"""Generate an intermediate filename for two-phase commit.
XXX Not used right now due to conceptual flux. Please keep it around
anyway.
"""
return self._getCleanFilename(oid, "store")
def _getCleanFilename(self, oid, tid):
return "%s/%s-%s.blob" % \
(self.base_directory,
utils.oid_repr(oid),
utils.tid_repr(tid),
)
def _finish(self, tid, u, d, e):
ProxyBase._finish(self, tid, u, d, e)
self.dirty_blobs = []
def loadBlob(oid, serial, version, blob):
"""Loads the BLOB data for 'oid' into the given blob object.
def _abort(self):
ProxyBase._abort(self)
# Throw away the stuff we'd had committed
while self.dirty_blobs:
oid, serial = self.dirty_blobs.pop()
os.unlink(self._getCleanFilename(oid))
def loadBlob(self, oid, serial, version):
"""Return the filename where the blob file can be found.
"""
return self._getCleanFilename(oid, serial)
- Blob instances should clean up temporary files after committing
- Support database import/export
......@@ -13,3 +13,20 @@ class IBlob(Interface):
# XXX need a method to initialize the blob from the storage
# this means a) setting the _p_blob_data filename and b) putting
# the current data in that file
class IBlobStorage(Interface):
"""A storage supporting BLOBs."""
def storeBlob(oid, oldserial, data, blob, version, transaction):
"""Stores data that has a BLOB attached."""
def loadBlob(oid, serial, version):
"""Return the filename of the Blob data responding to this OID and
serial.
Returns a filename or None if no Blob data is connected with this OID.
"""
def getBlobDirectory():
"""
"""
......@@ -23,15 +23,18 @@ need a Blob with some data:
>>> blob = Blob()
>>> data = blob.open("w")
>>> data.write("I'm a happy Blob.")
>>> data.close()
We also need a database with a blob supporting storage:
>>> from ZODB.MappingStorage import MappingStorage
>>> from ZODB.Blobs.BlobStorage import BlobStorage
>>> from ZODB.DB import DB
>>> from tempfile import mkdtemp
>>> base_storage = MappingStorage("test")
>>> blob_dir = mkdtemp()
>>> blob_storage = BlobStorage(blob_dir, base_storage)
>>> database = DB(storage)
>>> database = DB(blob_storage)
Putting a Blob into a Connection works like every other object:
......@@ -40,12 +43,11 @@ Putting a Blob into a Connection works like every other object:
>>> root['myblob'] = blob
>>> import transaction
>>> transaction.commit()
>>> connection.close()
Getting stuff out of there works similar:
>>> connection = database.open()
>>> root = connection.root()
>>> connection2 = database.open()
>>> root = connection2.root()
>>> blob2 = root['myblob']
>>> IBlob.isImplementedBy(blob2)
True
......@@ -56,17 +58,18 @@ You can't put blobs into a database that has uses a Non-Blob-Storage, though:
>>> no_blob_storage = MappingStorage()
>>> database2 = DB(no_blob_storage)
>>> connection = database.open()
>>> root = connection.root()
>>> root['myblob'] = blob
>>> transaction.commit()
>>> connection3 = database2.open()
>>> root = connection3.root()
>>> root['myblob'] = Blob()
>>> transaction.commit() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
POSException.Unsupported: Storing Blobs is not supported.
Unsupported: Storing Blobs in <ZODB.MappingStorage.MappingStorage instance at ...> is not supported.
While we are testing this, we don't need the storage directory and databases anymore:
>>> import os
>>> os.unlink(blob_dir)
>>> import shutil
>>> shutil.rmtree(blob_dir)
>>> transaction.abort()
>>> database.close()
>>> database2.close()
......@@ -15,4 +15,4 @@
from zope.testing.doctestunit import DocFileSuite
def test_suite():
return DocFileSuite("../README.txt")
return DocFileSuite("../Blob.txt", "connection.txt")
......@@ -27,6 +27,9 @@ from ZODB.serialize import referencesf
from ZODB.utils import WeakSet
from ZODB.utils import DEPRECATED_ARGUMENT, deprecated36
from zope.interface import implements
from ZODB.interfaces import IDatabase
import transaction
logger = logging.getLogger('ZODB.DB')
......@@ -178,6 +181,7 @@ class DB(object):
setCacheDeactivateAfter,
getVersionCacheDeactivateAfter, setVersionCacheDeactivateAfter
"""
implements(IDatabase)
klass = Connection # Class to use for connections
_activity_monitor = None
......@@ -188,6 +192,8 @@ class DB(object):
cache_deactivate_after=DEPRECATED_ARGUMENT,
version_pool_size=3,
version_cache_size=100,
database_name='unnamed',
databases=None,
version_cache_deactivate_after=DEPRECATED_ARGUMENT,
):
"""Create an object database.
......@@ -248,6 +254,16 @@ class DB(object):
storage.tpc_vote(t)
storage.tpc_finish(t)
# Multi-database setup.
if databases is None:
databases = {}
self.databases = databases
self.database_name = database_name
if database_name in databases:
raise ValueError("database_name %r already in databases" %
database_name)
databases[database_name] = self
# Pass through methods:
for m in ['history', 'supportsUndo', 'supportsVersions', 'undoLog',
'versionEmpty', 'versions']:
......@@ -565,7 +581,7 @@ class DB(object):
def get_info(c):
# `result`, `time` and `version` are lexically inherited.
o = c._opened
d = c._debug_info
d = c.getDebugInfo()
if d:
if len(d) == 1:
d = d[0]
......
......@@ -547,6 +547,7 @@ class FileStorage(BaseStorage.BaseStorage,
self._lock_release()
def load(self, oid, version):
"""Return pickle data and serial number."""
self._lock_acquire()
try:
pos = self._lookup_pos(oid)
......@@ -629,7 +630,7 @@ class FileStorage(BaseStorage.BaseStorage,
finally:
self._lock_release()
def store(self, oid, serial, data, version, transaction):
def store(self, oid, oldserial, data, version, transaction):
if self._is_read_only:
raise POSException.ReadOnlyError()
if transaction is not self._transaction:
......@@ -652,12 +653,12 @@ class FileStorage(BaseStorage.BaseStorage,
pnv = h.pnv
cached_tid = h.tid
if serial != cached_tid:
if oldserial != cached_tid:
rdata = self.tryToResolveConflict(oid, cached_tid,
serial, data)
oldserial, data)
if rdata is None:
raise POSException.ConflictError(
oid=oid, serials=(cached_tid, serial), data=data)
oid=oid, serials=(cached_tid, oldserial), data=data)
else:
data = rdata
......@@ -687,7 +688,7 @@ class FileStorage(BaseStorage.BaseStorage,
raise FileStorageQuotaError(
"The storage quota has been exceeded.")
if old and serial != cached_tid:
if old and oldserial != cached_tid:
return ConflictResolution.ResolvedSerial
else:
return self._tid
......
......@@ -68,16 +68,16 @@
#
# - 8-byte data length
#
# ? 8-byte position of non-version data
# ? 8-byte position of non-version data record
# (if version length > 0)
#
# ? 8-byte position of previous record in this version
# (if version length > 0)
#
# ? version string
# ? version string
# (if version length > 0)
#
# ? data
# ? data
# (data length > 0)
#
# ? 8-byte position of data record containing data
......
......@@ -12,8 +12,11 @@
#
##############################################################################
from zope.interface import implements
from ZODB.Blobs.interfaces import IBlobStorage
from ZODB import POSException
from ZODB.utils import p64, u64, z64
from ZODB.utils import p64, u64, z64, cp
import tempfile
......@@ -22,6 +25,8 @@ class TmpStore:
_bver = ''
implements(IBlobStorage)
def __init__(self, base_version, storage):
self._transaction = None
self._storage = storage
......@@ -37,6 +42,8 @@ class TmpStore:
self._tindex = {}
self._creating = []
self.blob_files = {}
def close(self):
self._file.close()
......@@ -61,6 +68,9 @@ class TmpStore:
serial = h[:8]
return self._file.read(size), serial
def sortKey(self):
return self._storage.sortKey()
# TODO: clarify difference between self._storage & self._db._storage
def modifiedInVersion(self, oid):
......@@ -119,5 +129,27 @@ class TmpStore:
def versionEmpty(self, version):
# TODO: what is this supposed to do?
# NOTE: This appears to implement the opposite of what it should.
if version == self._bver:
return len(self._index)
# Blob support
def loadBlob(self, oid, serial, version):
    """Return the temp filename holding oid's blob data, or None."""
    try:
        return self.blob_files[oid]
    except KeyError:
        return None
def storeBlob(self, oid, oldserial, data, blobfile, version, transaction):
    """Store pickle data plus the attached blob file for oid.

    Delegates the pickle to self.store(), then copies the blob data
    from `blobfile` (a path) into a per-oid temp file.

    Bug fix: the original never closed the source or target file
    objects, leaking two file handles per stored blob.
    """
    result = self.store(oid, oldserial, data, version, transaction)
    target = file(self.generateBlobFile(oid), "w")
    try:
        src = file(blobfile, "r")
        try:
            cp(src, target)
        finally:
            src.close()
    finally:
        target.close()
    return result
def generateBlobFile(self, oid):
    """Return the temp-file path used for oid's blob data, creating
    the file lazily on first use.

    Bug fix: tempfile.mkstemp returns an *integer* file descriptor, so
    the original `handle.close()` raised AttributeError; the descriptor
    must be released with os.close().
    """
    if oid not in self.blob_files:
        # Local import: os is not visibly imported at module level here.
        import os
        handle, name = tempfile.mkstemp()
        os.close(handle)
        self.blob_files[oid] = name
    return self.blob_files[oid]
......@@ -158,4 +158,15 @@
<key name="version-cache-size" datatype="integer" default="100"/>
</sectiontype>
<sectiontype name="blobfilestorage" datatype=".BlobFileStorage"
implements="ZODB.storage" extends="filestorage">
<key name="blob-dir" required="yes">
<description>
Path name to the blob storage directory.
</description>
</key>
</sectiontype>
</component>
......@@ -132,6 +132,15 @@ class FileStorage(BaseConfig):
read_only=self.config.read_only,
quota=self.config.quota)
class BlobFileStorage(FileStorage):
    """Config handler that opens a FileStorage with blob support layered
    on top via BlobStorage."""

    def open(self):
        # Imported lazily so the Blobs package is only required when a
        # blobfilestorage section is actually used.
        from ZODB.Blobs.BlobStorage import BlobStorage
        return BlobStorage(self.config.blob_dir, FileStorage.open(self))
class ZEOClient(BaseConfig):
def open(self):
......
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Support for testing logging code
If you want to test that your code generates proper log output, you
can create and install a handler that collects output:
>>> handler = InstalledHandler('foo.bar')
The handler is installed into loggers for all of the names passed. In
addition, the logger level is set to 1, which means, log
everything. If you want to log less than everything, you can provide a
level keyword argument. The level setting affects only the named
loggers.
Then, any log output is collected in the handler:
>>> logging.getLogger('foo.bar').exception('eek')
>>> logging.getLogger('foo.bar').info('blah blah')
>>> for record in handler.records:
... print record.name, record.levelname
... print ' ', record.getMessage()
foo.bar ERROR
eek
foo.bar INFO
blah blah
A similar effect can be gotten by just printing the handler:
>>> print handler
foo.bar ERROR
eek
foo.bar INFO
blah blah
After checking the log output, you need to uninstall the handler:
>>> handler.uninstall()
At which point, the handler won't get any more log output.
Let's clear the handler:
>>> handler.clear()
>>> handler.records
[]
And then log something:
>>> logging.getLogger('foo.bar').info('blah')
and, sure enough, we still have no output:
>>> handler.records
[]
$Id: loggingsupport.py 28349 2004-11-06 00:10:32Z tim_one $
"""
import logging
class Handler(logging.Handler):
    """Log handler that accumulates emitted records in memory so tests
    can inspect (or print) the output produced by named loggers."""

    def __init__(self, *names, **kw):
        logging.Handler.__init__(self)
        self.names = names
        self.records = []
        self.setLoggerLevel(**kw)

    def setLoggerLevel(self, level=1):
        # Remember the level to apply; previous logger levels are
        # captured later, at install() time.
        self.level = level
        self.oldlevels = {}

    def emit(self, record):
        # Collect the record; never forward it anywhere.
        self.records.append(record)

    def clear(self):
        # Empty the record list in place.
        del self.records[:]

    def install(self):
        # Attach to each named logger, saving its old level so
        # uninstall() can restore it.
        for name in self.names:
            target = logging.getLogger(name)
            self.oldlevels[name] = target.level
            target.setLevel(self.level)
            target.addHandler(self)

    def uninstall(self):
        # Undo install(): restore saved levels and detach.
        for name in self.names:
            target = logging.getLogger(name)
            target.setLevel(self.oldlevels[name])
            target.removeHandler(self)

    def __str__(self):
        # One "<logger> <LEVEL>" header per record, followed by the
        # message with blank lines stripped.
        rendered = []
        for record in self.records:
            body = '\n'.join(
                line for line in record.getMessage().split('\n')
                if line.strip())
            rendered.append("%s %s\n %s"
                            % (record.name, record.levelname, body))
        return '\n'.join(rendered)
class InstalledHandler(Handler):
    """A Handler that installs itself into its loggers on construction.

    Convenience subclass: callers get a ready-to-collect handler in one
    step and only need to remember to call uninstall() afterwards.
    """

    def __init__(self, *names):
        Handler.__init__(self, *names)
        self.install()
##############################################################################
#
# Copyright (c) 2005 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
Multi-database tests
====================
Multi-database support adds the ability to tie multiple databases into a
collection. The original proposal is in the fishbowl:
http://www.zope.org/Wikis/ZODB/MultiDatabases/
It was implemented during the PyCon 2005 sprints, but in a simpler form,
by Jim Fulton, Christian Theune, and Tim Peters. Overview:
No private attributes were added, and one new method was introduced.
DB:
- a new .database_name attribute holds the name of this database
- a new .databases attribute maps from database name to DB object; all DBs
in a multi-database collection share the same .databases object
- the DB constructor has new optional arguments with the same names
(database_name= and databases=).
Connection:
- a new .connections attribute maps from database name to a Connection for
the database with that name; the .connections mapping object is also
shared among databases in a collection
- a new .get_connection(database_name) method returns a Connection for a
database in the collection; if a connection is already open, it's returned
(this is the value .connections[database_name]), else a new connection is
opened (and stored as .connections[database_name])
Creating a multi-database starts with creating a named DB:
>>> from ZODB.tests.test_storage import MinimalMemoryStorage
>>> from ZODB import DB
>>> dbmap = {}
>>> db = DB(MinimalMemoryStorage(), database_name='root', databases=dbmap)
The database name is accessible afterwards and in a newly created collection:
>>> db.database_name
'root'
>>> db.databases # doctest: +ELLIPSIS
{'root': <ZODB.DB.DB object at ...>}
>>> db.databases is dbmap
True
Adding another database to the collection works like this:
>>> db2 = DB(MinimalMemoryStorage(),
... database_name='notroot',
... databases=dbmap)
The new db2 now shares the 'databases' dictionary with db and has two entries:
>>> db2.databases is db.databases is dbmap
True
>>> len(db2.databases)
2
>>> names = dbmap.keys(); names.sort(); print names
['notroot', 'root']
It's an error to try to insert a database with a name already in use:
>>> db3 = DB(MinimalMemoryStorage(),
... database_name='root',
... databases=dbmap)
Traceback (most recent call last):
...
ValueError: database_name 'root' already in databases
Because that failed, db.databases wasn't changed:
>>> len(db.databases) # still 2
2
You can (still) get a connection to a database this way:
>>> cn = db.open()
>>> cn # doctest: +ELLIPSIS
<Connection at ...>
This is the only connection in this collection right now:
>>> cn.connections # doctest: +ELLIPSIS
{'root': <Connection at ...>}
Getting a connection to a different database from an existing connection in the
same database collection (this enables 'connection binding' within a given
thread/transaction/context ...):
>>> cn2 = cn.get_connection('notroot')
>>> cn2 # doctest: +ELLIPSIS
<Connection at ...>
Now there are two connections in that collection:
>>> cn2.connections is cn.connections
True
>>> len(cn2.connections)
2
>>> names = cn.connections.keys(); names.sort(); print names
['notroot', 'root']
So long as this database group remains open, the same Connection objects
are returned:
>>> cn.get_connection('root') is cn
True
>>> cn.get_connection('notroot') is cn2
True
>>> cn2.get_connection('root') is cn
True
>>> cn2.get_connection('notroot') is cn2
True
Of course trying to get a connection for a database not in the group raises
an exception:
>>> cn.get_connection('no way')
Traceback (most recent call last):
...
KeyError: 'no way'
Clean up:
>>> for a_db in dbmap.values():
... a_db.close()
......@@ -647,6 +647,8 @@ class StubDatabase:
self._storage = StubStorage()
classFactory = None
database_name = 'stubdatabase'
databases = {'stubdatabase': database_name}
def invalidate(self, transaction, dict_with_oid_keys, connection):
pass
......
......@@ -15,4 +15,6 @@
from zope.testing.doctestunit import DocFileSuite
def test_suite():
return DocFileSuite("dbopen.txt")
return DocFileSuite("dbopen.txt",
"multidb.txt",
)
......@@ -21,6 +21,8 @@ import cPickle as pickle
from cStringIO import StringIO
import weakref
import warnings
from tempfile import mkstemp
import os
from persistent.TimeStamp import TimeStamp
......@@ -305,3 +307,10 @@ class WeakSet(object):
# We're cheating by breaking into the internals of Python's
# WeakValueDictionary here (accessing its .data attribute).
return self.data.data.values()
def mktemp():
    """Create a temp file, known by name, in a semi-secure manner."""
    # mkstemp hands back an open OS-level descriptor along with the
    # path; close the descriptor and return only the path.
    fd, path = mkstemp()
    os.close(fd)
    return path
......@@ -257,18 +257,35 @@ class IPersistentDataManager(Interface):
def setstate(object):
"""Load the state for the given object.
The object should be in the ghost state.
The object's state will be set and the object will end up
in the saved state.
The object should be in the ghost state. The object's state will be
set and the object will end up in the saved state.
The object must provide the IPersistent interface.
"""
def oldstate(obj, tid):
"""Return copy of 'obj' that was written by transaction 'tid'.
The returned object does not have the typical metadata (_p_jar, _p_oid,
_p_serial) set. I'm not sure how references to other peristent objects
are handled.
Parameters
obj: a persistent object from this Connection.
tid: id of a transaction that wrote an earlier revision.
Raises KeyError if tid does not exist or if tid deleted a revision of
obj.
"""
def register(object):
"""Register an IPersistent with the current transaction.
This method must be called when the object transitions to
the changed state.
A subclass could override this method to customize the default
policy of one transaction manager for each thread.
"""
def mtime(object):
......
# Packaging information for zpkg.
header proxy.h
<extension _zope_proxy_proxy>
source _zope_proxy_proxy.c
depends-on proxy.h
</extension>
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""More convenience functions for dealing with proxies.
$Id$
"""
from zope.interface import moduleProvides
from zope.proxy.interfaces import IProxyIntrospection
from types import ClassType
from zope.proxy._zope_proxy_proxy import *
from zope.proxy._zope_proxy_proxy import _CAPI
moduleProvides(IProxyIntrospection)
__all__ = tuple(IProxyIntrospection)
def ProxyIterator(p):
    """Yield p, then each successively less-proxied object beneath it,
    ending with the fully unwrapped object."""
    current = p
    yield current
    while isProxy(current):
        current = getProxiedObject(current)
        yield current
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Proxy-related interfaces.
$Id$
"""
from zope.interface import Interface
class IProxyIntrospection(Interface):
    """Provides methods for identifying proxies and extracting proxied
    objects.
    """

    def isProxy(obj, proxytype=None):
        """Check whether the given object is a proxy.

        If proxytype is not None, checks whether the object is
        proxied by the given proxytype.
        """

    def sameProxiedObjects(ob1, ob2):
        """Check whether ob1 and ob2 are the same or proxies of the same
        object.
        """

    def getProxiedObject(obj):
        """Get the proxied object.

        If the object isn't proxied, then just return the object.
        """

    def removeAllProxies(obj):
        """Get the proxied object with no proxies.

        If obj is not a proxied object, return obj.

        The returned object has no proxies.
        """

    def queryProxy(obj, proxytype, default=None):
        """Look for a proxy of the given type around the object.

        If no such proxy can be found, return the default.
        """

    def queryInnerProxy(obj, proxytype, default=None):
        """Look for the inner-most proxy of the given type around the
        object.

        If no such proxy can be found, return the default.

        If there is such a proxy, return the inner-most one.
        """
#ifndef _proxy_H_
#define _proxy_H_ 1

/* A proxy object simply wraps a single referenced object. */
typedef struct {
    PyObject_HEAD
    PyObject *proxy_object;   /* the wrapped object */
} ProxyObject;

/* Fast accessor: no type check is performed on ob. */
#define Proxy_GET_OBJECT(ob) (((ProxyObject *)(ob))->proxy_object)

/* C API table exported by the zope.proxy extension as the "_CAPI"
 * module attribute (a CObject). */
typedef struct {
    PyTypeObject *proxytype;
    int (*check)(PyObject *obj);
    PyObject *(*create)(PyObject *obj);
    PyObject *(*getobject)(PyObject *proxy);
} ProxyInterface;

#ifndef PROXY_MODULE

/* These are only defined in the public interface, and are not
 * available within the module implementation.  There we use the
 * classic Python/C API only.
 */

static ProxyInterface *_proxy_api = NULL;

/* Import the C API table from zope.proxy.  Returns 0 on success and
 * -1 on failure; idempotent once the table has been fetched.
 * NOTE(review): the imported module object `m` is never Py_DECREF'd
 * here — looks like a reference leak; confirm against the original. */
static int
Proxy_Import(void)
{
    if (_proxy_api == NULL) {
        PyObject *m = PyImport_ImportModule("zope.proxy");
        if (m != NULL) {
            PyObject *tmp = PyObject_GetAttrString(m, "_CAPI");
            if (tmp != NULL) {
                if (PyCObject_Check(tmp))
                    _proxy_api = (ProxyInterface *)
                        PyCObject_AsVoidPtr(tmp);
                Py_DECREF(tmp);
            }
        }
    }
    return (_proxy_api == NULL) ? -1 : 0;
}

/* Convenience wrappers over the imported API table. */
#define ProxyType (*_proxy_api->proxytype)
#define Proxy_Check(obj) (_proxy_api->check((obj)))
/* NOTE(review): this compares ob_type (a PyTypeObject *) against
 * ProxyType, which expands to a dereferenced PyTypeObject — the
 * comparison presumably wants &ProxyType; confirm before use. */
#define Proxy_CheckExact(obj) ((obj)->ob_type == ProxyType)
#define Proxy_New(obj) (_proxy_api->create((obj)))
#define Proxy_GetObject(proxy) (_proxy_api->getobject((proxy)))

#endif /* PROXY_MODULE */
#endif /* _proxy_H_ */
#
# This file is necessary to make this directory a package.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment