ZODB, commit 6e5baffd
authored Oct 29, 2021 by Michael Howitz
parent 1f3a0d62

    Lint the code.
    Add support for Python 3.9 and 3.10.

Showing 110 changed files with 1833 additions and 1298 deletions (+1833 / -1298)
CHANGES.rst  +3 -1
docs/conf.py  +1 -1
setup.py  +12 -1
src/ZODB/ActivityMonitor.py  +1 -1
src/ZODB/BaseStorage.py  +14 -11
src/ZODB/ConflictResolution.py  +28 -16
src/ZODB/Connection.py  +41 -40
src/ZODB/DB.py  +32 -21
src/ZODB/DemoStorage.py  +21 -18
src/ZODB/ExportImport.py  +8 -6
src/ZODB/FileStorage/FileStorage.py  +145 -129
src/ZODB/FileStorage/format.py  +10 -2
src/ZODB/FileStorage/fsdump.py  +11 -7
src/ZODB/FileStorage/fsoids.py  +8 -3
src/ZODB/FileStorage/fspack.py  +13 -13
src/ZODB/FileStorage/interfaces.py  +6 -4
src/ZODB/FileStorage/tests.py  +8 -2
src/ZODB/MappingStorage.py  +19 -16
src/ZODB/POSException.py  +68 -39
src/ZODB/__init__.py  +1 -2
src/ZODB/_compat.py  +12 -7
src/ZODB/blob.py  +41 -27
src/ZODB/broken.py  +7 -2
src/ZODB/config.py  +23 -3
src/ZODB/conversionhack.py  +15 -7
src/ZODB/event.py  +2 -2
src/ZODB/fsIndex.py  +13 -9
src/ZODB/fsrecover.py  +20 -10
src/ZODB/fstools.py  +5 -3
src/ZODB/interfaces.py  +42 -31
src/ZODB/mvccadapter.py  +18 -12
src/ZODB/persistentclass.py  +9 -7
src/ZODB/scripts/analyze.py  +17 -10
src/ZODB/scripts/checkbtrees.py  +10 -5
src/ZODB/scripts/fsoids.py  +4 -1
src/ZODB/scripts/fsrefs.py  +9 -6
src/ZODB/scripts/fsstats.py  +14 -8
src/ZODB/scripts/fstail.py  +3 -1
src/ZODB/scripts/fstest.py  +21 -10
src/ZODB/scripts/manual_tests/testfstest.py  +2 -5
src/ZODB/scripts/migrate.py  +7 -7
src/ZODB/scripts/migrateblobs.py  +6 -6
src/ZODB/scripts/netspace.py  +7 -2
src/ZODB/scripts/referrers.py  +1 -0
src/ZODB/scripts/repozo.py  +24 -9
src/ZODB/scripts/space.py  +4 -1
src/ZODB/scripts/tests/test_doc.py  +2 -1
src/ZODB/scripts/tests/test_fsdump_fsstats.py  +0 -3
src/ZODB/scripts/tests/test_fstest.py  +1 -1
src/ZODB/scripts/tests/test_repozo.py  +134 -114
src/ZODB/scripts/zodbload.py  +228 -214
src/ZODB/serialize.py  +14 -11
src/ZODB/tests/BasicStorage.py  +60 -43
src/ZODB/tests/ConflictResolution.py  +11 -6
src/ZODB/tests/Corruption.py  +1 -0
src/ZODB/tests/HistoryStorage.py  +2 -1
src/ZODB/tests/IteratorStorage.py  +5 -4
src/ZODB/tests/MTStorage.py  +6 -1
src/ZODB/tests/MVCCMappingStorage.py  +1 -1
src/ZODB/tests/MinPO.py  +2 -1
src/ZODB/tests/PackableStorage.py  +12 -8
src/ZODB/tests/PersistentStorage.py  +2 -1
src/ZODB/tests/ReadOnlyStorage.py  +1 -0
src/ZODB/tests/RecoveryStorage.py  +5 -5
src/ZODB/tests/RevisionStorage.py  +4 -4
src/ZODB/tests/StorageTestBase.py  +11 -4
src/ZODB/tests/Synchronization.py  +1 -0
src/ZODB/tests/TransactionalUndoStorage.py  +11 -10
src/ZODB/tests/dangle.py  +4 -0
src/ZODB/tests/hexstorage.py  +12 -8
src/ZODB/tests/loggingsupport.py  +3 -2
src/ZODB/tests/sampledm.py  +3 -1
src/ZODB/tests/speed.py  +67 -50
src/ZODB/tests/testActivityMonitor.py  +0 -3
src/ZODB/tests/testBroken.py  +3 -3
src/ZODB/tests/testCache.py  +17 -11
src/ZODB/tests/testConfig.py  +2 -0
src/ZODB/tests/testConnection.py  +36 -14
src/ZODB/tests/testConnectionSavepoint.py  +15 -7
src/ZODB/tests/testDB.py  +22 -12
src/ZODB/tests/testDemoStorage.py  +20 -16
src/ZODB/tests/testFileStorage.py  +32 -19
src/ZODB/tests/testMVCCMappingStorage.py  +7 -12
src/ZODB/tests/testMappingStorage.py  +10 -9
src/ZODB/tests/testPersistentList.py  +14 -13
src/ZODB/tests/testPersistentMapping.py  +4 -4
src/ZODB/tests/testPersistentWeakref.py  +3 -2
src/ZODB/tests/testRecover.py  +4 -2
src/ZODB/tests/testSerialize.py  +20 -7
src/ZODB/tests/testUtils.py  +14 -12
src/ZODB/tests/testZODB.py  +8 -7
src/ZODB/tests/test_TransactionMetaData.py  +2 -3
src/ZODB/tests/test_cache.py  +5 -0
src/ZODB/tests/test_doctest_files.py  +2 -2
src/ZODB/tests/test_fsdump.py  +1 -1
src/ZODB/tests/test_prefetch.py  +2 -0
src/ZODB/tests/test_storage.py  +4 -0
src/ZODB/tests/testblob.py  +62 -45
src/ZODB/tests/testconflictresolution.py  +9 -3
src/ZODB/tests/testcrossdatabasereferences.py  +23 -19
src/ZODB/tests/testdocumentation.py  +8 -8
src/ZODB/tests/testfsIndex.py  +12 -9
src/ZODB/tests/testhistoricalconnections.py  +2 -1
src/ZODB/tests/testmvcc.py  +2 -1
src/ZODB/tests/testpersistentclass.py  +10 -7
src/ZODB/tests/util.py  +37 -15
src/ZODB/tests/warnhook.py  +1 -0
src/ZODB/transact.py  +4 -2
src/ZODB/utils.py  +25 -9
src/ZODB/valuedoc.py  +2 -2
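Most of the churn in this commit is flake8-driven cleanup rather than behavior change: bare ``except:`` clauses gain ``# noqa: E722`` markers, operators get PEP 8 spacing, and blank lines are added between top-level definitions. A minimal sketch of the bare-except pattern as it is applied throughout the diff (the function here is illustrative, not taken from the commit):

    import logging

    logger = logging.getLogger(__name__)


    def close_quietly(resource):
        # flake8 E722 flags bare ``except:``; where the broad catch is
        # deliberate, this commit keeps it and suppresses the warning
        # with a per-line ``noqa`` instead of narrowing the handler.
        try:
            resource.close()
        except:  # noqa: E722 do not use bare 'except'
            logger.exception("close failed")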
CHANGES.rst  View file @ 6e5baffd

@@ -2,7 +2,7 @@
 Change History
 ================

-5.6.1 (unreleased)
+5.7.0 (unreleased)
 ==================

 - Fix ``TypeError: can't concat str to bytes`` when running fsoids.py script with Python 3.
...
@@ -24,6 +24,8 @@
+- Fix deprecation warnings occurring on Python 3.10.
+- Add support for Python 3.9 and 3.10.

 5.6.0 (2020-06-11)
 ==================
...
docs/conf.py  View file @ 6e5baffd

@@ -56,7 +56,7 @@ master_doc = 'index'
 # General information about the project.
 project = 'ZODB'
-copyright = '2009-2020, Zope Foundation'
+copyright = '2009-2021, Zope Foundation'

 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
...
setup.py  View file @ 6e5baffd

@@ -13,7 +13,7 @@
 ##############################################################################
 from setuptools import setup, find_packages

-version = '5.6.1.dev0'
+version = '5.7.0.dev0'

 classifiers = """\
 Intended Audience :: Developers
...
@@ -26,6 +26,8 @@ Programming Language :: Python :: 3.5
 Programming Language :: Python :: 3.6
 Programming Language :: Python :: 3.7
 Programming Language :: Python :: 3.8
+Programming Language :: Python :: 3.9
+Programming Language :: Python :: 3.10
 Programming Language :: Python :: Implementation :: CPython
 Programming Language :: Python :: Implementation :: PyPy
 Topic :: Database
...
@@ -35,10 +37,12 @@ Operating System :: Unix
 Framework :: ZODB
 """

+
 def read(path):
     with open(path) as f:
         return f.read()

+
 long_description = read("README.rst") + "\n\n" + read("CHANGES.rst")

 tests_require = [
...
@@ -67,6 +71,13 @@ setup(
     tests_require=tests_require,
+    extras_require={
+        'test': tests_require,
+        'docs': [
+            'Sphinx',
+            'ZODB',
+            'j1m.sphinxautozconfig',
+            'sphinx_rtd_theme',
+            'sphinxcontrib_zopeext',
+        ]
+    },
     install_requires=[
         'persistent >= 4.4.0',
...
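A usage note, not part of the commit text: assuming the ``docs`` extra is the new entry here (the hunk adds seven lines to ``extras_require``), documentation dependencies become installable on demand with ``pip install "ZODB[docs]"``, alongside the existing ``pip install "ZODB[test]"`` for the test dependencies.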
src/ZODB/ActivityMonitor.py  View file @ 6e5baffd

src/ZODB/BaseStorage.py  View file @ 6e5baffd

@@ -20,7 +20,6 @@ from __future__ import print_function
 import time
 import logging
-import sys
 from struct import pack as _structpack, unpack as _structunpack

 import zope.interface
...
@@ -35,6 +34,7 @@ from ._compat import py2_hasattr
 log = logging.getLogger("ZODB.BaseStorage")

+
 class BaseStorage(UndoLogCompatible):
     """Base class that supports storage implementations.
...
@@ -74,12 +74,12 @@ class BaseStorage(UndoLogCompatible):
     perhaps other things. It is always held when load() is called, so
     presumably the load() implementation should also acquire the lock.
     """
-    _transaction = None # Transaction that is being committed
-    _tstatus = ' '      # Transaction status, used for copying data
+    _transaction = None  # Transaction that is being committed
+    _tstatus = ' '  # Transaction status, used for copying data
     _is_read_only = False

     def __init__(self, name, base=None):
-        self.__name__= name
+        self.__name__ = name
         log.debug("create storage %s", self.__name__)

         # Allocate locks:
...
@@ -93,7 +93,7 @@ class BaseStorage(UndoLogCompatible):
         self._commit_lock_release = self._commit_lock.release

         t = time.time()
-        t = self._ts = TimeStamp(*(time.gmtime(t)[:5]+(t%60,)))
+        t = self._ts = TimeStamp(*(time.gmtime(t)[:5] + (t % 60,)))
         self._tid = t.raw()

         # ._oid is the highest oid in use (0 is always in use -- it's
...
@@ -279,6 +279,7 @@ class BaseStorage(UndoLogCompatible):
         """
         copy(other, self, verbose)

+
 def copy(source, dest, verbose=0):
     """Copy transactions from a source to a destination storage
...
@@ -287,7 +288,7 @@ def copy(source, dest, verbose=0):
     """
     _ts = None
     ok = 1
-    preindex = {};
+    preindex = {}
     preget = preindex.get

     # restore() is a new storage API method which has an identical
     # signature to store() except that it does not return anything.
...
@@ -310,7 +311,8 @@ def copy(source, dest, verbose=0):
         else:
             t = TimeStamp(tid)
             if t <= _ts:
-                if ok: print(('Time stamps out of order %s, %s' % (_ts, t)))
+                if ok:
+                    print(('Time stamps out of order %s, %s' % (_ts, t)))
                 ok = 0
             _ts = t.laterThan(_ts)
             tid = _ts.raw()
...
@@ -351,23 +353,24 @@ def checkCurrentSerialInTransaction(self, oid, serial, transaction):
         raise POSException.ReadConflictError(
             oid=oid, serials=(committed_tid, serial))

+
 BaseStorage.checkCurrentSerialInTransaction = checkCurrentSerialInTransaction

+
 @zope.interface.implementer(ZODB.interfaces.IStorageTransactionInformation)
 class TransactionRecord(TransactionMetaData):
     """Abstract base class for iterator protocol"""

     def __init__(self, tid, status, user, description, extension):
         self.tid = tid
         self.status = status
         TransactionMetaData.__init__(self, user, description, extension)

+
 @zope.interface.implementer(ZODB.interfaces.IStorageRecordInformation)
 class DataRecord(object):
     """Abstract base class for iterator protocol"""

     version = ''

     def __init__(self, oid, tid, data, prev):
...
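The reformatted expression ``TimeStamp(*(time.gmtime(t)[:5] + (t % 60,)))`` recurs throughout this commit (here, and later in FileStorage.pack and MappingStorage.pack). It converts a Unix time into a ZODB TimeStamp: the first five fields of the ``gmtime`` struct (year through minute) plus seconds-with-fraction, since ``gmtime`` itself truncates sub-second precision. A small self-contained sketch of the same construction:

    import time

    from ZODB.TimeStamp import TimeStamp


    def timestamp_from_unix(t):
        # gmtime(t)[:5] is (year, month, day, hour, minute); t % 60 re-adds
        # the seconds, including the fractional part that gmtime drops.
        return TimeStamp(*(time.gmtime(t)[:5] + (t % 60,)))


    print(timestamp_from_unix(time.time()))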
src/ZODB/ConflictResolution.py  View file @ 6e5baffd

@@ -29,9 +29,11 @@ from pickle import PicklingError
 logger = logging.getLogger('ZODB.ConflictResolution')

+
 class BadClassName(Exception):
     pass

+
 class BadClass(object):

     def __init__(self, *args):
...
@@ -40,8 +42,11 @@ class BadClass(object):
     def __reduce__(self):
         raise BadClassName(*self.args)

+
 _class_cache = {}
 _class_cache_get = _class_cache.get

+
 def find_global(*args):
     cls = _class_cache_get(args, 0)
     if cls == 0:
...
@@ -61,13 +66,13 @@ def find_global(*args):
         # Not importable
         if (isinstance(args, tuple) and len(args) == 2
             and isinstance(args[0], six.string_types)
-            and isinstance(args[1], six.string_types)
-            ):
+                and isinstance(args[1], six.string_types)):
             return BadClass(*args)
         else:
             raise BadClassName(*args)
     return cls

+
 def state(self, oid, serial, prfactory, p=''):
     p = p or self.loadSerial(oid, serial)
     p = self._crs_untransform_record_data(p)
...
@@ -77,6 +82,7 @@ def state(self, oid, serial, prfactory, p=''):
     unpickler.load()  # skip the class tuple
     return unpickler.load()

+
 class IPersistentReference(zope.interface.Interface):
     '''public contract for references to persistent objects from an object
     with conflicts.'''
...
@@ -114,10 +120,10 @@ class IPersistentReference(zope.interface.Interface):
     have two references to the same object that are spelled with different
     data (for instance, one with a class and one without).'''

+
 @zope.interface.implementer(IPersistentReference)
 class PersistentReference(object):

     weak = False
     oid = database_name = klass = None
...
@@ -211,6 +217,7 @@ class PersistentReference(object):
     elif isinstance(data, list) and data[0] == 'm':
         return data[1][2]

+
 class PersistentReferenceFactory(object):

     data = None
...
@@ -218,7 +225,8 @@ class PersistentReferenceFactory(object):
     def persistent_load(self, ref):
         if self.data is None:
             self.data = {}
-        key = tuple(ref)  # lists are not hashable; formats are different enough
+        # lists are not hashable; formats are different enough
+        key = tuple(ref)
         # even after eliminating list/tuple distinction
         r = self.data.get(key, None)
         if r is None:
...
@@ -227,12 +235,16 @@ class PersistentReferenceFactory(object):
         return r

+
 def persistent_id(object):
     if getattr(object, '__class__', 0) is not PersistentReference:
         return None
     return object.data

+
 _unresolvable = {}

+
 def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
                          committedData=b''):
     # class_tuple, old, committed, newstate = ('',''), 0, 0, 0
...
@@ -264,7 +276,6 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
             _unresolvable[klass] = 1
             raise ConflictError
-
     oldData = self.loadSerial(oid, oldSerial)
     if not committedData:
         committedData = self.loadSerial(oid, committedSerial)
...
@@ -284,7 +295,7 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
         logger.debug(
             "Conflict resolution on %s failed with %s: %s",
             klass, e.__class__.__name__, str(e))
-    except:
+    except:  # noqa: E722 do not use bare 'except'
         # If anything else went wrong, catch it here and avoid passing an
         # arbitrary exception back to the client. The error here will mask
         # the original ConflictError. A client can recover from a
...
@@ -296,6 +307,7 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
     raise ConflictError(oid=oid, serials=(committedSerial, oldSerial),
                         data=newpickle)

+
 class ConflictResolvingStorage(object):
     "Mix-in class that provides conflict resolution handling for storages"
...
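An aside on ``find_global`` above: ``_class_cache_get(args, 0)`` uses ``0`` as a "not cached yet" sentinel, so a cache miss is distinguishable from any legitimately cached value, and the bound-method lookup (``_class_cache.get``) is hoisted out of the function for speed. A minimal sketch of the same memoization pattern, with illustrative names rather than the ZODB code:

    _cache = {}
    _cache_get = _cache.get  # bound-method lookup hoisted, as in the diff

    _MISS = 0  # sentinel: never a valid cached result here


    def resolve(*args):
        result = _cache_get(args, _MISS)
        if result == _MISS:
            # the expensive lookup happens once per distinct args tuple
            result = ".".join(map(str, args))
            _cache[args] = result
        return result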
src/ZODB/Connection.py  View file @ 6e5baffd

@@ -15,7 +15,6 @@
 """
 from __future__ import print_function

 import logging
-import sys
 import tempfile
 import warnings
 import os
...
@@ -43,7 +42,6 @@ from ZODB import POSException
 from ZODB.POSException import InvalidObjectReference, ConnectionStateError
 from ZODB.POSException import ConflictError, ReadConflictError
 from ZODB.POSException import Unsupported, ReadOnlyHistoryError
-from ZODB.POSException import POSKeyError
 from ZODB.serialize import ObjectWriter, ObjectReader
 from ZODB.utils import p64, u64, z64, oid_repr, positive_id
 from ZODB import utils
...
@@ -56,7 +54,10 @@ from . import valuedoc
 global_reset_counter = 0

-noop = lambda : None
+
+def noop():
+    return None
+

 def resetCaches():
     """Causes all connection caches to be reset as connections are reopened.
...
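The ``noop`` change is flake8 E731 (do not assign a lambda expression, use a def): a ``def`` gets a real ``__name__`` and a useful frame in tracebacks, while behaving identically. The pattern in isolation:

    # Before (flagged by E731): noop = lambda: None
    # After:
    def noop():
        return None


    assert noop() is None
    assert noop.__name__ == "noop"  # every lambda is named '<lambda>'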
@@ -259,6 +260,7 @@ class Connection(ExportImport, object):
             connection._cache.incrgc()

     __onCloseCallbacks = None

+
     def onCloseCallback(self, f):
         """Register a callable, f, to be called by close()."""
         if self.__onCloseCallbacks is None:
...
@@ -281,18 +283,18 @@ class Connection(ExportImport, object):
             for f in callbacks:
                 try:
                     f()
-                except: # except what?
+                except:  # noqa: E722 do not use bare 'except'
                     f = getattr(f, 'im_self', f)
                     self._log.exception("Close callback failed for %s", f)

         self._debug_info = ()

         if self.opened and self.transaction_manager is not None:
-            # transaction_manager could be None if one of the __onCloseCallbacks
-            # closed the DB already, .e.g, ZODB.connection() does this.
+            # transaction_manager could be None if one of the
+            # __onCloseCallbacks closed the DB already, .e.g, ZODB.connection()
+            # does this.
             self.transaction_manager.unregisterSynch(self)

         am = self._db._activity_monitor
         if am is not None:
             am.closedConnection(self)
...
@@ -322,7 +324,6 @@ class Connection(ExportImport, object):
         # We may have been reused by another thread at this point so
         # we can't manipulate or check the state of `self` any more.
-
     def db(self):
         """Returns a handle to the database this connection belongs to."""
         return self._db
...
@@ -545,8 +546,7 @@ class Connection(ExportImport, object):
                 ((self._savepoint_storage is None)
                  or (oid not in self._savepoint_storage.creating)
-                 or self._savepoint_storage.creating[oid]
-                 )):
+                 or self._savepoint_storage.creating[oid])):

             # obj is a new object
...
@@ -593,7 +593,7 @@ class Connection(ExportImport, object):
             # serial number for a newly created object
             try:
                 self._cache[oid] = obj
-            except:
+            except:  # noqa: E722 do not use bare 'except'
                 # Dang, I bet it's wrapped:
                 # TODO: Deprecate, then remove, this.
                 if hasattr(obj, 'aq_base'):
...
@@ -664,7 +664,6 @@ class Connection(ExportImport, object):
                 del o._p_jar
                 del o._p_oid
-
     def tpc_vote(self, transaction):
         """Verify that a data manager can commit the transaction."""
         try:
...
@@ -769,7 +768,7 @@ class Connection(ExportImport, object):
                    % (className(obj), oid_repr(oid)))
             try:
                 raise ConnectionStateError(msg)
-            except:
+            except:  # noqa: E722 do not use bare 'except'
                 self._log.exception(msg)
                 raise
...
@@ -790,7 +789,7 @@ class Connection(ExportImport, object):
         except ConflictError:
             raise
-        except:
+        except:  # noqa: E722 do not use bare 'except'
             self._log.exception("Couldn't load state for %s %s",
                                 className(obj), oid_repr(oid))
             raise
...
@@ -847,7 +846,7 @@ class Connection(ExportImport, object):
         # fine everything. some on the lru list, some not
         everything = self._cache.cache_data
         # remove those items that are on the lru list
-        for k,v in items:
+        for k, v in items:
             del everything[k]
         # return a list of [ghosts....not recently used.....recently used]
         return list(everything.items()) + items
...
@@ -1102,6 +1101,7 @@ class Connection(ExportImport, object):
             else:
                 yield ob._p_oid

+
 @implementer(IDataManagerSavepoint)
 class Savepoint(object):
...
@@ -1117,7 +1117,6 @@ class Savepoint(object):
 class TmpStore(object):
     """A storage-like thing to support savepoints."""

-
     def __init__(self, storage):
         self._storage = storage
         for method in (
...
@@ -1167,14 +1166,14 @@ class TmpStore(object):
         # commit logic
         assert version == ''
         self._file.seek(self.position)
-        l = len(data)
+        lenght = len(data)
         if serial is None:
             serial = z64
-        header = p64(len(oid)) + oid + serial + p64(l)
+        header = p64(len(oid)) + oid + serial + p64(lenght)
         self._file.write(header)
         self._file.write(data)
         self.index[oid] = self.position
-        self.position += l + len(header)
+        self.position += lenght + len(header)
         return serial

     def storeBlob(self, oid, serial, data, blobfilename, version,
...
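Renaming ``l`` in ``TmpStore.store`` is flake8 E741 (ambiguous variable name), since ``l`` is easily misread as ``1`` or ``I``; note that the replacement spelling in this hunk is ``lenght``, a misspelling of ``length`` that nevertheless satisfies the linter. The record framing the method writes is: 8-byte oid length, the oid, the serial, an 8-byte data length, then the data. A hedged sketch of that framing (the helper name is mine):

    from ZODB.utils import p64, z64


    def frame_record(oid, serial, data):
        # Mirrors the TmpStore layout: len(oid) | oid | serial | len(data) | data
        if serial is None:
            serial = z64
        header = p64(len(oid)) + oid + serial + p64(len(data))
        return header + data


    rec = frame_record(b'\0' * 8, None, b'pickle-bytes')
    assert len(rec) == 8 + 8 + 8 + 8 + len(b'pickle-bytes')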
@@ -1271,6 +1270,7 @@ class RootConvenience(object):
         names = names[:57].rsplit(' ', 1)[0] + ' ...'
     return "<root: %s>" % names

+
 large_object_message = """The %s
 object you're saving is large. (%s bytes.)
...
@@ -1291,6 +1291,7 @@ large-record-size option in a configuration file) to specify a larger
 size.
 """

+
 class overridable_property(object):
     """
     Same as property() with only a getter, except that setting a
...
src/ZODB/DB.py  View file @ 6e5baffd

@@ -41,6 +41,7 @@ from ZODB import valuedoc
 logger = logging.getLogger('ZODB.DB')

+
 class AbstractConnectionPool(object):
     """Manage a pool of connections.
...
@@ -111,7 +112,7 @@ class AbstractConnectionPool(object):
 class ConnectionPool(AbstractConnectionPool):

-    def __init__(self, size, timeout=1<<31):
+    def __init__(self, size, timeout=1 << 31):
         super(ConnectionPool, self).__init__(size, timeout)

         # A stack of connections available to hand out. This is a subset
...
@@ -127,9 +128,8 @@ class ConnectionPool(AbstractConnectionPool):
     def _append(self, c):
         available = self.available
         cactive = c._cache.cache_non_ghost_count
-        if (available and
-            (available[-1][1]._cache.cache_non_ghost_count > cactive)
-            ):
+        if (available and
+                (available[-1][1]._cache.cache_non_ghost_count > cactive)):
             i = len(available) - 1
             while (i and
                    (available[i-1][1]._cache.cache_non_ghost_count > cactive)
...
@@ -244,7 +244,7 @@ class KeyedConnectionPool(AbstractConnectionPool):
     # see the comments in ConnectionPool for method descriptions.

-    def __init__(self, size, timeout=1<<31):
+    def __init__(self, size, timeout=1 << 31):
         super(KeyedConnectionPool, self).__init__(size, timeout)
         self.pools = {}
...
@@ -303,6 +303,7 @@ def toTimeStamp(dt):
     args = utc_struct[:5] + (utc_struct[5] + dt.microsecond / 1000000.0,)
     return TimeStamp(*args)

+
 def getTID(at, before):
     if at is not None:
         if before is not None:
...
@@ -319,6 +320,7 @@ def getTID(at, before):
             before = TimeStamp(before).raw()
     return before

+
 @implementer(IDatabase)
 class DB(object):
     """The Object Database
...
@@ -348,7 +350,7 @@ class DB(object):
     def __init__(self, storage,
                  pool_size=7,
-                 pool_timeout=1<<31,
+                 pool_timeout=1 << 31,
                  cache_size=400,
                  cache_size_bytes=0,
                  historical_pool_size=3,
...
@@ -358,7 +360,7 @@ class DB(object):
                  database_name='unnamed',
                  databases=None,
                  xrefs=True,
-                 large_record_size=1<<24,
+                 large_record_size=1 << 24,
                  **storage_args):
         """Create an object database.
...
@@ -425,10 +427,10 @@ class DB(object):
         # Setup storage
         if isinstance(storage, six.string_types):
-            from ZODB import FileStorage
+            from ZODB import FileStorage  # noqa: F401 import unused
             storage = ZODB.FileStorage.FileStorage(storage, **storage_args)
         elif storage is None:
-            from ZODB import MappingStorage
+            from ZODB import MappingStorage  # noqa: F401 import unused
             storage = ZODB.MappingStorage.MappingStorage(**storage_args)
         else:
             assert not storage_args
...
@@ -507,6 +509,7 @@ class DB(object):
         """
         detail = {}

+
         def f(con, detail=detail):
             for oid, ob in con._cache.items():
                 module = getattr(ob.__class__, '__module__', '')
...
@@ -570,7 +573,7 @@ class DB(object):
                     'rc': (rc(ob) - 3 - (ob._p_changed is not None)
                            if rc else False),
                     'state': ob._p_changed,
-                    #'references': con.references(oid),
+                    # 'references': con.references(oid),
                     })
         self._connectionMap(f)
...
@@ -581,6 +584,7 @@ class DB(object):
     def cacheLastGCTime(self):
         m = [0]

+
         def f(con, m=m):
             t = con._cache.cache_last_gc_time
             if t > m[0]:
...
@@ -598,6 +602,7 @@ class DB(object):
         """Return the total count of non-ghost objects in all object caches
         """
         m = [0]

+
         def f(con, m=m):
             m[0] += con._cache.cache_non_ghost_count
...
@@ -608,6 +613,7 @@ class DB(object):
         """Return non-ghost counts sizes for all connections.
         """
         m = []

+
         def f(con, m=m):
             m.append({'connection': repr(con),
                       'ngsize': con._cache.cache_non_ghost_count,
...
@@ -773,7 +779,6 @@ class DB(object):
             self.pool.availableGC()
             self.historical_pool.availableGC()
-
         result.open(transaction_manager)
         return result
...
@@ -836,7 +841,7 @@ class DB(object):
             t -= days * 86400
         try:
             self.storage.pack(t, self.references)
-        except:
+        except:  # noqa: E722 do not use bare 'except'
             logger.exception("packing")
             raise
...
@@ -1029,9 +1034,11 @@ class ContextManager(object):
             self.tm.abort()
         self.conn.close()

+
 resource_counter_lock = utils.Lock()
 resource_counter = 0

+
 class TransactionalUndo(object):

     def __init__(self, db, tids):
...
@@ -1064,9 +1071,10 @@ class TransactionalUndo(object):
         # a new storage instance, and so we must close it to be sure
         # to reclaim resources in a timely manner.
         #
-        # Once the tpc_begin method has been called, the transaction manager will
-        # guarantee to call either `tpc_finish` or `tpc_abort`, so those are the only
-        # methods we need to be concerned about calling close() from.
+        # Once the tpc_begin method has been called, the transaction manager
+        # will guarantee to call either `tpc_finish` or `tpc_abort`, so those
+        # are the only methods we need to be concerned about calling close()
+        # from.
         db_mvcc_storage = self._db._mvcc_storage
         self._storage = getattr(
             db_mvcc_storage,
...
@@ -1117,7 +1125,10 @@ def connection(*args, **kw):
     """
     return DB(*args, **kw).open_then_close_db_when_connection_closes()

+
 _transaction_meta_data_text_variables = 'user_name', 'description'

+
 def _text_transaction_info(info):
     for d in info:
         for name in _transaction_meta_data_text_variables:
...
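The two ``# noqa: F401 import unused`` markers in ``DB.__init__`` deserve a note: the imports look unused because the local name is never referenced, but they are load-bearing. Importing ``ZODB.FileStorage`` (or ``ZODB.MappingStorage``) binds the submodule as an attribute of the already-imported ``ZODB`` package, which the very next line relies on. The same idiom in isolation:

    import ZODB


    def make_storage(path=None, **storage_args):
        # Each import exists only for its side effect of binding the
        # submodule onto the ZODB package object; flake8 therefore sees
        # the local name as unused (F401) and we suppress the warning.
        if path is not None:
            from ZODB import FileStorage  # noqa: F401 import unused
            return ZODB.FileStorage.FileStorage(path, **storage_args)
        from ZODB import MappingStorage  # noqa: F401 import unused
        return ZODB.MappingStorage.MappingStorage(**storage_args)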
src/ZODB/DemoStorage.py  View file @ 6e5baffd

@@ -35,10 +35,11 @@ import zope.interface
 from .ConflictResolution import ConflictResolvingStorage
 from .utils import load_current, maxtid

+
 @zope.interface.implementer(
     ZODB.interfaces.IStorage,
     ZODB.interfaces.IStorageIteration,
-    )
+)
 class DemoStorage(ConflictResolvingStorage):
     """A storage that stores changes against a read-only base database
...
@@ -99,7 +100,6 @@ class DemoStorage(ConflictResolvingStorage):
         self.base = base
         self.close_base_on_close = close_base_on_close
-
         if changes is None:
             self._temporary_changes = True
             changes = ZODB.MappingStorage.MappingStorage()
...
@@ -128,12 +128,11 @@ class DemoStorage(ConflictResolvingStorage):
         self._copy_methods_from_changes(changes)

-        self._next_oid = random.randint(1, 1<<62)
+        self._next_oid = random.randint(1, 1 << 62)

     def _blobify(self):
         if (self._temporary_changes and
-            isinstance(self.changes, ZODB.MappingStorage.MappingStorage)
-            ):
+                isinstance(self.changes, ZODB.MappingStorage.MappingStorage)):
             blob_dir = tempfile.mkdtemp('.demoblobs')
             _temporary_blobdirs[
                 weakref.ref(self, cleanup_temporary_blobdir)
...
@@ -147,6 +146,7 @@ class DemoStorage(ConflictResolvingStorage):
             self.changes.cleanup()

     __opened = True

+
     def opened(self):
         return self.__opened
...
@@ -296,7 +296,7 @@ class DemoStorage(ConflictResolvingStorage):
     def new_oid(self):
         with self._lock:
             while 1:
-                oid = ZODB.utils.p64(self._next_oid )
+                oid = ZODB.utils.p64(self._next_oid)
                 if oid not in self._issued_oids:
                     try:
                         load_current(self.changes, oid)
...
@@ -308,7 +308,7 @@ class DemoStorage(ConflictResolvingStorage):
                         self._issued_oids.add(oid)
                         return oid

-                self._next_oid = random.randint(1, 1<<62)
+                self._next_oid = random.randint(1, 1 << 62)

     def pack(self, t, referencesf, gc=None):
         if gc is None:
...
@@ -344,7 +344,7 @@ class DemoStorage(ConflictResolvingStorage):
                 close_base_on_close=False)

     def store(self, oid, serial, data, version, transaction):
-        assert version=='', "versions aren't supported"
+        assert version == '', "versions aren't supported"
         if transaction is not self._transaction:
             raise ZODB.POSException.StorageTransactionError(self, transaction)
...
@@ -367,7 +367,7 @@ class DemoStorage(ConflictResolvingStorage):
     def storeBlob(self, oid, oldserial, data, blobfilename, version,
                   transaction):
-        assert version=='', "versions aren't supported"
+        assert version == '', "versions aren't supported"
         if transaction is not self._transaction:
             raise ZODB.POSException.StorageTransactionError(self, transaction)
...
@@ -425,7 +425,7 @@ class DemoStorage(ConflictResolvingStorage):
                 "Unexpected resolved conflicts")
             return self._resolved

-    def tpc_finish(self, transaction, func = lambda tid: None):
+    def tpc_finish(self, transaction, func=lambda tid: None):
         with self._lock:
             if (transaction is not self._transaction):
                 raise ZODB.POSException.StorageTransactionError(
...
@@ -437,11 +437,14 @@ class DemoStorage(ConflictResolvingStorage):
             self._commit_lock.release()
             return tid

+
 _temporary_blobdirs = {}

+
 def cleanup_temporary_blobdir(
     ref,
-    _temporary_blobdirs=_temporary_blobdirs, # Make sure it stays around
-    ):
+    _temporary_blobdirs=_temporary_blobdirs,  # Make sure it stays around
+):
     blob_dir = _temporary_blobdirs.pop(ref, None)
     if blob_dir and os.path.exists(blob_dir):
         ZODB.blob.remove_committed_dir(blob_dir)
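Context for the ``new_oid`` hunks above: DemoStorage picks object ids at random in [1, 2**62] and retries on collision, re-rolling ``_next_oid`` whenever a candidate is already issued or already present in the ``changes`` or ``base`` storage. A simplified sketch of that retry loop, with the collision checks reduced to a set (the class is illustrative, not the ZODB code):

    import random


    class OidAllocator(object):
        def __init__(self):
            self._next_oid = random.randint(1, 1 << 62)
            self._issued = set()

        def new_oid(self):
            while 1:
                oid = self._next_oid
                if oid not in self._issued:
                    self._issued.add(oid)
                    self._next_oid += 1  # next call tries the adjacent id
                    return oid
                # collision: jump to a fresh random point, as DemoStorage does
                self._next_oid = random.randint(1, 1 << 62)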
src/ZODB/ExportImport.py  View file @ 6e5baffd

@@ -29,17 +29,17 @@ from ZODB._compat import PersistentPickler, Unpickler, BytesIO, _protocol
 logger = logging.getLogger('ZODB.ExportImport')

+
 class ExportImport(object):

     def exportFile(self, oid, f=None, bufsize=64*1024):
         if f is None:
             f = TemporaryFile(prefix="EXP")
         elif isinstance(f, six.string_types):
-            f = open(f,'w+b')
+            f = open(f, 'w+b')
         f.write(b'ZEXP')
         oids = [oid]
         done_oids = {}
         done = done_oids.__contains__
         load = self._storage.load
         supports_blobs = IBlobStorage.providedBy(self._storage)
         while oids:
...
@@ -49,7 +49,7 @@ class ExportImport(object):
             done_oids[oid] = True
             try:
                 p, serial = load(oid)
-            except:
+            except:  # noqa: E722 do not use bare 'except'
                 logger.debug("broken reference for oid %s", repr(oid),
                              exc_info=True)
             else:
...
@@ -159,8 +159,7 @@ class ExportImport(object):
                 return_oid_list.append(oid)

             if (b'blob' in data and
-                isinstance(self._reader.getGhost(data), Blob)
-                ):
+                    isinstance(self._reader.getGhost(data), Blob)):
                 # Blob support

                 # Make sure we have a (redundant, overly) blob marker.
...
@@ -198,11 +197,14 @@ class ExportImport(object):
 export_end_marker = b'\377'*16
 blob_begin_marker = b'\000BLOBSTART'

+
 class Ghost(object):
     __slots__ = ("oid",)

     def __init__(self, oid):
         self.oid = oid

+
 def persistent_id(obj):
     if isinstance(obj, Ghost):
         return obj.oid
src/ZODB/FileStorage/FileStorage.py  View file @ 6e5baffd

@@ -89,43 +89,54 @@ packed_version = FILESTORAGE_MAGIC
 logger = logging.getLogger('ZODB.FileStorage')

+
 def panic(message, *data):
     logger.critical(message, *data)
     raise CorruptedTransactionError(message % data)

+
 class FileStorageError(StorageError):
     pass

+
 class PackError(FileStorageError):
     pass

+
 class FileStorageFormatError(FileStorageError):
     """Invalid file format

     The format of the given file is not valid.
     """

+
 class CorruptedFileStorageError(FileStorageError, StorageSystemError):
     """Corrupted file storage."""

+
 class CorruptedTransactionError(CorruptedFileStorageError):
     pass

+
 class FileStorageQuotaError(FileStorageError, StorageSystemError):
     """File storage quota exceeded."""

+
 # Intended to be raised only in fspack.py, and ignored here.
 class RedundantPackWarning(FileStorageError):
     pass

+
 class TempFormatter(FileStorageFormatter):
     """Helper class used to read formatted FileStorage data."""

     def __init__(self, afile):
         self._file = afile

+
 @implementer(
     IStorageRestoreable,
     IStorageIteration,
...
@@ -133,13 +144,13 @@ class TempFormatter(FileStorageFormatter):
     IStorageCurrentRecordIteration,
     IExternalGC,
     IStorage,
-    )
+)
 class FileStorage(
     FileStorageFormatter,
     BlobStorageMixin,
     ConflictResolvingStorage,
     BaseStorage,
-    ):
+):
     """Storage that saves data in a file
     """
...
@@ -224,7 +235,7 @@ class FileStorage(
         of packing, the ``.old`` file is removed, if it exists, and
         the data file is renamed to the ``.old`` file and finally the
         ``.pack`` file is rewritten to the data file.
-        """
+        """  # noqa: E501 line too long
         if read_only:
             self._is_read_only = True
...
@@ -344,9 +355,9 @@ class FileStorage(
         return BaseStorage.copyTransactionsFrom(self, other)

     def _initIndex(self, index, tindex):
-        self._index=index
-        self._tindex=tindex
-        self._index_get=index.get
+        self._index = index
+        self._tindex = tindex
+        self._index_get = index.get

     def __len__(self):
         return len(self._index)
...
@@ -356,6 +367,7 @@ class FileStorage(
         return fsIndex(), {}

     _saved = 0

+
     def _save_index(self):
         """Write the database index to a file to support quick startup."""
...
@@ -373,7 +385,8 @@ class FileStorage(
                 except OSError:
                     pass
                 os.rename(tmp_name, index_name)
-            except: pass
+            except:  # noqa: E722 do not use bare 'except'
+                pass

         self._saved += 1
...
@@ -457,13 +470,13 @@ class FileStorage(
         # fsIndex here, and, if we're not in read-only mode, the .index
         # file is rewritten with the converted fsIndex so we don't need to
         # convert it again the next time.
-        file_name=self.__name__
-        index_name=file_name+'.index'
+        file_name = self.__name__
+        index_name = file_name + '.index'

         if os.path.exists(index_name):
             try:
                 info = fsIndex.load(index_name)
-            except:
+            except:  # noqa: E722 do not use bare 'except'
                 logger.exception('loading index')
                 return None
         else:
...
@@ -503,13 +516,13 @@ class FileStorage(
     def close(self):
         self._file.close()
         self._files.close()
-        if hasattr(self,'_lock_file'):
+        if hasattr(self, '_lock_file'):
             self._lock_file.close()
         if self._tfile:
             self._tfile.close()
         try:
             self._save_index()
-        except:
+        except:  # noqa: E722 do not use bare 'except'
             # Log the error and continue
             logger.exception("Error saving index on close()")
...
@@ -593,7 +606,6 @@ class FileStorage(
             self.set_max_oid(oid)
             old = self._index_get(oid, 0)
             committed_tid = None
-            pnv = None
             if old:
                 h = self._read_data_header(old, oid)
                 committed_tid = h.tid
...
@@ -798,7 +810,7 @@ class FileStorage(
                 cp(self._tfile, self._file, dlen)
                 self._file.write(p64(tl))
                 self._file.flush()
-            except:
+            except:  # noqa: E722 do not use bare 'except'
                 # Hm, an error occurred writing out the data. Maybe the
                 # disk is full. We don't want any turd at the end.
                 self._file.truncate(self._pos)
...
@@ -833,7 +845,7 @@ class FileStorage(
             # At this point, we may have committed the data to disk.
             # If we fail from here, we're in bad shape.
             self._finish_finish(tid)
-        except:
+        except:  # noqa: E722 do not use bare 'except'
             # Ouch. This is bad. Let's try to get back to where we were
             # and then roll over and die
             logger.critical("Failure in _finish. Closing.", exc_info=True)
...
@@ -857,7 +869,7 @@ class FileStorage(
         if self._nextpos:
             self._file.truncate(self._pos)
             self._files.flush()
-            self._nextpos=0
+            self._nextpos = 0
             self._blob_tpc_abort()

     def _undoDataInfo(self, oid, pos, tpos):
...
@@ -1137,7 +1149,8 @@ class FileStorage(
         pos = self._lookup_pos(oid)

         while 1:
-            if len(r) >= size: return r
+            if len(r) >= size:
+                return r
             h = self._read_data_header(pos)

             th = self._read_txn_header(h.tloc)
...
@@ -1195,7 +1208,7 @@ class FileStorage(
         if self._is_read_only:
             raise ReadOnlyError()

-        stop = TimeStamp(*time.gmtime(t)[:5]+(t%60,)).raw()
+        stop = TimeStamp(*time.gmtime(t)[:5] + (t % 60,)).raw()
         if stop == z64:
             raise FileStorageError('Invalid pack time')
...
@@ -1217,8 +1230,6 @@ class FileStorage(
         if self.blob_dir and os.path.exists(self.blob_dir + ".old"):
             remove_committed_dir(self.blob_dir + ".old")

-        cleanup = []
-
         have_commit_lock = False
         try:
             pack_result = None
...
@@ -1304,12 +1315,12 @@ class FileStorage(
                 if removed:
                     maybe_remove_empty_dir_containing(path, level + 1)

         if self.pack_keep_old:
             # Helpers that move oid dir or revision file to the old dir.
             os.mkdir(old)
             link_or_copy(os.path.join(self.blob_dir, '.layout'),
                          os.path.join(old, '.layout'))

             def handle_file(path):
                 newpath = old + path[lblob_dir:]
                 dest = os.path.dirname(newpath)
...
@@ -1429,6 +1440,7 @@ class FileStorage(
     #
     ######################################################################

+
 def shift_transactions_forward(index, tindex, file, pos, opos):
     """Copy transactions forward in the data file
...
@@ -1436,17 +1448,16 @@ def shift_transactions_forward(index, tindex, file, pos, opos):
     """

     # Cache a bunch of methods
-    seek=file.seek
-    read=file.read
-    write=file.write
+    seek = file.seek
+    read = file.read
+    write = file.write

-    index_get=index.get
+    index_get = index.get

     # Initialize,
-    pv=z64
-    p1=opos
-    p2=pos
-    offset=p2-p1
+    p1 = opos
+    p2 = pos
+    offset = p2 - p1

     # Copy the data in two stages. In the packing stage,
     # we skip records that are non-current or that are for
...
@@ -1456,24 +1467,25 @@ def shift_transactions_forward(index, tindex, file, pos, opos):
     # transactions, however, we have to update various back pointers.
     # We have to have the storage lock in the second phase to keep
     # data from being changed while we're copying.
-    pnv=None
+    pnv = None
     while 1:

         # Read the transaction record
         seek(pos)
-        h=read(TRANS_HDR_LEN)
-        if len(h) < TRANS_HDR_LEN: break
-        tid, stl, status, ul, dl, el = unpack(TRANS_HDR,h)
+        h = read(TRANS_HDR_LEN)
+        if len(h) < TRANS_HDR_LEN:
+            break
+        tid, stl, status, ul, dl, el = unpack(TRANS_HDR, h)
         status = as_text(status)
-        if status=='c': break # Oops. we found a checkpoint flag.
-        tl=u64(stl)
-        tpos=pos
-        tend=tpos+tl
+        if status == 'c':
+            break  # Oops. we found a checkpoint flag.
+        tl = u64(stl)
+        tpos = pos
+        tend = tpos + tl

-        otpos=opos # start pos of output trans
+        otpos = opos  # start pos of output trans

-        thl=ul+dl+el
-        h2=read(thl)
+        thl = ul + dl + el
+        h2 = read(thl)
         if len(h2) != thl:
             raise PackError(opos)
...
@@ -1482,45 +1494,47 @@ def shift_transactions_forward(index, tindex, file, pos, opos):
         write(h)
         write(h2)

-        thl=TRANS_HDR_LEN+thl
-        pos=tpos+thl
-        opos=otpos+thl
+        thl = TRANS_HDR_LEN + thl
+        pos = tpos + thl
+        opos = otpos + thl

         while pos < tend:
             # Read the data records for this transaction
             seek(pos)
-            h=read(DATA_HDR_LEN)
-            oid,serial,sprev,stloc,vlen,splen = unpack(DATA_HDR, h)
+            h = read(DATA_HDR_LEN)
+            oid, serial, sprev, stloc, vlen, splen = unpack(DATA_HDR, h)
             assert not vlen
-            plen=u64(splen)
-            dlen=DATA_HDR_LEN+(plen or 8)
+            plen = u64(splen)
+            dlen = DATA_HDR_LEN + (plen or 8)

-            tindex[oid]=opos
+            tindex[oid] = opos

-            if plen: p=read(plen)
+            if plen:
+                p = read(plen)
             else:
-                p=read(8)
-                p=u64(p)
-                if p >= p2: p=p-offset
+                p = read(8)
+                p = u64(p)
+                if p >= p2:
+                    p = p - offset
                 elif p >= p1:
                     # Ick, we're in trouble. Let's bail
                     # to the index and hope for the best
-                    p=index_get(oid, 0)
-                p=p64(p)
+                    p = index_get(oid, 0)
+                p = p64(p)

             # WRITE
             seek(opos)
-            sprev=p64(index_get(oid, 0))
+            sprev = p64(index_get(oid, 0))
             write(pack(DATA_HDR,
                        oid, serial, sprev, p64(otpos), 0, splen))

             write(p)

-            opos=opos+dlen
-            pos=pos+dlen
+            opos = opos + dlen
+            pos = pos + dlen

         # skip the (intentionally redundant) transaction length
-        pos=pos+8
+        pos = pos + 8

         if status != 'u':
             index.update(tindex)  # Record the position
...
@@ -1528,34 +1542,37 @@ def shift_transactions_forward(index, tindex, file, pos, opos):
         tindex.clear()

         write(stl)
-        opos=opos+8
+        opos = opos + 8

     return opos

+
 def search_back(file, pos):
-    seek=file.seek
-    read=file.read
-    seek(0,2)
-    s=p=file.tell()
+    seek = file.seek
+    read = file.read
+    seek(0, 2)
+    s = p = file.tell()
     while p > pos:
         seek(p-8)
-        l=u64(read(8))
-        if l <= 0: break
-        p=p-l-8
+        l_ = u64(read(8))
+        if l_ <= 0:
+            break
+        p = p - l_ - 8
     return p, s

+
 def recover(file_name):
-    file=open(file_name, 'r+b')
-    index={}
-    tindex={}
+    file = open(file_name, 'r+b')
+    index = {}
+    tindex = {}

     pos, oid, tid = read_index(file, file_name, index, tindex, recover=1)
     if oid is not None:
         print("Nothing to recover")
         return

-    opos=pos
+    opos = pos
     pos, sz = search_back(file, pos)
     if pos < sz:
         npos = shift_transactions_forward(index, tindex, file, pos, opos)
...
@@ -1566,7 +1583,6 @@ def recover(file_name):
            pos-opos, npos))

-
 def read_index(file, name, index, tindex, stop=b'\377'*8,
                ltid=z64, start=4, maxoid=z64, recover=0, read_only=0):
     """Scan the file storage and update the index.
...
@@ -1642,7 +1658,7 @@ def read_index(file, name, index, tindex, stop=b'\377'*8,
             logger.warning("%s time-stamp reduction at %s", name, pos)
         ltid = tid

-        if pos+(tl+8) > file_size or status=='c':
+        if pos+(tl+8) > file_size or status == 'c':
             # Hm, the data were truncated or the checkpoint flag wasn't
             # cleared. They may also be corrupted,
             # in which case, we don't want to totally lose the data.
...
@@ -1727,7 +1743,7 @@ def read_index(file, name, index, tindex, stop=b'\377'*8,
         if pos != tend:
             if recover:
                 return tpos, None, None
-            panic("%s data records don't add up at %s",name,tpos)
+            panic("%s data records don't add up at %s", name, tpos)

         # Read the (intentionally redundant) transaction length
         seek(pos)
...
@@ -1759,18 +1775,18 @@ def _truncate(file, name, pos):
     try:
         i = 0
         while 1:
-            oname='%s.tr%s' % (name, i)
+            oname = '%s.tr%s' % (name, i)
             if os.path.exists(oname):
                 i += 1
             else:
                 logger.warning("Writing truncated data from %s to %s",
                                name, oname)
-                o=open(oname,'wb')
+                o = open(oname, 'wb')
                 file.seek(pos)
                 cp(file, o, file_size-pos)
                 o.close()
                 break
-    except:
+    except:  # noqa: E722 do not use bare 'except'
         logger.exception("couldn\'t write truncated data for %s", name)
         raise StorageSystemError("Couldn't save truncated data")
...
@@ -1791,7 +1807,7 @@ class FileIterator(FileStorageFormatter):
         self._file_name = filename
         if file.read(4) != packed_version:
             raise FileStorageFormatError(file.name)
-        file.seek(0,2)
+        file.seek(0, 2)
         self._file_size = file.tell()
         if (pos < 4) or pos > self._file_size:
             raise ValueError("Given position is greater than the file size",
...
@@ -1852,13 +1868,13 @@ class FileIterator(FileStorageFormatter):
             # case, we'll just scan from the beginning if the file is
             # small enough, otherwise we'll fail.
             file.seek(self._file_size - 8)
-            l = u64(file.read(8))
-            if not (l + 12 <= self._file_size and
-                    self._read_num(self._file_size - l) == l):
+            l_ = u64(file.read(8))
+            if not (l_ + 12 <= self._file_size and
+                    self._read_num(self._file_size - l_) == l_):
                 if self._file_size < (1 << 20):
                     return self._scan_foreward(start)
                 raise ValueError("Can't find last transaction in large file")

-            pos2 = self._file_size - l - 8
+            pos2 = self._file_size - l_ - 8
             file.seek(pos2)
             tid2 = file.read(8)
             if tid2 < tid1:
...
@@ -1881,7 +1897,6 @@ class FileIterator(FileStorageFormatter):
     def _scan_forward(self, pos, start):
         logger.debug("Scan forward %s:%s looking for %r",
                      self._file_name, pos, start)
-        file = self._file
         while 1:
             # Read the transaction record
             h = self._read_txn_header(pos)
...
@@ -2016,6 +2031,7 @@ class TransactionRecord(_TransactionRecord):
     def __iter__(self):
         return TransactionRecordIterator(self)

+
 class TransactionRecordIterator(FileStorageFormatter):
     """Iterate over the transactions in a FileStorage file."""
...
@@ -2037,7 +2053,7 @@ class TransactionRecordIterator(FileStorageFormatter):
                 if pos + dlen > self._tend or h.tloc != self._tpos:
                     logger.warning("%s data record exceeds transaction"
-                                   " record at %s", file.name, pos)
+                                   " record at %s", self._file.name, pos)
                     break

                 self._pos = pos + dlen
...
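Unlike most hunks in this file, the ``TransactionRecordIterator`` change just above looks like a genuine correctness fix rather than formatting: the warning referenced a bare name ``file``, and a plausible reading (the full context is not shown in this view) is that no local ``file`` binding existed in that scope, so on Python 3, where ``file`` is no longer a builtin, the warning path would have raised ``NameError`` instead of logging; ``self._file.name`` is the attribute the class actually holds.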
@@ -2122,7 +2138,7 @@ class UndoSearch(object):
         if el:
             try:
                 e = loads(self.file.read(el))
-            except:
+            except:  # noqa: E722 do not use bare 'except'
                 pass
         d = {'id': encodebytes(tid).rstrip(),
              'time': TimeStamp(tid).timeTime(),
...
@@ -2132,6 +2148,7 @@ class UndoSearch(object):
         d.update(e)
         return d

+
 class FilePool(object):

     closed = False
...
@@ -2192,7 +2209,6 @@ class FilePool(object):
         while self._files:
             self._files.pop().close()
-
     def flush(self):
         """Empty read buffers.
...
src/ZODB/FileStorage/format.py  View file @ 6e5baffd

@@ -90,9 +90,11 @@ from ZODB.POSException import POSKeyError
 from ZODB.utils import u64, oid_repr, as_bytes
 from ZODB._compat import PY3

+
 class CorruptedError(Exception):
     pass

+
 class CorruptedDataError(CorruptedError):

     def __init__(self, oid=None, buf=None, pos=None):
...
@@ -110,6 +112,7 @@ class CorruptedDataError(CorruptedError):
             msg += " at %d" % self.pos
         return msg

+
 # the struct formats for the headers
 TRANS_HDR = ">8sQcHHH"
 DATA_HDR = ">8s8sQQHQ"
...
@@ -121,6 +124,7 @@ assert struct.calcsize(DATA_HDR) == DATA_HDR_LEN
 logger = logging.getLogger('ZODB.FileStorage.format')

+
 class FileStorageFormatter(object):
     """Mixin class that can read and write the low-level format."""
...
@@ -211,7 +215,7 @@ class FileStorageFormatter(object):
             self.ltid = th.tid
         if th.status == "c":
             self.fail(pos, "transaction with checkpoint flag set")
-        if not th.status in " pu": # recognize " ", "p", and "u" as valid
+        if th.status not in " pu":  # recognize " ", "p", and "u" as valid
             self.fail(pos, "invalid transaction status: %r", th.status)
         if th.tlen < th.headerlen():
             self.fail(pos, "invalid transaction header: "
...
@@ -232,9 +236,11 @@ class FileStorageFormatter(object):
         if dh.plen:
             self.fail(pos, "data record has back pointer and data")

+
 def DataHeaderFromString(s):
     return DataHeader(*struct.unpack(DATA_HDR, s))

+
 class DataHeader(object):
     """Header for a data record."""
...
@@ -259,12 +265,14 @@ class DataHeader(object):
     def recordlen(self):
         return DATA_HDR_LEN + (self.plen or 8)

+
 def TxnHeaderFromString(s):
     res = TxnHeader(*struct.unpack(TRANS_HDR, s))
     if PY3:
         res.status = res.status.decode('ascii')
     return res

+
 class TxnHeader(object):
     """Header for a transaction record."""
...
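The membership-test change in the format.py hunk above (``not th.status in " pu"`` becoming ``th.status not in " pu"``) is flake8 E713. The two spellings are equivalent because ``not x in y`` parses as ``not (x in y)``; ``not in`` simply states the intent directly:

    status = 'c'
    # 'not x in y' parses as 'not (x in y)', so both forms agree;
    # E713 prefers the explicit 'not in' operator.
    assert (not (status in ' pu')) == (status not in ' pu')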
src/ZODB/FileStorage/fsdump.py  View file @ 6e5baffd

@@ -20,6 +20,7 @@ from ZODB.FileStorage.format import DATA_HDR, DATA_HDR_LEN
 from ZODB.TimeStamp import TimeStamp
 from ZODB.utils import u64, get_pickle_metadata

+
 def fsdump(path, file=None, with_offset=1):
     iter = FileIterator(path)
     for i, trans in enumerate(iter):
...
@@ -54,10 +55,12 @@ def fsdump(path, file=None, with_offset=1):
               (j, u64(rec.oid), size, fullclass, bp)), file=file)
     iter.close()

+
 def fmt(p64):
     # Return a nicely formatted string for a packaged 64-bit value
     return "%016x" % u64(p64)

+
 class Dumper(object):
     """A very verbose dumper for debuggin FileStorage problems."""
...
@@ -87,13 +90,13 @@ class Dumper(object):
         print("transaction id: %s" % fmt(tid), file=self.dest)
         print("trec len: %d" % tlen, file=self.dest)
         print("status: %r" % status, file=self.dest)
-        user = descr = extra = ""
+        user = descr = ""
         if ul:
             user = self.file.read(ul)
         if dl:
             descr = self.file.read(dl)
         if el:
-            extra = self.file.read(el)
+            self.file.read(el)
         print("user: %r" % user, file=self.dest)
         print("description: %r" % descr, file=self.dest)
         print("len(extra): %d" % el, file=self.dest)
...
@@ -121,6 +124,7 @@ class Dumper(object):
             sbp = self.file.read(8)
             print("backpointer: %d" % u64(sbp), file=self.dest)

+
 def main():
     import sys
     fsdump(sys.argv[1])
...
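One subtlety in the ``Dumper`` hunk: dropping the unused ``extra`` binding keeps the ``self.file.read(el)`` call, because the read is still required to advance the file position past the extension bytes so that subsequent records are parsed at the right offset. The same read-and-discard discipline in miniature, over a hypothetical record layout:

    import io


    def skip_field(f, length):
        # Read and deliberately discard: the bytes are unused, but the
        # stream must end up positioned just after the field.
        f.read(length)


    buf = io.BytesIO(b"\x05helloREST")
    ext_len = buf.read(1)[0]   # 5
    skip_field(buf, ext_len)   # discard b"hello"
    assert buf.read() == b"REST"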
src/ZODB/FileStorage/fsoids.py  View file @ 6e5baffd

@@ -18,10 +18,14 @@ from ZODB.serialize import get_refs
 from ZODB.TimeStamp import TimeStamp

+
 # Extract module.class string from pickle.
 def get_class(pickle):
     return "%s.%s" % get_pickle_metadata(pickle)

+
 # Shorten a string for display.
 def shorten(s, size=50):
     if len(s) <= size:
         return s
...
@@ -35,6 +39,7 @@ def shorten(s, size=50):
     sep = " ... "
     return s[:nleading] + sep + s[-ntrailing:]

+
 class Tracer(object):
     """Trace all occurrences of a set of oids in a FileStorage.
...
@@ -84,7 +89,7 @@ class Tracer(object):
             self.oids[oid] = 0  # 0 revisions seen so far

     def _msg(self, oid, tid, *args):
-        self.msgs.append( (oid, tid, ' '.join(map(str, args))) )
+        self.msgs.append((oid, tid, ' '.join(map(str, args))))
         self._produced_msg = True

     def report(self):
...
@@ -98,7 +103,7 @@ class Tracer(object):
         NOT_SEEN = "this oid was not defined (no data record for it found)"
         for oid in oids:
             if oid not in oid2name:
-                msgs.append( (oid, None, NOT_SEEN) )
+                msgs.append((oid, None, NOT_SEEN))

         msgs.sort()  # oids are primary key, tids secondary
         current_oid = current_tid = None
...
src/ZODB/FileStorage/fspack.py  View file @ 6e5baffd

@@ -36,9 +36,11 @@ import ZODB.POSException
 logger = logging.getLogger(__name__)

+
 class PackError(ZODB.POSException.POSError):
     pass

+
 class PackCopier(FileStorageFormatter):

     def __init__(self, f, index, tindex):
...
@@ -144,6 +146,7 @@ class PackCopier(FileStorageFormatter):
         finally:
             self._file.seek(pos)

+
 class GC(FileStorageFormatter):

     def __init__(self, file, eof, packtime, gc, referencesf):
...
@@ -330,6 +333,7 @@ class GC(FileStorageFormatter):
         else:
             return []

+
 class FileStoragePacker(FileStorageFormatter):

     # path is the storage file path.
...
@@ -409,15 +413,15 @@ class FileStoragePacker(FileStorageFormatter):
         # try our best, but don't fail
         try:
             self._tfile.close()
-        except:
+        except:  # noqa: E722 do not use bare 'except'
             pass
         try:
             self._file.close()
-        except:
+        except:  # noqa: E722 do not use bare 'except'
             pass
         try:
             os.remove(self._name + ".pack")
-        except:
+        except:  # noqa: E722 do not use bare 'except'
             pass
         if self.blob_removed is not None:
             self.blob_removed.close()
...
@@ -483,13 +487,12 @@ class FileStoragePacker(FileStorageFormatter):
             if self.locked:
                 self._commit_lock.release()
             raise  # don't succeed silently
-        except:
+        except:  # noqa: E722 do not use bare 'except'
             if self.locked:
                 self._commit_lock.release()
             raise

     def copyToPacktime(self):
         offset = 0  # the amount of space freed by packing
         pos = self._metadata_size
         new_pos = pos
...
@@ -506,7 +509,6 @@ class FileStoragePacker(FileStorageFormatter):
             self._tfile.seek(new_pos - 8)
             self._tfile.write(p64(tlen))

-
             tlen = self._read_num(pos)
             if tlen != th.tlen:
                 self.fail(pos, "redundant transaction length does not "
...
@@ -546,8 +548,8 @@ class FileStoragePacker(FileStorageFormatter):
                 # record. There's a bug in ZEO blob support that causes
                 # duplicate data records.
                 rpos = self.gc.reachable.get(h.oid)
-                is_dup = (rpos
-                          and self._read_data_header(rpos).tid == h.tid)
+                is_dup = (
+                    rpos and self._read_data_header(rpos).tid == h.tid)
                 if not is_dup:
                     if h.oid not in self.gc.reachable:
                         self.blob_removed.write(
...
@@ -569,7 +571,6 @@ class FileStoragePacker(FileStorageFormatter):
         s = th.asString()
         new_tpos = self._tfile.tell()
         self._tfile.write(s)
-        new_pos = new_tpos + len(s)
         copy = 1

         if h.plen:
...
@@ -578,7 +579,6 @@ class FileStoragePacker(FileStorageFormatter):
             data = self.fetchDataViaBackpointer(h.oid, h.back)
         self.writePackedDataRecord(h, data, new_tpos)
         new_pos = self._tfile.tell()

-
         return new_tpos, pos
...
src/ZODB/FileStorage/interfaces.py  View file @ 6e5baffd

@@ -13,6 +13,7 @@
 ##############################################################################
 import zope.interface

+
 class IFileStoragePacker(zope.interface.Interface):

     def __call__(storage, referencesf, stop, gc):
...
@@ -58,6 +59,7 @@ class IFileStoragePacker(zope.interface.Interface):
         corresponding to the file records.
         """

+
 class IFileStorage(zope.interface.Interface):

     packer = zope.interface.Attribute(
...
src/ZODB/FileStorage/tests.py  View file @ 6e5baffd

@@ -33,6 +33,7 @@ checker = renormalizing.RENormalizing([
     (re.compile('data.fs:[0-9]+'), 'data.fs:<OFFSET>'),
     ])

+
 def pack_keep_old():
     """Should a copy of the database be kept?
...
@@ -106,6 +107,7 @@ directory for blobs is kept.)
     >>> db.close()
     """

+
 def pack_with_repeated_blob_records():
     """
     There is a bug in ZEO that causes duplicate bloc database records
...
@@ -144,6 +146,7 @@ def pack_with_repeated_blob_records():
     >>> db.close()
     """

+
 def _save_index():
     """
...
@@ -187,6 +190,7 @@ cleanup
     """

+
 def pack_disk_full_copyToPacktime():
     """Recover from a disk full situation by removing the `.pack` file
...
@@ -239,6 +243,7 @@ check the data we added
     >>> db.close()
     """

+
 def pack_disk_full_copyRest():
     """Recover from a disk full situation by removing the `.pack` file
...
@@ -307,6 +312,7 @@ check the data we added
     >>> db.close()
     """

+
 def test_suite():
     return unittest.TestSuite((
         doctest.DocFileSuite(
...
src/ZODB/MappingStorage.py
...
@@ -30,7 +30,7 @@ import zope.interface
@zope.interface.implementer(
    ZODB.interfaces.IStorage,
    ZODB.interfaces.IStorageIteration,
    )
class MappingStorage(object):
    """In-memory storage implementation
...
@@ -50,7 +50,8 @@ class MappingStorage(object):
        """
        self.__name__ = name
        self._data = {}  # {oid->{tid->pickle}}
        # {tid->TransactionRecord}
        self._transactions = BTrees.OOBTree.OOBTree()
        self._ltid = ZODB.utils.z64
        self._last_pack = None
        self._lock = ZODB.utils.RLock()
...
@@ -117,13 +118,13 @@ class MappingStorage(object):
        tids.reverse()
        return [
            dict(
                time=ZODB.TimeStamp.TimeStamp(tid).timeTime(),
                tid=tid,
                serial=tid,
                user_name=self._transactions[tid].user,
                description=self._transactions[tid].description,
                extension=self._transactions[tid].extension,
                size=len(tid_data[tid])
            )
            for tid in tids]
...
@@ -167,8 +168,8 @@ class MappingStorage(object):
        else:
            raise ZODB.POSException.POSKeyError(oid)

    # ZODB.interfaces.IStorage
    @ZODB.utils.locked(opened)
    def loadSerial(self, oid, serial):
        tid_data = self._data.get(oid)
...
@@ -192,7 +193,7 @@ class MappingStorage(object):
        if not self._data:
            return
        stop = ZODB.TimeStamp.TimeStamp(
            *time.gmtime(t)[:5] + (t % 60,)).raw()
        if self._last_pack is not None and self._last_pack >= stop:
            if self._last_pack == stop:
                return
...
@@ -298,7 +299,7 @@ class MappingStorage(object):
    # ZODB.interfaces.IStorage
    @ZODB.utils.locked(opened)
    def tpc_finish(self, transaction, func=lambda tid: None):
        if (transaction is not self._transaction):
            raise ZODB.POSException.StorageTransactionError(
                "tpc_finish called with wrong transaction")
...
@@ -332,6 +333,7 @@ class MappingStorage(object):
            raise ZODB.POSException.StorageTransactionError(
                "tpc_vote called with wrong transaction")


class TransactionRecord(object):

    status = ' '
...
@@ -357,11 +359,11 @@ class TransactionRecord(object):
            del self.data[oid]
        return not self.data


@zope.interface.implementer(ZODB.interfaces.IStorageRecordInformation)
class DataRecord(object):
    """Abstract base class for iterator protocol"""

    version = ''
    data_txn = None
...
@@ -370,5 +372,6 @@ class DataRecord(object):
        self.tid = tid
        self.data = data


def DB(*args, **kw):
    return ZODB.DB(MappingStorage(), *args, **kw)
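
The DB() helper above wires a fresh MappingStorage into ZODB.DB, which makes the class handy for tests and demos. A minimal usage sketch; the transaction import and root-mapping flow are standard ZODB, not part of this diff:

import transaction
import ZODB.MappingStorage

db = ZODB.MappingStorage.DB()   # ZODB.DB wrapped around a fresh in-memory storage
conn = db.open()
conn.root()['answer'] = 42      # persisted only for the life of the process
transaction.commit()
conn.close()
db.close()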
src/ZODB/POSException.py
...
@@ -18,20 +18,26 @@ $Id$"""
from ZODB.utils import oid_repr, readable_tid_repr

# BBB: We moved the two transactions to the transaction package
from transaction.interfaces import TransactionError  # noqa: F401 import unused
from transaction.interfaces import TransactionFailedError  # noqa: F401

import transaction.interfaces


def _fmt_undo(oid, reason):
    s = reason and (": %s" % reason) or ""
    return "Undo error %s%s" % (oid_repr(oid), s)


def _recon(class_, state):
    err = class_.__new__(class_)
    err.__setstate__(state)
    return err


_recon.__no_side_effects__ = True


class POSError(Exception):
    """Persistent object system error."""
...
@@ -49,9 +55,10 @@ class POSError(Exception):
        # the args would then get lost, leading to unprintable exceptions
        # and worse.  Manually assign to args from the state to be sure
        # this doesn't happen.
        super(POSError, self).__setstate__(state)
        self.args = state['args']


class POSKeyError(POSError, KeyError):
    """Key not found in database."""
...
@@ -143,6 +150,7 @@ class ConflictError(POSError, transaction.interfaces.TransientError):
    def get_serials(self):
        return self.serials


class ReadConflictError(ConflictError):
    """Conflict detected when object was requested to stay unchanged.
...
@@ -156,16 +164,19 @@ class ReadConflictError(ConflictError):
    - object is found to be removed, and
    - there is possibility that database pack was running simultaneously.
    """

    def __init__(self, message=None, object=None, serials=None, **kw):
        if message is None:
            message = "database read conflict error"
        ConflictError.__init__(self, message=message, object=object,
                               serials=serials, **kw)


class BTreesConflictError(ConflictError):
    """A special subclass for BTrees conflict errors."""

    msgs = [
        # 0; i2 or i3 bucket split; positions are all -1
        'Conflicting bucket split',

        # 1; keys the same, but i2 and i3 values differ, and both values
...
@@ -226,11 +237,14 @@ class BTreesConflictError(ConflictError):
                                               self.p2,
                                               self.p3,
                                               self.reason)

    def __str__(self):
        return "BTrees conflict error at %d/%d/%d: %s" % (
            self.p1, self.p2, self.p3, self.msgs[self.reason])


class DanglingReferenceError(POSError,
                             transaction.interfaces.TransactionError):
    """An object has a persistent reference to a missing object.

    If an object is stored and it has a reference to another object
...
@@ -258,9 +272,11 @@ class DanglingReferenceError(POSError, transaction.interfaces.TransactionError):
class VersionError(POSError):
    """An error in handling versions occurred."""


class VersionCommitError(VersionError):
    """An invalid combination of versions was used in a version commit."""


class VersionLockError(VersionError, transaction.interfaces.TransactionError):
    """Modification to an object modified in an unsaved version.
...
@@ -269,6 +285,7 @@ class VersionLockError(VersionError, transaction.interfaces.TransactionError):
    """

############################################################################


class UndoError(POSError):
    """An attempt was made to undo a non-undoable transaction."""
...
@@ -279,6 +296,7 @@ class UndoError(POSError):
    def __str__(self):
        return _fmt_undo(self._oid, self._reason)


class MultipleUndoErrors(UndoError):
    """Several undo errors occurred during a single transaction."""
...
@@ -290,33 +308,43 @@ class MultipleUndoErrors(UndoError):
    def __str__(self):
        return "\n".join([_fmt_undo(*pair) for pair in self._errs])


class StorageError(POSError):
    """Base class for storage based exceptions."""


class StorageTransactionError(StorageError):
    """An operation was invoked for an invalid transaction or state."""


class StorageSystemError(StorageError):
    """Panic! Internal storage error!"""


class MountedStorageError(StorageError):
    """Unable to access mounted storage."""


class ReadOnlyError(StorageError):
    """Unable to modify objects in a read-only storage."""


class TransactionTooLargeError(StorageTransactionError):
    """The transaction exhausted some finite storage resource."""


class ExportError(POSError):
    """An export file doesn't have the right format."""


class Unsupported(POSError):
    """A feature was used that is not supported by the storage."""


class ReadOnlyHistoryError(POSError):
    """Unable to add or modify objects in an historical connection."""


class InvalidObjectReference(POSError):
    """An object contains an invalid reference to another object.
...
@@ -329,6 +357,7 @@ class InvalidObjectReference(POSError):
    TODO: The exception ought to have a member that is the invalid object.
    """


class ConnectionStateError(POSError):
    """A Connection isn't in the required state for an operation.
...
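
ConflictError derives from TransientError, which is the signal that a transaction may simply be retried. A hedged sketch of the conventional retry loop; the three-attempt policy and the 'counter' key are illustrative only:

import transaction
from ZODB.POSException import ConflictError

def increment(conn, retries=3):
    for _ in range(retries):
        try:
            root = conn.root()
            root['counter'] = root.get('counter', 0) + 1
            transaction.commit()
            return
        except ConflictError:
            transaction.abort()  # a concurrent transaction won; retry
    raise ConflictError("giving up after %d attempts" % retries)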
src/ZODB/__init__.py
...
@@ -12,6 +12,7 @@
#
##############################################################################
from ZODB.DB import DB, connection

import sys
from persistent import TimeStamp
...
@@ -24,5 +25,3 @@ sys.modules['ZODB.PersistentMapping'] = sys.modules['persistent.mapping']
sys.modules['ZODB.PersistentList'] = sys.modules['persistent.list']

del mapping, list, sys
src/ZODB/_compat.py
...
@@ -11,13 +11,13 @@
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from zodbpickle import binary  # noqa: F401 import unused

import sys

from six import PY3

IS_JYTHON = sys.platform.startswith('java')

_protocol = 3

if not PY3:
    # Python 2.x
...
@@ -42,7 +42,8 @@ else:
    # http://bugs.python.org/issue6784
    import zodbpickle.pickle
    HIGHEST_PROTOCOL = 3
    from _compat_pickle import IMPORT_MAPPING  # noqa: F401 import unused
    from _compat_pickle import NAME_MAPPING  # noqa: F401 import unused

    class Pickler(zodbpickle.pickle.Pickler):
        def __init__(self, f, protocol=None):
...
@@ -92,6 +93,7 @@ def PersistentPickler(persistent_id, *args, **kwargs):
    p.persistent_id = persistent_id
    return p


def PersistentUnpickler(find_global, load_persistent, *args, **kwargs):
    """
    Returns a :class:`Unpickler` that will use the given `find_global` function
...
@@ -104,7 +106,8 @@ def PersistentUnpickler(find_global, load_persistent, *args, **kwargs):
    if find_global is not None:
        unpickler.find_global = find_global
        try:
            # PyPy, zodbpickle, the non-c-accelerated version
            unpickler.find_class = find_global
        except AttributeError:
            pass
    if load_persistent is not None:
...
@@ -118,7 +121,7 @@ try:
    from cStringIO import StringIO as BytesIO
except ImportError:
    # Python 3.x
    from io import BytesIO  # noqa: F401 import unused

try:
...
@@ -126,14 +129,15 @@ try:
    from base64 import decodebytes, encodebytes
except ImportError:
    # Python 2.x
    from base64 import decodestring as decodebytes  # noqa: F401 import unused
    from base64 import encodestring as encodebytes  # noqa: F401 import unused


# Python 3.x: ``hasattr()`` swallows only AttributeError.
def py2_hasattr(obj, name):
    try:
        getattr(obj, name)
    except:  # noqa: E722 do not use bare 'except'
        return False
    return True
...
@@ -151,9 +155,10 @@ else:
try:
    TEXT = unicode
except NameError:  # pragma NO COVER Py3k
    TEXT = str


def ascii_bytes(x):
    if isinstance(x, TEXT):
        x = x.encode('ascii')
...
src/ZODB/blob.py
...
@@ -35,7 +35,6 @@ from ZODB._compat import BytesIO
from ZODB._compat import PersistentUnpickler
from ZODB._compat import decodebytes
from ZODB._compat import INT_TYPES
from ZODB._compat import PY3
...
@@ -62,17 +61,18 @@ valid_modes = 'r', 'w', 'r+', 'a', 'c'
# of a weakref when the weakref object dies at the same time
# as the object it refers to.  In other words, this doesn't work:
#   self._ref = weakref.ref(self, lambda ref: ...)
# because the function never gets called
# (https://bitbucket.org/pypy/pypy/issue/2030).
# The Blob class used to use that pattern to clean up uncommitted
# files; now we use this module-level global (but still keep a
# reference in the Blob in case we need premature cleanup)
_blob_close_refs = []


@zope.interface.implementer(ZODB.interfaces.IBlob)
class Blob(persistent.Persistent):
    """A BLOB supports efficient handling of large data within ZODB."""

    _p_blob_uncommitted = None  # Filename of the uncommitted (dirty) data
    _p_blob_committed = None  # Filename of the committed data
    _p_blob_ref = None  # weakreference to self; also in _blob_close_refs
...
@@ -143,8 +143,7 @@ class Blob(persistent.Persistent):
                or not self._p_blob_committed
                or self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)):
            raise BlobError('Uncommitted changes')
        return self._p_jar._storage.openCommittedBlobFile(
            self._p_oid, self._p_serial)
...
@@ -217,8 +216,7 @@ class Blob(persistent.Persistent):
                or not self._p_blob_committed
                or self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)):
            raise BlobError('Uncommitted changes')
        result = self._p_blob_committed
...
@@ -254,7 +252,7 @@ class Blob(persistent.Persistent):
        try:
            rename_or_copy_blob(filename, target, chmod=False)
        except:  # noqa: E722 do not use bare 'except'
            # Recover from the failed consumption: First remove the file, it
            # might exist and mark the pointer to the uncommitted file.
            self._p_blob_uncommitted = None
...
@@ -317,6 +315,7 @@ class Blob(persistent.Persistent):
        self._p_blob_uncommitted = self._p_blob_ref = None
        return filename


class BlobFile(file):
    """A BlobFile that holds a file handle to actual blob data.
...
@@ -348,8 +347,10 @@ class BlobFile(file):
        # prohibit it on all versions.
        raise TypeError("Pickling a BlobFile is not allowed")


_pid = str(os.getpid())


def log(msg, level=logging.INFO, subsys=_pid, exc_info=False):
    message = "(%s) %s" % (subsys, msg)
    logger.log(level, message, exc_info=exc_info)
...
@@ -394,8 +395,8 @@ class FilesystemHelper(object):
            layout = layout_marker.read().strip()
            if layout != self.layout_name:
                raise ValueError(
                    "Directory layout `%s` selected for blob directory %s, but"
                    " marker found for layout `%s`" %
                    (self.layout_name, self.base_dir, layout))

    def isSecure(self, path):
...
@@ -541,6 +542,7 @@ class NoBlobsFileSystemHelper(object):
class BlobStorageError(Exception):
    """The blob storage encountered an invalid state."""


def auto_layout_select(path):
    # A heuristic to look at a path and determine which directory layout to
    # use.
...
@@ -618,8 +620,10 @@ class BushyLayout(object):
        filename = "%s%s" % (utils.tid_repr(tid), BLOB_SUFFIX)
        return os.path.join(oid_path, filename)


LAYOUTS['bushy'] = BushyLayout()


class LawnLayout(BushyLayout):
    """A shallow directory layout for blob directories.
...
@@ -640,8 +644,10 @@ class LawnLayout(BushyLayout):
        except (TypeError, binascii.Error):
            raise ValueError('Not a valid OID path: `%s`' % path)


LAYOUTS['lawn'] = LawnLayout()


class BlobStorageMixin(object):
    """A mix-in to help storages support blobs."""
...
@@ -738,7 +744,6 @@ class BlobStorage(BlobStorageMixin):
    """A wrapper/proxy storage to support blobs.
    """

    def __init__(self, base_directory, storage, layout='automatic'):
        assert not ZODB.interfaces.IBlobStorage.providedBy(storage)
        self.__storage = storage
...
@@ -780,8 +785,8 @@ class BlobStorage(BlobStorageMixin):
    def tpc_abort(self, *arg, **kw):
        # We need to override the base storage's abort instead of
        # providing an _abort method because methods found on the proxied
        # object aren't rebound to the proxy
        self.__storage.tpc_abort(*arg, **kw)
        self._blob_tpc_abort()
...
@@ -905,7 +910,10 @@ class BlobStorage(BlobStorageMixin):
            res = BlobStorage(base_dir, s)
        return res


copied = logging.getLogger('ZODB.blob.copied').debug


def rename_or_copy_blob(f1, f2, chmod=True):
    """Try to rename f1 to f2, fallback to copy.
...
@@ -926,6 +934,7 @@ def rename_or_copy_blob(f1, f2, chmod=True):
    if chmod:
        set_not_writable(f2)


if sys.platform == 'win32':
    # On Windows, you can't remove read-only files, so make the
    # file writable first.
...
@@ -952,6 +961,7 @@ def find_global_Blob(module, class_):
    if module == 'ZODB.blob' and class_ == 'Blob':
        return Blob


def is_blob_record(record):
    """Check whether a database record is a blob record.
...
@@ -960,7 +970,8 @@ def is_blob_record(record):
    """
    if record and (b'ZODB.blob' in record):
        unpickler = PersistentUnpickler(
            find_global_Blob, None, BytesIO(record))
        try:
            return unpickler.load() is Blob
...
@@ -971,6 +982,7 @@ def is_blob_record(record):
    return False


def copyTransactionsFromTo(source, destination):
    for trans in source.iterator():
        destination.tpc_begin(trans, trans.tid, trans.status)
...
@@ -1001,6 +1013,8 @@ def copyTransactionsFromTo(source, destination):
NO_WRITE = ~ (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
READ_PERMS = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH


def set_not_writable(path):
    perms = stat.S_IMODE(os.lstat(path).st_mode)
...
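
The Blob workflow implied by valid_modes and committed() above, as a hedged sketch; `conn` is assumed to be an open connection to a blob-enabled storage:

import transaction
from ZODB.blob import Blob

blob = Blob()
with blob.open('w') as f:        # 'w' is one of valid_modes
    f.write(b'a large payload')

conn.root()['attachment'] = blob
transaction.commit()
path = blob.committed()          # read-only filename; may disappear after pack/GC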
src/ZODB/broken.py
...
@@ -25,6 +25,7 @@ from ZODB._compat import NAME_MAPPING
broken_cache = {}


@zope.interface.implementer(ZODB.interfaces.IBroken)
class Broken(object):
    """Broken object base class
...
@@ -99,7 +100,6 @@ class Broken(object):
    >>> broken_cache.clear()
    """

    __Broken_state__ = __Broken_initargs__ = None

    __name__ = 'broken object'
...
@@ -131,6 +131,7 @@ class Broken(object):
    def __setattr__(self, name, value):
        raise BrokenModified("Can't change broken objects")


def find_global(modulename, globalname,
                # These are *not* optimizations. Callers can override these.
                Broken=Broken, type=type,
...
@@ -220,6 +221,7 @@ def find_global(modulename, globalname,
        broken_cache[(modulename, globalname)] = class_
    return class_


def rebuild(modulename, globalname, *args):
    """Recreate a broken object, possibly recreating the missing class
...
@@ -257,10 +259,12 @@ def rebuild(modulename, globalname, *args):
    class_ = find_global(modulename, globalname)
    return class_.__new__(class_, *args)


class BrokenModified(TypeError):
    """Attempt to modify a broken object
    """


class PersistentBroken(Broken, persistent.Persistent):
    r"""Persistent broken objects
...
@@ -347,6 +351,7 @@ class PersistentBroken(Broken, persistent.Persistent):
    def __getnewargs__(self):
        return self.__Broken_newargs__


def persistentBroken(class_):
    try:
        return class_.__dict__['__Broken_Persistent__']
...
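
find_global() above substitutes a Broken subclass when the real class cannot be imported. A minimal sketch with made-up module and class names:

from ZODB.broken import Broken, find_global

cls = find_global('nonexistent.module', 'Gone')
assert issubclass(cls, Broken)
obj = cls()                     # instances keep their pickled state ...
obj.__setstate__({'x': 1})      # ... but __setattr__ raises BrokenModified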
src/ZODB/config.py
...
@@ -29,18 +29,21 @@ _db_schema = None
s_schema_path = os.path.join(ZODB.__path__[0], "storage.xml")
_s_schema = None


def getDbSchema():
    global _db_schema
    if _db_schema is None:
        _db_schema = ZConfig.loadSchema(db_schema_path)
    return _db_schema


def getStorageSchema():
    global _s_schema
    if _s_schema is None:
        _s_schema = ZConfig.loadSchema(s_schema_path)
    return _s_schema


def databaseFromString(s):
    """Create a database from a database-configuration string.
...
@@ -56,6 +59,7 @@ def databaseFromString(s):
    """
    return databaseFromFile(StringIO(s))


def databaseFromFile(f):
    """Create a database from a file object that provides configuration.
...
@@ -64,6 +68,7 @@ def databaseFromFile(f):
    config, handle = ZConfig.loadConfigFile(getDbSchema(), f)
    return databaseFromConfig(config.database)


def databaseFromURL(url):
    """Load a database from URL (or file name) that provides configuration.
...
@@ -72,6 +77,7 @@ def databaseFromURL(url):
    config, handler = ZConfig.loadConfig(getDbSchema(), url)
    return databaseFromConfig(config.database)


def databaseFromConfig(database_factories):
    databases = {}
    first = None
...
@@ -82,17 +88,20 @@ def databaseFromConfig(database_factories):
    return first


def storageFromString(s):
    """Create a storage from a storage-configuration string.
    """
    return storageFromFile(StringIO(s))


def storageFromFile(f):
    """Create a storage from a file object providing storage-configuration.
    """
    config, handle = ZConfig.loadConfigFile(getStorageSchema(), f)
    return storageFromConfig(config.storage)


def storageFromURL(url):
    """\
    Create a storage from a URL (or file name) providing storage-configuration.
...
@@ -100,9 +109,11 @@ def storageFromURL(url):
    config, handler = ZConfig.loadConfig(getStorageSchema(), url)
    return storageFromConfig(config.storage)


def storageFromConfig(section):
    return section.open()


class BaseConfig(object):
    """Object representing a configured storage or database.
...
@@ -124,6 +135,7 @@ class BaseConfig(object):
        """Open and return the storage object."""
        raise NotImplementedError


class ZODBDatabase(BaseConfig):

    def open(self, databases=None):
...
@@ -150,21 +162,23 @@ class ZODBDatabase(BaseConfig):
                cache_size_bytes=section.cache_size_bytes,
                historical_pool_size=section.historical_pool_size,
                historical_cache_size=section.historical_cache_size,
                historical_cache_size_bytes=section.historical_cache_size_bytes,  # noqa: E501 line too long
                historical_timeout=section.historical_timeout,
                database_name=section.database_name or self.name or '',
                databases=databases,
                **options)
        except:  # noqa: E722 do not use bare 'except'
            storage.close()
            raise


class MappingStorage(BaseConfig):

    def open(self):
        from ZODB.MappingStorage import MappingStorage
        return MappingStorage(self.config.name)


class DemoStorage(BaseConfig):

    def open(self):
...
@@ -181,6 +195,7 @@ class DemoStorage(BaseConfig):
        from ZODB.DemoStorage import DemoStorage
        return DemoStorage(self.config.name, base=base, changes=changes)


class FileStorage(BaseConfig):

    def open(self):
...
@@ -206,6 +221,7 @@ class FileStorage(BaseConfig):
        return FileStorage(config.path, **options)


class BlobStorage(BaseConfig):

    def open(self):
...
@@ -225,7 +241,8 @@ class ZEOClient(BaseConfig):
        if self.config.blob_cache_size is not None:
            options['blob_cache_size'] = self.config.blob_cache_size
        if self.config.blob_cache_size_check is not None:
            options['blob_cache_size_check'] = (
                self.config.blob_cache_size_check)
        if self.config.client_label is not None:
            options['client_label'] = self.config.client_label
...
@@ -249,6 +266,7 @@ class ZEOClient(BaseConfig):
            realm=self.config.realm,
            **options)


class BDBStorage(BaseConfig):

    def open(self):
...
@@ -261,12 +279,14 @@ class BDBStorage(BaseConfig):
            setattr(bconf, name, getattr(self.config, name))
        return storageclass(self.config.envdir, config=bconf)


class BDBMinimalStorage(BDBStorage):

    def get_storageclass(self):
        import BDBStorage.BDBMinimalStorage
        return BDBStorage.BDBMinimalStorage.BDBMinimalStorage


class BDBFullStorage(BDBStorage):

    def get_storageclass(self):
...
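
databaseFromString() above is the usual entry point for ZConfig-driven setup. A minimal sketch using the <mappingstorage> section handled by the MappingStorage factory in this file:

import ZODB.config

db = ZODB.config.databaseFromString("""
<zodb>
  <mappingstorage>
  </mappingstorage>
</zodb>
""")
conn = db.open()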
src/ZODB/conversionhack.py
...
@@ -14,21 +14,29 @@
import persistent.mapping


class fixer(object):
    def __of__(self, parent):
        def __setstate__(state, self=parent):
            self._container = state
            del self.__setstate__
        return __setstate__


fixer = fixer()


class hack(object):
    pass


hack = hack()


def __basicnew__():
    r = persistent.mapping.PersistentMapping()
    r.__setstate__ = fixer
    return r


hack.__basicnew__ = __basicnew__
src/ZODB/event.py
...
@@ -14,5 +14,5 @@
try:
    from zope.event import notify  # noqa: F401 import unused
except ImportError:
    def notify(event):
        return None
src/ZODB/fsIndex.py
...
@@ -55,17 +55,21 @@ from ZODB._compat import _protocol
def num2str(n):
    return struct.pack(">Q", n)[2:]


def str2num(s):
    return struct.unpack(">Q", b"\000\000" + s)[0]


def prefix_plus_one(s):
    num = str2num(s)
    return num2str(num + 1)


def prefix_minus_one(s):
    num = str2num(s)
    return num2str(num - 1)


def ensure_bytes(s):
    # on Python 3 we might pickle bytes and unpickle unicode strings
    return s.encode('ascii') if not isinstance(s, bytes) else s
...
@@ -80,8 +84,8 @@ class fsIndex(object):
    def __getstate__(self):
        return dict(
            state_version=1,
            _data=[(k, v.toString())
                   for (k, v) in six.iteritems(self._data)]
        )
...
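
num2str()/str2num() above encode oids and positions as 6-byte big-endian strings by dropping the two high-order bytes of an 8-byte value. A round-trip sketch; the sample value is arbitrary:

import struct

def num2str(n):
    return struct.pack(">Q", n)[2:]   # 8 bytes, minus the two high-order bytes

def str2num(s):
    return struct.unpack(">Q", b"\000\000" + s)[0]

key = num2str(0x0102030405)
assert len(key) == 6
assert str2num(key) == 0x0102030405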
src/ZODB/fsrecover.py
...
@@ -94,12 +94,15 @@ def die(mess='', show_docstring=False):
        print(__doc__ % sys.argv[0], file=sys.stderr)
    sys.exit(1)


class ErrorFound(Exception):
    pass


def error(mess, *args):
    raise ErrorFound(mess % args)


def read_txn_header(f, pos, file_size, outp, ltid):
    # Read the transaction record
    f.seek(pos)
...
@@ -107,7 +110,7 @@ def read_txn_header(f, pos, file_size, outp, ltid):
    if len(h) < 23:
        raise EOFError

    tid, stl, status, ul, dl, el = unpack(">8s8scHHH", h)
    status = as_text(status)
    tl = u64(stl)
...
@@ -157,6 +160,7 @@ def read_txn_header(f, pos, file_size, outp, ltid):
    return pos, result, tid


def truncate(f, pos, file_size, outp):
    """Copy data from pos to end of f to a .trNNN file."""
...
@@ -176,6 +180,7 @@ def truncate(f, pos, file_size, outp):
    f.seek(pos)
    tr.close()


def copy(src, dst, n):
    while n:
        buf = src.read(8096)
...
@@ -186,6 +191,7 @@ def copy(src, dst, n):
        dst.write(buf)
        n -= len(buf)


def scan(f, pos):
    """Return a potential transaction location following pos in f.
...
@@ -206,20 +212,21 @@ def scan(f, pos):
        s = 0
        while 1:
            l_ = data.find(b".", s)
            if l_ < 0:
                pos += len(data)
                break
            # If we are less than 8 bytes from the end of the
            # string, we need to read more data.
            s = l_ + 1
            if s > len(data) - 8:
                pos += l_
                break
            tl = u64(data[s:s+8])
            if tl < pos:
                return pos + s + 8


def iprogress(i):
    if i % 2:
        print(".", end=' ')
...
@@ -227,10 +234,12 @@ def iprogress(i):
        print((i/2) % 10, end=' ')
    sys.stdout.flush()


def progress(p):
    for i in range(p):
        iprogress(i)


def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "fv:pP:")
...
@@ -256,6 +265,7 @@ def main():
    recover(inp, outp, verbose, partial, force, pack)


def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
    print("Recovering", inp, "into", outp)
...
@@ -266,7 +276,7 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
    if f.read(4) != ZODB.FileStorage.packed_version:
        die("input is not a file storage")

    f.seek(0, 2)
    file_size = f.tell()
    ofs = ZODB.FileStorage.FileStorage(outp, create=1)
...
@@ -332,11 +342,11 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
            for r in txn:
                if verbose > 1:
                    if r.data is None:
                        l_ = "bp"
                    else:
                        l_ = len(r.data)
                    print("%7d %s" % (u64(r.oid), l_))
                ofs.restore(r.oid, r.tid, r.data, '', r.data_txn, txn)
                nrec += 1
...
@@ -370,7 +380,6 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
            prog1 = prog1 + 1
            iprogress(prog1)

    bad = file_size - undone - ofs._pos
    print("\n%s bytes removed during recovery" % bad)
...
@@ -385,5 +394,6 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
        ofs.close()
    f.close()


if __name__ == "__main__":
    main()
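
A hedged command-line sketch of the recovery flow above; the option letters come from the getopt string "fv:pP:" parsed in main(), and the file names are examples:

python fsrecover.py Data.fs Recovered.fs           # basic recovery
python fsrecover.py -v 1 -P 86400 Data.fs Out.fs   # verbose, with a pack point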
src/ZODB/fstools.py
...
@@ -100,6 +100,7 @@ class TxnHeader(object):
        tlen = u64(self._file.read(8))
        return TxnHeader(self._file, self._pos - (tlen + 8))


class DataHeader(object):
    """Object representing a data record header.
...
@@ -138,6 +139,7 @@ class DataHeader(object):
            off += 8  # backpointer
        return off


def prev_txn(f):
    """Return transaction located before current file position."""
    f.seek(-8, 1)
...
src/ZODB/interfaces.py
...
@@ -267,6 +267,7 @@ class IConnection(Interface):
        separate object.
        """


class IStorageWrapper(Interface):
    """Storage wrapper interface
...
@@ -296,7 +297,7 @@ class IStorageWrapper(Interface):
    This interface may be implemented by storage adapters or other
    intermediaries.  For example, a storage adapter that provides
    encryption and/or compression will apply record transformations
    in it's references method.
    """
...
@@ -343,6 +344,7 @@ class IStorageWrapper(Interface):
        """Return untransformed data
        """


IStorageDB = IStorageWrapper  # for backward compatibility
...
@@ -371,7 +373,6 @@ class IDatabase(IStorageDB):
        this attribute.
        """)

    def open(transaction_manager=None, serial=''):
        """Return an IConnection object for use by application code.
...
@@ -421,7 +422,6 @@ class IDatabase(IStorageDB):
        also included if they don't conflict with the keys above.
        """

    def pack(t=None, days=0):
        """Pack the storage, deleting unused object revisions.
...
@@ -433,7 +433,7 @@ class IDatabase(IStorageDB):
        usually an expensive operation.

        There are two optional arguments that can be used to set the
        pack time: t, pack time in seconds since the epoch, and days,
        the number of days to subtract from t or from the current
        time if t is not specified.
        """
...
@@ -539,6 +539,7 @@ class IDatabase(IStorageDB):
        should also close all the Connections.
        """


class IStorageTransactionMetaData(Interface):
    """Provide storage transaction meta data.
...
@@ -628,13 +629,13 @@ class IStorage(Interface):
        The format and interpretation of this name is storage
        dependent. It could be a file name, a database name, etc..

        This is used solely for informational purposes.
        """

    def getSize():
        """An approximate size of the database, in bytes.

        This is used solely for informational purposes.
        """

    def history(oid, size=1):
...
@@ -660,7 +661,7 @@ class IStorage(Interface):
        user_name
            The bytes user identifier, if any (or an empty string) of the
            user on whose behalf the revision was committed.

        description
            The bytes transaction description for the transaction that
...
@@ -704,7 +705,7 @@ class IStorage(Interface):
    def __len__():
        """The approximate number of objects in the storage

        This is used solely for informational purposes.
        """

    def loadBefore(oid, tid):
...
@@ -821,7 +822,7 @@ class IStorage(Interface):
        This call is ignored is the storage is not participating in
        two-phase commit or if the given transaction is not the same
        as the transaction the storage is committing.
        """

    def tpc_begin(transaction):
...
@@ -837,7 +838,7 @@ class IStorage(Interface):
        current transaction ends (commits or aborts).
        """

    def tpc_finish(transaction, func=lambda tid: None):
        """Finish the transaction, making any transaction changes permanent.

        Changes must be made permanent at this point.
...
@@ -863,7 +864,7 @@ class IStorage(Interface):
        The argument is the same object passed to tpc_begin.

        This call raises a StorageTransactionError if the storage
        isn't participating in two-phase commit or if it is committing
        a different transaction.

        If a transaction can be committed by a storage, then the
...
@@ -901,7 +902,7 @@ class IMultiCommitStorage(IStorage):
        the return value is always None.
        """

    def tpc_finish(transaction, func=lambda tid: None):
        """Finish the transaction, making any transaction changes permanent.

        See IStorage.store. For objects implementing this interface,
...
@@ -954,7 +955,6 @@ class IStorageRestoreable(IStorage):
    #     including the existing FileStorage implementation), that
    #     failed to take into account records after the pack time.

    def restore(oid, serial, data, version, prev_txn, transaction):
        """Write data already committed in a separate database
...
@@ -996,6 +996,7 @@ class IStorageRecordInformation(Interface):
    data = Attribute("The data record, bytes")
    data_txn = Attribute("The previous transaction id, bytes")


class IStorageTransactionInformation(IStorageTransactionMetaData):
    """Provide information about a storage transaction.
...
@@ -1003,7 +1004,7 @@ class IStorageTransactionInformation(IStorageTransactionMetaData):
    Note that this may contain a status field used by FileStorage to
    support packing. At some point, this will go away when FileStorage
    has a better pack algorithm.
    """

    tid = Attribute("Transaction id")
...
@@ -1034,6 +1035,7 @@ class IStorageIteration(Interface):
        """


class IStorageUndoable(IStorage):
    """A storage supporting transactional undo.
    """
...
@@ -1245,6 +1247,7 @@ class IMVCCStorage(IStorage):
        A POSKeyError is raised if there is no record for the object id.
        """


class IMVCCPrefetchStorage(IMVCCStorage):

    def prefetch(oids):
...
@@ -1254,6 +1257,7 @@ class IMVCCPrefetchStorage(IMVCCStorage):
        more than once.
        """


class IMVCCAfterCompletionStorage(IMVCCStorage):

    def afterCompletion():
...
@@ -1264,6 +1268,7 @@ class IMVCCAfterCompletionStorage(IMVCCStorage):
        See ``transaction.interfaces.ISynchronizer.afterCompletion``.
        """


class IStorageCurrentRecordIteration(IStorage):

    def record_iternext(next=None):
...
@@ -1271,6 +1276,7 @@ class IStorageCurrentRecordIteration(IStorage):
        Use like this:

        >>> storage = ...
        >>> next = None
        >>> while 1:
        ...     oid, tid, data, next = storage.record_iternext(next)
        ...
...
@@ -1280,6 +1286,7 @@ class IStorageCurrentRecordIteration(IStorage):
        """


class IExternalGC(IStorage):

    def deleteObject(oid, serial, transaction):
...
@@ -1288,7 +1295,7 @@ class IExternalGC(IStorage):
        This method marks an object as deleted via a new object
        revision. Subsequent attempts to load current data for the
        object will fail with a POSKeyError, but loads for
        non-current data will succeed if there are previous
        non-delete records. The object will be removed from the
        storage when all not-delete records are removed.
...
@@ -1299,6 +1306,7 @@ class IExternalGC(IStorage):
        commit.
        """


class ReadVerifyingStorage(IStorage):

    def checkCurrentSerialInTransaction(oid, serial, transaction):
...
@@ -1315,6 +1323,7 @@ class ReadVerifyingStorage(IStorage):
        through the end of the transaction.
        """


class IBlob(Interface):
    """A BLOB supports efficient handling of large data within ZODB."""
...
@@ -1325,7 +1334,7 @@ class IBlob(Interface):
        mode: Mode to open the file with. Possible values: r,w,r+,a,c

        The mode 'c' is similar to 'r', except that an ordinary file
        object is returned and may be used in a separate transaction
        and after the blob's database connection has been closed.
...
@@ -1335,8 +1344,8 @@ class IBlob(Interface):
        """Return a file name for committed data.

        The returned file name may be opened for reading or handed to
        other processes for reading.  The file name isn't guaranteed
        to be valid indefinitely.  The file may be removed in the
        future as a result of garbage collection depending on system
        configuration.
...
@@ -1412,6 +1421,7 @@ class IBlobStorage(Interface):
        If Blobs use this, then commits can be performed with a simple rename.
        """


class IBlobStorageRestoreable(IBlobStorage, IStorageRestoreable):

    def restoreBlob(oid, serial, data, blobfilename, prev_txn, transaction):
...
@@ -1446,6 +1456,7 @@ class IBroken(Interface):
    __Broken_initargs__ = Attribute("Arguments passed to __init__.")
    __Broken_state__ = Attribute("Value passed to __setstate__.")


class BlobError(Exception):
    pass
...
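
Fleshing out the record_iternext() doctest above into a complete traversal loop; `storage` is assumed to provide IStorageCurrentRecordIteration (FileStorage does), and process() is a placeholder:

next_ = None
while True:
    oid, tid, data, next_ = storage.record_iternext(next_)
    process(oid, tid, data)   # placeholder for application code
    if next_ is None:
        break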
src/ZODB/mvccadapter.py
...
@@ -12,6 +12,7 @@ import zope.interface
from . import interfaces, serialize, POSException
from .utils import p64, u64, Lock, oid_repr, tid_repr


class Base(object):

    _copy_methods = (
...
@@ -37,6 +38,7 @@ class Base(object):
    def __len__(self):
        return len(self._storage)


class MVCCAdapter(Base):

    def __init__(self, storage):
...
@@ -63,6 +65,7 @@ class MVCCAdapter(Base):
        self._instances.remove(instance)

    closed = False

    def close(self):
        if not self.closed:
            self.closed = True
...
@@ -92,6 +95,7 @@ class MVCCAdapter(Base):
    def pack(self, pack_time, referencesf):
        return self._storage.pack(pack_time, referencesf)


class MVCCAdapterInstance(Base):

    _copy_methods = Base._copy_methods + (
...
@@ -107,7 +111,7 @@ class MVCCAdapterInstance(Base):
        Base.__init__(self, base._storage)
        self._lock = Lock()
        self._invalidations = set()
        self._sync = getattr(self._storage, 'sync', lambda: None)

    def release(self):
        self._base._release(self)
...
@@ -205,7 +209,7 @@ class MVCCAdapterInstance(Base):
            oid, serial, data, blobfilename, '', transaction)
        self._modified.add(oid)

    def tpc_finish(self, transaction, func=lambda tid: None):
        modified = self._modified
        self._modified = None
...
@@ -216,9 +220,11 @@ class MVCCAdapterInstance(Base):
        return self._storage.tpc_finish(transaction, invalidate_finish)

    def read_only_writer(self, *a, **kw):
        raise POSException.ReadOnlyError


class HistoricalStorageAdapter(Base):
    """Adapt a storage to a historical storage
    """
...
@@ -293,7 +299,7 @@ class UndoAdapterInstance(Base):
            if result:
                self._undone.update(result)

    def tpc_finish(self, transaction, func=lambda tid: None):

        def invalidate_finish(tid):
            self._base._invalidate_finish(tid, self._undone, None)
...
src/ZODB/persistentclass.py
...
@@ -63,6 +63,7 @@ class _p_DataDescr(object):
    def __delete__(self, inst):
        raise AttributeError(self.__name__)


class _p_oid_or_jar_Descr(_p_DataDescr):
    # Special descr for _p_oid and _p_jar that loads
    # state when set if both are set and _p_changed is None
...
@@ -79,10 +80,10 @@ class _p_oid_or_jar_Descr(_p_DataDescr):
        jar = get('_p_jar')
        if (jar is not None
                and get('_p_oid') is not None
                and get('_p_changed') is None):
            jar.setstate(inst)


class _p_ChangedDescr(object):
    # descriptor to handle special weird semantics of _p_changed
...
@@ -99,6 +100,7 @@ class _p_ChangedDescr(object):
    def __delete__(self, inst):
        inst._p_invalidate()


class _p_MethodDescr(object):
    """Provide unassignable class attributes
    """
...
@@ -120,6 +122,7 @@ class _p_MethodDescr(object):
special_class_descrs = '__dict__', '__weakref__'


class PersistentMetaClass(type):

    _p_jar = _p_oid_or_jar_Descr('_p_jar')
...
@@ -148,7 +151,7 @@ class PersistentMetaClass(type):
            and (get('_p_oid') is not None)
            and (get('_p_changed') is False)
        ):
            self._p_changed = True
...
@@ -177,7 +180,6 @@ class PersistentMetaClass(type):
    _p_invalidate = _p_MethodDescr(_p_invalidate)

    def __getstate__(self):
        return (self.__bases__,
                dict([(k, v) for (k, v) in self.__dict__.items()
...
src/ZODB/scripts/analyze.py
...
@@ -9,7 +9,6 @@ from ZODB.FileStorage import FileStorage
from ZODB._compat import PersistentUnpickler, BytesIO


class FakeError(Exception):
    def __init__(self, module, name):
        Exception.__init__(self)
...
@@ -41,9 +40,10 @@ class Report(object):
        self.FOIDS = 0
        self.FBYTES = 0


def shorten(s, n):
    length = len(s)
    if length <= n:
        return s
    while len(s) + 3 > n:  # account for ...
        i = s.find(".")
...
@@ -52,9 +52,10 @@ def shorten(s, n):
            return s[-n:]
        else:
            s = s[i + 1:]
            length = len(s)
    return "..." + s


def report(rep):
    print("Processed %d records in %d transactions" % (rep.OIDS, rep.TIDS))
    print("Average record size is %7.2f bytes" % (rep.DBYTES * 1.0 / rep.OIDS))
...
@@ -76,8 +77,9 @@ def report(rep):
               pct, rep.TYPESIZE[t] * 1.0 / rep.TYPEMAP[t]))
    print(fmt % ('='*46, '='*7, '='*9, '='*5, '='*7))
    print("%46s %7d %9s %6s %6.2fk" % (
        'Total Transactions', rep.TIDS, ' ', ' ',
        rep.DBYTES * 1.0 / rep.TIDS / 1024.0))
    print(fmts % ('Total Records', rep.OIDS, rep.DBYTES / 1024.0, cumpct,
                  rep.DBYTES * 1.0 / rep.OIDS))
...
@@ -89,6 +91,7 @@ def report(rep):
           rep.FBYTES * 100.0 / rep.DBYTES,
           rep.FBYTES * 1.0 / rep.FOIDS))


def analyze(path):
    fs = FileStorage(path, read_only=1)
    fsi = fs.iterator()
...
@@ -97,11 +100,13 @@ def analyze(path):
        analyze_trans(report, txn)
    return report


def analyze_trans(report, txn):
    report.TIDS += 1
    for rec in txn:
        analyze_rec(report, rec)


def get_type(record):
    try:
        unpickled = FakeUnpickler(BytesIO(record.data)).load()
...
@@ -114,6 +119,7 @@ def get_type(record):
    else:
        return str(classinfo)


def analyze_rec(report, record):
    oid = record.oid
    report.OIDS += 1
...
@@ -142,6 +148,7 @@ def analyze_rec(report, record):
    except Exception as err:
        print(err)


if __name__ == "__main__":
    path = sys.argv[1]
    report(analyze(path))
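
shorten() above trims dotted class names from the left until "..." plus the remaining tail fits in n characters. A quick trace with a made-up name:

print(shorten("persistent.mapping.PersistentMapping", 20))
# -> '...PersistentMapping' (leading components dropped one dot at a time)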
src/ZODB/scripts/checkbtrees.py
...
@@ -19,6 +19,8 @@ oids_seen = {}
# Append (obj, path) to L if and only if obj is a persistent object
# and we haven't seen it before.


def add_if_new_persistent(L, obj, path):
    global oids_seen
...
@@ -29,6 +31,7 @@ def add_if_new_persistent(L, obj, path):
        L.append((obj, path))
        oids_seen[oid] = 1


def get_subobjects(obj):
    getattr(obj, '_', None)  # unghostify
    sub = []
...
@@ -55,19 +58,20 @@ def get_subobjects(obj):
    while 1:
        try:
            elt = obj[i]
        except:  # noqa: E722 do not use bare 'except'
            break
        sub.append(("[%d]" % i, elt))
        i += 1
    return sub


def main(fname=None):
    if fname is None:
        import sys
        try:
            fname, = sys.argv[1:]
        except:  # noqa: E722 do not use bare 'except'
            print(__doc__)
            sys.exit(2)
...
@@ -116,5 +120,6 @@ def main(fname=None):
    print("total", len(fs._index), "found", found)


if __name__ == "__main__":
    main()
src/ZODB/scripts/fsoids.py
...
@@ -43,9 +43,11 @@ import sys
from ZODB.FileStorage.fsoids import Tracer


def usage():
    print(__doc__)


def main():
    import getopt
...
@@ -75,5 +77,6 @@ def main():
    c.run()
    c.report()


if __name__ == "__main__":
    main()
src/ZODB/scripts/fsrefs.py
...
@@ -74,6 +74,8 @@ from BTrees.QQBTree import QQBTree
# There's a problem with oid.  'data' is its pickle, and 'serial' its
# serial number.  'missing' is a list of (oid, class, reason) triples,
# explaining what the problem(s) is(are).


def report(oid, data, serial, missing):
    from_mod, from_class = get_pickle_metadata(data)
    if len(missing) > 1:
...
@@ -92,6 +94,7 @@ def report(oid, data, serial, missing):
        print("\toid %s %s: %r" % (oid_repr(oid), reason, description))
    print()


def main(path=None):
    verbose = 0
    if path is None:
...
@@ -105,7 +108,6 @@ def main(path=None):
        path, = args

    fs = FileStorage(path, read_only=1)

    # Set of oids in the index that failed to load due to POSKeyError.
...
@@ -137,14 +139,14 @@ def main(path=None):
                raise
            except POSKeyError:
                undone[oid] = 1
            except:  # noqa: E722 do not use bare 'except'
                if verbose:
                    traceback.print_exc()
                noload[oid] = 1

    # pass 2: go through all objects again and verify that their references do
    # not point to problematic object set. Iterate objects in order of
    # ascending file position to optimize disk IO.
    inactive = noload.copy()
    inactive.update(undone)
    for oid64 in pos2oid.itervalues():
...
@@ -166,5 +168,6 @@ def main(path=None):
    if missing:
        report(oid, data, serial, missing)


if __name__ == "__main__":
    main()
src/ZODB/scripts/fsstats.py
...
@@ -9,6 +9,7 @@ from six.moves import filter
rx_txn = re.compile(r"tid=([0-9a-f]+).*size=(\d+)")
rx_data = re.compile(r"oid=([0-9a-f]+) size=(\d+) class=(\S+)")


def sort_byhsize(seq, reverse=False):
    L = [(v.size(), k, v) for k, v in seq]
    L.sort()
...
@@ -16,6 +17,7 @@ def sort_byhsize(seq, reverse=False):
        L.reverse()
    return [(k, v) for n, k, v in L]


class Histogram(dict):

    def add(self, size):
...
@@ -93,6 +95,7 @@ class Histogram(dict):
                i * binsize, n, p, pc, "*" * (n // dot)))
        print()


def class_detail(class_size):
    # summary of classes
    fmt = "%5s %6s %6s %6s %-50.50s"
...
@@ -110,6 +113,7 @@ def class_detail(class_size):
            continue
        h.report("Object size for %s" % klass, usebins=True)


def revision_detail(lifetimes, classes):
    # Report per-class details for any object modified more than once
    for name, oids in six.iteritems(classes):
...
@@ -124,6 +128,7 @@ def revision_detail(lifetimes, classes):
        if keep:
            h.report("Number of revisions for %s" % name, binsize=10)


def main(path=None):
    if path is None:
        path = sys.argv[1]
...
@@ -203,5 +208,6 @@ def main(path=None):
    class_detail(class_size)


if __name__ == "__main__":
    main()
src/ZODB/scripts/fstail.py
...
@@ -25,6 +25,7 @@ try:
except ImportError:
    from sha import sha as sha1


def main(path, ntxn):
    with open(path, "rb") as f:
        f.seek(0, 2)
...
@@ -32,7 +33,6 @@ def main(path, ntxn):
        i = ntxn
        while th and i > 0:
            hash = sha1(th.get_raw_data()).digest()
            th.read_meta()
            print("%s: hash=%s" % (th.get_timestamp(),
                                   binascii.hexlify(hash).decode()))
...
@@ -42,6 +42,7 @@ def main(path, ntxn):
            th = th.prev_txn()
            i -= 1


def Main():
    ntxn = 10
    opts, args = getopt.getopt(sys.argv[1:], "n:")
...
@@ -51,5 +52,6 @@ def Main():
            ntxn = int(v)
    main(path, ntxn)


if __name__ == "__main__":
    Main()
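
A hedged usage sketch for fstail; -n comes from the getopt string "n:" parsed in Main(), and the file name is an example:

python fstail.py -n 5 Data.fs   # print hash and metadata of the last 5 transactions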
src/ZODB/scripts/fstest.py
...
@@ -41,13 +41,16 @@ import struct
import sys

from ZODB._compat import FILESTORAGE_MAGIC


class FormatError(ValueError):
    """There is a problem with the format of the FileStorage."""


class Status(object):
    checkpoint = b'c'
    undone = b'u'


packed_version = FILESTORAGE_MAGIC

TREC_HDR_LEN = 23
...
@@ -55,6 +58,7 @@ DREC_HDR_LEN = 42
VERBOSE = 0


def hexify(s):
    r"""Format an 8-bit string as hex
...
@@ -64,17 +68,20 @@ def hexify(s):
    """
    return '0x' + binascii.hexlify(s).decode()


def chatter(msg, level=1):
    if VERBOSE >= level:
        sys.stdout.write(msg)


def U64(v):
    """Unpack an 8-byte string as a 64-bit long"""
    h, l_ = struct.unpack(">II", v)
    if h:
        return (h << 32) + l_
    else:
        return l_


def check(path):
    with open(path, 'rb') as file:
...
@@ -106,7 +113,7 @@ def check_trec(path, file, pos, ltid, file_size):
    used for generating error messages.
    """

    h = file.read(TREC_HDR_LEN)  # XXX must be bytes under Py3k
    if not h:
        return None, None
    if len(h) != TREC_HDR_LEN:
...
@@ -162,6 +169,7 @@ def check_trec(path, file, pos, ltid, file_size):
            pos = tend + 8
    return pos, tid


def check_drec(path, file, pos, tpos, tid):
    """Check a data record for the current transaction record"""
...
@@ -170,7 +178,7 @@ def check_drec(path, file, pos, tpos, tid):
        raise FormatError("%s truncated at %s" % (path, pos))

    oid, serial, _prev, _tloc, vlen, _plen = (
        struct.unpack(">8s8s8s8sH8s", h))
    U64(_prev)
    tloc = U64(_tloc)
    plen = U64(_plen)

    dlen = DREC_HDR_LEN + (plen or 8)
...
@@ -178,7 +186,7 @@ def check_drec(path, file, pos, tpos, tid):
    if vlen:
        dlen = dlen + 16 + vlen
        file.seek(8, 1)
        U64(file.read(8))
        file.seek(vlen, 1)  # skip the version data

    if tloc != tpos:
...
@@ -195,9 +203,11 @@ def check_drec(path, file, pos, tpos, tid):
    return pos, oid


def usage():
    sys.exit(__doc__)


def main(args=None):
    if args is None:
        args = sys.argv[1:]
...
@@ -221,5 +231,6 @@ def main(args=None):
    chatter("no errors detected")


if __name__ == "__main__":
    main()
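
U64() above splits an 8-byte big-endian value into two 32-bit halves; it is equivalent to a single ">Q" unpack, as this sketch checks:

import struct

v = struct.pack(">II", 1, 2)     # high word 1, low word 2
assert U64(v) == (1 << 32) + 2
assert U64(v) == struct.unpack(">Q", v)[0]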
src/ZODB/scripts/manual_tests/testfstest.py
...
@@ -6,12 +6,12 @@ Note: To run this test script fstest.py must be on your PYTHONPATH.
from cStringIO import StringIO
import re
import struct
import unittest

import ZODB.tests.util

import fstest
from fstest import FormatError, U64


class TestCorruptedFS(ZODB.tests.util.TestCase):

    f = open('test-checker.fs', 'rb')
...
@@ -117,7 +117,7 @@ class TestCorruptedFS(ZODB.tests.util.TestCase):
        self._file.write(data)
        buf = self._datafs.read(tl - 8)
        self._file.write(buf[0])
        assert tl <= 1 << 16, "can't use this transaction for this test"
        self._file.write("\777\777")
        self._file.write(buf[3:])
        self.detectsError("invalid transaction header")
...
@@ -172,6 +172,3 @@ class TestCorruptedFS(ZODB.tests.util.TestCase):
        self._file.write("\000" * 4 + "\077" + "\000" * 3)
        self._file.write(data[32:])
        self.detectsError("record exceeds transaction")
src/ZODB/scripts/migrate.py
...
@@ -130,7 +130,7 @@ def main():
        elif opt in ('-v', '--verbose'):
            options.verbose += 1
        elif opt in ('-T', '--storage_types'):
-           print_types()
+           print('Unknown option.')
            sys.exit(0)
        elif opt in ('-S', '--stype'):
            options.stype = arg
...
@@ -247,16 +247,16 @@ def doit(srcdb, dstdb, options):
            t = TimeStamp(tid)
            if t <= ts:
                if ok:
                    print('Time stamps are out of order %s, %s' % (ts, t),
                          file=sys.stderr)
                    ok = False
                ts = t.laterThan(ts)
                tid = ts.raw()
            else:
                ts = t
                if not ok:
                    print('Time stamps are back in order %s' % t,
                          file=sys.stderr)
                    ok = True
        if verbose > 1:
            print(ts)
...
src/ZODB/scripts/migrateblobs.py
...
@@ -23,17 +23,17 @@ from ZODB.blob import FilesystemHelper
from ZODB.utils import oid_repr

# Check if we actually have link
try:
    os.link
except AttributeError:
    link_or_copy = shutil.copy
else:
    def link_or_copy(f1, f2):
        try:
            os.link(f1, f2)
        except OSError:
            shutil.copy(f1, f2)


def migrate(source, dest, layout):
...
src/ZODB/scripts/netspace.py
...
@@ -13,6 +13,7 @@ from ZODB.utils import U64, get_pickle_metadata, load_current
from ZODB.serialize import referencesf
from six.moves import filter


def find_paths(root, maxdist):
    """Find Python attribute traversal paths for objects to maxdist distance.
...
@@ -48,6 +49,7 @@ def find_paths(root, maxdist):
    return paths


def main(path):
    fs = FileStorage(path, read_only=1)
    if PACK:
...
@@ -60,6 +62,7 @@ def main(path):
    def total_size(oid):
        cache = {}
        cache_size = 1000

        def _total_size(oid, seen):
            v = cache.get(oid)
            if v is not None:
...
@@ -91,10 +94,11 @@ def main(path):
    for oid in keys:
        data, serialno = load_current(fs, oid)
        mod, klass = get_pickle_metadata(data)
        referencesf(data)
        path = paths.get(oid, '-')
        print(fmt % (U64(oid), len(data), total_size(oid), path, mod, klass))


def Main():
    import sys
    import getopt
...
@@ -122,5 +126,6 @@ def Main():
        VERBOSE += 1
    main(path)


if __name__ == "__main__":
    Main()
src/ZODB/scripts/referrers.py
...
@@ -18,6 +18,7 @@ $Id$
from ZODB.serialize import referencesf


def referrers(storage):
    result = {}
    for transaction in storage.iterator():
...
src/ZODB/scripts/repozo.py
...
@@ -85,6 +85,7 @@ Options for -V/--verify:
          Verify file sizes only (skip md5 checksums).
"""
from __future__ import print_function

import re
import os
import shutil
import sys
...
@@ -299,6 +300,8 @@ def fsync(afile):
# Return the total number of bytes read == the total number of bytes
# passed in all to func().  Leaves the file position just after the
# last byte read.


def dofile(func, fp, n=None):
    bytesread = 0
    while n is None or n > 0:
...
@@ -320,6 +323,7 @@ def dofile(func, fp, n=None):
def checksum(fp, n):
    # Checksum the first n bytes of the specified file
    sum = md5()

    def func(data):
        sum.update(data)
    dofile(func, fp, n)
...
@@ -336,6 +340,7 @@ def file_size(fp):
def checksum_and_size(fp):
    # Checksum and return it with the size of the file
    sum = md5()

    def func(data):
        sum.update(data)
    size = dofile(func, fp, None)
...
@@ -374,6 +379,7 @@ def concat(files, ofp=None):
    # given.  Return the number of bytes written and the md5 checksum of the
    # bytes.
    sum = md5()

    def func(data):
        sum.update(data)
        if ofp:
...
@@ -393,6 +399,7 @@ def concat(files, ofp=None):
def gen_filedate(options):
    return getattr(options, 'test_now', time.gmtime()[:6])


def gen_filename(options, ext=None, now=None):
    if ext is None:
        if options.full:
...
@@ -412,10 +419,11 @@ def gen_filename(options, ext=None, now=None):
# files, from the time of the most recent full backup preceding
# options.date, up to options.date.

is_data_file = re.compile(r'\d{4}(?:-\d\d){5}\.(?:delta)?fsz?$').match


def find_files(options):
    when = options.date
    if not when:
...
@@ -455,6 +463,7 @@ def find_files(options):
#
#     None, None, None, None


def scandat(repofiles):
    fullfile = repofiles[0]
    datfile = os.path.splitext(fullfile)[0] + '.dat'
...
@@ -475,6 +484,7 @@ def scandat(repofiles):
    return fn, startpos, endpos, sum


def delete_old_backups(options):
    # Delete all full backup files except for the most recent full backup file
    all = sorted(filter(is_data_file, os.listdir(options.repository)))
...
@@ -515,6 +525,7 @@ def delete_old_backups(options):
            pass
        os.unlink(os.path.join(options.repository, fname))


def do_full_backup(options):
    options.full = True
    tnow = gen_filedate(options)
...
@@ -714,7 +725,8 @@ def do_recover(options):
                          "%s has checksum %s instead of %s" % (
                              repofile, reposum, expected_truth['sum']))
            totalsz += reposz
            log("Recovered chunk %s : %s bytes, md5: %s",
                repofile, reposz, reposum)
        log("Recovered a total of %s bytes", totalsz)
    else:
        reposz, reposum = concat(repofiles, outfp)
...
@@ -725,7 +737,8 @@ def do_recover(options):
        source_index = '%s.index' % last_base
        target_index = '%s.index' % options.output
        if os.path.exists(source_index):
            log('Restoring index file %s to %s',
                source_index, target_index)
            shutil.copyfile(source_index, target_index)
        else:
            log('No index file to restore: %s', source_index)
...
@@ -737,8 +750,8 @@ def do_recover(options):
        try:
            os.rename(temporary_output_file, options.output)
        except OSError:
            log("ZODB has been fully recovered as %s, but it cannot be renamed"
                " into : %s", temporary_output_file, options.output)
            raise
...
@@ -759,10 +772,12 @@ def do_verify(options):
        log("Verifying %s", filename)
        try:
            if filename.endswith('fsz'):
                actual_sum, size = get_checksum_and_size_of_gzipped_file(
                    filename, options.quick)
                when_uncompressed = ' (when uncompressed)'
            else:
                actual_sum, size = get_checksum_and_size_of_file(
                    filename, options.quick)
                when_uncompressed = ''
        except IOError:
            error("%s is missing", filename)
...
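
A hedged sketch of the three repozo modes referenced above (-B backup, -V verify, -R recover); repository and file paths are examples:

python repozo.py -B -r /var/backups/zodb -f /var/zodb/Data.fs   # incremental backup
python repozo.py -V -r /var/backups/zodb                        # verify the repository
python repozo.py -R -r /var/backups/zodb -o /tmp/Recovered.fs   # recover latest state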
src/ZODB/scripts/space.py
View file @
6e5baffd
@@ -12,6 +12,7 @@ from ZODB.FileStorage import FileStorage
from ZODB.utils import U64, get_pickle_metadata, load_current
import six

def run(path, v=0):
    fs = FileStorage(path, read_only=1)
    # break into the file implementation
...
@@ -31,12 +32,13 @@ def run(path, v=0):
        if v:
            print("%8s %5d %s" % (U64(oid), len(data), key))
    L = totals.items()
-    L.sort(lambda a, b: cmp(a[1], b[1]))
+    L.sort(key=lambda x: x[1])
    L.reverse()
    print("Totals per object class:")
    for key, (bytes, count) in L:
        print("%8d %8d %s" % (count, bytes, key))

def main():
    import sys
    import getopt
...
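The sort fix above is the usual Python 3 migration: cmp-style comparators are gone, so ordering is expressed with a key function. The same report on plain data (the totals shape {class_name: (bytes, count)} is inferred from the print loop, not stated by the commit):

    totals = {'BTrees.OOBTree.OOBTree': (1024, 3),
              'persistent.mapping.PersistentMapping': (256, 1)}
    for key, (nbytes, count) in sorted(totals.items(),
                                       key=lambda x: x[1], reverse=True):
        print("%8d %8d %s" % (count, nbytes, key))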
@@ -56,5 +58,6 @@ def main():
    path = args[0]
    run(path, v)

if __name__ == "__main__":
    main()
src/ZODB/scripts/tests/test_doc.py
@@ -38,6 +38,7 @@ checker = zope.testing.renormalizing.RENormalizing([
     " length=<LENGTH> offset=4 (+48)"),
    ])

def test_suite():
    return unittest.TestSuite((
        doctest.DocFileSuite(
...
src/ZODB/scripts/tests/test_fsdump_fsstats.py
@@ -57,6 +57,3 @@ class FsdumpFsstatsTests(TestCase):
        with open("stdout") as f:
            self.assertEqual(f.readline().strip(),
                             "Summary: 1 txns, 1 objects, 1 revisions")
src/ZODB/scripts/tests/test_fstest.py
@@ -19,6 +19,7 @@ import ZODB
from zope.testing import setupstack
from zope.testing.renormalizing import RENormalizing

def test_fstest_verbose():
    r"""
    >>> db = ZODB.DB('data.fs')
...
@@ -52,4 +53,3 @@ def test_suite():
        doctest.DocTestSuite(
            setUp=setupstack.setUpDirectory, tearDown=setupstack.tearDown),
    ])
src/ZODB/scripts/tests/test_repozo.py
@@ -28,11 +28,13 @@ else:
_NOISY = os.environ.get('NOISY_REPOZO_TEST_OUTPUT')

def _write_file(name, bits, mode='wb'):
    with open(name, mode) as f:
        f.write(bits)
        f.flush()

def _read_file(name, mode='rb'):
    with open(name, mode) as f:
        return f.read()
...
@@ -313,7 +315,6 @@ class Test_checksum(unittest.TestCase, FileopsBase):
        self.assertEqual(sum, md5(b''.join(self._makeChunks())).hexdigest())

    def test_nonempty_read_count(self):
        chunks = []
        file = self._makeFile()
        sum = self._callFUT(file, 42)
        self.assertEqual(sum, md5(b'x' * 42).hexdigest())
...
@@ -335,13 +336,16 @@ class OptionsTestBase(object):

    def _makeOptions(self, **kw):
        import tempfile
        self._repository_directory = tempfile.mkdtemp(prefix='test-repozo-')

        class Options(object):
            repository = self._repository_directory
            date = None

            def __init__(self, **kw):
                self.__dict__.update(kw)
        return Options(**kw)

class Test_copyfile(OptionsTestBase, unittest.TestCase):

    def _callFUT(self, options, dest, start, n):
...
@@ -413,10 +417,13 @@ class Test_concat(OptionsTestBase, unittest.TestCase):

        class Faux(object):
            _closed = False

            def __init__(self):
                self._written = []

            def write(self, data):
                self._written.append(data)

            def close(self):
                self._closed = True
...
@@ -426,7 +433,10 @@ class Test_concat(OptionsTestBase, unittest.TestCase):
        self.assertEqual(ofp._written, [x.encode() for x in 'ABC'])
        self.assertFalse(ofp._closed)

_marker = object()

class Test_gen_filename(OptionsTestBase, unittest.TestCase):

    def _callFUT(self, options, ext=_marker):
...
@@ -436,38 +446,38 @@ class Test_gen_filename(OptionsTestBase, unittest.TestCase):
        return gen_filename(options, ext)

    def test_explicit_ext(self):
        options = self._makeOptions(test_now=(2010, 5, 14, 12, 52, 31))
        fn = self._callFUT(options, '.txt')
        self.assertEqual(fn, '2010-05-14-12-52-31.txt')

    def test_full_no_gzip(self):
        options = self._makeOptions(
            test_now=(2010, 5, 14, 12, 52, 31),
            full=True,
            gzip=False,
        )
        fn = self._callFUT(options)
        self.assertEqual(fn, '2010-05-14-12-52-31.fs')

    def test_full_w_gzip(self):
        options = self._makeOptions(
            test_now=(2010, 5, 14, 12, 52, 31),
            full=True,
            gzip=True,
        )
        fn = self._callFUT(options)
        self.assertEqual(fn, '2010-05-14-12-52-31.fsz')

    def test_incr_no_gzip(self):
        options = self._makeOptions(
            test_now=(2010, 5, 14, 12, 52, 31),
            full=False,
            gzip=False,
        )
        fn = self._callFUT(options)
        self.assertEqual(fn, '2010-05-14-12-52-31.deltafs')

    def test_incr_w_gzip(self):
        options = self._makeOptions(
            test_now=(2010, 5, 14, 12, 52, 31),
            full=False,
            gzip=True,
        )
        fn = self._callFUT(options)
        self.assertEqual(fn, '2010-05-14-12-52-31.deltafsz')
...
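Read together, these four tests pin down the naming rule: timestamp, then .fs/.deltafs for full/incremental, with a trailing 'z' when gzipped. A sketch of that rule (gen_filename_sketch is illustrative, not repozo's implementation):

    def gen_filename_sketch(full, gzipped, now):
        # now is a (Y, M, D, h, m, s) tuple as gen_filedate() returns.
        ext = ('.fs' if full else '.deltafs') + ('z' if gzipped else '')
        return '%04d-%02d-%02d-%02d-%02d-%02d%s' % (tuple(now) + (ext,))

    assert gen_filename_sketch(True, False, (2010, 5, 14, 12, 52, 31)) == \
        '2010-05-14-12-52-31.fs'
    assert gen_filename_sketch(False, True, (2010, 5, 14, 12, 52, 31)) == \
        '2010-05-14-12-52-31.deltafsz'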
@@ -536,7 +546,7 @@ class Test_scandat(OptionsTestBase, unittest.TestCase):
        return scandat(repofiles)

    def test_no_dat_file(self):
-        options = self._makeOptions()
+        self._makeOptions()
        fsfile = os.path.join(self._repository_directory, 'foo.fs')
        fn, startpos, endpos, sum = self._callFUT([fsfile])
        self.assertEqual(fn, None)
...
@@ -545,7 +555,7 @@ class Test_scandat(OptionsTestBase, unittest.TestCase):
        self.assertEqual(sum, None)

    def test_empty_dat_file(self):
-        options = self._makeOptions()
+        self._makeOptions()
        fsfile = os.path.join(self._repository_directory, 'foo.fs')
        datfile = os.path.join(self._repository_directory, 'foo.dat')
        _write_file(datfile, b'')
...
@@ -556,7 +566,7 @@ class Test_scandat(OptionsTestBase, unittest.TestCase):
        self.assertEqual(sum, None)

    def test_single_line(self):
-        options = self._makeOptions()
+        self._makeOptions()
        fsfile = os.path.join(self._repository_directory, 'foo.fs')
        datfile = os.path.join(self._repository_directory, 'foo.dat')
        _write_file(datfile, b'foo.fs 0 123 ABC\n')
...
@@ -567,7 +577,7 @@ class Test_scandat(OptionsTestBase, unittest.TestCase):
        self.assertEqual(sum, 'ABC')

    def test_multiple_lines(self):
-        options = self._makeOptions()
+        self._makeOptions()
        fsfile = os.path.join(self._repository_directory, 'foo.fs')
        datfile = os.path.join(self._repository_directory, 'foo.dat')
        _write_file(datfile, b'foo.fs 0 123 ABC\n'
...
@@ -684,7 +694,7 @@ class Test_do_full_backup(OptionsTestBase, unittest.TestCase):

    def _makeDB(self):
        import tempfile
-        datadir = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
+        self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
        return OurDB(self._data_directory)

    def test_dont_overwrite_existing_file(self):
...
@@ -694,7 +704,7 @@ class Test_do_full_backup(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(full=True,
                                    file=db._file_name,
                                    gzip=False,
                                    test_now=(2010, 5, 14, 10, 51, 22),
                                    )
        fqn = os.path.join(self._repository_directory, gen_filename(options))
        _write_file(fqn, b'TESTING')
...
@@ -708,7 +718,7 @@ class Test_do_full_backup(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(file=db._file_name,
                                    gzip=False,
                                    killold=False,
                                    test_now=(2010, 5, 14, 10, 51, 22),
                                    )
        self._callFUT(options)
        target = os.path.join(self._repository_directory,
...
@@ -717,7 +727,7 @@ class Test_do_full_backup(OptionsTestBase, unittest.TestCase):
        self.assertEqual(_read_file(target), original)
        datfile = os.path.join(self._repository_directory,
                               gen_filename(options, '.dat'))
        self.assertEqual(_read_file(datfile, mode='r'),  # XXX 'rb'?
                         '%s 0 %d %s\n' % (target, len(original),
                                           md5(original).hexdigest()))
        ndxfile = os.path.join(self._repository_directory,
...
@@ -739,7 +749,7 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):

    def _makeDB(self):
        import tempfile
-        datadir = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
+        self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
        return OurDB(self._data_directory)

    def test_dont_overwrite_existing_file(self):
...
@@ -750,8 +760,8 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(full=False,
                                    file=db._file_name,
                                    gzip=False,
                                    test_now=(2010, 5, 14, 10, 51, 22),
                                    date=None,
                                    )
        fqn = os.path.join(self._repository_directory, gen_filename(options))
        _write_file(fqn, b'TESTING')
...
@@ -768,13 +778,12 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(file=db._file_name,
                                    gzip=False,
                                    killold=False,
                                    test_now=(2010, 5, 14, 10, 51, 22),
                                    date=None,
                                    )
        fullfile = os.path.join(self._repository_directory,
                                '2010-05-14-00-00-00.fs')
        original = _read_file(db._file_name)
        last = len(original)
        _write_file(fullfile, original)
        datfile = os.path.join(self._repository_directory,
                               '2010-05-14-00-00-00.dat')
...
@@ -783,7 +792,7 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
        target = os.path.join(self._repository_directory, gen_filename(options))
        self.assertEqual(_read_file(target), b'')
        self.assertEqual(_read_file(datfile, mode='r'),  # XXX mode='rb'?
                         '%s %d %d %s\n' % (target, oldpos, oldpos,
                                            md5(b'').hexdigest()))
        ndxfile = os.path.join(self._repository_directory,
...
@@ -805,8 +814,8 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(file=db._file_name,
                                    gzip=False,
                                    killold=False,
                                    test_now=(2010, 5, 14, 10, 51, 22),
                                    date=None,
                                    )
        fullfile = os.path.join(self._repository_directory,
                                '2010-05-14-00-00-00.fs')
...
@@ -824,7 +833,7 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
            f.seek(oldpos)
            increment = f.read()
        self.assertEqual(_read_file(target), increment)
        self.assertEqual(_read_file(datfile, mode='r'),  # XXX mode='rb'?
                         '%s %d %d %s\n' % (target, oldpos, newpos,
                                            md5(increment).hexdigest()))
...
@@ -850,7 +859,7 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
        if text is None:
            text = name
        fqn = os.path.join(self._repository_directory, name)
-        f = _write_file(fqn, text.encode())
+        _write_file(fqn, text.encode())
        return fqn

    def test_no_files(self):
...
@@ -880,7 +889,6 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
        import tempfile
        dd = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
        output = os.path.join(dd, 'Data.fs')
        index = os.path.join(dd, 'Data.fs.index')
        options = self._makeOptions(date='2010-05-15-13-30-57',
                                    output=output,
                                    withverify=False)
...
@@ -908,7 +916,6 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
        import tempfile
        dd = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
        output = os.path.join(dd, 'Data.fs')
        index = os.path.join(dd, 'Data.fs.index')
        options = self._makeOptions(date='2010-05-15-13-30-57',
                                    output=output,
                                    withverify=False)
...
@@ -936,15 +943,15 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
        import tempfile
        dd = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
        output = os.path.join(dd, 'Data.fs')
        index = os.path.join(dd, 'Data.fs.index')
        options = self._makeOptions(date='2010-05-15-13-30-57',
                                    output=output,
                                    withverify=True)
        self._makeFile(2, 3, 4, '.fs', 'AAA')
        self._makeFile(4, 5, 6, '.deltafs', 'BBBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')  # noqa: E501 line too long
        self._callFUT(options)
        self.assertFalse(os.path.exists(output + '.part'))
        self.assertEqual(_read_file(output), b'AAABBBB')
...
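The fixture checksums above appear to pair each byte range with the md5 of exactly those bytes ('AAA' for offsets 0-3, 'BBBB' for 3-7); that is easy to verify independently:

    from hashlib import md5
    print(md5(b'AAA').hexdigest())   # expected to match the first .dat field
    print(md5(b'BBBB').hexdigest())  # expected to match the second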
@@ -954,15 +961,15 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
        from ZODB.scripts.repozo import VerificationFail
        dd = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
        output = os.path.join(dd, 'Data.fs')
        index = os.path.join(dd, 'Data.fs.index')
        options = self._makeOptions(date='2010-05-15-13-30-57',
                                    output=output,
                                    withverify=True)
        self._makeFile(2, 3, 4, '.fs', 'AAA')
        self._makeFile(4, 5, 6, '.deltafs', 'BBBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec61\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec61\n')  # noqa: E501 line too long
        self.assertRaises(VerificationFail, self._callFUT, options)
        self.assertTrue(os.path.exists(output + '.part'))
...
@@ -971,15 +978,15 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
        from ZODB.scripts.repozo import VerificationFail
        dd = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
        output = os.path.join(dd, 'Data.fs')
        index = os.path.join(dd, 'Data.fs.index')
        options = self._makeOptions(date='2010-05-15-13-30-57',
                                    output=output,
                                    withverify=True)
        self._makeFile(2, 3, 4, '.fs', 'AAA')
        self._makeFile(4, 5, 6, '.deltafs', 'BBBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafs 3 8 f50881ced34c7d9e6bce100bf33dec60\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafs 3 8 f50881ced34c7d9e6bce100bf33dec60\n')  # noqa: E501 line too long
        self.assertRaises(VerificationFail, self._callFUT, options)
        self.assertTrue(os.path.exists(output + '.part'))
...
@@ -990,6 +997,7 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
        from ZODB.scripts import repozo
        errors = []
        orig_error = repozo.error

        def _error(msg, *args):
            errors.append(msg % args)
        repozo.error = _error
...
@@ -1024,26 +1032,29 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(quick=False)
        self._makeFile(2, 3, 4, '.fs', 'AAA')
        self._makeFile(4, 5, 6, '.deltafs', 'BBBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')  # noqa: E501 line too long
        self.assertEqual(self._callFUT(options), [])

    def test_all_is_fine_gzip(self):
        options = self._makeOptions(quick=False)
        self._makeFile(2, 3, 4, '.fsz', 'AAA')
        self._makeFile(4, 5, 6, '.deltafsz', 'BBBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')  # noqa: E501 line too long
        self.assertEqual(self._callFUT(options), [])

    def test_missing_file(self):
        options = self._makeOptions(quick=True)
        self._makeFile(2, 3, 4, '.fs', 'AAA')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')  # noqa: E501 line too long
        self.assertEqual(self._callFUT(options),
                         [options.repository + os.path.sep +
                          '2010-05-14-04-05-06.deltafs is missing'])
...
@@ -1051,9 +1062,10 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):

    def test_missing_file_gzip(self):
        options = self._makeOptions(quick=True)
        self._makeFile(2, 3, 4, '.fsz', 'AAA')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')  # noqa: E501 line too long
        self.assertEqual(self._callFUT(options),
                         [options.repository + os.path.sep +
                          '2010-05-14-04-05-06.deltafsz is missing'])
...
@@ -1062,9 +1074,10 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(quick=False)
        self._makeFile(2, 3, 4, '.fs', 'AAA')
        self._makeFile(4, 5, 6, '.deltafs', 'BBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')  # noqa: E501 line too long
        self.assertEqual(self._callFUT(options),
                         [options.repository + os.path.sep +
                          '2010-05-14-04-05-06.deltafs is 3 bytes,'
...
@@ -1074,10 +1087,12 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(quick=False)
        self._makeFile(2, 3, 4, '.fsz', 'AAA')
        self._makeFile(4, 5, 6, '.deltafsz', 'BBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
-        self.assertEqual(self._callFUT(options),
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')  # noqa: E501 line too long
+        self.assertEqual(self._callFUT(options),
                         [options.repository + os.path.sep +
                          '2010-05-14-04-05-06.deltafsz is 3 bytes (when uncompressed),'
                          ' should be 4 bytes'])
...
@@ -1086,9 +1101,10 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(quick=False)
        self._makeFile(2, 3, 4, '.fs', 'AAA')
        self._makeFile(4, 5, 6, '.deltafs', 'BbBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')  # noqa: E501 line too long
        self.assertEqual(self._callFUT(options),
                         [options.repository + os.path.sep +
                          '2010-05-14-04-05-06.deltafs has checksum'
...
@@ -1099,10 +1115,12 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(quick=False)
        self._makeFile(2, 3, 4, '.fsz', 'AAA')
        self._makeFile(4, 5, 6, '.deltafsz', 'BbBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'
-                       '/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
-        self.assertEqual(self._callFUT(options),
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')  # noqa: E501 line too long
+        self.assertEqual(self._callFUT(options),
                         [options.repository + os.path.sep +
                          '2010-05-14-04-05-06.deltafsz has checksum'
                          ' 36486440db255f0ee6ab109d5d231406 (when uncompressed) instead of'
...
@@ -1112,18 +1130,20 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
        options = self._makeOptions(quick=True)
        self._makeFile(2, 3, 4, '.fs', 'AAA')
        self._makeFile(4, 5, 6, '.deltafs', 'BBBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fs 0 3 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n'
-                       '/backup/2010-05-14-04-05-06.deltafs 3 7 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fs 0 3 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafs 3 7 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n')  # noqa: E501 line too long
        self.assertEqual(self._callFUT(options), [])

    def test_quick_ignores_checksums_gzip(self):
        options = self._makeOptions(quick=True)
        self._makeFile(2, 3, 4, '.fsz', 'AAA')
        self._makeFile(4, 5, 6, '.deltafsz', 'BBBB')
-        self._makeFile(2, 3, 4, '.dat',
-                       '/backup/2010-05-14-02-03-04.fsz 0 3 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n'
-                       '/backup/2010-05-14-04-05-06.deltafsz 3 7 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n')
+        self._makeFile(2, 3, 4, '.dat',
+                       '/backup/2010-05-14-02-03-04.fsz 0 3 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n'  # noqa: E501 line too long
+                       '/backup/2010-05-14-04-05-06.deltafsz 3 7 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n')  # noqa: E501 line too long
        self.assertEqual(self._callFUT(options), [])
...
@@ -1175,7 +1195,6 @@ class MonteCarloTests(unittest.TestCase):
        import random
        from shutil import copyfile
        from time import gmtime
        from time import sleep
        self.db.mutate()

        # Pack about each tenth time.
...
@@ -1207,7 +1226,8 @@ class MonteCarloTests(unittest.TestCase):
        self.assertRestored()

    def assertRestored(self, correctpath='Data.fs', when=None):
-        # Do recovery to time 'when', and check that it's identical to correctpath.
+        # Do recovery to time 'when', and check that it's identical to
+        # correctpath.
        # restore to Restored.fs
        restoredfile = os.path.join(self.restoredir, 'Restored.fs')
        argv = ['-Rr', self.backupdir, '-o', restoredfile]
...
@@ -1239,7 +1259,7 @@ def test_suite():
        unittest.makeSuite(Test_delete_old_backups),
        unittest.makeSuite(Test_do_full_backup),
        unittest.makeSuite(Test_do_incremental_backup),
-        #unittest.makeSuite(Test_do_backup), #TODO
+        # unittest.makeSuite(Test_do_backup), #TODO
        unittest.makeSuite(Test_do_recover),
        unittest.makeSuite(Test_do_verify),
        # N.B.: this test take forever to run (~40sec on a fast laptop),
...
src/ZODB/scripts/zodbload.py
@@ -123,6 +123,7 @@ import threading
import time
import transaction

class JobProducer(object):

    def __init__(self):
...
@@ -142,7 +143,6 @@ class JobProducer(object):
        return not not self.jobs

class MBox(object):

    def __init__(self, filename):
...
@@ -199,8 +199,11 @@ class MBox(object):
            message.mbox = self.__name__
            return message

bins = 9973
-#bins = 11
+# bins = 11

def mailfolder(app, mboxname, number):
    mail = getattr(app, mboxname, None)
    if mail is None:
...
@@ -210,7 +213,7 @@ def mailfolder(app, mboxname, number):
        mail.length = Length()
        for i in range(bins):
            mail.manage_addFolder('b' + str(i))
    bin = hash(str(number)) % bins
    return getattr(mail, 'b' + str(bin))
...
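The bins arithmetic above is a plain hash bucket: a message number lands in one of 9973 subfolders so no single folder grows huge. Note that on Python 3, hash() of a str is randomized per process (PYTHONHASHSEED), so the bucket chosen for a given number is only stable within one run:

    bins = 9973

    def bucket_name(number):
        return 'b' + str(hash(str(number)) % bins)

    print(bucket_name(12345))  # e.g. 'b4821' -- varies across processes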
@@ -219,24 +222,25 @@ def VmSize():
    try:
        with open('/proc/%s/status' % os.getpid()) as f:
            lines = f.readlines()
-    except:
+    except:  # noqa: E722 do not use bare 'except'
        return 0
    else:
-        l = list(filter(lambda l: l[:7] == 'VmSize:', lines))
-        if l:
-            l = l[0][7:].strip().split()[0]
-            return int(l)
+        l_ = list(filter(lambda l: l[:7] == 'VmSize:', lines))
+        if l_:
+            l_ = l_[0][7:].strip().split()[0]
+            return int(l_)
    return 0

def setup(lib_python):
    try:
        os.remove(os.path.join(lib_python, '..', '..', 'var', 'Data.fs'))
-    except:
+    except:  # noqa: E722 do not use bare 'except'
        pass
    import Zope2
    import Products
    import AccessControl.SecurityManagement
    app = Zope2.app()
    Products.ZCatalog.ZCatalog.manage_addZCatalog(app, 'cat', '')
...
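VmSize() scrapes /proc, so it is Linux-only and returns 0 anywhere else; the l_ renames are just the E741 fix for the ambiguous name 'l'. The same scrape with more direct idioms, as a sketch:

    import os

    def vmsize_kb():
        try:
            with open('/proc/%d/status' % os.getpid()) as f:
                for line in f:
                    if line.startswith('VmSize:'):
                        return int(line.split()[1])  # value in kB
        except OSError:
            pass
        return 0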
@@ -261,6 +265,7 @@ def setup(lib_python):
    app._p_jar.close()

def do(db, f, args):
    """Do something in a transaction, retrying of necessary
...
@@ -275,8 +280,8 @@ def do(db, f, args):
    connection = db.open()
    try:
        transaction.begin()
        t = time.time()
        c = time.clock()
        try:
            try:
                r = f(connection, *args)
...
@@ -288,8 +293,8 @@ def do(db, f, args):
        wcomp += time.time() - t
        ccomp += time.clock() - c
        t = time.time()
        c = time.clock()
        try:
            try:
                transaction.commit()
...
@@ -306,6 +311,7 @@ def do(db, f, args):
    return start, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit, r

def run1(tid, db, factory, job, args):
    (start, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit, r
     ) = do(db, job, args)
...
@@ -314,6 +320,7 @@ def run1(tid, db, factory, job, args):
        start, tid, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit,
        factory.__name__, r))

def run(jobs, tid=b''):
    import Zope2
    while 1:
...
@@ -321,7 +328,7 @@ def run(jobs, tid=b''):
        run1(tid, Zope2.DB, factory, job, args)
        if repeatp:
            while 1:
                i = random.randint(0, 100)
                if i > repeatp:
                    break
                run1(tid, Zope2.DB, factory, job, args)
...
@@ -350,27 +357,28 @@ def index(connection, messages, catalog, max):
            doc = mail[docid]
            for h in message.headers:
                h = h.strip()
-                l = h.find(':')
-                if l <= 0:
+                l_ = h.find(':')
+                if l_ <= 0:
                    continue
-                name = h[:l].lower()
-                if name == 'subject':
-                    name = 'title'
-                v = h[l + 1:].strip()
-                type = 'string'
+                name = h[:l_].lower()
+                if name == 'subject':
+                    name = 'title'
+                v = h[l_ + 1:].strip()
+                type = 'string'
                if name == 'title':
                    doc.manage_changeProperties(title=h)
                else:
                    try:
                        doc.manage_addProperty(name, v, type)
-                    except:
+                    except:  # noqa: E722 do not use bare 'except'
                        pass
            if catalog:
                app.cat.catalog_object(doc)
            return message.number

class IndexJob(object):
    needs_mbox = 1
    catalog = 1
...
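The header loop above splits each 'Name: value' mail header at the first colon and renames Subject to title; in isolation the same logic looks like this:

    h = 'Subject: ZODB load test'
    pos = h.find(':')
    if pos > 0:
        name = h[:pos].lower()
        if name == 'subject':
            name = 'title'
        value = h[pos + 1:].strip()
        print(name, '=', value)  # title = ZODB load test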
@@ -389,8 +397,11 @@ class InsertJob(IndexJob):
    catalog = 0
    prefix = 'insert'

wordre = re.compile(r'(\w{3,20})')
stop = 'and', 'not'

def edit(connection, mbox, catalog=1):
    app = connection.root()['Application']
    mail = getattr(app, mbox.__name__, None)
...
@@ -423,7 +434,7 @@ def edit(connection, mbox, catalog=1):
    nins = 10
    for j in range(ndel):
        j = random.randint(0, len(text) - 1)
        word = text[j]
        m = wordre.search(word)
        if m:
...
@@ -444,6 +455,7 @@ def edit(connection, mbox, catalog=1):
    return norig, ndel, nins

class EditJob(object):
    needs_mbox = 1
    prefix = 'edit'
...
@@ -456,6 +468,7 @@ class EditJob(object):
    def create(self):
        return edit, (self.mbox, self.catalog)

class ModifyJob(EditJob):
    prefix = 'modify'
    catalog = 0
...
@@ -480,6 +493,7 @@ def search(connection, terms, number):
    return n

class SearchJob(object):

    def __init__(self, terms='', number=10):
...
@@ -499,189 +513,189 @@ class SearchJob(object):
        return search, (self.terms, self.number)

words = [
    'banishment', 'indirectly', 'imprecise', 'peeks', 'opportunely', 'bribe',
    'sufficiently', 'Occidentalized', 'elapsing', 'fermenting', 'listen',
    'orphanage', 'younger', 'draperies', 'Ida', 'cuttlefish', 'mastermind',
    'Michaels', 'populations', 'lent', 'cater', 'attentional', 'hastiness',
    'dragnet', 'mangling', 'scabbards', 'princely', 'star', 'repeat',
    'deviation', 'agers', 'fix', 'digital', 'ambitious', 'transit', 'jeeps',
    'lighted', 'Prussianizations', 'Kickapoo', 'virtual', 'Andrew',
    'generally', 'boatsman', 'amounts', 'promulgation', 'Malay', 'savaging',
    'courtesan', 'nursed', 'hungered', 'shiningly', 'ship', 'presides',
    'Parke', 'moderns', 'Jonas', 'unenlightening', 'dearth', 'deer',
    'domesticates', 'recognize', 'gong', 'penetrating', 'dependents',
    'unusually', 'complications', 'Dennis', 'imbalances', 'nightgown',
    'attached', 'testaments', 'congresswoman', 'circuits', 'bumpers',
    'braver', 'Boreas', 'hauled', 'Howe', 'seethed', 'cult', 'numismatic',
    'vitality', 'differences', 'collapsed', 'Sandburg', 'inches', 'head',
    'rhythmic', 'opponent', 'blanketer', 'attorneys', 'hen', 'spies',
    'indispensably', 'clinical', 'redirection', 'submit', 'catalysts',
    'councilwoman', 'kills', 'topologies', 'noxious', 'exactions', 'dashers',
    'balanced', 'slider', 'cancerous', 'bathtubs', 'legged', 'respectably',
    'crochets', 'absenteeism', 'arcsine', 'facility', 'cleaners', 'bobwhite',
    'Hawkins', 'stockade', 'provisional', 'tenants', 'forearms', 'Knowlton',
    'commit', 'scornful', 'pediatrician', 'greets', 'clenches', 'trowels',
    'accepts', 'Carboloy', 'Glenn', 'Leigh', 'enroll', 'Madison', 'Macon',
    'oiling', 'entertainingly', 'super', 'propositional', 'pliers',
    'beneficiary', 'hospitable', 'emigration', 'sift', 'sensor', 'reserved',
    'colonization', 'shrilled', 'momentously', 'stevedore', 'Shanghaiing',
    'schoolmasters', 'shaken', 'biology', 'inclination', 'immoderate',
    'stem', 'allegory', 'economical', 'daytime', 'Newell', 'Moscow',
    'archeology', 'ported', 'scandals', 'Blackfoot', 'leery', 'kilobit',
    'empire', 'obliviousness', 'productions', 'sacrificed', 'ideals',
    'enrolling', 'certainties', 'Capsicum', 'Brookdale', 'Markism',
    'unkind', 'dyers', 'legislates', 'grotesquely', 'megawords',
    'arbitrary', 'laughing', 'wildcats', 'thrower', 'sex', 'devils',
    'Wehr', 'ablates', 'consume', 'gossips', 'doorways', 'Shari',
    'advanced', 'enumerable', 'existentially', 'stunt', 'auctioneers',
    'scheduler', 'blanching', 'petulance', 'perceptibly', 'vapors',
    'progressed', 'rains', 'intercom', 'emergency', 'increased',
    'fluctuating', 'Krishna', 'silken', 'reformed', 'transformation',
    'easter', 'fares', 'comprehensible', 'trespasses', 'hallmark',
    'tormenter', 'breastworks', 'brassiere', 'bladders', 'civet', 'death',
    'transformer', 'tolerably', 'bugle', 'clergy', 'mantels', 'satin',
    'Boswellizes', 'Bloomington', 'notifier', 'Filippo', 'circling',
    'unassigned', 'dumbness', 'sentries', 'representativeness', 'souped',
    'Klux', 'Kingstown', 'gerund', 'Russell', 'splices', 'bellow',
    'bandies', 'beefers', 'cameramen', 'appalled', 'Ionian', 'butterball',
    'Portland', 'pleaded', 'admiringly', 'pricks', 'hearty', 'corer',
    'deliverable', 'accountably', 'mentors', 'accorded', 'acknowledgement',
    'Lawrenceville', 'morphology', 'eucalyptus', 'Rena', 'enchanting',
    'tighter', 'scholars', 'graduations', 'edges', 'Latinization',
    'proficiency', 'monolithic', 'parenthesizing', 'defy', 'shames',
    'enjoyment', 'Purdue', 'disagrees', 'barefoot', 'maims', 'flabbergast',
    'dishonorable', 'interpolation', 'fanatics', 'dickens', 'abysses',
    'adverse', 'components', 'bowl', 'belong', 'Pipestone', 'trainees',
    'paw', 'pigtail', 'feed', 'whore', 'conditioner', 'Volstead', 'voices',
    'strain', 'inhabits', 'Edwin', 'discourses', 'deigns', 'cruiser',
    'biconvex', 'biking', 'depreciation', 'Harrison', 'Persian', 'stunning',
    'agar', 'rope', 'wagoner', 'elections', 'reticulately', 'Cruz',
    'pulpits', 'wilt', 'peels', 'plants', 'administerings', 'deepen',
    'rubs', 'hence', 'dissension', 'implored', 'bereavement', 'abyss',
    'Pennsylvania', 'benevolent', 'corresponding', 'Poseidon', 'inactive',
    'butchers', 'Mach', 'woke', 'loading', 'utilizing', 'Hoosier', 'undo',
    'Semitization', 'trigger', 'Mouthe', 'mark', 'disgracefully', 'copier',
    'futility', 'gondola', 'algebraic', 'lecturers', 'sponged',
    'instigators', 'looted', 'ether', 'trust', 'feeblest', 'sequencer',
    'disjointness', 'congresses', 'Vicksburg', 'incompatibilities',
    'commend', 'Luxembourg', 'reticulation', 'instructively',
    'reconstructs', 'bricks', 'attache', 'Englishman', 'provocation',
    'roughen', 'cynic', 'plugged', 'scrawls', 'antipode', 'injected',
    'Daedalus', 'Burnsides', 'asker', 'confronter', 'merriment', 'disdain',
    'thicket', 'stinker', 'great', 'tiers', 'oust', 'antipodes',
    'Macintosh', 'tented', 'packages', 'Mediterraneanize', 'hurts',
    'orthodontist', 'seeder', 'readying', 'babying', 'Florida', 'Sri',
    'buckets', 'complementary', 'cartographer', 'chateaus', 'shaves',
    'thinkable', 'Tehran', 'Gordian', 'Angles', 'arguable', 'bureau',
    'smallest', 'fans', 'navigated', 'dipole', 'bootleg', 'distinctive',
    'minimization', 'absorbed', 'surmised', 'Malawi', 'absorbent', 'close',
    'conciseness', 'hopefully', 'declares', 'descent', 'trick', 'portend',
    'unable', 'mildly', 'Morse', 'reference', 'scours', 'Caribbean',
    'battlers', 'astringency', 'likelier', 'Byronizes', 'econometric',
    'grad', 'steak', 'Austrian', 'ban', 'voting', 'Darlington', 'bison',
    'Cetus', 'proclaim', 'Gilbertson', 'evictions', 'submittal',
    'bearings', 'Gothicizer', 'settings', 'McMahon', 'densities',
    'determinants', 'period', 'DeKastere', 'swindle', 'promptness',
    'enablers', 'wordy', 'during', 'tables', 'responder', 'baffle',
    'phosgene', 'muttering', 'limiters', 'custodian', 'prevented',
    'Stouffer', 'waltz', 'Videotex', 'brainstorms', 'alcoholism', 'jab',
    'shouldering', 'screening', 'explicitly', 'earner', 'commandment',
    'French', 'scrutinizing', 'Gemma', 'capacitive', 'sheriff',
    'herbivore', 'Betsey', 'Formosa', 'scorcher', 'font', 'damming',
    'soldiers', 'flack', 'Marks', 'unlinking', 'serenely', 'rotating',
    'converge', 'celebrities', 'unassailable', 'bawling', 'wording',
    'silencing', 'scotch', 'coincided', 'masochists', 'graphs',
    'pernicious', 'disease', 'depreciates', 'later', 'torus', 'interject',
    'mutated', 'causer', 'messy', 'Bechtel', 'redundantly', 'profoundest',
    'autopsy', 'philosophic', 'iterate', 'Poisson', 'horridly',
    'silversmith', 'millennium', 'plunder', 'salmon', 'missioner',
    'advances', 'provers', 'earthliness', 'manor', 'resurrectors', 'Dahl',
    'canto', 'gangrene', 'gabler', 'ashore', 'frictionless',
    'expansionism', 'emphasis', 'preservations', 'Duane', 'descend',
    'isolated', 'firmware', 'dynamites', 'scrawled', 'cavemen', 'ponder',
    'prosperity', 'squaw', 'vulnerable', 'opthalmic', 'Simms', 'unite',
    'totallers', 'Waring', 'enforced', 'bridge', 'collecting', 'sublime',
    'Moore', 'gobble', 'criticizes', 'daydreams', 'sedate', 'apples',
    'Concordia', 'subsequence', 'distill', 'Allan', 'seizure', 'Isadore',
    'Lancashire', 'spacings', 'corresponded', 'hobble', 'Boonton',
    'genuineness', 'artifact', 'gratuities', 'interviewee', 'Vladimir',
    'mailable', 'Bini', 'Kowalewski', 'interprets', 'bereave', 'evacuated',
    'friend', 'tourists', 'crunched', 'soothsayer', 'fleetly',
    'Romanizations', 'Medicaid', 'persevering', 'flimsy', 'doomsday',
    'trillion', 'carcasses', 'guess', 'seersucker', 'ripping',
    'affliction', 'wildest', 'spokes', 'sheaths', 'procreate',
    'rusticates', 'Schapiro', 'thereafter', 'mistakenly', 'shelf',
    'ruination', 'bushel', 'assuredly', 'corrupting', 'federation',
    'portmanteau', 'wading', 'incendiary', 'thing', 'wanderers',
    'messages', 'Paso', 'reexamined', 'freeings', 'denture', 'potting',
    'disturber', 'laborer', 'comrade', 'intercommunicating', 'Pelham',
    'reproach', 'Fenton', 'Alva', 'oasis', 'attending', 'cockpit',
    'scout', 'Jude', 'gagging', 'jailed', 'crustaceans', 'dirt',
    'exquisitely', 'Internet', 'blocker', 'smock', 'Troutman',
    'neighboring', 'surprise', 'midscale', 'impart', 'badgering',
    'fountain', 'Essen', 'societies', 'redresses', 'afterwards',
    'puckering', 'silks', 'Blakey', 'sequel', 'greet', 'basements',
    'Aubrey', 'helmsman', 'album', 'wheelers', 'easternmost', 'flock',
    'ambassadors', 'astatine', 'supplant', 'gird', 'clockwork', 'foxes',
    'rerouting', 'divisional', 'bends', 'spacer', 'physiologically',
    'exquisite', 'concerts', 'unbridled', 'crossing', 'rock',
    'leatherneck', 'Fortescue', 'reloading', 'Laramie', 'Tim', 'forlorn',
    'revert', 'scarcer', 'spigot', 'equality', 'paranormal', 'aggrieves',
    'pegs', 'committeewomen', 'documented', 'interrupt', 'emerald',
    'Battelle', 'reconverted', 'anticipated', 'prejudices', 'drowsiness',
    'trivialities', 'food', 'blackberries', 'Cyclades', 'tourist',
    'branching', 'nugget', 'Asilomar', 'repairmen', 'Cowan',
    'receptacles', 'nobler', 'Nebraskan', 'territorial', 'chickadee',
    'bedbug', 'darted', 'vigilance', 'Octavia', 'summands', 'policemen',
    'twirls', 'style', 'outlawing', 'specifiable', 'pang', 'Orpheus',
    'epigram', 'Babel', 'butyrate', 'wishing', 'fiendish', 'accentuate',
    'much', 'pulsed', 'adorned', 'arbiters', 'counted', 'Afrikaner',
    'parameterizes', 'agenda', 'Americanism', 'referenda', 'derived',
    'liquidity', 'trembling', 'lordly', 'Agway', 'Dillon', 'propellers',
    'statement', 'stickiest', 'thankfully', 'autograph', 'parallel',
    'impulse', 'Hamey', 'stylistic', 'disproved', 'inquirer', 'hoisting',
    'residues', 'variant', 'colonials', 'dequeued', 'especial', 'Samoa',
    'Polaris', 'dismisses', 'surpasses', 'prognosis', 'urinates',
    'leaguers', 'ostriches', 'calculative', 'digested', 'divided',
    'reconfigurer', 'Lakewood', 'illegalities', 'redundancy',
    'approachability', 'masterly', 'cookery', 'crystallized', 'Dunham',
    'exclaims', 'mainline', 'Australianizes', 'nationhood', 'pusher',
    'ushers', 'paranoia', 'workstations', 'radiance', 'impedes',
    'Minotaur', 'cataloging', 'bites', 'fashioning', 'Alsop', 'servants',
    'Onondaga', 'paragraph', 'leadings', 'clients', 'Latrobe',
    'Cornwallis', 'excitingly', 'calorimetric', 'savior', 'tandem',
    'antibiotics', 'excuse', 'brushy', 'selfish', 'naive', 'becomes',
    'towers', 'popularizes', 'engender', 'introducing', 'possession',
    'slaughtered', 'marginally', 'Packards', 'parabola', 'utopia',
    'automata', 'deterrent', 'chocolates', 'objectives', 'clannish',
    'aspirin', 'ferociousness', 'primarily', 'armpit', 'handfuls',
    'dangle', 'Manila', 'enlivened', 'decrease', 'phylum', 'hardy',
    'objectively', 'baskets', 'chaired', 'Sepoy', 'deputy', 'blizzard',
    'shootings', 'breathtaking', 'sticking', 'initials', 'epitomized',
    'Forrest', 'cellular', 'amatory', 'radioed', 'horrified', 'Neva',
    'simultaneous', 'delimiter', 'expulsion', 'Himmler', 'contradiction',
    'Remus', 'Franklinizations', 'luggage', 'moisture', 'Jews',
    'comptroller', 'brevity', 'contradictions', 'Ohio', 'active',
    'babysit', 'China', 'youngest', 'superstition', 'clawing', 'raccoons',
    'chose', 'shoreline', 'helmets', 'Jeffersonian', 'papered',
    'kindergarten', 'reply', 'succinct', 'split', 'wriggle', 'suitcases',
    'nonce', 'grinders', 'anthem', 'showcase', 'maimed', 'blue', 'obeys',
    'unreported', 'perusing', 'recalculate', 'rancher', 'demonic',
    'Lilliputianize', 'approximation', 'repents', 'yellowness',
    'irritates', 'Ferber', 'flashlights', 'booty', 'Neanderthal',
    'someday', 'foregoes', 'lingering', 'cloudiness', 'guy', 'consumer',
    'Berkowitz', 'relics', 'interpolating', 'reappearing', 'advisements',
    'Nolan', 'turrets', 'skeletal', 'skills', 'mammas', 'Winsett',
    'wheelings', 'stiffen', 'monkeys', 'plainness', 'braziers', 'Leary',
    'advisee', 'jack', 'verb', 'reinterpret', 'geometrical', 'trolleys',
    'arboreal', 'overpowered', 'Cuzco', 'poetical', 'admirations',
    'Hobbes', 'phonemes', 'Newsweek', 'agitator', 'finally', 'prophets',
    'environment', 'easterners', 'precomputed', 'faults', 'rankly',
    'swallowing', 'crawl', 'trolley', 'spreading', 'resourceful', 'go',
    'demandingly', 'broader', 'spiders', 'Marsha', 'debris', 'operates',
    'Dundee', 'alleles', 'crunchier', 'quizzical', 'hanging', 'Fisk']

wordsd = {}
for word in words:
...
@@ -702,7 +716,7 @@ def collect_options(args, jobs, options):
            collect_options(list(d['options']), jobs, options)
        elif name in options:
            v = args.pop(0)
-            if options[name] != None:
+            if options[name] is not None:
                raise ValueError("Duplicate values for %s, %s and %s"
                                 % (name, v, options[name])
...
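The != None fix is more than style (pycodestyle E711): != dispatches to __eq__/__ne__, which a class may override, while 'is not None' is an identity test that cannot be fooled:

    class Weird(object):
        def __eq__(self, other):
            return True   # claims equality with everything

    w = Weird()
    print(w != None)      # False -- __ne__ is fooled  # noqa: E711
    print(w is not None)  # True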
@@ -721,7 +735,7 @@ def collect_options(args, jobs, options):
                    "Duplicate parameter %s for job %s" % (name, job)
                    )
                kw[name] = v
        if 'frequency' in kw:
            frequency = kw['frequency']
            del kw['frequency']
...
@@ -756,6 +770,7 @@ def find_lib_python():
            return p
    raise ValueError("Couldn't find lib/python")

def main(args=None):
    lib_python = find_lib_python()
    sys.path.insert(0, lib_python)
...
@@ -830,6 +845,5 @@ def zetup(configfile_name):
    dropPrivileges(opts.configroot)

if __name__ == '__main__':
    main()
src/ZODB/serialize.py
@@ -327,7 +327,6 @@ class ObjectWriter(object):
        else:
            return ['w', (oid, obj.database_name)]

    # Since we have an oid, we have either a persistent instance
    # (an instance of Persistent), or a persistent class.
...
def
__iter__
(
self
):
return
NewObjectIterator
(
self
.
_stack
)
class
NewObjectIterator
(
object
):
# The pickler is used as a forward iterator when the connection
...
...
@@ -463,6 +463,7 @@ class NewObjectIterator(object):
    next = __next__

class ObjectReader(object):

    def __init__(self, conn=None, cache=None, factory=None):
...
@@ -481,7 +482,8 @@ class ObjectReader(object):
def
find_global
(
modulename
,
name
):
return
factory
(
conn
,
modulename
,
name
)
unpickler
=
PersistentUnpickler
(
find_global
,
self
.
_persistent_load
,
file
)
unpickler
=
PersistentUnpickler
(
find_global
,
self
.
_persistent_load
,
file
)
return
unpickler
...
...
@@ -542,7 +544,6 @@ class ObjectReader(object):
loaders
[
'm'
]
=
load_multi_persistent
def
load_persistent_weakref
(
self
,
oid
,
database_name
=
None
):
if
not
isinstance
(
oid
,
bytes
):
assert
isinstance
(
oid
,
str
)
...
...
@@ -624,7 +625,7 @@ class ObjectReader(object):
try
:
unpickler
.
load
()
# skip the class metadata
return
unpickler
.
load
()
except
EOFError
as
msg
:
except
EOFError
:
log
=
logging
.
getLogger
(
"ZODB.serialize"
)
log
.
exception
(
"Unpickling error: %r"
,
pickle
)
raise
...
...
@@ -673,9 +674,11 @@ def referencesf(p, oids=None):
return
oids
oid_klass_loaders
=
{
'w'
:
lambda
oid
,
database_name
=
None
:
None
,
}
}
def
get_refs
(
a_pickle
):
"""Return oid and class information for references in a pickle
...
...
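The `referencesf`/`get_refs` machinery this file implements works by re-unpickling a data record with a `persistent_load` hook that records, rather than resolves, each persistent id. A minimal sketch of that idea using only the standard library (the name `collect_refs` is illustrative; the real code uses ZODB's `PersistentUnpickler`):

    import io
    import pickle

    def collect_refs(record):
        """Return the persistent ids referenced by one ZODB-style record."""
        refs = []

        class RefUnpickler(pickle.Unpickler):
            def persistent_load(self, pid):
                # Record the persistent id instead of loading the object.
                refs.append(pid)
                return None

        up = RefUnpickler(io.BytesIO(record))
        up.load()  # first pickle: class metadata
        up.load()  # second pickle: state; triggers persistent_load per reference
        return refs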
src/ZODB/tests/BasicStorage.py  View file @ 6e5baffd
...
@@ -34,6 +34,7 @@ from random import randint
 from .. import utils


 class BasicStorage(object):
     def checkBasics(self):
         self.assertEqual(self._storage.lastTransaction(), ZERO)
...
@@ -165,13 +166,13 @@ class BasicStorage(object):
     def checkLen(self):
         # len(storage) reports the number of objects.
         # check it is zero when empty
-        self.assertEqual(len(self._storage),0)
+        self.assertEqual(len(self._storage), 0)
         # check it is correct when the storage contains two object.
         # len may also be zero, for storages that do not keep track
         # of this number
         self._dostore(data=MinPO(22))
         self._dostore(data=MinPO(23))
-        self.assertTrue(len(self._storage) in [0,2])
+        self.assertTrue(len(self._storage) in [0, 2])

     def checkGetSize(self):
         self._dostore(data=MinPO(25))
...
@@ -208,7 +209,8 @@ class BasicStorage(object):
     def _do_store_in_separate_thread(self, oid, revid, voted):
         # We'll run the competing trans in a separate thread:
-        thread = threading.Thread(name='T2', target=self._dostore,
-                                  args=(oid,), kwargs=dict(revid=revid))
+        thread = threading.Thread(
+            name='T2',
+            target=self._dostore, args=(oid,), kwargs=dict(revid=revid))
         thread.daemon = True
         thread.start()
         thread.join(.1)
...
@@ -220,7 +222,7 @@ class BasicStorage(object):
         tid2 = self._dostore(oid, revid=tid)
         data = b'cpersistent\nPersistent\nq\x01.N.'  # a simple persistent obj
-        #----------------------------------------------------------------------
+        # ---------------------------------------------------------------------
         # stale read
         t = TransactionMetaData()
         self._storage.tpc_begin(t)
...
@@ -233,12 +235,12 @@ class BasicStorage(object):
             self.assertEqual(v.oid, oid)
             self.assertEqual(v.serials, (tid2, tid))
         else:
-            if 0: self.assertTrue(False, "No conflict error")
+            if 0:
+                self.assertTrue(False, "No conflict error")
         self._storage.tpc_abort(t)

-        #----------------------------------------------------------------------
+        # ---------------------------------------------------------------------
         # non-stale read, no stress. :)
         t = TransactionMetaData()
         self._storage.tpc_begin(t)
...
@@ -248,7 +250,7 @@ class BasicStorage(object):
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)

-        #----------------------------------------------------------------------
+        # ---------------------------------------------------------------------
         # non-stale read, competition after vote.  The competing
         # transaction must produce a tid > this transaction's tid
         t = TransactionMetaData()
...
@@ -268,7 +270,7 @@ class BasicStorage(object):
             utils.load_current(self._storage, b'\0\0\0\0\0\0\0\xf3')[1])

-        #----------------------------------------------------------------------
+        # ---------------------------------------------------------------------
         # non-stale competing trans after checkCurrentSerialInTransaction
         t = TransactionMetaData()
         self._storage.tpc_begin(t)
...
@@ -295,7 +297,6 @@ class BasicStorage(object):
             tid4 > utils.load_current(self._storage, b'\0\0\0\0\0\0\0\xf4')[1])

     def check_tid_ordering_w_commit(self):
         # It's important that storages always give a consistent
...
@@ -322,6 +323,7 @@ class BasicStorage(object):
         self._storage.tpc_vote(t)

         to_join = []

         def run_in_thread(func):
             t = threading.Thread(target=func)
             t.daemon = True
...
@@ -330,6 +332,7 @@ class BasicStorage(object):
         started = threading.Event()
         finish = threading.Event()

         @run_in_thread
         def commit():
             def callback(tid):
...
@@ -349,7 +352,6 @@ class BasicStorage(object):
                 attempts.append(1)
                 attempts_cond.notify_all()

         @run_in_thread
         def load():
             update_attempts()
...
@@ -360,6 +362,7 @@ class BasicStorage(object):
         if hasattr(self._storage, 'getTid'):
             expected_attempts += 1

             @run_in_thread
             def getTid():
                 update_attempts()
...
@@ -367,6 +370,7 @@ class BasicStorage(object):
         if hasattr(self._storage, 'lastInvalidations'):
             expected_attempts += 1

             @run_in_thread
             def lastInvalidations():
                 update_attempts()
...
@@ -389,10 +393,11 @@ class BasicStorage(object):
         for m, tid in results.items():
             self.assertEqual(tid, tids[1])

-    # verify storage/Connection for race in between load/open and local invalidations.
+    # verify storage/Connection for race in between load/open and local
+    # invalidations.
     # https://github.com/zopefoundation/ZEO/issues/166
     # https://github.com/zopefoundation/ZODB/issues/290
     @with_high_concurrency
     def check_race_loadopen_vs_local_invalidate(self):
         db = DB(self._storage)
...
@@ -417,6 +422,7 @@ class BasicStorage(object):
         # cache is not stale.
         failed = threading.Event()
         failure = [None]

         def verify():
             transaction.begin()
             zconn = db.open()
...
@@ -433,10 +439,12 @@ class BasicStorage(object):
             v1 = obj1.value
             v2 = obj2.value
             if v1 != v2:
-                failure[0] = "verify: obj1.value (%d) != obj2.value (%d)" % (v1, v2)
+                failure[0] = (
+                    "verify: obj1.value (%d) != obj2.value (%d)" % (v1, v2))
                 failed.set()

-            transaction.abort()  # we did not changed anything; also fails with commit
+            # we did not changed anything; also fails with commit:
+            transaction.abort()
             zconn.close()

         # modify changes obj1/obj2 by doing `objX.value += 1`.
...
@@ -457,25 +465,27 @@ class BasicStorage(object):
             transaction.commit()
             zconn.close()

-        # xrun runs f in a loop until either N iterations, or until failed is set.
+        # xrun runs f in a loop until either N iterations, or until failed is
+        # set.
         def xrun(f, N):
             try:
                 for i in range(N):
-                    #print('%s.%d' % (f.__name__, i))
+                    # print('%s.%d' % (f.__name__, i))
                     f()
                     if failed.is_set():
                         break
-            except:
+            except:  # noqa: E722 do not use bare 'except'
                 failed.set()
                 raise

         # loop verify and modify concurrently.
         init()

         N = 500
-        tverify = threading.Thread(name='Tverify', target=xrun, args=(verify, N))
-        tmodify = threading.Thread(name='Tmodify', target=xrun, args=(modify, N))
+        tverify = threading.Thread(
+            name='Tverify', target=xrun, args=(verify, N))
+        tmodify = threading.Thread(
+            name='Tmodify', target=xrun, args=(modify, N))
         tverify.start()
         tmodify.start()
         tverify.join(60)
...
@@ -484,13 +494,13 @@ class BasicStorage(object):
         if failed.is_set():
             self.fail(failure[0])

     # client-server storages like ZEO, NEO and RelStorage allow several storage
     # clients to be connected to single storage server.
     #
     # For client-server storages test subclasses should implement
     # _new_storage_client to return new storage client that is connected to the
     # same storage server self._storage is connected to.
     def _new_storage_client(self):
         raise NotImplementedError
...
@@ -510,10 +520,12 @@ class BasicStorage(object):
                 # the test will be skipped from main thread because dbopen is
                 # first used in init on the main thread before any other thread
                 # is spawned.
-                self.skipTest("%s does not implement _new_storage_client" % type(self))
+                self.skipTest(
+                    "%s does not implement _new_storage_client" % type(self))
             return DB(zstor)

-        # init initializes the database with two integer objects - obj1/obj2 that are set to 0.
+        # init initializes the database with two integer objects - obj1/obj2
+        # that are set to 0.
         def init():
             db = dbopen()
...
@@ -529,23 +541,27 @@ class BasicStorage(object):
             db.close()

-        # we'll run 8 T workers concurrently. As of 20210416, due to race conditions
-        # in ZEO, it triggers the bug where T sees stale obj2 with obj1.value != obj2.value
+        # we'll run 8 T workers concurrently. As of 20210416, due to race
+        # conditions in ZEO, it triggers the bug where T sees stale obj2 with
+        # obj1.value != obj2.value
         #
         # The probability to reproduce the bug is significantly reduced with
-        # decreasing n(workers): almost never with nwork=2 and sometimes with nwork=4.
+        # decreasing n(workers): almost never with nwork=2 and sometimes with
+        # nwork=4.
         nwork = 8

         # T is a worker that accesses obj1/obj2 in a loop and verifies
         # `obj1.value == obj2.value` invariant.
         #
         # access to obj1 is organized to always trigger loading from zstor.
-        # access to obj2 goes through zconn cache and so verifies whether the cache is not stale.
+        # access to obj2 goes through zconn cache and so verifies whether the
+        # cache is not stale.
         #
-        # Once in a while T tries to modify obj{1,2}.value maintaining the invariant as
-        # test source of changes for other workers.
+        # Once in a while T tries to modify obj{1,2}.value maintaining the
+        # invariant as test source of changes for other workers.
         failed = threading.Event()
         failure = [None] * nwork  # [tx] is failure from T(tx)

         def T(tx, N):
             db = dbopen()
...
@@ -565,37 +581,38 @@ class BasicStorage(object):
                 i1 = obj1.value
                 i2 = obj2.value
                 if i1 != i2:
-                    #print('FAIL')
-                    failure[tx] = "T%s: obj1.value (%d) != obj2.value (%d)" % (tx, i1, i2)
+                    # print('FAIL')
+                    failure[tx] = (
+                        "T%s: obj1.value (%d) != obj2.value (%d)"
+                        % (tx, i1, i2))
                     failed.set()

                 # change objects once in a while
                 if randint(0, 4) == 0:
-                    #print("T%s: modify" % tx)
+                    # print("T%s: modify" % tx)
                     obj1.value += 1
                     obj2.value += 1

                 try:
                     transaction.commit()
                 except POSException.ConflictError:
-                    #print('conflict -> ignore')
+                    # print('conflict -> ignore')
                     transaction.abort()

                 zconn.close()

             try:
                 for i in range(N):
-                    #print('T%s.%d' % (tx, i))
+                    # print('T%s.%d' % (tx, i))
                     t_()
                     if failed.is_set():
                         break
-            except:
+            except:  # noqa: E722 do not use bare 'except'
                 failed.set()
                 raise
             finally:
                 db.close()

         # run the workers concurrently.
         init()
...
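These hunks repeatedly drive a storage through ZODB's two-phase commit by hand. A minimal sketch of that call order, assuming ZODB 5's storage API (the data bytes here are a placeholder, not a real pickle):

    from ZODB.MappingStorage import MappingStorage
    from ZODB.Connection import TransactionMetaData

    storage = MappingStorage()
    t = TransactionMetaData()
    storage.tpc_begin(t)                           # phase 1 opens
    oid = storage.new_oid()                        # allocate an object id
    storage.store(oid, b'\0' * 8, b'data', '', t)  # stage a data record
    storage.tpc_vote(t)                            # storage promises to commit
    tid = storage.tpc_finish(t)                    # phase 2: durable; returns tid
    print(len(storage))                            # 1 object stored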
src/ZODB/tests/ConflictResolution.py  View file @ 6e5baffd
...
@@ -19,7 +19,8 @@ from ZODB.POSException import ConflictError, UndoError
 from persistent import Persistent
 from transaction import TransactionManager

-from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
+from ZODB.tests.StorageTestBase import zodb_pickle


 class PCounter(Persistent):
...
@@ -42,19 +43,23 @@ class PCounter(Persistent):
 # Insecurity:  What if _p_resolveConflict _thinks_ it resolved the
 # conflict, but did something wrong?


 class PCounter2(PCounter):
     def _p_resolveConflict(self, oldState, savedState, newState):
         raise ConflictError


 class PCounter3(PCounter):
     def _p_resolveConflict(self, oldState, savedState, newState):
         raise AttributeError("no attribute (testing conflict resolution)")


 class PCounter4(PCounter):
     def _p_resolveConflict(self, oldState, savedState):
         raise RuntimeError("Can't get here; not enough args")


 class ConflictResolvingStorage(object):

     def checkResolve(self, resolvable=True):
...
@@ -92,7 +97,6 @@ class ConflictResolvingStorage(object):
     def checkZClassesArentResolved(self):
         from ZODB.ConflictResolution import find_global, BadClassName
-        dummy_class_tuple = ('*foobar', ())
         self.assertRaises(BadClassName, find_global, '*foobar', ())

     def checkBuggyResolve1(self):
...
@@ -108,7 +112,7 @@ class ConflictResolvingStorage(object):
         # The effect of committing two transactions with the same
         # pickle is to commit two different transactions relative to
         # revid1 that add two to _value.
-        revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
+        self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
         self.assertRaises(ConflictError,
                           self._dostoreNP,
                           oid, revid=revid1, data=zodb_pickle(obj))
...
@@ -126,11 +130,12 @@ class ConflictResolvingStorage(object):
         # The effect of committing two transactions with the same
         # pickle is to commit two different transactions relative to
         # revid1 that add two to _value.
-        revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
+        self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
         self.assertRaises(ConflictError,
                           self._dostoreNP,
                           oid, revid=revid1, data=zodb_pickle(obj))


 class ConflictResolvingTransUndoStorage(object):

     def checkUndoConflictResolution(self):
...
@@ -145,7 +150,7 @@ class ConflictResolvingTransUndoStorage(object):
         obj.inc()
         revid_b = self._dostore(oid, revid=revid_a, data=obj)
         obj.inc()
-        revid_c = self._dostore(oid, revid=revid_b, data=obj)
+        self._dostore(oid, revid=revid_b, data=obj)
         # Start the undo
         info = self._storage.undoInfo()
         tid = info[1]['id']
...
@@ -167,7 +172,7 @@ class ConflictResolvingTransUndoStorage(object):
         obj.inc()
         revid_b = self._dostore(oid, revid=revid_a, data=obj)
         obj.inc()
-        revid_c = self._dostore(oid, revid=revid_b, data=obj)
+        self._dostore(oid, revid=revid_b, data=obj)
         # Start the undo
         info = self._storage.undoInfo()
         tid = info[1]['id']
...
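The PCounter classes above test the `_p_resolveConflict` hook, which a persistent class can implement to merge concurrent writes instead of raising `ConflictError`. A hedged sketch of the classic counter pattern (this is the standard ZODB protocol, but the class here is illustrative, not the test's PCounter):

    from persistent import Persistent

    class Counter(Persistent):
        def __init__(self):
            self.value = 0

        def inc(self, n=1):
            self.value += n

        def _p_resolveConflict(self, oldState, savedState, newState):
            # States are __dict__ snapshots; replay both concurrent deltas
            # on top of the committed state.
            old, saved, new = (s['value']
                               for s in (oldState, savedState, newState))
            resolved = dict(newState)
            resolved['value'] = saved + (new - old)
            return resolved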
src/ZODB/tests/Corruption.py  View file @ 6e5baffd
...
@@ -23,6 +23,7 @@ from ZODB.utils import load_current
 from .StorageTestBase import StorageTestBase


 class FileStorageCorruptTests(StorageTestBase):

     def setUp(self):
...
src/ZODB/tests/HistoryStorage.py  View file @ 6e5baffd
...
@@ -21,6 +21,7 @@ import sys
 from time import time, sleep
 from ZODB.tests.MinPO import MinPO


 class HistoryStorage(object):
     def checkSimpleHistory(self):
         self._checkHistory((11, 12, 13))
...
@@ -29,7 +30,7 @@ class HistoryStorage(object):
         start = time()
         # Store a couple of revisions of the object
         oid = self._storage.new_oid()
-        self.assertRaises(KeyError,self._storage.history,oid)
+        self.assertRaises(KeyError, self._storage.history, oid)
         revids = [None]
         for data in data:
             if sys.platform == 'win32':
...
src/ZODB/tests/IteratorStorage.py  View file @ 6e5baffd
...
@@ -32,6 +32,7 @@ except ImportError:
     # Py3: zip() already returns an iterable.
     pass


 class IteratorCompare(object):

     def iter_verify(self, txniter, revids, val0):
...
@@ -66,14 +67,14 @@ class IteratorStorage(IteratorCompare):
     def checkUndoZombie(self):
         oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(94))
+        self._dostore(oid, data=MinPO(94))
         # Get the undo information
         info = self._storage.undoInfo()
         tid = info[0]['id']
         # Undo the creation of the object, rendering it a zombie
         t = TransactionMetaData()
         self._storage.tpc_begin(t)
-        oids = self._storage.undo(tid, t)
+        self._storage.undo(tid, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         # Now attempt to iterator over the storage
...
@@ -95,10 +96,10 @@ class IteratorStorage(IteratorCompare):
         # always return x.
         class ext(dict):
             def __reduce__(self):
-                return dict,(tuple(self.items()),)
+                return dict, (tuple(self.items()),)
         ext = ext(foo=1)
         oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(1), extension=ext)
+        self._dostore(oid, data=MinPO(1), extension=ext)
         txn, = self._storage.iterator()
         self.assertEqual(txn.extension, ext)
         try:
...
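The iterator tests rely on `IStorageIteration`: a storage's `iterator()` yields one record per committed transaction, and each transaction record is itself iterable over its data records. A minimal sketch, assuming ZODB 5 (MappingStorage and FileStorage both support this):

    import ZODB
    from ZODB.MappingStorage import MappingStorage

    db = ZODB.DB(MappingStorage())
    with db.transaction() as conn:
        conn.root()['x'] = 1

    for txn in db.storage.iterator():   # one entry per committed transaction
        print(txn.tid, txn.user, txn.description)
        for rec in txn:                 # data records inside the transaction
            print(rec.oid, len(rec.data))
    db.close()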
src/ZODB/tests/MTStorage.py  View file @ 6e5baffd
...
@@ -17,6 +17,7 @@ from ZODB.utils import load_current
 SHORT_DELAY = 0.01


 class TestThread(threading.Thread):
     """Base class for defining threads that run from unittest.
...
@@ -35,7 +36,7 @@ class TestThread(threading.Thread):
     def run(self):
         try:
             self.runtest()
-        except:
+        except:  # noqa: E722 do not use bare 'except'
             self._exc_info = sys.exc_info()

     def join(self, timeout=None):
...
@@ -44,6 +45,7 @@ class TestThread(threading.Thread):
             raise six.reraise(self._exc_info[0], self._exc_info[1],
                               self._exc_info[2])


 class ZODBClientThread(TestThread):

     __super_init = TestThread.__init__
...
@@ -107,6 +109,7 @@ class ZODBClientThread(TestThread):
         raise ConflictError("Exceeded %d attempts to read" % MAXRETRIES)


 class StorageClientThread(TestThread):

     __super_init = TestThread.__init__
...
@@ -159,6 +162,7 @@ class StorageClientThread(TestThread):
             self.pause()
         self.oids[oid] = revid


 class ExtStorageClientThread(StorageClientThread):

     def runtest(self):
...
@@ -211,6 +215,7 @@ class ExtStorageClientThread(StorageClientThread):
         for obj in iter:
             pass


 class MTStorage(object):
     "Test a storage with multiple client threads executing concurrently."
...
src/ZODB/tests/MVCCMappingStorage.py  View file @ 6e5baffd
...
@@ -110,7 +110,7 @@ class MVCCMappingStorage(MappingStorage):
             self._polled_tid = self._ltid = new_tid
         return list(changed_oids)

-    def tpc_finish(self, transaction, func = lambda tid: None):
+    def tpc_finish(self, transaction, func=lambda tid: None):
         self._data_snapshot = None
         with self._main_lock:
             return MappingStorage.tpc_finish(self, transaction, func)
...
src/ZODB/tests/MinPO.py  View file @ 6e5baffd
...
@@ -14,12 +14,13 @@
 """A minimal persistent object to use for tests"""

 from persistent import Persistent


 class MinPO(Persistent):
     def __init__(self, value=None):
         self.value = value

     def __cmp__(self, aMinPO):
-        return cmp(self.value, aMinPO.value)
+        return cmp(self.value, aMinPO.value)  # noqa: F821 undefined name 'cmp'

     def __hash__(self):
         return hash(self.value)
...
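`__cmp__` and the built-in `cmp` are gone in Python 3, which is why the commit only silences the linter (`noqa: F821`) rather than fixing the call. A Python 3-safe equivalent would use rich comparisons; a sketch of that alternative (not what the commit does, and `MinPO3` is a hypothetical name):

    from functools import total_ordering
    from persistent import Persistent

    @total_ordering
    class MinPO3(Persistent):
        def __init__(self, value=None):
            self.value = value

        def __eq__(self, other):
            return self.value == other.value

        def __lt__(self, other):
            return self.value < other.value

        def __hash__(self):
            return hash(self.value)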
src/ZODB/tests/PackableStorage.py  View file @ 6e5baffd
...
@@ -31,7 +31,6 @@ import transaction
 import ZODB.interfaces
 import ZODB.tests.util
 from ZODB.tests.util import time_monotonically_increases
-import zope.testing.setupstack
 from ZODB.utils import load_current
...
@@ -81,6 +80,8 @@ class C(Persistent):
 # serialize the persistent id of the object instead of the object's state.
 # That sets the pickle up for proper sniffing by the referencesf machinery.
 # Fun, huh?


 def dumps(obj):
     def getpersid(obj):
         if hasattr(obj, 'getoid'):
...
@@ -92,6 +93,7 @@ def dumps(obj):
     p.dump(None)
     return s.getvalue()


 def pdumps(obj):
     s = BytesIO()
     p = Pickler(s, _protocol)
...
@@ -245,9 +247,8 @@ class PackableStorage(PackableStorageBase):
             # True if we got beyond this line, False if it raised an
             # exception (one possible Conflict cause):
             # self.root[index].value = MinPO(j)
-        def cmp_by_time(a, b):
-            return cmp((a[1], a[0]), (b[1], b[0]))
-        outcomes.sort(cmp_by_time)
+        outcomes.sort(key=lambda x: (x[1], x[0]))
         counts = [0] * 4
         for outcome in outcomes:
             n = len(outcome)
...
@@ -528,6 +529,7 @@ class PackableStorage(PackableStorageBase):
         eq(pobj.getoid(), oid2)
         eq(pobj.value, 11)


 class PackableStorageWithOptionalGC(PackableStorage):

     def checkPackAllRevisionsNoGC(self):
...
@@ -569,7 +571,6 @@ class PackableStorageWithOptionalGC(PackableStorage):
         self._storage.loadSerial(oid, revid3)


 class PackableUndoStorage(PackableStorageBase):

     def checkPackUnlinkedFromRoot(self):
...
@@ -716,13 +717,15 @@ class PackableUndoStorage(PackableStorageBase):
         self._dostoreNP(oid2, revid=revid22,
                         data=pdumps(obj2), description="2-5")

         # Now pack
-        self.assertEqual(6,len(self._storage.undoLog()))
+        self.assertEqual(6, len(self._storage.undoLog()))
         print('\ninitial undoLog was')
-        for r in self._storage.undoLog(): print(r)
+        for r in self._storage.undoLog():
+            print(r)
         self._storage.pack(packtime, referencesf)
         # The undo log contains only two undoable transaction.
         print('\nafter packing undoLog was')
-        for r in self._storage.undoLog(): print(r)
+        for r in self._storage.undoLog():
+            print(r)
         # what can we assert about that?
...
@@ -774,6 +777,7 @@ class ClientThread(TestThread):
         conn.close()


 class ElapsedTimer(object):
     def __init__(self, start_time):
         self.start_time = start_time
...
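These tests call `storage.pack(packtime, referencesf)` directly; at the database level the same operation is exposed as `DB.pack()`. A minimal sketch, assuming a FileStorage-backed database (file name illustrative):

    import time
    import ZODB
    import ZODB.FileStorage
    from ZODB.serialize import referencesf

    db = ZODB.DB(ZODB.FileStorage.FileStorage('Data.fs'))
    # Database level: pack away revisions older than one day.
    db.pack(days=1)
    # Storage-level equivalent of what the tests do:
    db.storage.pack(time.time() - 86400, referencesf)
    db.close()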
src/ZODB/tests/PersistentStorage.py  View file @ 6e5baffd
...
@@ -15,12 +15,13 @@
 from ZODB.utils import load_current


 class PersistentStorage(object):

     def checkUpdatesPersist(self):
         oids = []

-        def new_oid_wrapper(l=oids, new_oid=self._storage.new_oid):
+        def new_oid_wrapper(l=oids, new_oid=self._storage.new_oid):  # noqa: E741 E501 ambiguous variable name 'l' and line too long
             oid = new_oid()
             l.append(oid)
             return oid
...
src/ZODB/tests/ReadOnlyStorage.py  View file @ 6e5baffd
...
@@ -16,6 +16,7 @@ from ZODB.POSException import ReadOnlyError, Unsupported
 from ZODB.utils import load_current


 class ReadOnlyStorage(object):

     def _create_data(self):
...
src/ZODB/tests/RecoveryStorage.py  View file @ 6e5baffd
...
@@ -41,9 +41,9 @@ class RecoveryStorage(IteratorDeepCompare):
         db = DB(self._storage)
         c = db.open()
         r = c.root()
-        obj = r["obj1"] = MinPO(1)
+        r["obj1"] = MinPO(1)
         transaction.commit()
-        obj = r["obj2"] = MinPO(1)
+        r["obj2"] = MinPO(1)
         transaction.commit()
         self._dst.copyTransactionsFrom(self._storage)
...
@@ -151,7 +151,7 @@ class RecoveryStorage(IteratorDeepCompare):
         tid = info[0]['id']
         t = TransactionMetaData()
         self._storage.tpc_begin(t)
-        oids = self._storage.undo(tid, t)
+        self._storage.undo(tid, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
...
@@ -175,7 +175,7 @@ class RecoveryStorage(IteratorDeepCompare):
         tid = info[0]['id']
         t = TransactionMetaData()
         self._storage.tpc_begin(t)
-        oids = self._storage.undo(tid, t)
+        self._storage.undo(tid, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
...
src/ZODB/tests/RevisionStorage.py  View file @ 6e5baffd
...
@@ -107,7 +107,6 @@ class RevisionStorage(object):
         self.assertEqual(start, revs[11])
         self.assertEqual(end, revs[12])

     # Unsure:  Is it okay to assume everyone testing against RevisionStorage
     # implements undo?
...
@@ -142,6 +141,7 @@ class RevisionStorage(object):
     def checkLoadBeforeConsecutiveTids(self):
         eq = self.assertEqual
         oid = self._storage.new_oid()

         def helper(tid, revid, x):
             data = zodb_pickle(MinPO(x))
             t = TransactionMetaData()
...
@@ -151,13 +151,13 @@ class RevisionStorage(object):
                 # Finish the transaction
                 self._storage.tpc_vote(t)
                 newrevid = self._storage.tpc_finish(t)
-            except:
+            except:  # noqa: E722 do not use bare 'except'
                 self._storage.tpc_abort(t)
                 raise
             return newrevid
         revid1 = helper(1, None, 1)
         revid2 = helper(2, revid1, 2)
-        revid3 = helper(3, revid2, 3)
+        helper(3, revid2, 3)
         data, start_tid, end_tid = self._storage.loadBefore(oid, p64(2))
         eq(zodb_unpickle(data), MinPO(1))
         eq(u64(start_tid), 1)
...
@@ -167,7 +167,7 @@ class RevisionStorage(object):
         eq = self.assertEqual
         oid1 = self._storage.new_oid()
         oid2 = self._storage.new_oid()
-        revid1 = self._dostore(oid1)
+        self._dostore(oid1)
         revid2 = self._dostore(oid2)
         results = self._storage.loadBefore(oid2, revid2)
         eq(results, None)
...
src/ZODB/tests/StorageTestBase.py  View file @ 6e5baffd
...
@@ -31,6 +31,7 @@ import ZODB.tests.util
 ZERO = z64


 def snooze():
     # In Windows, it's possible that two successive time.time() calls return
     # the same value.  Tim guarantees that time never runs backwards.  You
...
@@ -40,6 +41,7 @@ def snooze():
     while now == time.time():
         time.sleep(0.1)


 def _persistent_id(obj):
     oid = getattr(obj, "_p_oid", None)
     if getattr(oid, "__get__", None) is not None:
...
@@ -47,6 +49,7 @@ def _persistent_id(obj):
     else:
         return oid


 def zodb_pickle(obj):
     """Create a pickle in the format expected by ZODB."""
     f = BytesIO()
...
@@ -65,10 +68,12 @@ def zodb_pickle(obj):
     p.dump(state)
     return f.getvalue()


 def persistent_load(pid):
     # helper for zodb_unpickle
     return "ref to %s.%s oid=%s" % (pid[1][0], pid[1][1], u64(pid[0]))


 def zodb_unpickle(data):
     """Unpickle an object stored using the format expected by ZODB."""
     f = BytesIO(data)
...
@@ -101,6 +106,7 @@ def zodb_unpickle(data):
         inst.__setstate__(state)
         return inst


 def import_helper(name):
     __import__(name)
     return sys.modules[name]
...
@@ -124,7 +130,8 @@ class StorageTestBase(ZODB.tests.util.TestCase):
         ZODB.tests.util.TestCase.tearDown(self)

-    def _dostore(self, oid=None, revid=None, data=None, already_pickled=0,
-                 user=None, description=None, extension=None):
+    def _dostore(self, oid=None, revid=None, data=None,
+                 already_pickled=0, user=None, description=None,
+                 extension=None):
         """Do a complete storage transaction.  The defaults are:

          - oid=None, ask the storage for a new oid
...
@@ -152,11 +159,11 @@ class StorageTestBase(ZODB.tests.util.TestCase):
         try:
             self._storage.tpc_begin(t)
             # Store an object
-            r1 = self._storage.store(oid, revid, data, '', t)
+            self._storage.store(oid, revid, data, '', t)
             # Finish the transaction
-            r2 = self._storage.tpc_vote(t)
+            self._storage.tpc_vote(t)
             revid = self._storage.tpc_finish(t)
-        except:
+        except:  # noqa: E722 do not use bare 'except'
             self._storage.tpc_abort(t)
             raise
         return revid
...
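`zodb_pickle`/`zodb_unpickle` above encode the on-disk record format: a data record is two consecutive pickles, class metadata followed by state, with persistent references externalized through a `persistent_id` hook. A simplified sketch of the writing side (the real code lives in ZODB.serialize; the metadata tuple shape here is an assumption):

    from io import BytesIO
    import pickle

    def toy_zodb_pickle(obj):
        f = BytesIO()
        p = pickle.Pickler(f, protocol=3)

        def persistent_id(o):
            # Externalize anything that already carries an oid.
            oid = getattr(o, '_p_oid', None)
            return oid if isinstance(oid, bytes) else None

        p.persistent_id = persistent_id
        p.dump((obj.__class__, None))  # pickle 1: class metadata
        p.dump(obj.__getstate__())     # pickle 2: object state
        return f.getvalue()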
src/ZODB/tests/Synchronization.py  View file @ 6e5baffd
...
@@ -69,6 +69,7 @@ OID = "\000" * 8
 SERIALNO = "\000" * 8
 TID = "\000" * 8


 class SynchronizedStorage(object):

     def verifyNotCommitting(self, callable, *args):
...
src/ZODB/tests/TransactionalUndoStorage.py  View file @ 6e5baffd
...
@@ -21,7 +21,6 @@ from six import PY3
 from persistent import Persistent

 import transaction
-from transaction import Transaction
 from ZODB import POSException
 from ZODB.Connection import TransactionMetaData
...
@@ -37,6 +36,7 @@ from ZODB.tests.StorageTestBase import ZERO
 class C(Persistent):
     pass


 def snooze():
     # In Windows, it's possible that two successive time.time() calls return
     # the same value.  Tim guarantees that time never runs backwards.  You
...
@@ -46,6 +46,7 @@ def snooze():
     while now == time.time():
         time.sleep(0.1)


 def listeq(L1, L2):
     """Return True if L1.sort() == L2.sort()
...
@@ -53,6 +54,7 @@ def listeq(L1, L2):
     """
     return sorted(L1) == sorted(L2)


 class TransactionalUndoStorage(object):

     def _multi_obj_transaction(self, objs):
...
@@ -218,7 +220,6 @@ class TransactionalUndoStorage(object):
     def checkTwoObjectUndoAtOnce(self):
         # Convenience
         eq = self.assertEqual
-        unless = self.assertTrue
         p30, p31, p32, p50, p51, p52 = map(zodb_pickle,
                                            map(MinPO,
                                                (30, 31, 32, 50, 51, 52)))
...
@@ -470,6 +471,7 @@ class TransactionalUndoStorage(object):
         root = cn.root()

         pack_times = []

         def set_pack_time():
             pack_times.append(time.time())
             snooze()
...
@@ -521,7 +523,6 @@ class TransactionalUndoStorage(object):
         cn.close()
         db.close()

     def checkPackAfterUndoManyTimes(self):
         db = DB(self._storage)
         cn = db.open()
...
@@ -664,7 +665,7 @@ class TransactionalUndoStorage(object):
         t = transaction.get()
         t.note(u't1')
         t.setExtendedInfo('k2', 'this is transaction metadata')
-        t.setUser(u'u3',path=u'p3')
+        t.setUser(u'u3', path=u'p3')
         db = DB(self._storage)
         conn = db.open()
         try:
...
@@ -673,9 +674,9 @@ class TransactionalUndoStorage(object):
             root['obj'] = o1
             txn = transaction.get()
             txn.commit()
-            l = self._storage.undoLog()
-            self.assertEqual(len(l), 2)
-            d = l[0]
+            log = self._storage.undoLog()
+            self.assertEqual(len(log), 2)
+            d = log[0]
             self.assertEqual(d['description'], b't1')
             self.assertEqual(d['k2'], 'this is transaction metadata')
             self.assertEqual(d['user_name'], b'p3 u3')
...
@@ -724,7 +725,7 @@ class TransactionalUndoStorage(object):
         # Try a slice that doesn't start at 0.
         oddball = info_func(first=11, last=17)
         self.assertEqual(len(oddball), 17-11)
-        self.assertEqual(oddball, allofem[11 : 11+len(oddball)])
+        self.assertEqual(oddball, allofem[11:11+len(oddball)])
         # And another way to spell the same thing.
         redundant = info_func(first=11, last=-6)
...
@@ -754,10 +755,10 @@ class TransactionalUndoStorage(object):
         for i in range(4):
             with db.transaction() as conn:
                 conn.transaction_manager.get().note(
-                    (str if PY3 else unicode)(i))
+                    (str if PY3 else unicode)(i))  # noqa: F821 undef name
                 conn.root.x.inc()

-        ids = [l['id'] for l in db.undoLog(1, 3)]
+        ids = [log['id'] for log in db.undoLog(1, 3)]
         if reverse:
             ids.reverse()
...
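These hunks exercise transaction metadata (`note`, `setUser`, `setExtendedInfo`) and the `undoLog`/`undo` pair. A minimal sketch of database-level undo, assuming an undo-capable storage such as FileStorage (file name illustrative):

    import transaction
    import ZODB
    import ZODB.FileStorage

    db = ZODB.DB(ZODB.FileStorage.FileStorage('undo-demo.fs'))
    conn = db.open()
    conn.root()['x'] = 1
    t = transaction.get()
    t.note(u'set x')          # becomes the undoLog 'description'
    t.setUser(u'alice')       # becomes part of 'user_name'
    t.commit()

    entry = db.undoLog(0, 1)[0]  # most recent undoable transaction
    db.undo(entry['id'])         # schedules the undo...
    transaction.commit()         # ...which happens at commit time
    conn.close()
    db.close()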
src/ZODB/tests/dangle.py  View file @ 6e5baffd
...
@@ -23,9 +23,11 @@ from ZODB import DB
 from persistent import Persistent


 class P(Persistent):
     pass


 def create_dangling_ref(db):
     rt = db.open().root()
...
@@ -56,11 +58,13 @@ def create_dangling_ref(db):
     transaction.get().note(u"set child on o2")
     transaction.commit()


 def main():
     fs = FileStorage(u"dangle.fs")
     db = DB(fs)
     create_dangling_ref(db)
     db.close()


 if __name__ == "__main__":
     main()
src/ZODB/tests/hexstorage.py  View file @ 6e5baffd
...
@@ -17,10 +17,10 @@ import ZODB.utils
 import zope.interface

 from binascii import hexlify, unhexlify


 @zope.interface.implementer(ZODB.interfaces.IStorageWrapper)
 class HexStorage(object):

     copied_methods = (
         'close', 'getName', 'getSize', 'history', 'isReadOnly',
         'lastTransaction', 'new_oid', 'sortKey',
...
@@ -122,6 +122,7 @@ class HexStorage(object):
     def copyTransactionsFrom(self, other):
         ZODB.blob.copyTransactionsFromTo(other, self)


 class ServerHexStorage(HexStorage):
     """Use on ZEO storage server when Hex is used on client
...
@@ -134,6 +135,7 @@ class ServerHexStorage(HexStorage):
         'iterator', 'storeBlob', 'restoreBlob', 'record_iternext',
         )


 class Transaction(object):

     def __init__(self, store, trans):
...
@@ -149,6 +151,7 @@ class Transaction(object):
     def __getattr__(self, name):
         return getattr(self.__trans, name)


 class ZConfigHex(object):

     _factory = HexStorage
...
@@ -161,6 +164,7 @@ class ZConfigHex(object):
         base = self.config.base.open()
         return self._factory(base)


 class ZConfigServerHex(ZConfigHex):

     _factory = ServerHexStorage
src/ZODB/tests/loggingsupport.py  View file @ 6e5baffd
...
@@ -71,6 +71,7 @@ $Id: loggingsupport.py 28349 2004-11-06 00:10:32Z tim_one $
 import logging


 class Handler(logging.Handler):

     def __init__(self, *names, **kw):
...
src/ZODB/tests/sampledm.py  View file @ 6e5baffd
...
@@ -14,6 +14,7 @@
 """Sample objects for use in tests
 """


 class DataManager(object):
     """Sample data manager
...
@@ -384,6 +385,7 @@ class DataManager(object):
         self.sp += 1
         return Rollback(self)


 class Rollback(object):

     def __init__(self, dm):
...
src/ZODB/tests/speed.py  View file @ 6e5baffd

+from __future__ import print_function
+
+import time
+import string
+import getopt
+import os
+import ZODB.FileStorage
+import ZODB
+import sys
+import transaction
+import persistent
+
 ##############################################################################
 #
 # Copyright (c) 2001, 2002 Zope Foundation and Contributors.
...
@@ -12,7 +21,7 @@ from __future__ import print_function
 # FOR A PARTICULAR PURPOSE
 #
 ##############################################################################
-usage="""Test speed of a ZODB storage
+usage = """Test speed of a ZODB storage

 Options:
...
@@ -36,92 +45,100 @@ Options:
     -M   Output means only
 """

-import sys, os, getopt, string, time
 sys.path.insert(0, os.getcwd())

-import ZODB, ZODB.FileStorage
-import persistent
-import transaction

-class P(persistent.Persistent): pass
+
+class P(persistent.Persistent):
+    pass
+

 def main(args):
     opts, args = getopt.getopt(args, 'zd:n:Ds:LM')
     z = s = None
     data = sys.argv[0]
     nrep = 5
     minimize = 0
     detailed = 1
     for o, v in opts:
         if o == '-n':
             nrep = string.atoi(v)
         elif o == '-d':
             data = v
         elif o == '-s':
             s = v
         elif o == '-z':
             global zlib
             import zlib
             z = compress
         elif o == '-L':
             minimize = 1
         elif o == '-M':
             detailed = 0
         elif o == '-D':
             global debug
             os.environ['STUPID_LOG_FILE'] = ''
             os.environ['STUPID_LOG_SEVERITY'] = '-999'

     if s:
         s = __import__(s, globals(), globals(), ('__doc__',))
         s = s.Storage
     else:
         s = ZODB.FileStorage.FileStorage('zeo_speed.fs', create=1)

     with open(data) as fp:
         data = fp.read()
     db = ZODB.DB(s,
                  # disable cache deactivation
                  cache_size=4000,
                  cache_deactivate_after=6000,)

     results = {1: 0, 10: 0, 100: 0, 1000: 0}
     for j in range(nrep):
         for r in 1, 10, 100, 1000:
             t = time.time()
             jar = db.open()
             transaction.begin()
             rt = jar.root()
             key = 's%s' % r
             if key in rt:
                 p = rt[key]
             else:
                 rt[key] = p = P()
             for i in range(r):
                 if z is not None:
                     d = z(data)
                 else:
                     d = data
                 v = getattr(p, str(i), P())
                 v.d = d
                 setattr(p, str(i), v)
             transaction.commit()
             jar.close()
             t = time.time() - t
             if detailed:
                 sys.stderr.write("%s\t%s\t%.4f\n" % (j, r, t))
                 sys.stdout.flush()
             results[r] = results[r] + t
             rt = d = p = v = None  # release all references
             if minimize:
                 time.sleep(3)
                 jar.cacheMinimize(3)

     if detailed:
         print('-' * 24)
     for r in 1, 10, 100, 1000:
         t = results[r] / nrep
         sys.stderr.write("mean:\t%s\t%.4f\t%.4f (s/o)\n" % (r, t, t / r))

     db.close()


 def compress(s):
     c = zlib.compressobj()
     o = c.compress(s)
     return o + c.flush()


 if __name__ == '__main__':
     main(sys.argv[1:])
src/ZODB/tests/testActivityMonitor.py  View file @ 6e5baffd
...
@@ -102,6 +102,3 @@ class Tests(unittest.TestCase):
 def test_suite():
     return unittest.makeSuite(Tests)

-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
src/ZODB/tests/testBroken.py  View file @ 6e5baffd
...
@@ -25,6 +25,7 @@ else:
     from doctest import DocTestSuite
 from ZODB.tests.util import DB, checker


 def test_integration():
     r"""Test the integration of broken object support with the databse:
...
@@ -90,10 +91,9 @@ def test_integration():
     >>> ZODB.broken.broken_cache.clear()
     """

 def test_suite():
     return unittest.TestSuite((
         DocTestSuite('ZODB.broken', checker=checker),
         DocTestSuite(checker=checker),
         ))

 if __name__ == '__main__':
     unittest.main()
src/ZODB/tests/testCache.py  View file @ 6e5baffd
...
@@ -40,7 +40,7 @@ class CacheTestBase(ZODB.tests.util.TestCase):
         ZODB.tests.util.TestCase.setUp(self)
         store = ZODB.MappingStorage.MappingStorage()
         self.db = ZODB.DB(store,
-                          cache_size = self.CACHE_SIZE)
+                          cache_size=self.CACHE_SIZE)
         self.conns = []

     def tearDown(self):
...
@@ -73,9 +73,10 @@ class CacheTestBase(ZODB.tests.util.TestCase):
             transaction.commit()

 # CantGetRidOfMe is used by checkMinimizeTerminates.
 make_trouble = True


 class CantGetRidOfMe(MinPO):
     def __init__(self, value):
         MinPO.__init__(self, value)
...
@@ -88,6 +89,7 @@ class CantGetRidOfMe(MinPO):
         if make_trouble:
             self.an_attribute


 class DBMethods(CacheTestBase):

     def setUp(self):
...
@@ -194,6 +196,7 @@ class DBMethods(CacheTestBase):
         c = self.conns[0]._cache
         c.klass_items()


 class LRUCacheTests(CacheTestBase):

     def testLRU(self):
...
@@ -205,30 +208,30 @@ class LRUCacheTests(CacheTestBase):
         self.db.setCacheSize(CACHE_SIZE)
         c = self.db.open()
         r = c.root()
-        l = {}
+        l_ = {}
         # the root is the only thing in the cache, because all the
         # other objects are new
         self.assertEqual(len(c._cache), 1)
         # run several transactions
         for t in range(5):
             for i in range(dataset_size):
-                l[(t, i)] = r[i] = MinPO(i)
+                l_[(t, i)] = r[i] = MinPO(i)
             transaction.commit()
             # commit() will register the objects, placing them in the
             # cache.  at the end of commit, the cache will be reduced
             # down to CACHE_SIZE items
-            if len(l) > CACHE_SIZE:
+            if len(l_) > CACHE_SIZE:
                 self.assertEqual(c._cache.ringlen(), CACHE_SIZE)
         for i in range(dataset_size):
             # Check objects added in the first two transactions.
             # They must all be ghostified.
-            self.assertEqual(l[(0, i)]._p_changed, None)
-            self.assertEqual(l[(1, i)]._p_changed, None)
+            self.assertEqual(l_[(0, i)]._p_changed, None)
+            self.assertEqual(l_[(1, i)]._p_changed, None)
             # Check objects added in the last two transactions.
             # They must all still exist in memory, but have
             # had their changes flushed
-            self.assertEqual(l[(3, i)]._p_changed, 0)
-            self.assertEqual(l[(4, i)]._p_changed, 0)
+            self.assertEqual(l_[(3, i)]._p_changed, 0)
+            self.assertEqual(l_[(4, i)]._p_changed, 0)
             # Of the objects added in the middle transaction, most
             # will have been ghostified.  There is one cache slot
             # that may be occupied by either one of those objects or
...
@@ -257,7 +260,7 @@ class LRUCacheTests(CacheTestBase):
         # The cache *usually* contains non-ghosts, so that the
         # size normally exceeds the target size.
-        #self.assertEqual(d['size'], CACHE_SIZE)
+        # self.assertEqual(d['size'], CACHE_SIZE)

     def testDetail(self):
         CACHE_SIZE = 10
...
@@ -288,7 +291,6 @@ class LRUCacheTests(CacheTestBase):
         # This test really needs to be thought through and documented
         # better.
         for klass, count in self.db.cacheDetail():
             if klass.endswith('MinPO'):
                 self.assertEqual(count, CONNS * CACHE_SIZE)
...
@@ -307,13 +309,16 @@ class LRUCacheTests(CacheTestBase):
             if details['state'] is None:  # i.e., it's a ghost
                 self.assertTrue(details['rc'] > 0)


 class StubDataManager(object):
     def setklassstate(self, object):
         pass


 class StubObject(Persistent):
     pass


 class CacheErrors(unittest.TestCase):

     def setUp(self):
...
@@ -449,6 +454,7 @@ class CacheErrors(unittest.TestCase):
         else:
             self.fail("two objects with the same oid should have failed")


 def test_basic_cache_size_estimation():
     """Make sure the basic accounting is correct:
...
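The LRU tests poke at the connection cache through its public knobs. A small sketch of those APIs, assuming ZODB 5 (`ZODB.DB(None)` gives an in-memory MappingStorage):

    import transaction
    import ZODB

    db = ZODB.DB(None)
    db.setCacheSize(400)       # per-connection object cache target
    conn = db.open()
    conn.root()['x'] = 1
    transaction.commit()
    print(db.cacheDetail())    # pairs of (class name, instance count)
    conn.cacheMinimize()       # ghostify everything it can
    conn.close()
    db.close()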
src/ZODB/tests/testConfig.py  View file @ 6e5baffd
...
@@ -126,6 +126,7 @@ def database_xrefs_config():
     False
     """


 def multi_atabases():
     r"""If there are multiple codb sections -> multidatabase
...
@@ -195,6 +196,7 @@ def multi_atabases():
     """


 def test_suite():
     suite = unittest.TestSuite()
     suite.addTest(doctest.DocTestSuite(
...
src/ZODB/tests/testConnection.py  View file @ 6e5baffd
...
@@ -44,7 +44,7 @@ checker = renormalizing.RENormalizing([
     (re.compile("ZODB.POSException.ConflictError"), r"ConflictError"),
     (re.compile("ZODB.POSException.ConnectionStateError"),
      r"ConnectionStateError"),
-    ])
+])


 class ConnectionDotAdd(ZODB.tests.util.TestCase):
...
@@ -131,7 +131,7 @@ class ConnectionDotAdd(ZODB.tests.util.TestCase):
         self.assertTrue(obj._p_jar is self.datamgr)
         # This next assertTrue is covered by an assert in tpc_finish.
-        ##self.assertTrue(not self.datamgr._added)
+        # self.assertTrue(not self.datamgr._added)
         self.assertEqual(self.db.storage._stored, [oid])
         self.assertEqual(self.db.storage._finished, [oid])
...
@@ -176,8 +176,7 @@ class SetstateErrorLoggingTests(ZODB.tests.util.TestCase):
     def setUp(self):
         ZODB.tests.util.TestCase.setUp(self)
         from ZODB.Connection import Connection
-        self.db = db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        self.db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
         self.datamgr = self.db.open()
         self.object = StubObject()
         self.datamgr.add(self.object)
...
@@ -188,7 +187,6 @@ class SetstateErrorLoggingTests(ZODB.tests.util.TestCase):
         self.handler.uninstall()

     def test_closed_connection_wont_setstate(self):
-        oid = self.object._p_oid
         self.object._p_deactivate()
         self.datamgr.close()
         self.assertRaises(
...
@@ -476,6 +474,7 @@ class UserMethodTests(unittest.TestCase):
     -1
     """


 def doctest_transaction_retry_convenience():
     """
     Simple test to verify integration with the transaction retry
...
@@ -506,6 +505,7 @@ def doctest_transaction_retry_convenience():
     0 2
     """


 class InvalidationTests(unittest.TestCase):

     # It's harder to write serious tests, because some of the critical
...
@@ -588,6 +588,7 @@ class InvalidationTests(unittest.TestCase):
         c2.root()['b'] = 1
         s1 = c1._storage
         l1 = s1._lock

         @contextmanager
         def beforeLock1():
             s1._lock = l1
...
@@ -601,6 +602,7 @@ class InvalidationTests(unittest.TestCase):
         finally:
             db.close()


 def doctest_invalidateCache():
     """The invalidateCache method invalidates a connection's cache.
...
@@ -655,6 +657,7 @@ def doctest_invalidateCache():
     >>> db.close()
     """


 def doctest_connection_root_convenience():
     """Connection root attributes can now be used as objects with attributes
...
@@ -692,10 +695,12 @@ def doctest_connection_root_convenience():
     <root: rather_long_name rather_long_name2 rather_long_name4 ...>
     """


 class proper_ghost_initialization_with_empty__p_deactivate_class(Persistent):

     def _p_deactivate(self):
         pass


 def doctest_proper_ghost_initialization_with_empty__p_deactivate():
     """
     See https://bugs.launchpad.net/zodb/+bug/185066
...
@@ -715,6 +720,7 @@ def doctest_proper_ghost_initialization_with_empty__p_deactivate():
     """


 def doctest_readCurrent():
     r"""
     The connection's readCurrent method is called to provide a higher
...
@@ -868,6 +874,7 @@ def doctest_readCurrent():
     """


 def doctest_cache_management_of_subconnections():
     """Make that cache management works for subconnections.
...
@@ -934,6 +941,7 @@ def doctest_cache_management_of_subconnections():
     """


 class C_invalidations_of_new_objects_work_after_savepoint(Persistent):
     def __init__(self):
         self.settings = 1
...
@@ -943,7 +951,8 @@ class C_invalidations_of_new_objects_work_after_savepoint(Persistent):
         Persistent._p_invalidate(self)
         print(self.settings)  # POSKeyError here


-def doctest_abort_of_savepoint_creating_new_objects_w_exotic_invalidate_doesnt_break():
+def doctest_abort_of_savepoint_creating_new_objects_w_exotic_invalidate_doesnt_break():  # noqa: E501 line too long
     r"""
     Before, the following would fail with a POSKeyError, which was
     somewhat surprising, in a very edgy sort of way. :)
...
@@ -969,12 +978,14 @@ def doctest_abort_of_savepoint_creating_new_objects_w_exotic_invalidate_doesnt_b
     """


 class Clp9460655(Persistent):
     def __init__(self, word, id):
         super(Clp9460655, self).__init__()
         self.id = id
         self._word = word


 def doctest_lp9460655():
     r"""
     >>> conn = ZODB.connection(None)
...
@@ -1001,6 +1012,7 @@ def doctest_lp9460655():
     """


 def doctest_lp615758_transaction_abort_Incomplete_cleanup_for_new_objects():
     r"""
...
@@ -1022,12 +1034,14 @@ def doctest_lp615758_transaction_abort_Incomplete_cleanup_for_new_objects():
     >>> c.close()
     """


 class Clp485456_setattr_in_getstate_doesnt_cause_multiple_stores(Persistent):
     def __getstate__(self):
         self.got = 1
         return self.__dict__.copy()


 def doctest_lp485456_setattr_in_setstate_doesnt_cause_multiple_stores():
     r"""
     >>> C = Clp485456_setattr_in_getstate_doesnt_cause_multiple_stores
...
@@ -1096,6 +1110,7 @@ class _PlayPersistent(Persistent):
     def setValueWithSize(self, size=0):
         self.value = size * ' '
     __init__ = setValueWithSize


 class EstimatedSizeTests(ZODB.tests.util.TestCase):
     """check that size estimations are handled correctly."""
...
@@ -1183,7 +1198,6 @@ class EstimatedSizeTests(ZODB.tests.util.TestCase):
         )
         self.assertEqual(db.getCacheSizeBytes(), 0x1 << 33)

     def test_cache_garbage_collection(self):
         db = self.db
         # activate size based cache garbage collection
...
@@ -1203,7 +1217,7 @@ class EstimatedSizeTests(ZODB.tests.util.TestCase):
         db = self.db
         # activate size based cache garbage collection
         db.setCacheSizeBytes(1000)
-        obj, conn, cache = self.obj, self.conn, self.conn._cache
+        obj, cache = self.obj, self.conn._cache
         # verify the change worked as expected
         self.assertEqual(cache.cache_size_bytes, 1000)
         # verify our entrance assumption is fulfilled
...
@@ -1222,17 +1236,21 @@ class EstimatedSizeTests(ZODB.tests.util.TestCase):
 # ---- stubs


 class StubObject(Persistent):
     pass


 class ErrorOnGetstateException(Exception):
     pass


 class ErrorOnGetstateObject(Persistent):

     def __getstate__(self):
         raise ErrorOnGetstateException


 class ModifyOnGetStateObject(Persistent):

     def __init__(self, p):
...
@@ -1346,7 +1364,7 @@ class TestConnection(unittest.TestCase):
         db = ZODB.DB(None)
         conn = db.open()
         data = []
-        conn._storage.afterCompletion = lambda : data.append(None)
+        conn._storage.afterCompletion = lambda: data.append(None)
         conn.transaction_manager.commit()
         self.assertEqual(len(data), 1)
         conn.close()
...
@@ -1359,9 +1377,11 @@ class TestConnection(unittest.TestCase):
         storage = MVCCMappingStorage()
         new_instance = storage.new_instance

         def new_instance2():
             inst = new_instance()
             sync = inst.sync

             def sync2(*args):
                 sync()
                 syncs.append(1)
...
@@ -1404,6 +1424,7 @@ class TestConnection(unittest.TestCase):
         db.close()


 class StubDatabase(object):

     def __init__(self):
...
@@ -1418,7 +1439,8 @@ class StubDatabase(object):
     def invalidate(self, transaction, dict_with_oid_keys, connection):
         pass

 large_record_size = 1 << 30


 def test_suite():
     s = unittest.makeSuite(ConnectionDotAdd)
...
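`doctest_readCurrent` above covers `Connection.readCurrent`, which lets a transaction declare a read dependency on an object it consults but does not modify, so a concurrent change to that object forces a conflict at commit. A sketch (object names illustrative):

    import transaction
    import ZODB
    from persistent.mapping import PersistentMapping

    db = ZODB.DB(None)
    with db.transaction() as conn:
        conn.root()['config'] = PersistentMapping(limit=10)
        conn.root()['used'] = PersistentMapping(n=0)

    conn = db.open()
    config = conn.root()['config']
    used = conn.root()['used']
    conn.readCurrent(config)          # we rely on config without writing it;
    if used['n'] < config['limit']:   # a concurrent change to config now
        used['n'] += 1                # makes this commit raise a conflict
    transaction.commit()
    conn.close()
    db.close()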
src/ZODB/tests/testConnectionSavepoint.py  View file @ 6e5baffd
...
@@ -16,7 +16,6 @@ import persistent.mapping
 import re
 import transaction
 import unittest
 import ZODB.tests.util
 from zope.testing import renormalizing

 checker = renormalizing.RENormalizing([
...
@@ -25,7 +24,8 @@ checker = renormalizing.RENormalizing([
     # Python 3 adds module name to exceptions.
     (re.compile("ZODB.POSException.ConnectionStateError"),
      r"ConnectionStateError"),
-    ])
+])


 def testAddingThenModifyThenAbort():
     """\
...
@@ -53,6 +53,7 @@ savepoint.
     >>> transaction.abort()
     """


 def testModifyThenSavePointThenModifySomeMoreThenCommit():
     """\
     We got conflict errors when we committed after we modified an object
...
@@ -75,6 +76,7 @@ savepoint storage and *then* to commit the savepoint storage.
     >>> transaction.commit()
     """


 def testCantCloseConnectionWithActiveSavepoint():
     """
     >>> import ZODB.tests.util
...
@@ -91,6 +93,7 @@ def testCantCloseConnectionWithActiveSavepoint():
     >>> db.close()
     """


 def testSavepointDoesCacheGC():
     """\
     Although the interface doesn't guarantee this internal detail, making a
...
@@ -127,8 +130,8 @@ then, + 1 for the root object:
     True

 Making a savepoint at this time used to leave the cache holding the same
-number of objects. Make sure the cache shrinks now instead. (Implementations
-that use weak references, such as PyPy, may need a garbage collection.)
+number of objects.  Make sure the cache shrinks now instead. (Implementations
+that use weak references, such as PyPy, may need a garbage collection.)

     >>> dummy = transaction.savepoint()
     >>> _ = gc.collect()
...
@@ -149,6 +152,7 @@ Verify all the values are as expected:
     >>> db.close()
     """


 def testIsReadonly():
     """\
     The connection isReadonly method relies on the _storage to have an isReadOnly.
...
@@ -164,12 +168,14 @@ We simply rely on the underlying storage method.
     False
     """


 class SelfActivatingObject(persistent.Persistent):

     def _p_invalidate(self):
         super(SelfActivatingObject, self)._p_invalidate()
         self._p_activate()


 def testInvalidateAfterRollback():
     """\
     The rollback used to invalidate objects before resetting the TmpStore.
...
@@ -196,6 +202,7 @@ the wrong state.
 def tearDown(test):
     transaction.abort()


 def test_suite():
     return unittest.TestSuite((
         doctest.DocFileSuite(
...
@@ -204,5 +211,6 @@ def test_suite():
         doctest.DocTestSuite(tearDown=tearDown, checker=checker),
         ))


 if __name__ == '__main__':
     unittest.main(defaultTest='test_suite')
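These tests revolve around savepoints: checkpoints inside a single transaction that can be rolled back independently of the transaction as a whole. A minimal sketch:

    import transaction
    import ZODB

    db = ZODB.DB(None)
    conn = db.open()
    root = conn.root()
    root['a'] = 1
    sp = transaction.savepoint()  # checkpoint within the transaction
    root['a'] = 2
    sp.rollback()                 # back to the checkpoint; root['a'] == 1
    transaction.commit()
    conn.close()
    db.close()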
src/ZODB/tests/testDB.py  View file @ 6e5baffd
...
@@ -15,9 +15,7 @@ from six import PY2
 from ZODB.tests.MinPO import MinPO
 import doctest
-import os
 import re
-import sys
 import time
 import transaction
 import unittest
...
@@ -31,13 +29,14 @@ checker = renormalizing.RENormalizing([
      r"\1"),
     # Python 3 adds module name to exceptions.
     (re.compile("ZODB.POSException.ReadConflictError"), r"ReadConflictError"),
-    ])
+])


 # Return total number of connections across all pools in a db._pools.
 def nconn(pools):
     return sum([len(pool.all) for pool in pools.values()])


 class DBTests(ZODB.tests.util.TestCase):

     def setUp(self):
...
@@ -99,16 +98,16 @@ class DBTests(ZODB.tests.util.TestCase):
             self.assertEqual(h[name], expect)

         if PY2:
-            expect = unicode if text else str
+            expect = unicode if text else str  # noqa: F821 undef name
             for name in 'description', 'user_name':
                 self.assertTrue(isinstance(h[name], expect))

         check(db.storage.history(z64, 3), False)
-        check(db.storage.undoLog(0, 3) , False)
-        check(db.storage.undoInfo(0, 3) , False)
+        check(db.storage.undoLog(0, 3), False)
+        check(db.storage.undoInfo(0, 3), False)
         check(db.history(z64, 3), True)
-        check(db.undoLog(0, 3) , True)
-        check(db.undoInfo(0, 3) , True)
+        check(db.undoLog(0, 3), True)
+        check(db.undoInfo(0, 3), True)


 class TransactionalUndoTests(unittest.TestCase):
...
@@ -266,6 +265,7 @@ def test_invalidateCache():
     >>> db.close()
     """


 def connectionDebugInfo():
     r"""DB.connectionDebugInfo provides information about connections.
...
@@ -310,11 +310,13 @@ def connectionDebugInfo():
     """


 def passing_a_file_name_to_DB():
     """You can pass a file-storage file name to DB.

     (Also note that we can access DB in ZODB.)

     >>> import os
     >>> db = ZODB.DB('data.fs')
     >>> db.storage # doctest: +ELLIPSIS
     <ZODB.FileStorage.FileStorage.FileStorage object at ...
...
@@ -324,6 +326,7 @@ def passing_a_file_name_to_DB():
     >>> db.close()
     """


 def passing_None_to_DB():
     """You can pass None DB to get a MappingStorage.
...
@@ -335,6 +338,7 @@ def passing_None_to_DB():
     >>> db.close()
     """


 def open_convenience():
     """Often, we just want to open a single connection.
...
@@ -372,6 +376,7 @@ def open_convenience():
     """


 def db_with_transaction():
     """Using databases with with
...
@@ -405,7 +410,7 @@ Let's try again, but this time, we'll have an exception:
     >>> with db.transaction() as conn2:
     ...     conn2.root()['y'] = 2
-    ...     XXX #doctest: +IGNORE_EXCEPTION_DETAIL
+    ...     XXX  # noqa: F821 undefined name
     Traceback (most recent call last):
     ...
     NameError: name 'XXX' is not defined
...
@@ -429,6 +434,7 @@ Let's try again, but this time, we'll have an exception:
     >>> db.close()
     """


 def connection_allows_empty_version_for_idiots():
     r"""
     >>> db = ZODB.DB('t.fs')
...
@@ -440,6 +446,7 @@ def connection_allows_empty_version_for_idiots():
     >>> db.close()
     """


 def warn_when_data_records_are_big():
     """
     When data records are large, a warning is issued to try to prevent new
...
@@ -488,6 +495,7 @@ We can also specify it using a configuration option:
     >>> db.close()
     """  # '


 def minimally_test_connection_timeout():
     """There's a mechanism to discard old connections.
...
@@ -508,6 +516,7 @@ def minimally_test_connection_timeout():
     """


 def cleanup_on_close():
     """Verify that various references are cleared on close
...
@@ -533,10 +542,11 @@ def cleanup_on_close():
     []
     """


 def test_suite():
     s = unittest.defaultTestLoader.loadTestsFromName(__name__)
     s.addTest(doctest.DocTestSuite(
         setUp=ZODB.tests.util.setUp, tearDown=ZODB.tests.util.tearDown,
-        checker=checker))
+        checker=checker,
+        optionflags=doctest.IGNORE_EXCEPTION_DETAIL))
     return s
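`db_with_transaction` above demonstrates the `DB.transaction()` context manager: commit on normal exit, abort on exception. A sketch:

    import ZODB

    db = ZODB.DB(None)
    with db.transaction() as conn:   # connection with its own tx manager
        conn.root()['x'] = 1         # committed automatically on clean exit

    try:
        with db.transaction() as conn:
            conn.root()['x'] = 2
            raise RuntimeError('boom')  # forces an abort; x stays 1
    except RuntimeError:
        pass
    db.close()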
src/ZODB/tests/testDemoStorage.py  View file @ 6e5baffd
...
@@ -22,7 +22,7 @@ from ZODB.tests import (
     RevisionStorage,
     StorageTestBase,
     Synchronization,
-    )
+)

 import os
 if os.environ.get('USE_ZOPE_TESTING_DOCTEST'):
...
@@ -30,7 +30,6 @@ if os.environ.get('USE_ZOPE_TESTING_DOCTEST'):
 else:
     import doctest
 import random
 import re
 import transaction
 import unittest
 import ZODB.DemoStorage
...
@@ -40,7 +39,6 @@ import ZODB.utils
 from ZODB.utils import load_current
 from zope.testing import renormalizing


 class DemoStorageTests(
         StorageTestBase.StorageTestBase,
...
@@ -53,7 +51,7 @@ class DemoStorageTests(
         PackableStorage.PackableStorage,
         RevisionStorage.RevisionStorage,
         Synchronization.SynchronizedStorage,
-        ):
+):

     def setUp(self):
         StorageTestBase.StorageTestBase.setUp(self)
...
@@ -67,7 +65,7 @@ class DemoStorageTests(
     def checkLoadDelegation(self):
         # Minimal test of loadEX w/o version -- ironically
-        db = DB(self._storage) # creates object 0. :)
+        DB(self._storage)  # creates object 0. :)
         s2 = ZODB.DemoStorage.DemoStorage(base=self._storage)
         self.assertEqual(load_current(s2, ZODB.utils.z64),
                          load_current(self._storage, ZODB.utils.z64))
...
@@ -97,6 +95,7 @@ class DemoStorageTests(
         self._storage = self._storage.push()
         self._checkHistory(base_only())
         self._storage = self._storage.pop()

         def base_and_changes():
             yield 11
             yield 12
...
@@ -106,6 +105,7 @@ class DemoStorageTests(
         self._checkHistory(base_and_changes())
         self._storage = self._storage.pop()


 class DemoStorageHexTests(DemoStorageTests):

     def setUp(self):
...
@@ -113,6 +113,7 @@ class DemoStorageHexTests(DemoStorageTests):
         self._storage = ZODB.tests.hexstorage.HexStorage(
             ZODB.DemoStorage.DemoStorage())


 class DemoStorageWrappedBase(DemoStorageTests):

     def setUp(self):
...
@@ -134,18 +135,21 @@ class DemoStorageWrappedBase(DemoStorageTests):
         pass  # we never do gc
     checkPackAllRevisions = checkPackWithMultiDatabaseReferences


 class DemoStorageWrappedAroundMappingStorage(DemoStorageWrappedBase):

     def _makeBaseStorage(self):
         from ZODB.MappingStorage import MappingStorage
         return MappingStorage()


 class DemoStorageWrappedAroundFileStorage(DemoStorageWrappedBase):

     def _makeBaseStorage(self):
         from ZODB.FileStorage import FileStorage
         return FileStorage('FileStorageTests.fs')


 class DemoStorageWrappedAroundHexMappingStorage(DemoStorageWrappedBase):

     def _makeBaseStorage(self):
...
@@ -157,6 +161,7 @@ def setUp(test):
     random.seed(0)
     ZODB.tests.util.setUp(test)


 def testSomeDelegation():
     r"""
     >>> import six
...
@@ -194,11 +199,9 @@ def testSomeDelegation():
     >>> storage.tpc_begin(1, 2, 3)
     begin 2 3
     >>> storage.tpc_abort(1)
-    >>>
     """


 def blob_pos_key_error_with_non_blob_base():
     """
     >>> storage = ZODB.DemoStorage.DemoStorage()
...
@@ -214,6 +217,7 @@ def blob_pos_key_error_with_non_blob_base():
     """


 def load_before_base_storage_current():
     """
     Here we'll exercise that DemoStorage's loadBefore method works
...
@@ -221,7 +225,6 @@ def load_before_base_storage_current():
     base storage.

     >>> import time
     >>> import transaction
     >>> import ZODB.DB
     >>> import ZODB.DemoStorage
     >>> import ZODB.MappingStorage
...
@@ -264,6 +267,7 @@ def load_before_base_storage_current():
     >>> base.close()
     """


 def test_suite():
     suite = unittest.TestSuite((
         doctest.DocTestSuite(
...
src/ZODB/tests/testFileStorage.py
...
...
@@ -11,10 +11,12 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import doctest
import os
if os.environ.get('USE_ZOPE_TESTING_DOCTEST'):
    from zope.testing import doctest
else:
    import doctest
import sys
import unittest

import transaction
...
...
@@ -38,6 +40,7 @@ from ZODB._compat import dump, dumps, _protocol
from . import util


class FileStorageTests(StorageTestBase.StorageTestBase,
                       BasicStorage.BasicStorage,
...
...
@@ -54,7 +57,7 @@ class FileStorageTests(
                       PersistentStorage.PersistentStorage,
                       MTStorage.MTStorage,
                       ReadOnlyStorage.ReadOnlyStorage,
                       ):

    use_extension_bytes = True
...
...
@@ -196,9 +199,9 @@ class FileStorageTests(
        giant_oid = b'\xee' * 8
        # Store an object.
        # oid, serial, data, version, transaction
        self._storage.store(giant_oid, b'\0' * 8, b'data', b'', t)
        # Finish the transaction.
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
        # Before ZODB 3.2.6, this failed, with ._oid == z64.
        self.assertEqual(self._storage._oid, giant_oid)
...
...
@@ -213,9 +216,9 @@ class FileStorageTests(
        giant_oid = b'\xee' * 8
        # Store an object.
        # oid, serial, data, version, prev_txn, transaction
        self._storage.restore(giant_oid, b'\0' * 8, b'data', b'', None, t)
        # Finish the transaction.
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
        # Before ZODB 3.2.6, this failed, with ._oid == z64.
        self.assertEqual(self._storage._oid, giant_oid)
...
...
@@ -344,9 +347,10 @@ class FileStorageTests(
        head = stor.tpc_finish(t)
        self.assertEqual(head, stor.lastTransaction())

        v = list(stor.iterator(start=head, stop=head))
        self.assertEqual(len(v), 1)
        # FileStorage.TransactionRecord or hexstorage.Transaction
        trec = v[0]
        self.assertEqual(trec.tid, head)
        self.assertEqual(trec.user, b'')
        self.assertEqual(trec.description, description.encode('utf-8'))
...
...
@@ -359,7 +363,7 @@ class FileStorageHexTests(FileStorageTests):

    def open(self, **kwargs):
        self._storage = ZODB.tests.hexstorage.HexStorage(
            ZODB.FileStorage.FileStorage('FileStorageTests.fs', **kwargs))


class FileStorageTestsWithBlobsEnabled(FileStorageTests):
...
...
@@ -384,7 +388,7 @@ class FileStorageHexTestsWithBlobsEnabled(FileStorageTests):

class FileStorageRecoveryTest(StorageTestBase.StorageTestBase,
                              RecoveryStorage.RecoveryStorage,
                              ):

    def setUp(self):
        StorageTestBase.StorageTestBase.setUp(self)
...
...
@@ -398,6 +402,7 @@ class FileStorageRecoveryTest(
    def new_dest(self):
        return ZODB.FileStorage.FileStorage('Dest.fs')


class FileStorageHexRecoveryTest(FileStorageRecoveryTest):

    def setUp(self):
...
...
@@ -454,6 +459,7 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase):
        module.Broken = Broken
        oids = [[self._storage.new_oid(), None] for i in range(3)]

        def store(i, data):
            oid, revid = oids[i]
            self._storage.store(oid, revid, data, "", t)
...
...
@@ -495,6 +501,8 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase):


# Raise an exception if the tids in FileStorage fs aren't
# strictly increasing.
def checkIncreasingTids(fs):
    lasttid = b'\0' * 8
    for txn in fs.iterator():
...
...
@@ -503,6 +511,8 @@ def checkIncreasingTids(fs):
        lasttid = txn.tid


# Return a TimeStamp object 'minutes' minutes in the future.
def timestamp(minutes):
    import time
    from persistent.TimeStamp import TimeStamp
...
...
@@ -510,6 +520,7 @@ def timestamp(minutes):
    t = time.time() + 60 * minutes
    return TimeStamp(*time.gmtime(t)[:5] + (t % 60,))


def testTimeTravelOnOpen():
    """
    >>> from ZODB.FileStorage import FileStorage
...
...
@@ -586,6 +597,7 @@ def testTimeTravelOnOpen():
    >>> handler.uninstall()
    """


def lastInvalidations():
    """
...
...
@@ -636,6 +648,7 @@ Of course, calling lastInvalidations on an empty storage returns no data:
    >>> fs.close()
    """


def deal_with_finish_failures():
    r"""
...
...
@@ -690,6 +703,7 @@ def deal_with_finish_failures():
    >>> db.close()
    """


def pack_with_open_blob_files():
    """
    Make sure packing works while there are open blob files.
...
...
@@ -726,6 +740,7 @@ def pack_with_open_blob_files():
    >>> db.close()
    """


def readonly_open_nonexistent_file():
    """
    Make sure error is reported when non-existent file is tried to be opened
...
...
@@ -739,6 +754,7 @@ def readonly_open_nonexistent_file():
    error: ... No such file or directory: 'nonexistent.fs'
    """


def test_suite():
    suite = unittest.TestSuite()
    for klass in [
...
...
@@ -770,10 +786,7 @@ def test_suite():
        test_packing=True,
        ))
    suite.addTest(PackableStorage.IExternalGC_suite(
        lambda: ZODB.FileStorage.FileStorage(
            'data.fs', blob_dir='blobs', pack_gc=False)))
    suite.layer = util.MininalTestLayer('testFileStorage')
    return suite


if __name__ == '__main__':
    unittest.main()
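The FileStorage and recovery suites above are assembled from reusable mixin classes in ZODB.tests, and the same composition extends to any new storage implementation. A minimal sketch of that pattern, with MyStorage standing in as a hypothetical storage class under test:

import unittest

from ZODB.tests import BasicStorage, StorageTestBase


class MyStorageTests(StorageTestBase.StorageTestBase,
                     BasicStorage.BasicStorage):
    # StorageTestBase provides setUp/tearDown plumbing and store helpers;
    # BasicStorage contributes the generic check* test methods.

    def setUp(self):
        StorageTestBase.StorageTestBase.setUp(self)
        self._storage = MyStorage()  # hypothetical storage under test


def test_suite():
    # The mixins name their tests check*, hence the 'check' prefix here.
    return unittest.makeSuite(MyStorageTests, 'check')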
src/ZODB/tests/testMVCCMappingStorage.py
...
...
@@ -31,7 +31,8 @@ from ZODB.tests import (
    RevisionStorage,
    StorageTestBase,
    Synchronization,
)


class MVCCTests(object):
...
...
@@ -146,7 +147,7 @@ class MVCCMappingStorageTests(
        RevisionStorage.RevisionStorage,
        Synchronization.SynchronizedStorage,
        MVCCTests,
        ):

    def setUp(self):
        self._storage = MVCCMappingStorage()
...
...
@@ -159,9 +160,6 @@ class MVCCMappingStorageTests(
    checkUndoZombie = checkLoadBeforeUndo

    def checkTransactionIdIncreases(self):
        t = TransactionMetaData()
        self._storage.tpc_begin(t)
        self._storage.tpc_vote(t)
...
...
@@ -178,10 +176,12 @@ class MVCCMappingStorageTests(
        self._storage.tpc_begin(t)
        self.assertEqual(self._storage._tid, b'zzzzzzzz')


def create_blob_storage(name, blob_dir):
    s = MVCCMappingStorage(name)
    return ZODB.blob.BlobStorage(blob_dir, s)


def test_suite():
    suite = unittest.makeSuite(MVCCMappingStorageTests, 'check')
    # Note: test_packing doesn't work because even though MVCCMappingStorage
...
...
@@ -193,8 +193,3 @@ def test_suite():
        test_undo=False,
        ))
    return suite


if __name__ == "__main__":
    loader = unittest.TestLoader()
    loader.testMethodPrefix = "check"
    unittest.main(testLoader=loader)
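The `__main__` blocks in these suites all reconfigure the loader because the storage tests name their methods check* rather than test*. A small self-contained illustration of that standard unittest feature:

import unittest


class Checks(unittest.TestCase):
    def checkSomething(self):  # found only with a custom prefix
        self.assertTrue(True)


if __name__ == "__main__":
    loader = unittest.TestLoader()
    loader.testMethodPrefix = "check"  # collect check* instead of test*
    unittest.main(testLoader=loader)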
src/ZODB/tests/testMappingStorage.py
...
...
@@ -26,7 +26,8 @@ from ZODB.tests import (
    RevisionStorage,
    StorageTestBase,
    Synchronization,
)


class MappingStorageTests(StorageTestBase.StorageTestBase,
...
...
@@ -39,7 +40,7 @@ class MappingStorageTests(
        PackableStorage.PackableStorageWithOptionalGC,
        RevisionStorage.RevisionStorage,
        Synchronization.SynchronizedStorage,
        ):

    def setUp(self):
        StorageTestBase.StorageTestBase.setUp(self, )
...
...
@@ -55,6 +56,7 @@ class MappingStorageTests(
        pass  # we don't support undo yet
    checkUndoZombie = checkLoadBeforeUndo


class MappingStorageHexTests(MappingStorageTests):

    def setUp(self):
...
...
@@ -62,11 +64,13 @@ class MappingStorageHexTests(MappingStorageTests):
        self._storage = ZODB.tests.hexstorage.HexStorage(
            ZODB.MappingStorage.MappingStorage())


MockTransaction = namedtuple(
    'transaction', ['user', 'description', 'extension'])


class MappingStorageTransactionRecordTests(unittest.TestCase):

    def setUp(self):
...
...
@@ -86,14 +90,11 @@ class MappingStorageTransactionRecordTests(unittest.TestCase):
            self._transaction_record._extension)


def test_suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(MappingStorageTests, 'check'))
    suite.addTest(unittest.makeSuite(MappingStorageHexTests, 'check'))
    suite.addTest(
        unittest.makeSuite(MappingStorageTransactionRecordTests, 'check'))
    return suite


if __name__ == "__main__":
    loader = unittest.TestLoader()
    loader.testMethodPrefix = "check"
    unittest.main(testLoader=loader)
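The MockTransaction namedtuple above lets the TransactionRecord tests supply transaction metadata without constructing a real transaction. The idea, in isolation: anything exposing .user, .description and .extension can stand in where a transaction's metadata is read.

from collections import namedtuple

MockTransaction = namedtuple(
    'transaction', ['user', 'description', 'extension'])

txn = MockTransaction(user=b'user', description=b'desc', extension={})
print(txn.user, txn.description, txn.extension)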
src/ZODB/tests/testPersistentList.py
...
...
@@ -23,6 +23,7 @@ l0 = []
l1 = [0]
l2 = [0, 1]


class TestPList(unittest.TestCase):

    def checkTheWorld(self):
        # Test constructors
...
...
@@ -37,11 +38,14 @@ class TestPList(unittest.TestCase):
        uu2 = PersistentList(u2)

        v = PersistentList(tuple(u))

        class OtherList(object):
            def __init__(self, initlist):
                self.__data = initlist

            def __len__(self):
                return len(self.__data)

            def __getitem__(self, i):
                return self.__data[i]

        v0 = PersistentList(OtherList(u0))
...
...
@@ -59,12 +63,14 @@ class TestPList(unittest.TestCase):
        # Py3: No cmp() or __cmp__ anymore.
        if PY2:
            def mycmp(a, b):
                r = cmp(a, b)  # noqa: F821 undefined name 'cmp'
                if r < 0:
                    return -1
                if r > 0:
                    return 1
                return r

            all = [l0, l1, l2, u, u0, u1, u2, v, v0, vv, uu, uu0, uu1, uu2]
            for a in all:
                for b in all:
                    eq(mycmp(a, b), mycmp(len(a), len(b)),
...
...
@@ -142,9 +148,9 @@ class TestPList(unittest.TestCase):
        # Test __add__, __radd__, __mul__ and __rmul__
        # self.assertTrue(u1 + [] == [] + u1 == u1, "u1 + [] == [] + u1 == u1")
        self.assertTrue(u1 + [1] == u2, "u1 + [1] == u2")
        # self.assertTrue([-1] + u1 == [-1, 0], "[-1] + u1 == [-1, 0]")
        self.assertTrue(u2 == u2*1 == 1*u2, "u2 == u2*1 == 1*u2")
        self.assertTrue(u2+u2 == u2*2 == 2*u2, "u2+u2 == u2*2 == 2*u2")
        self.assertTrue(u2+u2+u2 == u2*3 == 3*u2, "u2+u2+u2 == u2*3 == 3*u2")
...
...
@@ -181,7 +187,6 @@ class TestPList(unittest.TestCase):
        eq(u.count(1), 3, "u.count(1) == 3")
        eq(u.count(2), 0, "u.count(2) == 0")

        # Test index
        eq(u2.index(0), 0, "u2.index(0) == 0")
...
...
@@ -218,10 +223,6 @@ class TestPList(unittest.TestCase):
        from ZODB.PersistentList import PersistentList as oldPath
        self.assertTrue(oldPath is PersistentList)


def test_suite():
    return unittest.makeSuite(TestPList, 'check')


if __name__ == "__main__":
    loader = unittest.TestLoader()
    loader.testMethodPrefix = "check"
    unittest.main(testLoader=loader)
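checkTheWorld exercises PersistentList's full list protocol; the point of the class is that mutating it marks the object changed so ZODB persists the mutation. A short sketch of typical use against an in-memory database:

import transaction
import ZODB
from persistent.list import PersistentList

db = ZODB.DB(None)  # in-memory MappingStorage
conn = db.open()
conn.root()['items'] = PersistentList([0, 1])
conn.root()['items'].append(2)  # mutation sets _p_changed for the commit
transaction.commit()
db.close()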
src/ZODB/tests/testPersistentMapping.py
...
...
@@ -34,6 +34,7 @@ from six import PY2
pickle = ('((U\x0bPersistenceq\x01U\x11PersistentMappingtq\x02Nt.}q\x03U\n'
          '_containerq\x04}q\x05U\x07versionq\x06U\x03oldq\x07ss.\n')


class PMTests(unittest.TestCase):

    def checkOldStyleRoot(self):
...
...
@@ -41,7 +42,7 @@ class PMTests(unittest.TestCase):
        # is, but the global `pickle` references it explicitly. So just
        # bail if Persistence isn't available.
        try:
            import Persistence  # noqa: F401 'Persistence' imported but unused
        except ImportError:
            return
        # insert the pickle in place of the root
...
...
@@ -129,6 +130,7 @@ class PMTests(unittest.TestCase):
            keylist.append(key)
        check(keylist)


def find_global(modulename, classname):
    """Helper for this test suite to get special PersistentMapping"""
...
...
@@ -142,8 +144,6 @@ def find_global(modulename, classname):
    mod = sys.modules[modulename]
    return getattr(mod, classname)


def test_suite():
    return unittest.makeSuite(PMTests, 'check')


if __name__ == "__main__":
    unittest.main()
src/ZODB/tests/testPersistentWeakref.py
...
...
@@ -16,7 +16,6 @@
__docformat__ = "reStructuredText"


def test_weakrefs_functional():
    """Persistent weak references
...
...
@@ -29,7 +28,7 @@ def test_weakrefs_functional():
    >>> import transaction
    >>> from persistent.wref import WeakRef
    >>> import ZODB.tests.MinPO
    >>> import ZODB.tests.util
    >>> ob = ZODB.tests.MinPO.MinPO()
    >>> ref = WeakRef(ob)
...
...
@@ -256,6 +255,7 @@ def test_PersistentWeakKeyDictionary():
    """


def test_PersistentWeakKeyDictionary_get():
    """
    >>> import ZODB.tests.util
...
...
@@ -270,6 +270,7 @@ def test_PersistentWeakKeyDictionary_get():
    12
    """


def test_suite():
    from doctest import DocTestSuite
    return DocTestSuite()
src/ZODB/tests/testRecover.py
...
...
@@ -33,6 +33,7 @@ except ImportError:
    # Py3
    import io as StringIO


class RecoverTest(ZODB.tests.util.TestCase):

    path = None
...
...
@@ -101,8 +102,9 @@ class RecoverTest(ZODB.tests.util.TestCase):
        try:
            sys.stdout = faux_stdout
            try:
                ZODB.fsrecover.recover(
                    self.path, self.dest,
                    verbose=0, partial=True, force=False, pack=1)
            except SystemExit:
                raise RuntimeError("recover tried to exit")
        finally:
...
...
src/ZODB/tests/testSerialize.py
...
...
@@ -18,15 +18,16 @@ import unittest
from persistent import Persistent
from persistent.wref import WeakRef
import zope.testing.setupstack
import ZODB.tests.util
from ZODB import serialize
from ZODB._compat import Pickler, PersistentUnpickler, BytesIO, _protocol
from ZODB._compat import IS_JYTHON


class PersistentObject(Persistent):
    pass


class ClassWithNewargs(int):
    def __new__(cls, value):
        return int.__new__(cls, value)
...
...
@@ -34,10 +35,12 @@ class ClassWithNewargs(int):
    def __getnewargs__(self):
        return int(self),


class ClassWithoutNewargs(object):
    def __init__(self, value):
        self.value = value


def make_pickle(ob):
    sio = BytesIO()
    p = Pickler(sio, _protocol)
...
...
@@ -48,6 +51,7 @@ def make_pickle(ob):
def _factory(conn, module_name, name):
    return globals()[name]


class SerializerTestCase(unittest.TestCase):

    # old format: (module, name), None
...
...
@@ -104,6 +108,7 @@ class SerializerTestCase(unittest.TestCase):
        class OldStyle(object):
            bar = "bar"

            def __getattr__(self, name):
                if name == "error":
                    raise ValueError("whee!")
...
...
@@ -112,6 +117,7 @@ class SerializerTestCase(unittest.TestCase):
        class NewStyle(object):
            bar = "bar"

            def _raise(self):
                raise ValueError("whee!")
            error = property(_raise)
...
...
@@ -161,6 +167,7 @@ class SerializerTestCase(unittest.TestCase):
        # SHORT_BINBYTES opcode:
        self.assertTrue(b'C\x03o.o' in pickle)


class SerializerFunctestCase(unittest.TestCase):

    def setUp(self):
...
...
@@ -188,7 +195,9 @@ class SerializerFunctestCase(unittest.TestCase):
            # it can't import '_jythonlib' and the whole process fails
            # We would use multiprocessing here, but it doesn't exist on jython
            sys_path = [x for x in sys.path
                        if not x.endswith('Lib')
                        and x != '__classpath__'
                        and x != '__pyclasspath__/']
        else:
            sys_path = sys.path
        environ['PYTHONPATH'] = os.pathsep.join(sys_path)
...
...
@@ -198,6 +207,7 @@ class SerializerFunctestCase(unittest.TestCase):
                     '_functest_load(%s)' % repr(fqn)]
        subprocess.call(load_args, env=environ)


def _working_failing_datetimes():
    import datetime
    WORKING = datetime.datetime(5375, 12, 31, 23, 59, 59)
...
...
@@ -205,6 +215,7 @@ def _working_failing_datetimes():
    FAILING = datetime.datetime(5376, 12, 31, 23, 59, 59)
    return WORKING, FAILING


def _functest_prep(fqn):
    # Prepare the database with a BTree which won't deserialize
    # if the bug is present.
...
...
@@ -225,6 +236,7 @@ def _functest_prep(fqn):
    conn.close()
    db.close()


def _functest_load(fqn):
    # Open the database and attempt to deserialize the tree
    # (run in separate process)
...
...
@@ -241,6 +253,7 @@ def _functest_load(fqn):
    conn.close()
    db.close()


def test_suite():
    return unittest.TestSuite((
        unittest.makeSuite(SerializerTestCase),
...
...
src/ZODB/tests/testUtils.py
...
...
@@ -31,13 +31,14 @@ checker = renormalizing.RENormalizing([
    (re.compile("b('.*?')"), r"\1"),
    # Windows shows result from 'u64' as long?
    (re.compile(r"(\d+)L"), r"\1"),
])


class TestUtils(unittest.TestCase):

    small = [random.randrange(1, 1 << 32)
             for i in range(NUM)]
    large = [random.randrange(1 << 32, 1 << 64)
             for i in range(NUM)]
    all = small + large
...
...
@@ -51,14 +52,15 @@ class TestUtils(unittest.TestCase):

    def test_KnownConstants(self):
        self.assertEqual(b"\000\000\000\000\000\000\000\001", p64(1))
        self.assertEqual(b"\000\000\000\001\000\000\000\000", p64(1 << 32))
        self.assertEqual(u64(b"\000\000\000\000\000\000\000\001"), 1)
        self.assertEqual(U64(b"\000\000\000\000\000\000\000\001"), 1)
        self.assertEqual(u64(b"\000\000\000\001\000\000\000\000"), 1 << 32)
        self.assertEqual(U64(b"\000\000\000\001\000\000\000\000"), 1 << 32)

    def test_PersistentIdHandlesDescriptor(self):
        from ZODB.serialize import ObjectWriter

        class P(Persistent):
            pass
...
...
@@ -149,10 +151,10 @@ class TestUtils(unittest.TestCase):
        self.assertEqual(e.args[-1], b'123456789')


class ExampleClass(object):
    pass


def test_suite():
    suite = unittest.defaultTestLoader.loadTestsFromName(__name__)
    suite.addTest(
...
...
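The constants checked in test_KnownConstants pin down the big-endian packing contract of p64/u64, which the rest of ZODB relies on for oids and tids. For example:

from ZODB.utils import p64, u64

assert p64(1) == b'\x00\x00\x00\x00\x00\x00\x00\x01'  # big-endian, 8 bytes
assert u64(p64(1 << 32)) == 1 << 32                   # round-trips exactly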
src/ZODB/tests/testZODB.py
...
...
@@ -15,7 +15,6 @@ from persistent import Persistent
from persistent.mapping import PersistentMapping
from ZODB.POSException import TransactionFailedError
from BTrees.OOBTree import OOBTree
import transaction
import unittest
...
...
@@ -24,6 +23,7 @@ import ZODB.FileStorage
import ZODB.MappingStorage
import ZODB.tests.util


class P(Persistent):
    pass
...
...
@@ -83,7 +83,7 @@ class ZODBTests(ZODB.tests.util.TestCase):
                    transaction.abort()
                else:
                    transaction.commit()
        except:  # noqa: E722 do not use bare 'except'
            transaction.abort()
            raise
...
...
@@ -290,7 +290,7 @@ class ZODBTests(ZODB.tests.util.TestCase):
        # Arrange for commit to fail during tpc_vote.
        poisoned_jar = PoisonedJar(break_tpc_vote=True)
        PoisonedObject(poisoned_jar)
        transaction.get().join(poisoned_jar)
        self.assertRaises(PoisonedError, transaction.get().commit)
...
...
@@ -444,10 +444,13 @@ class ZODBTests(ZODB.tests.util.TestCase):
            transaction.abort()
            conn.close()


class PoisonedError(Exception):
    pass


# PoisonedJar arranges to raise PoisonedError from interesting places.
class PoisonedJar(object):
    def __init__(self, break_tpc_begin=False, break_tpc_vote=False,
                 break_savepoint=False):
...
...
@@ -483,10 +486,8 @@ class PoisonedObject(object):
    def __init__(self, poisonedjar):
        self._p_jar = poisonedjar


def test_suite():
    return unittest.TestSuite((
        unittest.makeSuite(ZODBTests, 'check'),
        ))


if __name__ == "__main__":
    unittest.main(defaultTest="test_suite")
src/ZODB/tests/test_TransactionMetaData.py
...
...
@@ -17,6 +17,7 @@ import warnings
from .._compat import dumps, loads
from ..Connection import TransactionMetaData


class TransactionMetaDataTests(unittest.TestCase):

    def test_basic(self):
...
...
@@ -118,8 +119,6 @@ class TransactionMetaDataTests(unittest.TestCase):
            t.data(data)
        self.assertEqual(c.exception.args, (data,))


def test_suite():
    return unittest.makeSuite(TransactionMetaDataTests)


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
src/ZODB/tests/test_cache.py
...
...
@@ -17,6 +17,7 @@ from ZODB.config import databaseFromString
import transaction
import doctest


class RecalcitrantObject(Persistent):
    """A Persistent object that will not become a ghost."""
...
...
@@ -30,6 +31,7 @@ class RecalcitrantObject(Persistent):
    init = classmethod(init)


class RegularObject(Persistent):

    deactivations = 0
...
...
@@ -49,9 +51,11 @@ class RegularObject(Persistent):
    init = classmethod(init)


class PersistentObject(Persistent):
    pass


class CacheTests(object):

    def test_cache(self):
...
...
@@ -208,6 +212,7 @@ class CacheTests(object):
        >>> RegularObject.deactivations
        4
        """

    def test_gc_on_open_connections(self):
        r"""Test that automatic GC is not applied to open connections.
...
...
src/ZODB/tests/test_doctest_files.py
...
...
@@ -15,7 +15,7 @@ import doctest
import unittest

__test__ = dict(
    cross_db_refs_to_blank_db_name="""
    There was a bug that caused bad refs to be generated if a database
    name was blank.
...
...
@@ -41,7 +41,7 @@ __test__ = dict(
    >>> db2.close()
    >>> db1.close()
    """,
)


def test_suite():
...
...
src/ZODB/tests/test_fsdump.py
...
...
@@ -65,7 +65,7 @@ Now we see two transactions and two changed objects.
Clean up.

>>> db.close()
"""  # noqa: E501 line too long

import re
import doctest
...
...
src/ZODB/tests/test_prefetch.py
...
...
@@ -5,12 +5,14 @@ import ZODB
from .MVCCMappingStorage import MVCCMappingStorage


class PrefetchTests(unittest.TestCase):

    def test_prefetch(self):
        db = ZODB.DB(None)

        fetched = []

        def prefetch(oids, tid):
            fetched.append((list(map(u64, oids)), tid))
...
...
src/ZODB/tests/test_storage.py
...
...
@@ -29,6 +29,7 @@ from ZODB.tests import StorageTestBase
from ZODB.tests import BasicStorage, MTStorage, Synchronization
from ZODB.tests import RevisionStorage


class Transaction(object):
    """Hold data for current transaction for MinimalMemoryStorage."""
...
...
@@ -42,6 +43,7 @@ class Transaction(object):
    def cur(self):
        return dict.fromkeys([oid for oid, tid in self.index.keys()], self.tid)


class MinimalMemoryStorage(BaseStorage, object):
    """Simple in-memory storage that supports revisions.
...
...
@@ -134,6 +136,7 @@ class MinimalMemoryStorage(BaseStorage, object):
    cleanup = close


class MinimalTestSuite(StorageTestBase.StorageTestBase,
                       BasicStorage.BasicStorage,
                       MTStorage.MTStorage,
...
...
@@ -150,5 +153,6 @@ class MinimalTestSuite(StorageTestBase.StorageTestBase,
    def checkLoadBeforeUndo(self):
        pass


def test_suite():
    return unittest.makeSuite(MinimalTestSuite, "check")
src/ZODB/tests/testblob.py
...
...
@@ -47,6 +47,7 @@ except NameError:

from . import util


def new_time():
    """Create a _new_ time stamp.
...
...
@@ -144,8 +145,8 @@ class BushyLayoutTests(ZODB.tests.util.TestCase):
        non_ascii_oid = b'>\xf1<0\xe9Q\x99\xf0'
        # The argument should already be bytes;
        # os.path.sep is native string type under both 2 and 3
        # binascii.hexlify takes bytes and produces bytes under both py2 and
        # py3; the result should be the native string type
        oid_as_path = BushyLayout().oid_to_path(non_ascii_oid)
        self.assertEqual(oid_as_path,
...
...
@@ -156,7 +157,7 @@ class BushyLayoutTests(ZODB.tests.util.TestCase):
        path_as_oid = BushyLayout().path_to_oid(oid_as_path)
        self.assertEqual(path_as_oid,
                         non_ascii_oid)


class BlobTestBase(ZODB.tests.StorageTestBase.StorageTestBase):
...
...
@@ -200,7 +201,6 @@ class BlobUndoTests(BlobTestBase):
            file.write(b'this is state 2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()
        with blob.open('r') as file:
...
...
@@ -320,9 +320,9 @@ class RecoveryBlobStorage(BlobTestBase,
        conn.root()[3] = ZODB.blob.Blob()
        with conn.root()[3].open('w') as file:
            file.write(
                (b''.join(struct.pack(">I", random.randint(0, (1 << 32) - 1))
                          for i in range(random.randint(10000, 20000)))
                 )[:-random.randint(1, 4)]
            )
        transaction.commit()
        conn.root()[2] = ZODB.blob.Blob()
...
...
@@ -359,6 +359,7 @@ def gc_blob_removes_uncommitted_data():
    False
    """


def commit_from_wrong_partition():
    """
    It should be possible to commit changes even when a blob is on a
...
...
@@ -379,7 +380,7 @@ def commit_from_wrong_partition():
    >>> logger.setLevel(logging.DEBUG)
    >>> logger.addHandler(handler)

    >>> blob_storage = create_storage()  # noqa: F821 undefined name
    >>> database = DB(blob_storage)
    >>> connection = database.open()
    >>> root = connection.root()
...
...
@@ -425,11 +426,10 @@ def packing_with_uncommitted_data_non_undoing():
    blob_directory and confused our packing strategy. We now use a separate
    temporary directory that is ignored while packing.

    >>> import transaction
    >>> from ZODB.serialize import referencesf

    >>> blob_storage = create_storage()  # noqa: F821 undefined name
    >>> database = DB(blob_storage)
    >>> connection = database.open()
    >>> root = connection.root()
...
...
@@ -446,6 +446,7 @@ def packing_with_uncommitted_data_non_undoing():
    >>> database.close()
    """


def packing_with_uncommitted_data_undoing():
    """
    This covers regression for bug #130459.
...
...
@@ -456,7 +457,7 @@ def packing_with_uncommitted_data_undoing():
    >>> from ZODB.serialize import referencesf

    >>> blob_storage = create_storage()  # noqa: F821 undefined name
    >>> database = DB(blob_storage)
    >>> connection = database.open()
    >>> root = connection.root()
...
...
@@ -473,9 +474,10 @@ def packing_with_uncommitted_data_undoing():
    >>> database.close()
    """


def test_blob_file_permissions():
    """
    >>> blob_storage = create_storage()  # noqa: F821 undefined name
    >>> conn = ZODB.connection(blob_storage)
    >>> conn.root.x = ZODB.blob.Blob(b'test')
    >>> conn.transaction_manager.commit()
...
...
@@ -498,6 +500,7 @@ def test_blob_file_permissions():
    >>> conn.close()
    """


def loadblob_tmpstore():
    """
    This is a test for assuring that the TmpStore's loadBlob implementation
...
...
@@ -505,7 +508,7 @@ def loadblob_tmpstore():
    First, let's setup a regular database and store a blob:

    >>> blob_storage = create_storage()  # noqa: F821 undefined name
    >>> database = DB(blob_storage)
    >>> connection = database.open()
    >>> root = connection.root()
...
...
@@ -537,11 +540,12 @@ def loadblob_tmpstore():
    >>> database.close()
    """


def is_blob_record():
    r"""
    >>> from ZODB.utils import load_current

    >>> bs = create_storage()  # noqa: F821 undefined name
    >>> db = DB(bs)
    >>> conn = db.open()
    >>> conn.root()['blob'] = ZODB.blob.Blob()
...
...
@@ -568,9 +572,10 @@ def is_blob_record():
    >>> db.close()
    """


def do_not_depend_on_cwd():
    """
    >>> bs = create_storage()  # noqa: F821 undefined name
    >>> here = os.getcwd()
    >>> os.mkdir('evil')
    >>> os.chdir('evil')
...
...
@@ -587,10 +592,11 @@ def do_not_depend_on_cwd():
    >>> db.close()
    """


def savepoint_isolation():
    """Make sure savepoint data is distinct across transactions

    >>> bs = create_storage()  # noqa: F821 undefined name
    >>> db = DB(bs)
    >>> conn = db.open()
    >>> conn.root.b = ZODB.blob.Blob(b'initial')
...
...
@@ -615,11 +621,12 @@ def savepoint_isolation():
    >>> db.close()
    """


def savepoint_commits_without_invalidations_out_of_order():
    """Make sure transactions with blobs can be committed without the
    invalidations out of order error (LP #509801)

    >>> bs = create_storage()  # noqa: F821 undefined name
    >>> db = DB(bs)
    >>> tm1 = transaction.TransactionManager()
    >>> conn1 = db.open(transaction_manager=tm1)
...
...
@@ -647,10 +654,11 @@ def savepoint_commits_without_invalidations_out_of_order():
    >>> db.close()
    """


def savepoint_cleanup():
    """Make sure savepoint data gets cleaned up.

    >>> bs = create_storage()  # noqa: F821 undefined name
    >>> tdir = bs.temporaryDirectory()
    >>> os.listdir(tdir)
    []
...
...
@@ -676,6 +684,7 @@ def savepoint_cleanup():
    >>> db.close()
    """


def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
    r"""
    >>> db = ZODB.DB('data.fs', blob_dir='blobs')
...
...
@@ -694,19 +703,24 @@ def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
    >>> db.close()
    """


def setUp(test):
    ZODB.tests.util.setUp(test)
    test.globs['rmtree'] = zope.testing.setupstack.rmtree


def timeIncreasesSetUp(test):
    setUp(test)
    layer = test.globs['time_layer'] = (
        ZODB.tests.util.MonotonicallyIncreasingTimeMinimalTestLayer(''))
    layer.testSetUp()


def timeIncreasesTearDown(test):
    test.globs['time_layer'].testTearDown()
    util.tearDown(test)


def setUpBlobAdaptedFileStorage(test):
    setUp(test)
...
...
@@ -717,6 +731,7 @@ def setUpBlobAdaptedFileStorage(test):
    test.globs['create_storage'] = create_storage


def storage_reusable_suite(prefix, factory,
                           test_blob_storage_recovery=False,
                           test_packing=False,
...
...
@@ -729,6 +744,7 @@ def storage_reusable_suite(prefix, factory,
    def setup(test):
        setUp(test)

        def create_storage(name='data', blob_dir=None):
            if blob_dir is None:
                blob_dir = '%s.bobs' % name
...
...
@@ -752,13 +768,15 @@ def storage_reusable_suite(prefix, factory,
              "POSKeyError: 'No blob file"),
            # Py3k repr's exceptions with dotted names
            (re.compile("^ZODB.interfaces.BlobError:"), "BlobError:"),
            (re.compile("^ZODB.POSException.ConflictError:"),
             "ConflictError:"),
            (re.compile("^ZODB.POSException.POSKeyError:"), "POSKeyError:"),
            (re.compile("^ZODB.POSException.Unsupported:"), "Unsupported:"),
            # Normalize out blobfile paths for sake of Windows
            (re.compile(
                r'([a-zA-Z]:)?\%(sep)s.*\%(sep)s(server-)'
                r'?blobs\%(sep)s.*\.blob' % dict(sep=os.path.sep)),
             '<BLOB STORAGE PATH>')
            ]),
        optionflags=doctest.ELLIPSIS,
        ))
...
...
@@ -769,7 +787,7 @@ def storage_reusable_suite(prefix, factory,
        ))
    suite.addTest(doctest.DocTestSuite(
        setUp=setup, tearDown=util.tearDown,
        checker=(
            ZODB.tests.util.checker +
            zope.testing.renormalizing.RENormalizing([
                (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
...
...
@@ -794,10 +812,12 @@ def storage_reusable_suite(prefix, factory,
    if test_undo:
        add_test_based_on_test_class(BlobUndoTests)

    suite.layer = ZODB.tests.util.MonotonicallyIncreasingTimeMinimalTestLayer(
        prefix+'BlobTests')

    return suite


def test_suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ZODBBlobConfigTest))
...
...
@@ -821,7 +841,7 @@ def test_suite():
        ))
    suite.addTest(doctest.DocFileSuite(
        "blob_layout.txt",
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
        setUp=setUp,
        tearDown=util.tearDown,
        checker=ZODB.tests.util.checker +
...
...
@@ -841,6 +861,3 @@ def test_suite():
        ))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
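Most of the doctests above revolve around the same blob workflow: open a blob-enabled database, store a Blob, and read it back through a committed transaction. Condensed into a plain script, roughly:

import transaction
import ZODB
import ZODB.blob

db = ZODB.DB('data.fs', blob_dir='blobs')  # blob-enabled FileStorage
conn = db.open()
conn.root()['blob'] = ZODB.blob.Blob()
with conn.root()['blob'].open('w') as f:   # write uncommitted blob data
    f.write(b'some bytes')
transaction.commit()
with conn.root()['blob'].open('r') as f:   # read it back after commit
    print(f.read())
db.close()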
src/ZODB/tests/testconflictresolution.py
...
...
@@ -24,12 +24,14 @@ import ZODB.tests.util
import ZODB.POSException
import zope.testing.module


def setUp(test):
    ZODB.tests.util.setUp(test)
    zope.testing.module.setUp(test, 'ConflictResolution_txt')
    ZODB.ConflictResolution._class_cache.clear()
    ZODB.ConflictResolution._unresolvable.clear()


def tearDown(test):
    zope.testing.module.tearDown(test)
    ZODB.tests.util.tearDown(test)
...
...
@@ -51,9 +53,11 @@ class ResolveableWhenStateDoesNotChange(persistent.Persistent):
            # 3-way merge
            raise ZODB.POSException.ConflictError


class Unresolvable(persistent.Persistent):
    pass


def succeed_with_resolution_when_state_is_unchanged():
    """
    If a conflicting change doesn't change the state, then we must still call
...
...
@@ -130,6 +134,7 @@ mechanism.
    >>> db.close()
    """


class Resolveable(persistent.Persistent):

    def _p_resolveConflict(self, old, committed, new):
...
...
@@ -171,6 +176,7 @@ class Resolveable(persistent.Persistent):

        return resolved


def resolve_even_when_referenced_classes_are_absent():
    """
...
...
@@ -385,7 +391,7 @@ Cleanup:

    >>> handler.uninstall()
    >>> db.close()
"""  # noqa: E501 line too long


def test_suite():
...
...
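The Resolveable class above is the template for application-level conflict resolution: a persistent class may define _p_resolveConflict(oldState, savedState, newState) and return the merged state. A minimal sketch of a counter that merges concurrent increments three-way against the common ancestor:

import persistent


class Counter(persistent.Persistent):

    def __init__(self):
        self.value = 0

    def inc(self):
        self.value += 1

    def _p_resolveConflict(self, oldState, savedState, newState):
        # The states are the __getstate__ dicts of the three revisions.
        # Apply both concurrent increments relative to the old state.
        resolved = dict(newState)
        resolved['value'] = (savedState['value'] + newState['value']
                             - oldState['value'])
        return resolved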
src/ZODB/tests/testcrossdatabasereferences.py
...
...
@@ -16,14 +16,17 @@ import persistent
import unittest
import ZODB.tests.util


class MyClass(persistent.Persistent):
    pass


class MyClass_w_getnewargs(persistent.Persistent):

    def __getnewargs__(self):
        return ()


def test_must_use_consistent_connections():
    """
...
...
@@ -34,7 +37,7 @@ work.
For example, it's tempting to open a second database using the
database open function, but this doesn't work:

    >>> import ZODB.tests.util, transaction
    >>> databases = {}
    >>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
    >>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
...
...
@@ -82,6 +85,7 @@ different connections to the same database.

"""


def test_connection_management_doesnt_get_caching_wrong():
    """
...
...
@@ -89,7 +93,7 @@ If a connection participates in a multidatabase, then its
connections must remain so that references between its cached
objects remain sane.

    >>> import ZODB.tests.util, transaction
    >>> databases = {}
    >>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
    >>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
...
...
@@ -125,10 +129,11 @@ if we get the same objects:
    >>> db2.close()
"""


def test_explicit_adding_with_savepoint():
    """
    >>> import ZODB.tests.util, transaction
    >>> databases = {}
    >>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
    >>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
...
...
@@ -150,10 +155,11 @@ def test_explicit_adding_with_savepoint():
"""


def test_explicit_adding_with_savepoint2():
    """
    >>> import ZODB.tests.util, transaction
    >>> databases = {}
    >>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
    >>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
...
...
@@ -176,10 +182,12 @@ def test_explicit_adding_with_savepoint2():
"""


def tearDownDbs(test):
    test.globs['db1'].close()
    test.globs['db2'].close()


def test_suite():
    return unittest.TestSuite((
        doctest.DocFileSuite(
...
...
@@ -196,7 +204,3 @@ def test_suite():
            ),
        doctest.DocTestSuite(checker=ZODB.tests.util.checker),
        ))


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
src/ZODB/tests/testdocumentation.py
...
...
@@ -22,19 +22,22 @@ import zope.testing.module
import ZODB


def setUp(test):
    test.globs.update(
        ZODB=ZODB,
        )
    zope.testing.module.setUp(test)


def tearDown(test):
    zope.testing.module.tearDown(test)


def test_suite():
    base, src = os.path.split(os.path.dirname(os.path.dirname(ZODB.__file__)))
    assert src == 'src', src
    base = join(base, 'docs')
    guide = join(base, 'guide')
    reference = join(base, 'reference')
...
...
@@ -51,6 +54,3 @@ def test_suite():
            setUp=setUp, tearDown=tearDown,
            ),
        ))


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
src/ZODB/tests/testfsIndex.py
...
...
@@ -26,6 +26,7 @@ except NameError:
    # Py3: No xrange.
    xrange = range


class Test(unittest.TestCase):

    def setUp(self):
...
...
@@ -55,16 +56,16 @@ class Test(unittest.TestCase):
    def testInserts(self):
        index = self.index

        for i in range(0, 200):
            self.assertEqual((i, index[p64(i * 1000)]),
                             (i, (i * 1000 + 1)))

        self.assertEqual(len(index), 200)

        key = p64(2000)
        self.assertEqual(index.get(key), 2001)

        key = p64(2001)
        self.assertEqual(index.get(key), None)
        self.assertEqual(index.get(key, ''), '')
...
...
@@ -72,20 +73,20 @@ class Test(unittest.TestCase):
    def testUpdate(self):
        index = self.index
        d = {}

        for i in range(200):
            d[p64(i * 1000)] = (i * 1000 + 1)

        index.update(d)

        for i in range(400, 600):
            d[p64(i * 1000)] = (i * 1000 + 1)

        index.update(d)

        for i in range(100, 500):
            d[p64(i * 1000)] = (i * 1000 + 2)

        index.update(d)
...
...
@@ -194,6 +195,7 @@ class Test(unittest.TestCase):
        self.assertEqual(index.minKey(b), c)
        self.assertRaises(ValueError, index.minKey, d)


def fsIndex_save_and_load():
    """
fsIndex objects now have save methods for saving them to disk in a new
...
...
@@ -232,6 +234,7 @@ If we save the data in the old format, we can still read it:
    """


def test_suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(Test))
...
...
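fsIndex is the oid-to-file-position mapping used by FileStorage, and the tests above drive it with p64-packed keys. In brief:

from ZODB.fsIndex import fsIndex
from ZODB.utils import p64

index = fsIndex()
index[p64(1000)] = 1001  # keys are 8-byte oids, values are file offsets
assert index.get(p64(1000)) == 1001
assert index.get(p64(1001)) is None  # missing keys default to None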
src/ZODB/tests/testhistoricalconnections.py
...
...
@@ -16,6 +16,7 @@ import manuel.footnote
import manuel.testing
import ZODB.tests.util


def test_suite():
    return manuel.testing.TestSuite(
        manuel.doctest.Manuel(checker=ZODB.tests.util.checker) +
...
...
src/ZODB/tests/testmvcc.py
...
...
@@ -428,7 +428,8 @@ checker = renormalizing.RENormalizing([
    # Python 3 adds module name to exceptions.
    (re.compile("ZODB.POSException.ConflictError"), r"ConflictError"),
    (re.compile("ZODB.POSException.ReadConflictError"), r"ReadConflictError"),
])


def test_suite():
    return doctest.DocTestSuite(checker=checker)
src/ZODB/tests/testpersistentclass.py
...
...
@@ -18,6 +18,7 @@ import unittest
import ZODB.persistentclass
import ZODB.tests.util


def class_with_circular_ref_to_self():
    """
It should be possible for a class to refer to itself.
...
...
@@ -38,6 +39,7 @@ It should be possible for a class to refer to itself.
"""


def test_new_ghost_w_persistent_class():
    """
Persistent meta classes work with PickleCache.new_ghost:
...
...
@@ -67,6 +69,8 @@ def test_new_ghost_w_persistent_class():
"""


# XXX need to update files to get newer testing package
class FakeModule(object):
    def __init__(self, name, dict):
        self.__dict__ = dict
...
...
@@ -79,11 +83,13 @@ def setUp(test):
    module = FakeModule('ZODB.persistentclass_txt', test.globs)
    sys.modules[module.__name__] = module


def tearDown(test):
    test.globs['some_database'].close()
    del sys.modules['ZODB.persistentclass_txt']
    ZODB.tests.util.tearDown(test)


def test_suite():
    return unittest.TestSuite((
        doctest.DocFileSuite(
...
...
@@ -92,6 +98,3 @@ def test_suite():
            checker=ZODB.tests.util.checker),
        doctest.DocTestSuite(setUp=setUp, tearDown=tearDown),
        ))


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
src/ZODB/tests/util.py
...
...
@@ -13,7 +13,8 @@
##############################################################################
"""Convenience function for creating test databases
"""
from ZODB.MappingStorage import DB  # noqa: F401 import unused (BBB)
import atexit
import doctest
...
...
@@ -45,7 +46,6 @@ from time import gmtime as _real_gmtime
_current_time = _real_time()

checker = renormalizing.RENormalizing([
    (re.compile("<(.*?) object at 0x[0-9a-f]*?>"),
     r"<\1 object at 0x000000000000>"),
...
...
@@ -80,7 +80,8 @@ checker = renormalizing.RENormalizing([
     r"Unsupported"),
    (re.compile("ZConfig.ConfigurationSyntaxError"),
     r"ConfigurationSyntaxError"),
])


def setUp(test, name='test'):
    clear_transaction_syncs()
...
...
@@ -94,10 +95,12 @@ def setUp(test, name='test'):
    os.chdir(d)
    zope.testing.setupstack.register(test, transaction.abort)


def tearDown(test):
    clear_transaction_syncs()
    zope.testing.setupstack.tearDown(test)


class TestCase(unittest.TestCase):

    def setUp(self):
...
...
@@ -110,9 +113,11 @@ class TestCase(unittest.TestCase):
    tearDown = tearDown


def pack(db):
    db.pack(time.time() + 1)


class P(persistent.Persistent):

    def __init__(self, name=None):
...
...
@@ -121,10 +126,12 @@ class P(persistent.Persistent):
    def __repr__(self):
        return 'P(%s)' % self.name


class MininalTestLayer(object):

    __bases__ = ()
    __module__ = ''

    def __init__(self, name):
        self.__name__ = name
...
...
@@ -142,10 +149,12 @@ class MininalTestLayer(object):
    testSetUp = testTearDown = lambda self: None


def clean(tmp):
    if os.path.isdir(tmp):
        zope.testing.setupstack.rmtree(tmp)


class AAAA_Test_Runner_Hack(unittest.TestCase):
    """Hack to work around a bug in the test runner.
...
...
@@ -157,6 +166,7 @@ class AAAA_Test_Runner_Hack(unittest.TestCase):
    def testNothing(self):
        pass


def assert_warning(category, func, warning_text=''):
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('default')
...
...
@@ -167,9 +177,11 @@ def assert_warning(category, func, warning_text=''):
            return result
    raise AssertionError(w)


def assert_deprecated(func, warning_text=''):
    return assert_warning(DeprecationWarning, func, warning_text)


def wait(func=None, timeout=30):
    if func is None:
        return lambda f: wait(f, timeout)
...
...
@@ -179,6 +191,7 @@ def wait(func=None, timeout=30):
        time.sleep(.01)
    raise AssertionError


def store(storage, oid, value='x', serial=ZODB.utils.z64):
    if not isinstance(oid, bytes):
        oid = ZODB.utils.p64(oid)
...
...
@@ -190,8 +203,10 @@ def store(storage, oid, value='x', serial=ZODB.utils.z64):
    storage.tpc_vote(t)
    storage.tpc_finish(t)


def mess_with_time(test=None, globs=None, now=1278864701.5):
    now = [now]

    def faux_time():
        now[0] += 1
        return now[0]
...
...
@@ -204,11 +219,12 @@ def mess_with_time(test=None, globs=None, now=1278864701.5):
        import time
        zope.testing.setupstack.register(
            test, setattr, time, 'time', time.time)

    if isinstance(time, type):
        time.time = staticmethod(faux_time)  # jython
    else:
        time.time = faux_time


def clear_transaction_syncs():
    """Clear data managers registered with the global transaction manager
...
...
@@ -238,7 +254,8 @@ class _TimeWrapper(object):
        def incr():
            global _current_time  # pylint:disable=global-statement
            with self._lock:
                _current_time = max(
                    _real_time(), _current_time + self._granularity)
            return _current_time
        self.fake_time.side_effect = incr
...
...
@@ -346,7 +363,8 @@ class MonotonicallyIncreasingTimeMinimalTestLayer(MininalTestLayer):

def with_high_concurrency(f):
    """
    with_high_concurrency decorates f to run with high frequency of thread
    context switches.

    It is useful for tests that try to probabilistically reproduce race
    condition scenarios.
...
...
@@ -357,19 +375,23 @@ def with_high_concurrency(f):
        # Python3, by default, switches every 5ms, which turns threads in
        # intended "high concurrency" scenarios to execute almost serially.
        # Raise the frequency of context switches in order to increase the
        # probability to reproduce interesting/tricky overlapping of
        # threads.
        #
        # See https://github.com/zopefoundation/ZODB/pull/345#issuecomment-822188305 and  # noqa: E501 line too long
        # https://github.com/zopefoundation/ZEO/issues/168#issuecomment-821829116 for details.  # noqa: E501 line too long
        _ = sys.getswitchinterval()

        def restore():
            sys.setswitchinterval(_)

        # ~ 100 simple instructions on modern hardware
        sys.setswitchinterval(5e-6)
    else:
        # Python2, by default, switches threads every "100 instructions".
        # Just make sure we run f with that default.
        _ = sys.getcheckinterval()

        def restore():
            sys.setcheckinterval(_)

        sys.setcheckinterval(100)
...
...
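with_high_concurrency shrinks the interpreter's thread switch interval so races become easier to hit. Stripped of the Python 2 branch, the core of such a decorator is just a save/override/restore around the call (a sketch, Python 3 only):

import functools
import sys


def with_high_concurrency(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        old = sys.getswitchinterval()
        sys.setswitchinterval(5e-6)  # switch after only microseconds of work
        try:
            return f(*args, **kwargs)
        finally:
            sys.setswitchinterval(old)  # always restore the previous value
    return wrapper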
src/ZODB/tests/warnhook.py
...
...
@@ -13,6 +13,7 @@
##############################################################################
import warnings


class WarningsHook(object):
    """Hook to capture warnings generated by Python.
...
...
src/ZODB/transact.py
...
...
@@ -16,12 +16,14 @@
from ZODB.POSException import ReadConflictError, ConflictError
import transaction


def _commit(note):
    t = transaction.get()
    if note:
        t.note(note)
    t.commit()


def transact(f, note=None, retries=5):
    """Returns transactional version of function argument f.
...
...
@@ -42,7 +44,7 @@ def transact(f, note=None, retries=5):
            n -= 1
            try:
                r = f(*args, **kwargs)
            except ReadConflictError:
                # the only way ReadConflictError can happen here is due to
                # simultaneous pack removing objects revision that f could try
                # to load.
...
...
@@ -52,7 +54,7 @@ def transact(f, note=None, retries=5):
                continue
            try:
                _commit(note)
            except ConflictError:
                transaction.abort()
                if not n:
                    raise
...
...
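transact wraps a function so each call runs in its own transaction and is retried on conflicts. A sketch of typical use:

from ZODB.transact import transact


def add_item(root, key, value):
    root[key] = value


# Each call commits its own transaction, retrying up to 5 times on
# ConflictError before the error propagates.
transactional_add = transact(add_item, note='add item', retries=5)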
src/ZODB/utils.py
...
...
@@ -96,6 +96,7 @@ def p64(v):
    except struct.error as e:
        raise ValueError(*(e.args + (v,)))


def u64(v):
    """Unpack an 8-byte string into a 64-bit long integer."""
    try:
...
...
@@ -103,6 +104,7 @@ def u64(v):
    except struct.error as e:
        raise ValueError(*(e.args + (v,)))


U64 = u64
...
...
@@ -121,7 +123,7 @@ def cp(f1, f2, length=None, bufsize=64 * 1024):
    if length is None:
        old_pos = f1.tell()
        f1.seek(0, 2)
        length = f1.tell()
        f1.seek(old_pos)
...
...
@@ -134,9 +136,10 @@ def cp(f1, f2, length=None, bufsize=64 * 1024):
        write(data)
        length -= len(data)


def newTid(old):
    t = time.time()
    ts = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
    if old is not None:
        ts = ts.laterThan(TimeStamp(old))
    return ts.raw()
...
...
@@ -155,6 +158,7 @@ def oid_repr(oid):
    else:
        return repr(oid)


def repr_to_oid(repr):
    repr = ascii_bytes(repr)
    if repr.startswith(b"0x"):
...
...
@@ -163,12 +167,15 @@ def repr_to_oid(repr):
    as_bin = b"\x00" * (8 - len(as_bin)) + as_bin
    return as_bin


serial_repr = oid_repr
tid_repr = serial_repr


# For example, produce
#     '0x03441422948b4399 2002-04-14 20:50:34.815000'
# for 8-byte string tid b'\x03D\x14"\x94\x8bC\x99'.
def readable_tid_repr(tid):
    result = tid_repr(tid)
    if isinstance(tid, bytes) and len(tid) == 8:
...
...
@@ -184,7 +191,10 @@ def readable_tid_repr(tid):
# a negative address gives a positive int with the same hex representation as
# the significant bits in the original.
_ADDRESS_MASK = 256 ** struct.calcsize('P')


def positive_id(obj):
    """Return id(obj) as a non-negative integer."""
...
...
@@ -201,6 +211,7 @@ def positive_id(obj):
# docs to be at least as smart. The code here doesn't appear to make sense
# for what serialize.py calls formats 5 and 6.


def get_pickle_metadata(data):
    # Returns a 2-tuple of strings.
...
...
@@ -233,7 +244,7 @@ def get_pickle_metadata(data):
    u = Unpickler(f)
    try:
        class_info = u.load()
    except Exception:
        return '', ''
    if isinstance(class_info, tuple):
        if isinstance(class_info[0], tuple):
...
...
@@ -248,18 +259,21 @@ def get_pickle_metadata(data):
            classname = ''
    return modname, classname


def mktemp(dir=None, prefix='tmp'):
    """Create a temp file, known by name, in a semi-secure manner."""
    handle, filename = mkstemp(dir=dir, prefix=prefix)
    os.close(handle)
    return filename


def check_precondition(precondition):
    if not precondition():
        raise AssertionError(
            "Failed precondition: ", precondition.__doc__.strip())


class Locked(object):

    def __init__(self, func, inst=None, class_=None, preconditions=()):
...
...
@@ -287,6 +301,7 @@ class Locked(object):
        return func(*args, **kw)


class locked(object):

    def __init__(self, *preconditions):
...
...
@@ -363,10 +378,11 @@ if os.environ.get('DEBUG_LOCKING'):  # pragma: no cover
else:
    from threading import Condition, Lock, RLock  # noqa: F401 import unused

import ZODB.POSException  # noqa: E402 module level import not at top of file


def load_current(storage, oid, version=''):
    """Load the most recent revision of an object by calling loadBefore
...
...
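oid_repr and readable_tid_repr exist purely for diagnostics: they render 8-byte oids and tids as compact hex, and for tids append the timestamp encoded in the id (see the comment above). For instance, as implemented here:

from ZODB.utils import oid_repr, p64

print(oid_repr(p64(1)))   # -> '0x01' (leading zero bytes stripped)
print(oid_repr(b'junk'))  # non-8-byte values fall back to repr()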
src/ZODB/valuedoc.py

"""Work around an issue with defining class attribute documentation.

See http://stackoverflow.com/questions/9153473/sphinx-values-for-attributes-reported-as-none/39276413
"""  # noqa: E501 line too long


class ValueDoc(object):
...
...
@@ -10,4 +11,3 @@ class ValueDoc(object):
    def __repr__(self):
        return self.text